repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---
stringlengths 5-100 | stringlengths 4-299 | stringclasses 990 values | stringlengths 4-7 | stringlengths 666-1.03M | stringclasses 15 values | int64 -9,223,351,895,964,839,000 to 9,223,297,778B | float64 3.17-100 | int64 7-1k | float64 0.25-0.98 | bool 1 class
GiladE/birde | venv/lib/python2.7/site-packages/gunicorn/http/wsgi.py | 26 | 13493 | # -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
import io
import logging
import os
import re
import sys
from gunicorn._compat import unquote_to_wsgi_str
from gunicorn.six import string_types, binary_type, reraise
from gunicorn import SERVER_SOFTWARE
import gunicorn.six as six
import gunicorn.util as util
try:
# Python 3.3 has os.sendfile().
from os import sendfile
except ImportError:
try:
from ._sendfile import sendfile
except ImportError:
sendfile = None
NORMALIZE_SPACE = re.compile(r'(?:\r\n)?[ \t]+')
log = logging.getLogger(__name__)
class FileWrapper(object):
def __init__(self, filelike, blksize=8192):
self.filelike = filelike
self.blksize = blksize
if hasattr(filelike, 'close'):
self.close = filelike.close
def __getitem__(self, key):
data = self.filelike.read(self.blksize)
if data:
return data
raise IndexError
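# [Editorial note: illustrative sketch, not part of the original gunicorn source.]
# FileWrapper relies on Python's legacy sequence-iteration protocol: a for-loop
# keeps calling __getitem__ with increasing keys until IndexError is raised, so
# the wrapper streams a file-like object in blksize chunks without an __iter__.
def _demo_file_wrapper():
    import io
    wrapper = FileWrapper(io.BytesIO(b"x" * 20000), blksize=8192)
    # iteration yields an 8192-, an 8192- and a 3616-byte chunk, then stops
    return [len(chunk) for chunk in wrapper]   # -> [8192, 8192, 3616]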
class WSGIErrorsWrapper(io.RawIOBase):
def __init__(self, cfg):
errorlog = logging.getLogger("gunicorn.error")
handlers = errorlog.handlers
self.streams = []
if cfg.errorlog == "-":
self.streams.append(sys.stderr)
handlers = handlers[1:]
for h in handlers:
if hasattr(h, "stream"):
self.streams.append(h.stream)
def write(self, data):
for stream in self.streams:
try:
stream.write(data)
except UnicodeError:
stream.write(data.encode("UTF-8"))
stream.flush()
def base_environ(cfg):
return {
"wsgi.errors": WSGIErrorsWrapper(cfg),
"wsgi.version": (1, 0),
"wsgi.multithread": False,
"wsgi.multiprocess": (cfg.workers > 1),
"wsgi.run_once": False,
"wsgi.file_wrapper": FileWrapper,
"SERVER_SOFTWARE": SERVER_SOFTWARE,
}
def default_environ(req, sock, cfg):
env = base_environ(cfg)
env.update({
"wsgi.input": req.body,
"gunicorn.socket": sock,
"REQUEST_METHOD": req.method,
"QUERY_STRING": req.query,
"RAW_URI": req.uri,
"SERVER_PROTOCOL": "HTTP/%s" % ".".join([str(v) for v in req.version])
})
return env
def proxy_environ(req):
info = req.proxy_protocol_info
if not info:
return {}
return {
"PROXY_PROTOCOL": info["proxy_protocol"],
"REMOTE_ADDR": info["client_addr"],
"REMOTE_PORT": str(info["client_port"]),
"PROXY_ADDR": info["proxy_addr"],
"PROXY_PORT": str(info["proxy_port"]),
}
def create(req, sock, client, server, cfg):
resp = Response(req, sock, cfg)
# set initial environ
environ = default_environ(req, sock, cfg)
# default variables
host = None
url_scheme = "https" if cfg.is_ssl else "http"
script_name = os.environ.get("SCRIPT_NAME", "")
# set secure_headers
secure_headers = cfg.secure_scheme_headers
if client and not isinstance(client, string_types):
if ('*' not in cfg.forwarded_allow_ips
and client[0] not in cfg.forwarded_allow_ips):
secure_headers = {}
# add the headers to the environ
for hdr_name, hdr_value in req.headers:
if hdr_name == "EXPECT":
# handle expect
if hdr_value.lower() == "100-continue":
sock.send(b"HTTP/1.1 100 Continue\r\n\r\n")
elif secure_headers and (hdr_name in secure_headers and
hdr_value == secure_headers[hdr_name]):
url_scheme = "https"
elif hdr_name == 'HOST':
host = hdr_value
elif hdr_name == "SCRIPT_NAME":
script_name = hdr_value
elif hdr_name == "CONTENT-TYPE":
environ['CONTENT_TYPE'] = hdr_value
continue
elif hdr_name == "CONTENT-LENGTH":
environ['CONTENT_LENGTH'] = hdr_value
continue
key = 'HTTP_' + hdr_name.replace('-', '_')
if key in environ:
hdr_value = "%s,%s" % (environ[key], hdr_value)
environ[key] = hdr_value
# set the url scheme
environ['wsgi.url_scheme'] = url_scheme
# set the REMOTE_* keys in environ
# authors should be aware that REMOTE_HOST and REMOTE_ADDR
# may not qualify the remote addr:
# http://www.ietf.org/rfc/rfc3875
if isinstance(client, string_types):
environ['REMOTE_ADDR'] = client
elif isinstance(client, binary_type):
environ['REMOTE_ADDR'] = str(client)
else:
environ['REMOTE_ADDR'] = client[0]
environ['REMOTE_PORT'] = str(client[1])
# handle the SERVER_*
# Normally only the application should use the Host header but since the
# WSGI spec doesn't support unix sockets, we are using it to create
# viable SERVER_* if possible.
if isinstance(server, string_types):
server = server.split(":")
if len(server) == 1:
# unix socket
if host and host is not None:
server = host.split(':')
if len(server) == 1:
if url_scheme == "http":
server.append(80)
elif url_scheme == "https":
server.append(443)
else:
server.append('')
else:
# no host header given which means that we are not behind a
# proxy, so append an empty port.
server.append('')
environ['SERVER_NAME'] = server[0]
environ['SERVER_PORT'] = str(server[1])
# set the path and script name
path_info = req.path
if script_name:
path_info = path_info.split(script_name, 1)[1]
environ['PATH_INFO'] = unquote_to_wsgi_str(path_info)
environ['SCRIPT_NAME'] = script_name
# override the environ with the correct remote and server address if
# we are behind a proxy using the proxy protocol.
environ.update(proxy_environ(req))
return resp, environ
class Response(object):
def __init__(self, req, sock, cfg):
self.req = req
self.sock = sock
self.version = SERVER_SOFTWARE
self.status = None
self.chunked = False
self.must_close = False
self.headers = []
self.headers_sent = False
self.response_length = None
self.sent = 0
self.upgrade = False
self.cfg = cfg
def force_close(self):
self.must_close = True
def should_close(self):
if self.must_close or self.req.should_close():
return True
if self.response_length is not None or self.chunked:
return False
if self.status_code < 200 or self.status_code in (204, 304):
return False
return True
def start_response(self, status, headers, exc_info=None):
if exc_info:
try:
if self.status and self.headers_sent:
reraise(exc_info[0], exc_info[1], exc_info[2])
finally:
exc_info = None
elif self.status is not None:
raise AssertionError("Response headers already set!")
self.status = status
# get the status code from the response here so we can use it to check
# the need for the connection header later without parsing the string
# each time.
try:
self.status_code = int(self.status.split()[0])
except ValueError:
self.status_code = None
self.process_headers(headers)
self.chunked = self.is_chunked()
return self.write
def process_headers(self, headers):
for name, value in headers:
if not isinstance(name, string_types):
raise TypeError('%r is not a string' % name)
value = str(value).strip()
lname = name.lower().strip()
if lname == "content-length":
self.response_length = int(value)
elif util.is_hoppish(name):
if lname == "connection":
# handle websocket
if value.lower().strip() == "upgrade":
self.upgrade = True
elif lname == "upgrade":
if value.lower().strip() == "websocket":
self.headers.append((name.strip(), value))
# ignore hopbyhop headers
continue
self.headers.append((name.strip(), value))
def is_chunked(self):
# Only use chunked responses when the client is
# speaking HTTP/1.1 or newer and there was
# no Content-Length header set.
if self.response_length is not None:
return False
elif self.req.version <= (1, 0):
return False
elif self.status_code in (204, 304):
# Do not use chunked responses when the response is guaranteed to
# not have a response body.
return False
return True
def default_headers(self):
# set the connection header
if self.upgrade:
connection = "upgrade"
elif self.should_close():
connection = "close"
else:
connection = "keep-alive"
headers = [
"HTTP/%s.%s %s\r\n" % (self.req.version[0],
self.req.version[1], self.status),
"Server: %s\r\n" % self.version,
"Date: %s\r\n" % util.http_date(),
"Connection: %s\r\n" % connection
]
if self.chunked:
headers.append("Transfer-Encoding: chunked\r\n")
return headers
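# [Editorial note: illustrative comment, not part of the original gunicorn source.]
# For an HTTP/1.1 request answered with "200 OK", no Content-Length header and a
# keep-alive connection, the list built above looks roughly like:
#   ['HTTP/1.1 200 OK\r\n', 'Server: gunicorn/<version>\r\n',
#    'Date: <http-date>\r\n', 'Connection: keep-alive\r\n',
#    'Transfer-Encoding: chunked\r\n']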
def send_headers(self):
if self.headers_sent:
return
tosend = self.default_headers()
tosend.extend(["%s: %s\r\n" % (k, v) for k, v in self.headers])
header_str = "%s\r\n" % "".join(tosend)
util.write(self.sock, util.to_bytestring(header_str))
self.headers_sent = True
def write(self, arg):
self.send_headers()
if not isinstance(arg, binary_type):
raise TypeError('%r is not a bytestring' % arg)
arglen = len(arg)
tosend = arglen
if self.response_length is not None:
if self.sent >= self.response_length:
# Never write more than self.response_length bytes
return
tosend = min(self.response_length - self.sent, tosend)
if tosend < arglen:
arg = arg[:tosend]
# Sending an empty chunk signals the end of the
# response and prematurely closes the response
if self.chunked and tosend == 0:
return
self.sent += tosend
util.write(self.sock, arg, self.chunked)
def can_sendfile(self):
return (self.cfg.sendfile and (sendfile is not None))
def sendfile_all(self, fileno, sockno, offset, nbytes):
# Send file in at most 1GB blocks as some operating
# systems can have problems with sending files in blocks
# over 2GB.
BLKSIZE = 0x3FFFFFFF
if nbytes > BLKSIZE:
for m in range(0, nbytes, BLKSIZE):
self.sendfile_all(fileno, sockno, offset, min(nbytes, BLKSIZE))
offset += BLKSIZE
nbytes -= BLKSIZE
else:
sent = 0
sent += sendfile(sockno, fileno, offset + sent, nbytes - sent)
while sent != nbytes:
sent += sendfile(sockno, fileno, offset + sent, nbytes - sent)
def sendfile_use_send(self, fileno, fo_offset, nbytes):
# send file in blocks of 8192 bytes
BLKSIZE = 8192
sent = 0
while sent != nbytes:
data = os.read(fileno, BLKSIZE)
if not data:
break
sent += len(data)
if sent > nbytes:
data = data[:nbytes - sent]
util.write(self.sock, data, self.chunked)
def write_file(self, respiter):
if self.can_sendfile() and util.is_fileobject(respiter.filelike):
# sometimes the fileno isn't a callable
if six.callable(respiter.filelike.fileno):
fileno = respiter.filelike.fileno()
else:
fileno = respiter.filelike.fileno
fd_offset = os.lseek(fileno, 0, os.SEEK_CUR)
fo_offset = respiter.filelike.tell()
nbytes = max(os.fstat(fileno).st_size - fo_offset, 0)
if self.response_length:
nbytes = min(nbytes, self.response_length)
if nbytes == 0:
return
self.send_headers()
if self.cfg.is_ssl:
self.sendfile_use_send(fileno, fo_offset, nbytes)
else:
if self.is_chunked():
chunk_size = "%X\r\n" % nbytes
self.sock.sendall(chunk_size.encode('utf-8'))
self.sendfile_all(fileno, self.sock.fileno(), fo_offset, nbytes)
if self.is_chunked():
self.sock.sendall(b"\r\n")
os.lseek(fileno, fd_offset, os.SEEK_SET)
else:
for item in respiter:
self.write(item)
def close(self):
if not self.headers_sent:
self.send_headers()
if self.chunked:
util.write_chunk(self.sock, b"")
| mit | -7,591,034,154,781,787,000 | 30.748235 | 80 | 0.55362 | false |
quang-ha/lammps | tools/i-pi/ipi/utils/io/io_xml.py | 33 | 15954 | """Contains the functions used to read the input file and print the checkpoint
files with xml formatting.
Copyright (C) 2013, Joshua More and Michele Ceriotti
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Functions:
xml_node: Class to handle a particular xml tag.
xml_handler: Class giving general xml data reading methods.
xml_parse_string: Parses a string made from a section of a xml input file.
xml_parse_file: Parses an entire xml input file.
read_type: Reads a string and outputs data of a specified type.
read_float: Reads a string and outputs a float.
read_int: Reads a string and outputs an integer.
read_bool: Reads a string and outputs a boolean.
read_list: Reads a string and outputs a list.
read_array: Reads a string and outputs an array.
read_tuple: Reads a string and outputs a tuple.
read_dict: Reads a string and outputs a dictionary.
write_type: Writes a string from data of a specified type.
write_list: Writes a string from a list.
write_tuple: Writes a string from a tuple.
write_float: Writes a string from a float.
write_bool: Writes a string from a boolean.
write_dict: Writes a string from a dictionary.
"""
__all__ = ['xml_node', 'xml_handler', 'xml_parse_string', 'xml_parse_file',
'read_type', 'read_float', 'read_int', 'read_bool', 'read_list',
'read_array', 'read_tuple', 'read_dict', 'write_type', 'write_list',
'write_tuple', 'write_float', 'write_bool', 'write_dict']
from xml.sax import parseString, parse
from xml.sax.handler import ContentHandler
import numpy as np
import string
class xml_node(object):
"""Class to handle a particular xml tag.
Tags are generally written in the form
<tag_name attribs="attrib_data"> main_data </tag_name>. This class holds
tag_name, attrib_data and main_data separately so they can be used to
create the objects with the appropriate names and data.
Attributes:
attribs: The attribute data for the tag.
fields: The rest of the data.
name: The tag name.
"""
def __init__(self, attribs=None, name="", fields=None):
"""Initialises xml_node.
Args:
attribs: An optional dictionary giving attribute data. Defaults to {}.
fields: An optional dictionary holding all the data between the start
and end tags, including information about other nodes.
Defaults to {}.
name: An optional string giving the tag name. Defaults to ''.
"""
if attribs is None:
attribs = {}
if fields is None:
fields = []
self.attribs = attribs
self.name = name
self.fields = fields
class xml_handler(ContentHandler):
"""Class giving general xml_reading methods.
Uses the standard python xml_reader to read the different kinds of data.
Keeps track of the hierarchical nature of an xml file by recording the level
of nesting, so that the correct data and attributes can be associated with
the correct tag name.
Attributes:
root: An xml_node object for the root node.
open: The list of the tags that the parser is currently between the start
and end tags of.
level: The level of nesting that the parser is currently at.
buffer: A list of the data found between the tags at the different levels
of nesting.
"""
def __init__(self):
"""Initialises xml_handler."""
#root xml node with all the data
self.root = xml_node(name="root", fields=[])
self.open = [self.root]
#current level of the hierarchy
self.level = 0
#Holds all the data between each of the tags.
#If level = 1, then buffer[0] holds all the data collected between the
#root tags, and buffer[1] holds all the data collected between the
#first child tag.
self.buffer = [[""]]
def startElement(self, name, attrs):
"""Reads an opening tag.
Adds the opening tag to the list of open tags, adds a new space in the
buffer, reads the appropriate attributes and adds a new level to the
hierarchy.
Args:
name: The tag_name.
attrs: The attribute data.
"""
#creates a new node
newnode = xml_node(attribs=dict((k,attrs[k]) for k in attrs.keys()), name=name, fields=[])
#adds it to the list of open nodes
self.open.append(newnode)
#adds it to the list of fields of the parent tag
self.open[self.level].fields.append((name,newnode))
#gets ready to read new data
self.buffer.append([""])
self.level += 1
def characters(self, data):
"""Reads data.
Adds the data to the buffer of the current level of the hierarchy.
Data is read as a string, and needs to be converted to the required
type later.
Args:
data: The data to be read.
"""
self.buffer[self.level].append(data)
def endElement(self, name):
"""Reads a closing tag.
Once all the data has been read, and the closing tag found, the buffer
is read into the appropriate field.
Args:
name: The tag_name.
"""
#all the text found between the tags stored in the appropriate xml_node
#object
self.buffer[self.level] = ''.join(self.buffer[self.level])
self.open[self.level].fields.append(("_text" , self.buffer[self.level]))
#'closes' the xml_node object, as we are no longer within its tags, so
#there is no more data to be added to it.
#Note that the xml_node is still held within the parent tag, so we
#no longer require this xml node object.
self.buffer.pop(self.level)
self.open.pop(self.level)
self.level -= 1
def xml_parse_string(buf):
"""Parses a string made from a section of a xml input file.
Args:
buf: A string in correct xml format.
Returns:
A xml_node for the root node of the file.
"""
myhandle = xml_handler()
parseString(buf, myhandle)
return myhandle.root
def xml_parse_file(stream):
"""Parses an entire xml input file.
Args:
stream: A string describing a xml formatted file.
Returns:
A xml_node for the root node of the file.
"""
myhandle = xml_handler()
parse(stream, myhandle)
return myhandle.root
def read_type(type, data):
"""Reads a string and outputs data of a specified type.
Args:
type: The data type of the target container.
data: The string to be read in.
Raises:
TypeError: Raised if it tries to read into a data type that has not been
implemented.
Returns:
An object of type type.
"""
if not type in readtype_funcs:
raise TypeError("Conversion not available for given type")
return type(readtype_funcs[type](data))
def read_float(data):
"""Reads a string and outputs a float.
Args:
data: The string to be read in.
Raises:
ValueError: Raised if the input data is not of the correct format.
Returns:
A float.
"""
return float(data)
def read_int(data):
"""Reads a string and outputs a integer.
Args:
data: The string to be read in.
Raises:
ValueError: Raised if the input data is not of the correct format.
Returns:
An integer.
"""
return int(data)
def read_bool(data):
"""Reads a string and outputs a boolean.
Takes a string of the form 'true' or 'false', and returns the appropriate
boolean.
Args:
data: The string to be read in.
Raises:
ValueError: Raised if the string is not 'true' or 'false'.
Returns:
A boolean.
"""
if data.strip().upper() == "TRUE":
return True
elif data.strip().upper() == "FALSE":
return False
else:
raise ValueError(data + " does not represent a bool value")
def read_list(data, delims="[]", split=",", strip=" \n\t'"):
"""Reads a formatted string and outputs a list.
The string must be formatted in the correct way.
The start character must be delimiters[0], the end character
must be delimiters[1] and each element must be split along
the character split. Characters at the beginning or
end of each element in strip are ignored. The standard list format is of the
form '[array[0], array[1],..., array[n]]', which is used for actual lists.
Other formats are used for tuples and dictionaries.
Args:
data: The string to be read in.
delims: A string of two characters giving the first and last character of
the list format. '[]' by default.
split: The character between different elements of the list format. ',' by default.
strip: Characters to be removed from the beginning and end of each
element. " \n\t'" by default.
Raises:
ValueError: Raised if the input data is not of the correct format.
Returns:
A list of strings.
"""
try:
begin = data.index(delims[0])
end = data.index(delims[1])
except ValueError:
raise ValueError("Error in list syntax: could not locate delimiters")
rlist = data[begin+1:end].split(split)
for i in range(len(rlist)):
rlist[i] = rlist[i].strip(strip)
# handles empty lists correctly
if len(rlist) == 1 and rlist[0] == "":
rlist = []
return rlist
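# [Editorial note: illustrative comment, not part of the original i-PI source.]
# read_list only splits and strips; the elements stay strings until read_type,
# read_array or read_tuple converts them, e.g.:
#   read_list("[1, 2, 3]")      -> ['1', '2', '3']
#   read_list("(4, 5)", "()")   -> ['4', '5']
#   read_list("[]")             -> []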
def read_array(dtype, data):
"""Reads a formatted string and outputs an array.
The format is as for standard python arrays, which is
[array[0], array[1], ... , array[n]]. Note the use of comma separators, and
the use of square brackets.
Args:
data: The string to be read in.
dtype: The data type of the elements of the target array.
Raises:
ValueError: Raised if the input data is not of the correct format.
Returns:
An array of data type dtype.
"""
rlist = read_list(data)
for i in range(len(rlist)):
rlist[i] = read_type(dtype,rlist[i])
return np.array(rlist, dtype)
def read_tuple(data, delims="()", split=",", strip=" \n\t'", arg_type=int):
"""Reads a formatted string and outputs a tuple.
The format is as for standard python tuples, which is
(tuple[0], tuple[1], ... , tuple[n]). Note the comma
separators, and the use of brackets.
Args:
data: The string to be read in.
delims: A string of two characters giving the first and last character of
the list format. '()' by default.
split: The character between different elements of the list format.
strip: Characters to be removed from the beginning and end of each
element. ' \n\t' by default.
arg_type: The strings in the input will be converted, and a tuple
of ar_type will be returned.
Raises:
ValueError: Raised if the input data is not of the correct format.
Returns:
A tuple of elements of the specified data type.
"""
rlist = read_list(data, delims=delims, split=split, strip=strip)
return tuple([arg_type(i) for i in rlist])
def read_dict(data, delims="{}", split=",", key_split=":", strip=" \n\t"):
"""Reads a formatted string and outputs a dictionary.
The format is as for standard python dictionaries, which is
{keyword[0]: arg[0], keyword[1]: arg[1], ... , keyword[n]: arg[n]}. Note the
comma separators, and the use of curly brackets.
Args:
data: The string to be read in.
delims: A string of two characters giving the first and last character of
the list format. '{}' by default.
split: The character between different elements of the list format.
key_split: The character between the key word and the value.
strip: Characters to be removed from the beginning and end of each
element. ' \n\t' by default.
Raises:
ValueError: Raised if the input data is not of the correct format.
Returns:
A dictionary of strings.
"""
rlist = read_list(data, delims=delims, split=split, strip=strip)
def mystrip(data):
return data.strip(strip)
rdict = {}
for s in rlist:
rtuple = map(mystrip,s.split(key_split))
if not len(rtuple) == 2:
raise ValueError("Format for a key:value format is wrong for item " + s)
rdict[rtuple[0]] = rtuple[1]
return rdict
readtype_funcs = {np.ndarray: read_array, dict: read_dict, float: read_float, int: read_int, bool: read_bool, str: string.strip, tuple: read_tuple, np.uint : read_int}
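# [Editorial note: illustrative sketch, not part of the original i-PI source; the
# values below are made up.]
def _demo_readers():
    assert read_type(int, " 42 ") == 42
    assert read_bool(" true ") is True
    assert read_dict("{a: 1, b: 2}") == {"a": "1", "b": "2"}
    return read_array(float, "[1.0, 2.5]")   # -> array([1. , 2.5])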
def write_type(type, data):
"""Writes a formatted string from a value of a specified type.
Args:
type: The data type of the value.
data: The value to be read in.
Raises:
TypeError: Raised if it tries to write from a data type that has not been
implemented.
Returns:
A formatted string.
"""
if not type in writetype_funcs:
raise TypeError("Conversion not available for given type")
return writetype_funcs[type](data)
def write_list(data, delims="[]"):
"""Writes a formatted string from a list.
The format of the output is as for a standard python list,
[list[0], list[1],..., list[n]]. Note the space after the commas, and the
use of square brackets.
Args:
data: The value to be read in.
delims: An optional string of two characters giving the first and last
character to be printed. Defaults to "[]".
Returns:
A formatted string.
"""
rstr = delims[0]
for v in data:
rstr += str(v) + ", "
rstr = rstr.rstrip(", ")
rstr += delims[1]
return rstr
def write_tuple(data):
"""Writes a formatted string from a tuple.
The format of the output is as for a standard python tuple,
(tuple[0], tuple[1],..., tuple[n]). Note the space after the commas, and the
use of brackets.
Args:
data: The value to be read in.
Returns:
A formatted string.
"""
return write_list(data, delims="()")
def write_float(data):
"""Writes a formatted string from a float.
Floats are printed out in exponential format, to 8 decimal places, and
padded with spaces to a total width of 16 characters.
For example 1.0 --> '  1.00000000e+00'
Args:
data: The value to be read in.
Returns:
A formatted string.
"""
return "%16.8e" % (data)
def write_bool(data):
"""Writes a formatted string from a float.
Booleans are printed as a string of either ' true' or 'false'. Note that
both are printed out as exactly 5 characters.
Args:
data: The value to be read in.
Returns:
A formatted string.
"""
return "%5.5s" % (str(data))
def write_dict(data, delims="{}"):
"""Writes a formatted string from a dictionary.
The format of the output is as for a standard python dictionary,
{keyword[0]: arg[0], keyword[1]: arg[1],..., keyword[n]: arg[n]}. Note the
space after the commas, and the use of curly brackets.
Args:
data: The value to be read in.
delims: An optional string of two characters giving the first and last
character to be printed. Defaults to "{}".
Returns:
A formatted string.
"""
rstr = delims[0]
for v in data:
rstr += str(v) + ": " + str(data[v]) + ", "
rstr = rstr.strip(", ")
rstr += delims[1]
return rstr
writetype_funcs = {float: write_float, dict: write_dict, int: str, bool: write_bool, str: string.strip, tuple: write_tuple, np.uint : str}
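# [Editorial note: illustrative comment, not part of the original i-PI source.]
# The writers emit the fixed-width strings described in their docstrings, e.g.:
#   write_float(1.0)        -> '  1.00000000e+00'
#   write_bool(True)        -> ' True'
#   write_list([1, 2, 3])   -> '[1, 2, 3]'
#   write_dict({'a': 1})    -> '{a: 1}'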
| gpl-2.0 | -3,314,582,870,946,160,000 | 29.680769 | 167 | 0.652501 | false |
youprofit/zato | code/zato-server/src/zato/server/service/internal/kvdb/__init__.py | 6 | 4387 | # -*- coding: utf-8 -*-
"""
Copyright (C) 2012 Dariusz Suchojad <dsuch at zato.io>
Licensed under LGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# stdlib
from logging import DEBUG, getLogger
from traceback import format_exc
# gevent
from gevent import sleep
# Redis
from redis.sentinel import MasterNotFoundError
# Zato
from zato.common import ZatoException
from zato.common.kvdb import redis_grammar
from zato.common.util import has_redis_sentinels
from zato.server.service.internal import AdminService, AdminSIO
kvdb_logger = getLogger('zato_kvdb')
class ExecuteCommand(AdminService):
""" Executes a command against the key/value DB.
"""
name = 'zato.kvdb.remote-command.execute'
class SimpleIO(AdminSIO):
request_elem = 'zato_kvdb_remote_command_execute_request'
response_elem = 'zato_kvdb_remote_command_execute_response'
input_required = ('command',)
output_required = ('result',)
def _fixup_parameters(self, parameters):
""" Fix up quotes so stuff like [SISMEMBER key member] and [SISMEMBER key "member"] is treated the same
(brackets used here for clarity only to separate commands).
"""
if parameters:
has_one = len(parameters) == 1
first_elem_idx = 0 if has_one else 1
if parameters[first_elem_idx][0] == '"' and parameters[-1][-1] == '"':
parameters[first_elem_idx] = parameters[first_elem_idx][1:]
parameters[-1] = parameters[-1][:-1]
return parameters
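# [Editorial note: illustrative comment, not part of the original Zato source.]
# Example of the fix-up above: if the grammar yields parameters ['key', '"member"']
# for the input 'SISMEMBER key "member"', this method rewrites them to
# ['key', 'member'], so quoted and unquoted members reach Redis identically.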
def handle(self):
input_command = self.request.input.command or ''
if not input_command:
msg = 'No command sent'
raise ZatoException(self.cid, msg)
try:
parse_result = redis_grammar.parseString(input_command)
options = {}
command = parse_result.command
parameters = parse_result.parameters if parse_result.parameters else []
parameters = self._fixup_parameters(parameters)
if command == 'CONFIG':
options['parse'] = parameters[0]
elif command == 'OBJECT':
options['infotype'] = parameters[0]
response = self.server.kvdb.conn.execute_command(command, *parameters, **options) or ''
if response and command in('KEYS', 'HKEYS', 'HVALS'):
response = unicode(response).encode('utf-8')
elif command in('HLEN', 'LLEN', 'LRANGE', 'SMEMBERS', 'HGETALL'):
response = str(response)
self.response.payload.result = response or '(None)'
except Exception, e:
msg = 'Command parsing error, command:[{}], e:[{}]'.format(input_command, format_exc(e))
self.logger.error(msg)
raise ZatoException(self.cid, msg)
class LogConnectionInfo(AdminService):
""" Writes outs to logs information regarding current connections to KVDB.
"""
def handle(self):
config = self.server.fs_server_config.kvdb
sleep_time = float(config.log_connection_info_sleep_time)
has_sentinels = has_redis_sentinels(config)
if kvdb_logger.isEnabledFor(DEBUG):
while True:
if has_sentinels:
try:
master_address = self.kvdb.conn.connection_pool.connection_kwargs['connection_pool'].get_master_address()
kvdb_logger.debug(
'Uses sentinels: `%s %r`, master: `%r`', has_sentinels, config.redis_sentinels, master_address)
except MasterNotFoundError, e:
self.logger.warn(format_exc(e))
kvdb_logger.warn(format_exc(e))
else:
kvdb_logger.debug(
'Uses sentinels: `%s`, conn:`%r`', has_sentinels, self.kvdb.conn)
sleep(sleep_time)
# The data browser will most likely be implemented in a future version
'''
class GetList(AdminService):
""" Returns a list of keys, optionally including their values.
"""
# KEYS, then
# HGETALL
# GET
# LRANGE
# SMEMBERS
'''
| gpl-3.0 | -5,174,220,178,563,175,000 | 34.959016 | 129 | 0.587873 | false |
mdublin/Brightcove-Dynamic-Ingest-App | ENV/lib/python2.7/site-packages/sqlalchemy/testing/suite/test_types.py | 12 | 11192 | # coding: utf-8
from .. import fixtures, config
from ..assertions import eq_
from ..config import requirements
from sqlalchemy import Integer, Unicode, UnicodeText, select
from sqlalchemy import Date, DateTime, Time, MetaData, String, \
Text, Numeric, Float
from ..schema import Table, Column
from ... import testing
import decimal
import datetime
class _UnicodeFixture(object):
__requires__ = 'unicode_data',
data = u"Alors vous imaginez ma surprise, au lever du jour, "\
u"quand une drôle de petite voix m’a réveillé. Elle "\
u"disait: « S’il vous plaît… dessine-moi un mouton! »"
@classmethod
def define_tables(cls, metadata):
Table('unicode_table', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('unicode_data', cls.datatype),
)
def test_round_trip(self):
unicode_table = self.tables.unicode_table
config.db.execute(
unicode_table.insert(),
{
'unicode_data': self.data,
}
)
row = config.db.execute(
select([
unicode_table.c.unicode_data,
])
).first()
eq_(
row,
(self.data, )
)
assert isinstance(row[0], unicode)
def test_round_trip_executemany(self):
unicode_table = self.tables.unicode_table
config.db.execute(
unicode_table.insert(),
[
{
'unicode_data': self.data,
}
for i in xrange(3)
]
)
rows = config.db.execute(
select([
unicode_table.c.unicode_data,
])
).fetchall()
eq_(
rows,
[(self.data, ) for i in xrange(3)]
)
for row in rows:
assert isinstance(row[0], unicode)
def _test_empty_strings(self):
unicode_table = self.tables.unicode_table
config.db.execute(
unicode_table.insert(),
{"unicode_data": u''}
)
row = config.db.execute(
select([unicode_table.c.unicode_data])
).first()
eq_(row, (u'',))
class UnicodeVarcharTest(_UnicodeFixture, fixtures.TablesTest):
__requires__ = 'unicode_data',
datatype = Unicode(255)
@requirements.empty_strings_varchar
def test_empty_strings_varchar(self):
self._test_empty_strings()
class UnicodeTextTest(_UnicodeFixture, fixtures.TablesTest):
__requires__ = 'unicode_data', 'text_type'
datatype = UnicodeText()
@requirements.empty_strings_text
def test_empty_strings_text(self):
self._test_empty_strings()
class TextTest(fixtures.TablesTest):
@classmethod
def define_tables(cls, metadata):
Table('text_table', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('text_data', Text),
)
def test_text_roundtrip(self):
text_table = self.tables.text_table
config.db.execute(
text_table.insert(),
{"text_data": 'some text'}
)
row = config.db.execute(
select([text_table.c.text_data])
).first()
eq_(row, ('some text',))
def test_text_empty_strings(self):
text_table = self.tables.text_table
config.db.execute(
text_table.insert(),
{"text_data": ''}
)
row = config.db.execute(
select([text_table.c.text_data])
).first()
eq_(row, ('',))
class StringTest(fixtures.TestBase):
@requirements.unbounded_varchar
def test_nolength_string(self):
metadata = MetaData()
foo = Table('foo', metadata,
Column('one', String)
)
foo.create(config.db)
foo.drop(config.db)
class _DateFixture(object):
compare = None
@classmethod
def define_tables(cls, metadata):
Table('date_table', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('date_data', cls.datatype),
)
def test_round_trip(self):
date_table = self.tables.date_table
config.db.execute(
date_table.insert(),
{'date_data': self.data}
)
row = config.db.execute(
select([
date_table.c.date_data,
])
).first()
compare = self.compare or self.data
eq_(row,
(compare, ))
assert isinstance(row[0], type(compare))
def test_null(self):
date_table = self.tables.date_table
config.db.execute(
date_table.insert(),
{'date_data': None}
)
row = config.db.execute(
select([
date_table.c.date_data,
])
).first()
eq_(row, (None,))
class DateTimeTest(_DateFixture, fixtures.TablesTest):
__requires__ = 'datetime',
datatype = DateTime
data = datetime.datetime(2012, 10, 15, 12, 57, 18)
class DateTimeMicrosecondsTest(_DateFixture, fixtures.TablesTest):
__requires__ = 'datetime_microseconds',
datatype = DateTime
data = datetime.datetime(2012, 10, 15, 12, 57, 18, 396)
class TimeTest(_DateFixture, fixtures.TablesTest):
__requires__ = 'time',
datatype = Time
data = datetime.time(12, 57, 18)
class TimeMicrosecondsTest(_DateFixture, fixtures.TablesTest):
__requires__ = 'time_microseconds',
datatype = Time
data = datetime.time(12, 57, 18, 396)
class DateTest(_DateFixture, fixtures.TablesTest):
__requires__ = 'date',
datatype = Date
data = datetime.date(2012, 10, 15)
class DateTimeCoercedToDateTimeTest(_DateFixture, fixtures.TablesTest):
__requires__ = 'date',
datatype = Date
data = datetime.datetime(2012, 10, 15, 12, 57, 18)
compare = datetime.date(2012, 10, 15)
class DateTimeHistoricTest(_DateFixture, fixtures.TablesTest):
__requires__ = 'datetime_historic',
datatype = DateTime
data = datetime.datetime(1850, 11, 10, 11, 52, 35)
class DateHistoricTest(_DateFixture, fixtures.TablesTest):
__requires__ = 'date_historic',
datatype = Date
data = datetime.date(1727, 4, 1)
class NumericTest(fixtures.TestBase):
@testing.emits_warning(r".*does \*not\* support Decimal objects natively")
@testing.provide_metadata
def _do_test(self, type_, input_, output, filter_=None, check_scale=False):
metadata = self.metadata
t = Table('t', metadata, Column('x', type_))
t.create()
t.insert().execute([{'x':x} for x in input_])
result = set([row[0] for row in t.select().execute()])
output = set(output)
if filter_:
result = set(filter_(x) for x in result)
output = set(filter_(x) for x in output)
eq_(result, output)
if check_scale:
eq_(
[str(x) for x in result],
[str(x) for x in output],
)
def test_numeric_as_decimal(self):
self._do_test(
Numeric(precision=8, scale=4),
[15.7563, decimal.Decimal("15.7563"), None],
[decimal.Decimal("15.7563"), None],
)
def test_numeric_as_float(self):
self._do_test(
Numeric(precision=8, scale=4, asdecimal=False),
[15.7563, decimal.Decimal("15.7563"), None],
[15.7563, None],
)
def test_float_as_decimal(self):
self._do_test(
Float(precision=8, asdecimal=True),
[15.7563, decimal.Decimal("15.7563"), None],
[decimal.Decimal("15.7563"), None],
)
def test_float_as_float(self):
self._do_test(
Float(precision=8),
[15.7563, decimal.Decimal("15.7563")],
[15.7563],
filter_=lambda n: n is not None and round(n, 5) or None
)
@testing.requires.precision_numerics_general
def test_precision_decimal(self):
numbers = set([
decimal.Decimal("54.234246451650"),
decimal.Decimal("0.004354"),
decimal.Decimal("900.0"),
])
self._do_test(
Numeric(precision=18, scale=12),
numbers,
numbers,
)
@testing.requires.precision_numerics_enotation_large
def test_enotation_decimal(self):
"""test exceedingly small decimals.
Decimal reports values with E notation when the exponent
is greater than 6.
"""
numbers = set([
decimal.Decimal('1E-2'),
decimal.Decimal('1E-3'),
decimal.Decimal('1E-4'),
decimal.Decimal('1E-5'),
decimal.Decimal('1E-6'),
decimal.Decimal('1E-7'),
decimal.Decimal('1E-8'),
decimal.Decimal("0.01000005940696"),
decimal.Decimal("0.00000005940696"),
decimal.Decimal("0.00000000000696"),
decimal.Decimal("0.70000000000696"),
decimal.Decimal("696E-12"),
])
self._do_test(
Numeric(precision=18, scale=14),
numbers,
numbers
)
@testing.requires.precision_numerics_enotation_large
def test_enotation_decimal_large(self):
"""test exceedingly large decimals.
"""
numbers = set([
decimal.Decimal('4E+8'),
decimal.Decimal("5748E+15"),
decimal.Decimal('1.521E+15'),
decimal.Decimal('00000000000000.1E+12'),
])
self._do_test(
Numeric(precision=25, scale=2),
numbers,
numbers
)
@testing.requires.precision_numerics_many_significant_digits
def test_many_significant_digits(self):
numbers = set([
decimal.Decimal("31943874831932418390.01"),
decimal.Decimal("319438950232418390.273596"),
decimal.Decimal("87673.594069654243"),
])
self._do_test(
Numeric(precision=38, scale=12),
numbers,
numbers
)
@testing.requires.precision_numerics_retains_significant_digits
def test_numeric_no_decimal(self):
numbers = set([
decimal.Decimal("1.000")
])
self._do_test(
Numeric(precision=5, scale=3),
numbers,
numbers,
check_scale=True
)
__all__ = ('UnicodeVarcharTest', 'UnicodeTextTest',
'DateTest', 'DateTimeTest', 'TextTest',
'NumericTest',
'DateTimeHistoricTest', 'DateTimeCoercedToDateTimeTest',
'TimeMicrosecondsTest', 'TimeTest', 'DateTimeMicrosecondsTest',
'DateHistoricTest', 'StringTest')
| mit | -1,782,323,839,487,041,800 | 27.447837 | 79 | 0.541055 | false |
agconti/njode | env/lib/python2.7/site-packages/django/contrib/gis/gdal/srs.py | 35 | 11986 | """
The Spatial Reference class represents OGR Spatial Reference objects.
Example:
>>> from django.contrib.gis.gdal import SpatialReference
>>> srs = SpatialReference('WGS84')
>>> print(srs)
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
TOWGS84[0,0,0,0,0,0,0],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0,
AUTHORITY["EPSG","8901"]],
UNIT["degree",0.01745329251994328,
AUTHORITY["EPSG","9122"]],
AUTHORITY["EPSG","4326"]]
>>> print(srs.proj)
+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs
>>> print(srs.ellipsoid)
(6378137.0, 6356752.3142451793, 298.25722356300003)
>>> print(srs.projected, srs.geographic)
False True
>>> srs.import_epsg(32140)
>>> print(srs.name)
NAD83 / Texas South Central
"""
from ctypes import byref, c_char_p, c_int
# Getting the error checking routine and exceptions
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import SRSException
from django.contrib.gis.gdal.prototypes import srs as capi
from django.utils import six
from django.utils.encoding import force_bytes
#### Spatial Reference class. ####
class SpatialReference(GDALBase):
"""
A wrapper for the OGRSpatialReference object. According to the GDAL Web site,
the SpatialReference object "provide[s] services to represent coordinate
systems (projections and datums) and to transform between them."
"""
#### Python 'magic' routines ####
def __init__(self, srs_input=''):
"""
Creates a GDAL OSR Spatial Reference object from the given input.
The input may be string of OGC Well Known Text (WKT), an integer
EPSG code, a PROJ.4 string, and/or a projection "well known" shorthand
string (one of 'WGS84', 'WGS72', 'NAD27', 'NAD83').
"""
srs_type = 'user'
if isinstance(srs_input, six.string_types):
# Encoding to ASCII if unicode passed in.
if isinstance(srs_input, six.text_type):
srs_input = srs_input.encode('ascii')
try:
# If SRID is a string, e.g., '4326', then make acceptable
# as user input.
srid = int(srs_input)
srs_input = 'EPSG:%d' % srid
except ValueError:
pass
elif isinstance(srs_input, six.integer_types):
# EPSG integer code was input.
srs_type = 'epsg'
elif isinstance(srs_input, self.ptr_type):
srs = srs_input
srs_type = 'ogr'
else:
raise TypeError('Invalid SRS type "%s"' % srs_type)
if srs_type == 'ogr':
# Input is already an SRS pointer.
srs = srs_input
else:
# Creating a new SRS pointer, using the string buffer.
buf = c_char_p(b'')
srs = capi.new_srs(buf)
# If the pointer is NULL, throw an exception.
if not srs:
raise SRSException('Could not create spatial reference from: %s' % srs_input)
else:
self.ptr = srs
# Importing from either the user input string or an integer SRID.
if srs_type == 'user':
self.import_user_input(srs_input)
elif srs_type == 'epsg':
self.import_epsg(srs_input)
def __del__(self):
"Destroys this spatial reference."
if self._ptr:
capi.release_srs(self._ptr)
def __getitem__(self, target):
"""
Returns the value of the given string attribute node, None if the node
doesn't exist. Can also take a tuple as a parameter, (target, child),
where child is the index of the attribute in the WKT. For example:
>>> wkt = 'GEOGCS["WGS 84", DATUM["WGS_1984, ... AUTHORITY["EPSG","4326"]]'
>>> srs = SpatialReference(wkt) # could also use 'WGS84', or 4326
>>> print(srs['GEOGCS'])
WGS 84
>>> print(srs['DATUM'])
WGS_1984
>>> print(srs['AUTHORITY'])
EPSG
>>> print(srs['AUTHORITY', 1]) # The authority value
4326
>>> print(srs['TOWGS84', 4]) # the fourth value in this wkt
0
>>> print(srs['UNIT|AUTHORITY']) # For the units authority, have to use the pipe symbol.
EPSG
>>> print(srs['UNIT|AUTHORITY', 1]) # The authority value for the units
9122
"""
if isinstance(target, tuple):
return self.attr_value(*target)
else:
return self.attr_value(target)
def __str__(self):
"The string representation uses 'pretty' WKT."
return self.pretty_wkt
#### SpatialReference Methods ####
def attr_value(self, target, index=0):
"""
The attribute value for the given target node (e.g. 'PROJCS'). The index
keyword specifies an index of the child node to return.
"""
if not isinstance(target, six.string_types) or not isinstance(index, int):
raise TypeError
return capi.get_attr_value(self.ptr, force_bytes(target), index)
def auth_name(self, target):
"Returns the authority name for the given string target node."
return capi.get_auth_name(self.ptr, force_bytes(target))
def auth_code(self, target):
"Returns the authority code for the given string target node."
return capi.get_auth_code(self.ptr, force_bytes(target))
def clone(self):
"Returns a clone of this SpatialReference object."
return SpatialReference(capi.clone_srs(self.ptr))
def from_esri(self):
"Morphs this SpatialReference from ESRI's format to EPSG."
capi.morph_from_esri(self.ptr)
def identify_epsg(self):
"""
This method inspects the WKT of this SpatialReference, and will
add EPSG authority nodes where an EPSG identifier is applicable.
"""
capi.identify_epsg(self.ptr)
def to_esri(self):
"Morphs this SpatialReference to ESRI's format."
capi.morph_to_esri(self.ptr)
def validate(self):
"Checks to see if the given spatial reference is valid."
capi.srs_validate(self.ptr)
#### Name & SRID properties ####
@property
def name(self):
"Returns the name of this Spatial Reference."
if self.projected:
return self.attr_value('PROJCS')
elif self.geographic:
return self.attr_value('GEOGCS')
elif self.local:
return self.attr_value('LOCAL_CS')
else:
return None
@property
def srid(self):
"Returns the SRID of top-level authority, or None if undefined."
try:
return int(self.attr_value('AUTHORITY', 1))
except (TypeError, ValueError):
return None
#### Unit Properties ####
@property
def linear_name(self):
"Returns the name of the linear units."
units, name = capi.linear_units(self.ptr, byref(c_char_p()))
return name
@property
def linear_units(self):
"Returns the value of the linear units."
units, name = capi.linear_units(self.ptr, byref(c_char_p()))
return units
@property
def angular_name(self):
"Returns the name of the angular units."
units, name = capi.angular_units(self.ptr, byref(c_char_p()))
return name
@property
def angular_units(self):
"Returns the value of the angular units."
units, name = capi.angular_units(self.ptr, byref(c_char_p()))
return units
@property
def units(self):
"""
Returns a 2-tuple of the units value and the units name,
and will automatically determine whether to return the linear
or angular units.
"""
units, name = None, None
if self.projected or self.local:
units, name = capi.linear_units(self.ptr, byref(c_char_p()))
elif self.geographic:
units, name = capi.angular_units(self.ptr, byref(c_char_p()))
if name is not None:
name = name.decode()
return (units, name)
#### Spheroid/Ellipsoid Properties ####
@property
def ellipsoid(self):
"""
Returns a tuple of the ellipsoid parameters:
(semimajor axis, semiminor axis, and inverse flattening)
"""
return (self.semi_major, self.semi_minor, self.inverse_flattening)
@property
def semi_major(self):
"Returns the Semi Major Axis for this Spatial Reference."
return capi.semi_major(self.ptr, byref(c_int()))
@property
def semi_minor(self):
"Returns the Semi Minor Axis for this Spatial Reference."
return capi.semi_minor(self.ptr, byref(c_int()))
@property
def inverse_flattening(self):
"Returns the Inverse Flattening for this Spatial Reference."
return capi.invflattening(self.ptr, byref(c_int()))
#### Boolean Properties ####
@property
def geographic(self):
"""
Returns True if this SpatialReference is geographic
(root node is GEOGCS).
"""
return bool(capi.isgeographic(self.ptr))
@property
def local(self):
"Returns True if this SpatialReference is local (root node is LOCAL_CS)."
return bool(capi.islocal(self.ptr))
@property
def projected(self):
"""
Returns True if this SpatialReference is a projected coordinate system
(root node is PROJCS).
"""
return bool(capi.isprojected(self.ptr))
#### Import Routines #####
def import_epsg(self, epsg):
"Imports the Spatial Reference from the EPSG code (an integer)."
capi.from_epsg(self.ptr, epsg)
def import_proj(self, proj):
"Imports the Spatial Reference from a PROJ.4 string."
capi.from_proj(self.ptr, proj)
def import_user_input(self, user_input):
"Imports the Spatial Reference from the given user input string."
capi.from_user_input(self.ptr, force_bytes(user_input))
def import_wkt(self, wkt):
"Imports the Spatial Reference from OGC WKT (string)"
capi.from_wkt(self.ptr, byref(c_char_p(wkt)))
def import_xml(self, xml):
"Imports the Spatial Reference from an XML string."
capi.from_xml(self.ptr, xml)
#### Export Properties ####
@property
def wkt(self):
"Returns the WKT representation of this Spatial Reference."
return capi.to_wkt(self.ptr, byref(c_char_p()))
@property
def pretty_wkt(self, simplify=0):
"Returns the 'pretty' representation of the WKT."
return capi.to_pretty_wkt(self.ptr, byref(c_char_p()), simplify)
@property
def proj(self):
"Returns the PROJ.4 representation for this Spatial Reference."
return capi.to_proj(self.ptr, byref(c_char_p()))
@property
def proj4(self):
"Alias for proj()."
return self.proj
@property
def xml(self, dialect=''):
"Returns the XML representation of this Spatial Reference."
return capi.to_xml(self.ptr, byref(c_char_p()), dialect)
class CoordTransform(GDALBase):
"The coordinate system transformation object."
def __init__(self, source, target):
"Initializes on a source and target SpatialReference objects."
if not isinstance(source, SpatialReference) or not isinstance(target, SpatialReference):
raise TypeError('source and target must be of type SpatialReference')
self.ptr = capi.new_ct(source._ptr, target._ptr)
self._srs1_name = source.name
self._srs2_name = target.name
def __del__(self):
"Deletes this Coordinate Transformation object."
if self._ptr:
capi.destroy_ct(self._ptr)
def __str__(self):
return 'Transform from "%s" to "%s"' % (self._srs1_name, self._srs2_name)
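# [Editorial note: illustrative sketch, not part of the original Django source.]
# A CoordTransform is normally handed to OGRGeometry.transform(); a minimal use,
# assuming GDAL is available, looks like this:
def _demo_coord_transform():
    from django.contrib.gis.gdal import OGRGeometry
    ct = CoordTransform(SpatialReference(4326), SpatialReference(3857))
    pnt = OGRGeometry('POINT(-104.609 38.255)', srs=4326)
    pnt.transform(ct)   # reprojects the point in place to Web Mercator
    return pnt.wkt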
| bsd-3-clause | 227,941,847,547,444,160 | 33.34384 | 97 | 0.605957 | false |
tumbl3w33d/ansible | lib/ansible/modules/cloud/vmware/vmware_guest_disk_info.py | 10 | 13626 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, NAER William Leemans (@bushvin) <[email protected]>
# Copyright: (c) 2018, Ansible Project
# Copyright: (c) 2018, Abhijeet Kasurde <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: vmware_guest_disk_info
short_description: Gather info about disks of given virtual machine
description:
- This module can be used to gather information about disks belonging to given virtual machine.
- All parameters and VMware object names are case sensitive.
version_added: '2.9'
author:
- Abhijeet Kasurde (@Akasurde) <[email protected]>
notes:
- Tested on vSphere 6.0 and 6.5.
- Disk UUID information is added in version 2.8.
- Additional information about guest disk backings added in version 2.8.
requirements:
- "python >= 2.6"
- PyVmomi
options:
name:
description:
- Name of the virtual machine.
- This is required parameter, if parameter C(uuid) or C(moid) is not supplied.
type: str
uuid:
description:
- UUID of the instance to gather information if known, this is VMware's unique identifier.
- This is required parameter, if parameter C(name) or C(moid) is not supplied.
type: str
moid:
description:
- Managed Object ID of the instance to manage if known, this is a unique identifier only within a single vCenter instance.
- This is required if C(name) or C(uuid) is not supplied.
type: str
use_instance_uuid:
description:
- Whether to use the VMware instance UUID rather than the BIOS UUID.
default: no
type: bool
folder:
description:
- Destination folder, absolute or relative path to find an existing guest.
- This is required parameter, only if multiple VMs are found with same name.
- The folder should include the datacenter. ESX's datacenter is ha-datacenter
- 'Examples:'
- ' folder: /ha-datacenter/vm'
- ' folder: ha-datacenter/vm'
- ' folder: /datacenter1/vm'
- ' folder: datacenter1/vm'
- ' folder: /datacenter1/vm/folder1'
- ' folder: datacenter1/vm/folder1'
- ' folder: /folder1/datacenter1/vm'
- ' folder: folder1/datacenter1/vm'
- ' folder: /folder1/datacenter1/vm/folder2'
type: str
datacenter:
description:
- The datacenter name to which virtual machine belongs to.
required: True
type: str
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
- name: Gather disk info from virtual machine using UUID
vmware_guest_disk_info:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
datacenter: ha-datacenter
validate_certs: no
uuid: 421e4592-c069-924d-ce20-7e7533fab926
delegate_to: localhost
register: disk_info
- name: Gather disk info from virtual machine using name
vmware_guest_disk_info:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
datacenter: ha-datacenter
validate_certs: no
name: VM_225
delegate_to: localhost
register: disk_info
- name: Gather disk info from virtual machine using moid
vmware_guest_disk_info:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
datacenter: ha-datacenter
validate_certs: no
moid: vm-42
delegate_to: localhost
register: disk_info
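# [Editorial note: illustrative addition, not part of the original module examples.]
# Consuming the registered result; the '0' key follows the RETURN sample below.
- name: Print capacity of the first disk
  debug:
    msg: "{{ disk_info.guest_disk_info['0'].capacity_in_kb }} KB"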
'''
RETURN = """
guest_disk_info:
description: metadata about the virtual machine's disks
returned: always
type: dict
sample: {
"0": {
"backing_datastore": "datastore2",
"backing_disk_mode": "persistent",
"backing_eagerlyscrub": false,
"backing_filename": "[datastore2] VM_225/VM_225.vmdk",
"backing_thinprovisioned": false,
"backing_type": "FlatVer2",
"backing_writethrough": false,
"backing_uuid": "200C3A00-f82a-97af-02ff-62a595f0020a",
"capacity_in_bytes": 10485760,
"capacity_in_kb": 10240,
"controller_bus_number": 0,
"controller_key": 1000,
"controller_type": "paravirtual",
"key": 2000,
"label": "Hard disk 1",
"summary": "10,240 KB",
"unit_number": 0
},
"1": {
"backing_datastore": "datastore3",
"backing_devicename": "vml.012345678901234567890123456789012345678901234567890123",
"backing_disk_mode": "independent_persistent",
"backing_filename": "[datastore3] VM_226/VM_226.vmdk",
"backing_lunuuid": "012345678901234567890123456789012345678901234567890123",
"backing_type": "RawDiskMappingVer1",
"backing_uuid": null,
"capacity_in_bytes": 15728640,
"capacity_in_kb": 15360,
"controller_bus_number": 0,
"controller_key": 1000,
"controller_type": "paravirtual",
"key": 2001,
"label": "Hard disk 3",
"summary": "15,360 KB",
"unit_number": 1
},
}
"""
try:
from pyVmomi import vim
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_text
from ansible.module_utils.vmware import PyVmomi, vmware_argument_spec
class PyVmomiHelper(PyVmomi):
def __init__(self, module):
super(PyVmomiHelper, self).__init__(module)
def gather_disk_info(self, vm_obj):
"""
Gather information about VM's disks
Args:
vm_obj: Managed object of virtual machine
Returns: A list of dict containing disks information
"""
controller_info = dict()
disks_info = dict()
if vm_obj is None:
return disks_info
controller_types = {
vim.vm.device.VirtualLsiLogicController: 'lsilogic',
vim.vm.device.ParaVirtualSCSIController: 'paravirtual',
vim.vm.device.VirtualBusLogicController: 'buslogic',
vim.vm.device.VirtualLsiLogicSASController: 'lsilogicsas',
vim.vm.device.VirtualIDEController: 'ide'
}
controller_index = 0
for controller in vm_obj.config.hardware.device:
if isinstance(controller, tuple(controller_types.keys())):
controller_info[controller_index] = dict(
key=controller.key,
controller_type=controller_types[type(controller)],
bus_number=controller.busNumber,
devices=controller.device
)
controller_index += 1
disk_index = 0
for disk in vm_obj.config.hardware.device:
if isinstance(disk, vim.vm.device.VirtualDisk):
disks_info[disk_index] = dict(
key=disk.key,
label=disk.deviceInfo.label,
summary=disk.deviceInfo.summary,
backing_filename=disk.backing.fileName,
backing_datastore=disk.backing.datastore.name,
controller_key=disk.controllerKey,
unit_number=disk.unitNumber,
capacity_in_kb=disk.capacityInKB,
capacity_in_bytes=disk.capacityInBytes,
)
if isinstance(disk.backing, vim.vm.device.VirtualDisk.FlatVer1BackingInfo):
disks_info[disk_index]['backing_type'] = 'FlatVer1'
disks_info[disk_index]['backing_writethrough'] = disk.backing.writeThrough
elif isinstance(disk.backing, vim.vm.device.VirtualDisk.FlatVer2BackingInfo):
disks_info[disk_index]['backing_type'] = 'FlatVer2'
disks_info[disk_index]['backing_writethrough'] = bool(disk.backing.writeThrough)
disks_info[disk_index]['backing_thinprovisioned'] = bool(disk.backing.thinProvisioned)
disks_info[disk_index]['backing_eagerlyscrub'] = bool(disk.backing.eagerlyScrub)
disks_info[disk_index]['backing_uuid'] = disk.backing.uuid
elif isinstance(disk.backing, vim.vm.device.VirtualDisk.LocalPMemBackingInfo):
disks_info[disk_index]['backing_type'] = 'LocalPMem'
disks_info[disk_index]['backing_volumeuuid'] = disk.backing.volumeUUID
disks_info[disk_index]['backing_uuid'] = disk.backing.uuid
elif isinstance(disk.backing, vim.vm.device.VirtualDisk.PartitionedRawDiskVer2BackingInfo):
disks_info[disk_index]['backing_type'] = 'PartitionedRawDiskVer2'
disks_info[disk_index]['backing_descriptorfilename'] = disk.backing.descriptorFileName
disks_info[disk_index]['backing_uuid'] = disk.backing.uuid
elif isinstance(disk.backing, vim.vm.device.VirtualDisk.RawDiskMappingVer1BackingInfo):
disks_info[disk_index]['backing_type'] = 'RawDiskMappingVer1'
disks_info[disk_index]['backing_devicename'] = disk.backing.deviceName
disks_info[disk_index]['backing_diskmode'] = disk.backing.diskMode
disks_info[disk_index]['backing_lunuuid'] = disk.backing.lunUuid
disks_info[disk_index]['backing_uuid'] = disk.backing.uuid
elif isinstance(disk.backing, vim.vm.device.VirtualDisk.RawDiskVer2BackingInfo):
disks_info[disk_index]['backing_type'] = 'RawDiskVer2'
disks_info[disk_index]['backing_descriptorfilename'] = disk.backing.descriptorFileName
disks_info[disk_index]['backing_uuid'] = disk.backing.uuid
elif isinstance(disk.backing, vim.vm.device.VirtualDisk.SeSparseBackingInfo):
disks_info[disk_index]['backing_type'] = 'SeSparse'
disks_info[disk_index]['backing_diskmode'] = disk.backing.diskMode
disks_info[disk_index]['backing_writethrough'] = bool(disk.backing.writeThrough)
disks_info[disk_index]['backing_uuid'] = disk.backing.uuid
elif isinstance(disk.backing, vim.vm.device.VirtualDisk.SparseVer1BackingInfo):
disks_info[disk_index]['backing_type'] = 'SparseVer1'
disks_info[disk_index]['backing_diskmode'] = disk.backing.diskMode
disks_info[disk_index]['backing_spaceusedinkb'] = disk.backing.spaceUsedInKB
disks_info[disk_index]['backing_split'] = bool(disk.backing.split)
disks_info[disk_index]['backing_writethrough'] = bool(disk.backing.writeThrough)
elif isinstance(disk.backing, vim.vm.device.VirtualDisk.SparseVer2BackingInfo):
disks_info[disk_index]['backing_type'] = 'SparseVer2'
disks_info[disk_index]['backing_diskmode'] = disk.backing.diskMode
disks_info[disk_index]['backing_spaceusedinkb'] = disk.backing.spaceUsedInKB
disks_info[disk_index]['backing_split'] = bool(disk.backing.split)
disks_info[disk_index]['backing_writethrough'] = bool(disk.backing.writeThrough)
disks_info[disk_index]['backing_uuid'] = disk.backing.uuid
for controller_index in range(len(controller_info)):
if controller_info[controller_index]['key'] == disks_info[disk_index]['controller_key']:
disks_info[disk_index]['controller_bus_number'] = controller_info[controller_index]['bus_number']
disks_info[disk_index]['controller_type'] = controller_info[controller_index]['controller_type']
disk_index += 1
return disks_info
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(
name=dict(type='str'),
uuid=dict(type='str'),
moid=dict(type='str'),
use_instance_uuid=dict(type='bool', default=False),
folder=dict(type='str'),
datacenter=dict(type='str', required=True),
)
module = AnsibleModule(
argument_spec=argument_spec,
required_one_of=[
['name', 'uuid', 'moid']
],
supports_check_mode=True,
)
if module.params['folder']:
# FindByInventoryPath() does not require an absolute path
# so we should leave the input folder path unmodified
module.params['folder'] = module.params['folder'].rstrip('/')
pyv = PyVmomiHelper(module)
# Check if the VM exists before continuing
vm = pyv.get_vm()
if vm:
# VM exists
try:
module.exit_json(guest_disk_info=pyv.gather_disk_info(vm))
except Exception as exc:
module.fail_json(msg="Failed to gather information with exception : %s" % to_text(exc))
else:
        # We were unable to find the virtual machine the user specified
# Bail out
vm_id = (module.params.get('uuid') or module.params.get('moid') or module.params.get('name'))
module.fail_json(msg="Unable to gather disk information for non-existing VM %s" % vm_id)
if __name__ == '__main__':
main()
| gpl-3.0 | 6,948,222,654,510,567,000 | 40.797546 | 127 | 0.614707 | false |
pramsey/mapnik | scons/scons-local-2.3.4/SCons/Tool/midl.py | 9 | 3034 | """SCons.Tool.midl
Tool-specific initialization for midl (Microsoft IDL compiler).
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/midl.py 2014/09/27 12:51:43 garyo"
import SCons.Action
import SCons.Builder
import SCons.Defaults
import SCons.Scanner.IDL
import SCons.Util
from MSCommon import msvc_exists
def midl_emitter(target, source, env):
"""Produces a list of outputs from the MIDL compiler"""
base, ext = SCons.Util.splitext(str(target[0]))
tlb = target[0]
incl = base + '.h'
interface = base + '_i.c'
t = [tlb, incl, interface]
midlcom = env['MIDLCOM']
if midlcom.find('/proxy') != -1:
proxy = base + '_p.c'
t.append(proxy)
if midlcom.find('/dlldata') != -1:
dlldata = base + '_data.c'
t.append(dlldata)
return (t,source)
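# Illustrative note (not part of the original tool): with the default MIDLCOM
# set up in generate() below, which passes /proxy and /dlldata, building
# foo.idl is expected to yield foo.tlb, foo.h, foo_i.c, foo_p.c and foo_data.c.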
idl_scanner = SCons.Scanner.IDL.IDLScan()
midl_action = SCons.Action.Action('$MIDLCOM', '$MIDLCOMSTR')
midl_builder = SCons.Builder.Builder(action = midl_action,
src_suffix = '.idl',
suffix='.tlb',
emitter = midl_emitter,
source_scanner = idl_scanner)
def generate(env):
"""Add Builders and construction variables for midl to an Environment."""
env['MIDL'] = 'MIDL.EXE'
env['MIDLFLAGS'] = SCons.Util.CLVar('/nologo')
env['MIDLCOM'] = '$MIDL $MIDLFLAGS /tlb ${TARGETS[0]} /h ${TARGETS[1]} /iid ${TARGETS[2]} /proxy ${TARGETS[3]} /dlldata ${TARGETS[4]} $SOURCE 2> NUL'
env['BUILDERS']['TypeLibrary'] = midl_builder
def exists(env):
return msvc_exists()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| lgpl-2.1 | -1,679,153,552,578,502,400 | 33.477273 | 159 | 0.669084 | false |
Tesla-Redux-Devices/kernel_lge_g3 | tools/perf/scripts/python/syscall-counts-by-pid.py | 11180 | 1927 | # system call counts, by pid
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts-by-pid.py [comm]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
try:
syscalls[common_comm][common_pid][id] += 1
except TypeError:
syscalls[common_comm][common_pid][id] = 1
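# Illustrative note (not part of the original script): syscalls is a nested
# autodict, so the first increment for a given (comm, pid, id) triple raises
# TypeError and falls back to initialising that counter to 1.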
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events by comm/pid:\n\n",
print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id, val in sorted(syscalls[comm][pid].iteritems(), \
key = lambda(k, v): (v, k), reverse = True):
print " %-38s %10d\n" % (syscall_name(id), val),
| gpl-2.0 | -6,517,811,187,812,056,000 | 26.927536 | 77 | 0.615464 | false |
blue236/ardupilot | Tools/scripts/magfit_flashlog.py | 278 | 4744 | #!/usr/bin/env python
''' fit best estimate of magnetometer offsets from ArduCopter flashlog
using the algorithm from Bill Premerlani
'''
import sys, time, os, math
# command line option handling
from optparse import OptionParser
parser = OptionParser("magfit_flashlog.py [options]")
parser.add_option("--verbose", action='store_true', default=False, help="verbose offset output")
parser.add_option("--gain", type='float', default=0.01, help="algorithm gain")
parser.add_option("--noise", type='float', default=0, help="noise to add")
parser.add_option("--max-change", type='float', default=10, help="max step change")
parser.add_option("--min-diff", type='float', default=50, help="min mag vector delta")
parser.add_option("--history", type='int', default=20, help="how many points to keep")
parser.add_option("--repeat", type='int', default=1, help="number of repeats through the data")
(opts, args) = parser.parse_args()
from rotmat import Vector3, Matrix3
if len(args) < 1:
print("Usage: magfit_flashlog.py [options] <LOGFILE...>")
sys.exit(1)
def noise():
'''a noise vector'''
from random import gauss
v = Vector3(gauss(0, 1), gauss(0, 1), gauss(0, 1))
v.normalize()
return v * opts.noise
def find_offsets(data, ofs):
    '''find mag offsets by applying Bill's "offsets revisited" algorithm
on the data
This is an implementation of the algorithm from:
http://gentlenav.googlecode.com/files/MagnetometerOffsetNullingRevisited.pdf
'''
# a limit on the maximum change in each step
max_change = opts.max_change
# the gain factor for the algorithm
gain = opts.gain
data2 = []
for d in data:
d = d.copy() + noise()
d.x = float(int(d.x + 0.5))
d.y = float(int(d.y + 0.5))
d.z = float(int(d.z + 0.5))
data2.append(d)
data = data2
history_idx = 0
mag_history = data[0:opts.history]
for i in range(opts.history, len(data)):
B1 = mag_history[history_idx] + ofs
B2 = data[i] + ofs
diff = B2 - B1
diff_length = diff.length()
if diff_length <= opts.min_diff:
# the mag vector hasn't changed enough - we don't get any
# information from this
history_idx = (history_idx+1) % opts.history
continue
mag_history[history_idx] = data[i]
history_idx = (history_idx+1) % opts.history
        # equation 6 of Bill's paper
delta = diff * (gain * (B2.length() - B1.length()) / diff_length)
# limit the change from any one reading. This is to prevent
# single crazy readings from throwing off the offsets for a long
# time
delta_length = delta.length()
if max_change != 0 and delta_length > max_change:
delta *= max_change / delta_length
# set the new offsets
ofs = ofs - delta
if opts.verbose:
print ofs
return ofs
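# Illustrative summary (not part of the original script) of the update step
# used in find_offsets() above: given two samples B1 and B2 with the current
# offset estimate applied, the offset is nudged along their difference,
# scaled by how much the two vector magnitudes disagree (equation 6):
#
#   diff = B2 - B1
#   delta = diff * (gain * (B2.length() - B1.length()) / diff.length())
#   ofs = ofs - delta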
def plot_corrected_field(filename, data, offsets):
f = open(filename, mode='w')
for d in data:
corrected = d + offsets
f.write("%.1f\n" % corrected.length())
f.close()
def magfit(logfile):
'''find best magnetometer offset fit to a log file'''
print("Processing log %s" % filename)
# open the log file
flog = open(filename, mode='r')
data = []
data_no_motors = []
mag = None
offsets = None
# now gather all the data
for line in flog:
if not line.startswith('COMPASS,'):
continue
line = line.rstrip()
line = line.replace(' ', '')
a = line.split(',')
ofs = Vector3(float(a[4]), float(a[5]), float(a[6]))
if offsets is None:
initial_offsets = ofs
offsets = ofs
motor_ofs = Vector3(float(a[7]), float(a[8]), float(a[9]))
mag = Vector3(float(a[1]), float(a[2]), float(a[3]))
mag = mag - offsets
data.append(mag)
data_no_motors.append(mag - motor_ofs)
print("Extracted %u data points" % len(data))
print("Current offsets: %s" % initial_offsets)
# run the fitting algorithm
ofs = initial_offsets
for r in range(opts.repeat):
ofs = find_offsets(data, ofs)
plot_corrected_field('plot.dat', data, ofs)
plot_corrected_field('initial.dat', data, initial_offsets)
plot_corrected_field('zero.dat', data, Vector3(0,0,0))
plot_corrected_field('hand.dat', data, Vector3(-25,-8,-2))
plot_corrected_field('zero-no-motors.dat', data_no_motors, Vector3(0,0,0))
print('Loop %u offsets %s' % (r, ofs))
sys.stdout.flush()
print("New offsets: %s" % ofs)
total = 0.0
for filename in args:
magfit(filename)
| gpl-3.0 | -4,921,021,941,491,636,000 | 30.838926 | 96 | 0.60371 | false |
marcoarruda/MissionPlanner | Lib/site-packages/numpy/testing/utils.py | 53 | 47161 | """
Utility function to facilitate testing.
"""
import os
import sys
import re
import operator
import types
import warnings
from nosetester import import_nose
__all__ = ['assert_equal', 'assert_almost_equal','assert_approx_equal',
'assert_array_equal', 'assert_array_less', 'assert_string_equal',
'assert_array_almost_equal', 'assert_raises', 'build_err_msg',
'decorate_methods', 'jiffies', 'memusage', 'print_assert_equal',
'raises', 'rand', 'rundocs', 'runstring', 'verbose', 'measure',
'assert_', 'assert_array_almost_equal_nulp',
'assert_array_max_ulp', 'assert_warns', 'assert_allclose']
verbose = 0
def assert_(val, msg='') :
"""
Assert that works in release mode.
The Python built-in ``assert`` does not work when executing code in
optimized mode (the ``-O`` flag) - no byte-code is generated for it.
For documentation on usage, refer to the Python documentation.
"""
if not val :
raise AssertionError(msg)
def gisnan(x):
"""like isnan, but always raise an error if type not supported instead of
returning a TypeError object.
Notes
-----
isnan and other ufunc sometimes return a NotImplementedType object instead
of raising any exception. This function is a wrapper to make sure an
exception is always raised.
This should be removed once this problem is solved at the Ufunc level."""
from numpy.core import isnan
st = isnan(x)
if isinstance(st, types.NotImplementedType):
raise TypeError("isnan not supported for this type")
return st
def gisfinite(x):
"""like isfinite, but always raise an error if type not supported instead of
returning a TypeError object.
Notes
-----
isfinite and other ufunc sometimes return a NotImplementedType object instead
of raising any exception. This function is a wrapper to make sure an
exception is always raised.
This should be removed once this problem is solved at the Ufunc level."""
from numpy.core import isfinite, seterr
err = seterr(invalid='ignore')
try:
st = isfinite(x)
if isinstance(st, types.NotImplementedType):
raise TypeError("isfinite not supported for this type")
finally:
seterr(**err)
return st
def gisinf(x):
"""like isinf, but always raise an error if type not supported instead of
returning a TypeError object.
Notes
-----
isinf and other ufunc sometimes return a NotImplementedType object instead
of raising any exception. This function is a wrapper to make sure an
exception is always raised.
This should be removed once this problem is solved at the Ufunc level."""
from numpy.core import isinf, seterr
err = seterr(invalid='ignore')
try:
st = isinf(x)
if isinstance(st, types.NotImplementedType):
raise TypeError("isinf not supported for this type")
finally:
seterr(**err)
return st
def rand(*args):
"""Returns an array of random numbers with the given shape.
This only uses the standard library, so it is useful for testing purposes.
"""
import random
from numpy.core import zeros, float64
results = zeros(args, float64)
f = results.flat
for i in range(len(f)):
f[i] = random.random()
return results
if sys.platform[:5]=='linux':
def jiffies(_proc_pid_stat = '/proc/%s/stat'%(os.getpid()),
_load_time=[]):
""" Return number of jiffies (1/100ths of a second) that this
process has been scheduled in user mode. See man 5 proc. """
import time
if not _load_time:
_load_time.append(time.time())
try:
f=open(_proc_pid_stat,'r')
l = f.readline().split(' ')
f.close()
return int(l[13])
except:
return int(100*(time.time()-_load_time[0]))
def memusage(_proc_pid_stat = '/proc/%s/stat'%(os.getpid())):
""" Return virtual memory size in bytes of the running python.
"""
try:
f=open(_proc_pid_stat,'r')
l = f.readline().split(' ')
f.close()
return int(l[22])
except:
return
else:
# os.getpid is not in all platforms available.
# Using time is safe but inaccurate, especially when process
# was suspended or sleeping.
def jiffies(_load_time=[]):
""" Return number of jiffies (1/100ths of a second) that this
process has been scheduled in user mode. [Emulation with time.time]. """
import time
if not _load_time:
_load_time.append(time.time())
return int(100*(time.time()-_load_time[0]))
def memusage():
""" Return memory usage of running python. [Not implemented]"""
raise NotImplementedError
if os.name=='nt' and sys.version[:3] > '2.3':
# Code "stolen" from enthought/debug/memusage.py
def GetPerformanceAttributes(object, counter, instance = None,
inum=-1, format = None, machine=None):
# NOTE: Many counters require 2 samples to give accurate results,
# including "% Processor Time" (as by definition, at any instant, a
# thread's CPU usage is either 0 or 100). To read counters like this,
# you should copy this function, but keep the counter open, and call
# CollectQueryData() each time you need to know.
# See http://msdn.microsoft.com/library/en-us/dnperfmo/html/perfmonpt2.asp
# My older explanation for this was that the "AddCounter" process forced
# the CPU to 100%, but the above makes more sense :)
import win32pdh
if format is None: format = win32pdh.PDH_FMT_LONG
path = win32pdh.MakeCounterPath( (machine,object,instance, None, inum,counter) )
hq = win32pdh.OpenQuery()
try:
hc = win32pdh.AddCounter(hq, path)
try:
win32pdh.CollectQueryData(hq)
type, val = win32pdh.GetFormattedCounterValue(hc, format)
return val
finally:
win32pdh.RemoveCounter(hc)
finally:
win32pdh.CloseQuery(hq)
def memusage(processName="python", instance=0):
# from win32pdhutil, part of the win32all package
import win32pdh
return GetPerformanceAttributes("Process", "Virtual Bytes",
processName, instance,
win32pdh.PDH_FMT_LONG, None)
def build_err_msg(arrays, err_msg, header='Items are not equal:',
verbose=True,
names=('ACTUAL', 'DESIRED')):
msg = ['\n' + header]
if err_msg:
if err_msg.find('\n') == -1 and len(err_msg) < 79-len(header):
msg = [msg[0] + ' ' + err_msg]
else:
msg.append(err_msg)
if verbose:
for i, a in enumerate(arrays):
try:
r = repr(a)
except:
r = '[repr failed]'
if r.count('\n') > 3:
r = '\n'.join(r.splitlines()[:3])
r += '...'
msg.append(' %s: %s' % (names[i], r))
return '\n'.join(msg)
def assert_equal(actual,desired,err_msg='',verbose=True):
"""
Raise an assertion if two objects are not equal.
Given two objects (lists, tuples, dictionaries or numpy arrays), check
that all elements of these objects are equal. An exception is raised at
the first conflicting values.
Parameters
----------
actual : list, tuple, dict or ndarray
The object to check.
desired : list, tuple, dict or ndarray
The expected object.
err_msg : string
The error message to be printed in case of failure.
verbose : bool
If True, the conflicting values are appended to the error message.
Raises
------
AssertionError
If actual and desired are not equal.
Examples
--------
>>> np.testing.assert_equal([4,5], [4,6])
...
<type 'exceptions.AssertionError'>:
Items are not equal:
item=1
ACTUAL: 5
DESIRED: 6
"""
if isinstance(desired, dict):
if not isinstance(actual, dict) :
raise AssertionError(repr(type(actual)))
assert_equal(len(actual),len(desired),err_msg,verbose)
for k,i in desired.items():
if k not in actual :
raise AssertionError(repr(k))
assert_equal(actual[k], desired[k], 'key=%r\n%s' % (k,err_msg), verbose)
return
if isinstance(desired, (list,tuple)) and isinstance(actual, (list,tuple)):
assert_equal(len(actual),len(desired),err_msg,verbose)
for k in range(len(desired)):
assert_equal(actual[k], desired[k], 'item=%r\n%s' % (k,err_msg), verbose)
return
from numpy.core import ndarray, isscalar, signbit
from numpy.lib import iscomplexobj, real, imag
if isinstance(actual, ndarray) or isinstance(desired, ndarray):
return assert_array_equal(actual, desired, err_msg, verbose)
msg = build_err_msg([actual, desired], err_msg, verbose=verbose)
# Handle complex numbers: separate into real/imag to handle
# nan/inf/negative zero correctly
# XXX: catch ValueError for subclasses of ndarray where iscomplex fail
try:
usecomplex = iscomplexobj(actual) or iscomplexobj(desired)
except ValueError:
usecomplex = False
if usecomplex:
if iscomplexobj(actual):
actualr = real(actual)
actuali = imag(actual)
else:
actualr = actual
actuali = 0
if iscomplexobj(desired):
desiredr = real(desired)
desiredi = imag(desired)
else:
desiredr = desired
desiredi = 0
try:
assert_equal(actualr, desiredr)
assert_equal(actuali, desiredi)
except AssertionError:
raise AssertionError(msg)
# Inf/nan/negative zero handling
try:
# isscalar test to check cases such as [np.nan] != np.nan
if isscalar(desired) != isscalar(actual):
raise AssertionError(msg)
# If one of desired/actual is not finite, handle it specially here:
# check that both are nan if any is a nan, and test for equality
# otherwise
if not (gisfinite(desired) and gisfinite(actual)):
isdesnan = gisnan(desired)
isactnan = gisnan(actual)
if isdesnan or isactnan:
if not (isdesnan and isactnan):
raise AssertionError(msg)
else:
if not desired == actual:
raise AssertionError(msg)
return
elif desired == 0 and actual == 0:
if not signbit(desired) == signbit(actual):
raise AssertionError(msg)
# If TypeError or ValueError raised while using isnan and co, just handle
# as before
except (TypeError, ValueError, NotImplementedError):
pass
if desired != actual :
raise AssertionError(msg)
def print_assert_equal(test_string,actual,desired):
"""
Test if two objects are equal, and print an error message if test fails.
The test is performed with ``actual == desired``.
Parameters
----------
test_string : str
The message supplied to AssertionError.
actual : object
The object to test for equality against `desired`.
desired : object
The expected result.
Examples
--------
>>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 1])
>>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 2])
Traceback (most recent call last):
...
AssertionError: Test XYZ of func xyz failed
ACTUAL:
[0, 1]
DESIRED:
[0, 2]
"""
import pprint
try:
assert(actual == desired)
except AssertionError:
import cStringIO
msg = cStringIO.StringIO()
msg.write(test_string)
msg.write(' failed\nACTUAL: \n')
pprint.pprint(actual,msg)
msg.write('DESIRED: \n')
pprint.pprint(desired,msg)
raise AssertionError(msg.getvalue())
def assert_almost_equal(actual,desired,decimal=7,err_msg='',verbose=True):
"""
Raise an assertion if two items are not equal up to desired precision.
The test is equivalent to abs(desired-actual) < 0.5 * 10**(-decimal)
Given two objects (numbers or ndarrays), check that all elements of these
objects are almost equal. An exception is raised at conflicting values.
For ndarrays this delegates to assert_array_almost_equal
Parameters
----------
actual : number or ndarray
The object to check.
desired : number or ndarray
The expected object.
decimal : integer (decimal=7)
desired precision
err_msg : string
The error message to be printed in case of failure.
verbose : bool
If True, the conflicting values are appended to the error message.
Raises
------
AssertionError
If actual and desired are not equal up to specified precision.
See Also
--------
assert_array_almost_equal: compares array_like objects
assert_equal: tests objects for equality
Examples
--------
>>> import numpy.testing as npt
>>> npt.assert_almost_equal(2.3333333333333, 2.33333334)
>>> npt.assert_almost_equal(2.3333333333333, 2.33333334, decimal=10)
...
<type 'exceptions.AssertionError'>:
Items are not equal:
ACTUAL: 2.3333333333333002
DESIRED: 2.3333333399999998
>>> npt.assert_almost_equal(np.array([1.0,2.3333333333333]),
\t\t\tnp.array([1.0,2.33333334]), decimal=9)
...
<type 'exceptions.AssertionError'>:
Arrays are not almost equal
<BLANKLINE>
(mismatch 50.0%)
x: array([ 1. , 2.33333333])
y: array([ 1. , 2.33333334])
"""
from numpy.core import ndarray
from numpy.lib import iscomplexobj, real, imag
# Handle complex numbers: separate into real/imag to handle
# nan/inf/negative zero correctly
# XXX: catch ValueError for subclasses of ndarray where iscomplex fail
try:
usecomplex = iscomplexobj(actual) or iscomplexobj(desired)
except ValueError:
usecomplex = False
msg = build_err_msg([actual, desired], err_msg, verbose=verbose,
header='Arrays are not almost equal')
if usecomplex:
if iscomplexobj(actual):
actualr = real(actual)
actuali = imag(actual)
else:
actualr = actual
actuali = 0
if iscomplexobj(desired):
desiredr = real(desired)
desiredi = imag(desired)
else:
desiredr = desired
desiredi = 0
try:
assert_almost_equal(actualr, desiredr, decimal=decimal)
assert_almost_equal(actuali, desiredi, decimal=decimal)
except AssertionError:
raise AssertionError(msg)
if isinstance(actual, (ndarray, tuple, list)) \
or isinstance(desired, (ndarray, tuple, list)):
return assert_array_almost_equal(actual, desired, decimal, err_msg)
try:
# If one of desired/actual is not finite, handle it specially here:
# check that both are nan if any is a nan, and test for equality
# otherwise
if not (gisfinite(desired) and gisfinite(actual)):
if gisnan(desired) or gisnan(actual):
if not (gisnan(desired) and gisnan(actual)):
raise AssertionError(msg)
else:
if not desired == actual:
raise AssertionError(msg)
return
except (NotImplementedError, TypeError):
pass
if round(abs(desired - actual),decimal) != 0 :
raise AssertionError(msg)
def assert_approx_equal(actual,desired,significant=7,err_msg='',verbose=True):
"""
Raise an assertion if two items are not equal up to significant digits.
Given two numbers, check that they are approximately equal.
Approximately equal is defined as the number of significant digits
that agree.
Parameters
----------
actual : number
The object to check.
desired : number
The expected object.
significant : integer (significant=7)
desired precision
err_msg : string
The error message to be printed in case of failure.
verbose : bool
If True, the conflicting values are appended to the error message.
Raises
------
AssertionError
If actual and desired are not equal up to specified precision.
See Also
--------
assert_almost_equal: compares objects by decimals
assert_array_almost_equal: compares array_like objects by decimals
assert_equal: tests objects for equality
Examples
--------
>>> np.testing.assert_approx_equal(0.12345677777777e-20, 0.1234567e-20)
>>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345671e-20,
significant=8)
>>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345672e-20,
significant=8)
...
<type 'exceptions.AssertionError'>:
Items are not equal to 8 significant digits:
ACTUAL: 1.234567e-021
DESIRED: 1.2345672000000001e-021
the evaluated condition that raises the exception is
>>> abs(0.12345670e-20/1e-21 - 0.12345672e-20/1e-21) >= 10**-(8-1)
True
"""
import numpy as np
actual, desired = map(float, (actual, desired))
if desired==actual:
return
# Normalized the numbers to be in range (-10.0,10.0)
# scale = float(pow(10,math.floor(math.log10(0.5*(abs(desired)+abs(actual))))))
err = np.seterr(invalid='ignore')
try:
scale = 0.5*(np.abs(desired) + np.abs(actual))
scale = np.power(10,np.floor(np.log10(scale)))
finally:
np.seterr(**err)
try:
sc_desired = desired/scale
except ZeroDivisionError:
sc_desired = 0.0
try:
sc_actual = actual/scale
except ZeroDivisionError:
sc_actual = 0.0
msg = build_err_msg([actual, desired], err_msg,
header='Items are not equal to %d significant digits:' %
significant,
verbose=verbose)
try:
# If one of desired/actual is not finite, handle it specially here:
# check that both are nan if any is a nan, and test for equality
# otherwise
if not (gisfinite(desired) and gisfinite(actual)):
if gisnan(desired) or gisnan(actual):
if not (gisnan(desired) and gisnan(actual)):
raise AssertionError(msg)
else:
if not desired == actual:
raise AssertionError(msg)
return
except (TypeError, NotImplementedError):
pass
if np.abs(sc_desired - sc_actual) >= np.power(10.,-(significant-1)) :
raise AssertionError(msg)
def assert_array_compare(comparison, x, y, err_msg='', verbose=True,
header=''):
from numpy.core import array, isnan, any
x = array(x, copy=False, subok=True)
y = array(y, copy=False, subok=True)
def isnumber(x):
return x.dtype.char in '?bhilqpBHILQPfdgFDG'
try:
cond = (x.shape==() or y.shape==()) or x.shape == y.shape
if not cond:
msg = build_err_msg([x, y],
err_msg
+ '\n(shapes %s, %s mismatch)' % (x.shape,
y.shape),
verbose=verbose, header=header,
names=('x', 'y'))
if not cond :
raise AssertionError(msg)
if (isnumber(x) and isnumber(y)) and (any(isnan(x)) or any(isnan(y))):
# Handling nan: we first check that x and y have the nan at the
# same locations, and then we mask the nan and do the comparison as
# usual.
xnanid = isnan(x)
ynanid = isnan(y)
try:
assert_array_equal(xnanid, ynanid)
except AssertionError:
msg = build_err_msg([x, y],
err_msg
+ '\n(x and y nan location mismatch %s, ' \
'%s mismatch)' % (xnanid, ynanid),
verbose=verbose, header=header,
names=('x', 'y'))
raise AssertionError(msg)
# If only one item, it was a nan, so just return
if x.size == y.size == 1:
return
val = comparison(x[~xnanid], y[~ynanid])
else:
val = comparison(x,y)
if isinstance(val, bool):
cond = val
reduced = [0]
else:
reduced = val.ravel()
cond = reduced.all()
reduced = reduced.tolist()
if not cond:
match = 100-100.0*reduced.count(1)/len(reduced)
msg = build_err_msg([x, y],
err_msg
+ '\n(mismatch %s%%)' % (match,),
verbose=verbose, header=header,
names=('x', 'y'))
if not cond :
raise AssertionError(msg)
except ValueError:
msg = build_err_msg([x, y], err_msg, verbose=verbose, header=header,
names=('x', 'y'))
raise ValueError(msg)
def assert_array_equal(x, y, err_msg='', verbose=True):
"""
Raise an assertion if two array_like objects are not equal.
Given two array_like objects, check that the shape is equal and all
elements of these objects are equal. An exception is raised at
shape mismatch or conflicting values. In contrast to the standard usage
in numpy, NaNs are compared like numbers, no assertion is raised if
both objects have NaNs in the same positions.
The usual caution for verifying equality with floating point numbers is
advised.
Parameters
----------
x : array_like
The actual object to check.
y : array_like
The desired, expected object.
err_msg : string
The error message to be printed in case of failure.
verbose : bool
If True, the conflicting values are appended to the error message.
Raises
------
AssertionError
If actual and desired objects are not equal.
See Also
--------
assert_array_almost_equal: test objects for equality up to precision
assert_equal: tests objects for equality
Examples
--------
the first assert does not raise an exception
>>> np.testing.assert_array_equal([1.0,2.33333,np.nan],
\t\t\t[np.exp(0),2.33333, np.nan])
assert fails with numerical inprecision with floats
>>> np.testing.assert_array_equal([1.0,np.pi,np.nan],
\t\t\t[1, np.sqrt(np.pi)**2, np.nan])
...
<type 'exceptions.ValueError'>:
AssertionError:
Arrays are not equal
<BLANKLINE>
(mismatch 50.0%)
x: array([ 1. , 3.14159265, NaN])
y: array([ 1. , 3.14159265, NaN])
use assert_array_almost_equal for these cases instead
>>> np.testing.assert_array_almost_equal([1.0,np.pi,np.nan],
\t\t\t[1, np.sqrt(np.pi)**2, np.nan], decimal=15)
"""
assert_array_compare(operator.__eq__, x, y, err_msg=err_msg,
verbose=verbose, header='Arrays are not equal')
def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True):
"""
Raise an assertion if two objects are not equal up to desired precision.
The test verifies identical shapes and verifies values with
abs(desired-actual) < 0.5 * 10**(-decimal)
Given two array_like objects, check that the shape is equal and all
elements of these objects are almost equal. An exception is raised at
shape mismatch or conflicting values. In contrast to the standard usage
in numpy, NaNs are compared like numbers, no assertion is raised if
both objects have NaNs in the same positions.
Parameters
----------
x : array_like
The actual object to check.
y : array_like
The desired, expected object.
decimal : integer (decimal=6)
desired precision
err_msg : string
The error message to be printed in case of failure.
verbose : bool
If True, the conflicting values are appended to the error message.
Raises
------
AssertionError
If actual and desired are not equal up to specified precision.
See Also
--------
assert_almost_equal: simple version for comparing numbers
assert_array_equal: tests objects for equality
Examples
--------
the first assert does not raise an exception
>>> np.testing.assert_array_almost_equal([1.0,2.333,np.nan],
[1.0,2.333,np.nan])
>>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan],
\t\t\t[1.0,2.33339,np.nan], decimal=5)
...
<type 'exceptions.AssertionError'>:
AssertionError:
Arrays are not almost equal
<BLANKLINE>
(mismatch 50.0%)
x: array([ 1. , 2.33333, NaN])
y: array([ 1. , 2.33339, NaN])
>>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan],
\t\t\t[1.0,2.33333, 5], decimal=5)
<type 'exceptions.ValueError'>:
ValueError:
Arrays are not almost equal
x: array([ 1. , 2.33333, NaN])
y: array([ 1. , 2.33333, 5. ])
"""
from numpy.core import around, number, float_
from numpy.core.numerictypes import issubdtype
from numpy.core.fromnumeric import any as npany
def compare(x, y):
try:
if npany(gisinf(x)) or npany( gisinf(y)):
xinfid = gisinf(x)
yinfid = gisinf(y)
if not xinfid == yinfid:
return False
# if one item, x and y is +- inf
if x.size == y.size == 1:
return x == y
x = x[~xinfid]
y = y[~yinfid]
except (TypeError, NotImplementedError):
pass
z = abs(x-y)
if not issubdtype(z.dtype, number):
z = z.astype(float_) # handle object arrays
return around(z, decimal) <= 10.0**(-decimal)
assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose,
header='Arrays are not almost equal')
def assert_array_less(x, y, err_msg='', verbose=True):
"""
Raise an assertion if two array_like objects are not ordered by less than.
Given two array_like objects, check that the shape is equal and all
elements of the first object are strictly smaller than those of the
second object. An exception is raised at shape mismatch or incorrectly
ordered values. Shape mismatch does not raise if an object has zero
dimension. In contrast to the standard usage in numpy, NaNs are
compared, no assertion is raised if both objects have NaNs in the same
positions.
Parameters
----------
x : array_like
The smaller object to check.
y : array_like
The larger object to compare.
err_msg : string
The error message to be printed in case of failure.
verbose : bool
If True, the conflicting values are appended to the error message.
Raises
------
AssertionError
If actual and desired objects are not equal.
See Also
--------
assert_array_equal: tests objects for equality
assert_array_almost_equal: test objects for equality up to precision
Examples
--------
>>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1.1, 2.0, np.nan])
>>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1, 2.0, np.nan])
...
<type 'exceptions.ValueError'>:
Arrays are not less-ordered
(mismatch 50.0%)
x: array([ 1., 1., NaN])
y: array([ 1., 2., NaN])
>>> np.testing.assert_array_less([1.0, 4.0], 3)
...
<type 'exceptions.ValueError'>:
Arrays are not less-ordered
(mismatch 50.0%)
x: array([ 1., 4.])
y: array(3)
>>> np.testing.assert_array_less([1.0, 2.0, 3.0], [4])
...
<type 'exceptions.ValueError'>:
Arrays are not less-ordered
(shapes (3,), (1,) mismatch)
x: array([ 1., 2., 3.])
y: array([4])
"""
assert_array_compare(operator.__lt__, x, y, err_msg=err_msg,
verbose=verbose,
header='Arrays are not less-ordered')
def runstring(astr, dict):
exec astr in dict
def assert_string_equal(actual, desired):
"""
Test if two strings are equal.
If the given strings are equal, `assert_string_equal` does nothing.
If they are not equal, an AssertionError is raised, and the diff
between the strings is shown.
Parameters
----------
actual : str
The string to test for equality against the expected string.
desired : str
The expected string.
Examples
--------
>>> np.testing.assert_string_equal('abc', 'abc')
>>> np.testing.assert_string_equal('abc', 'abcd')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
...
AssertionError: Differences in strings:
- abc+ abcd? +
"""
# delay import of difflib to reduce startup time
import difflib
if not isinstance(actual, str) :
raise AssertionError(`type(actual)`)
if not isinstance(desired, str):
raise AssertionError(`type(desired)`)
if re.match(r'\A'+desired+r'\Z', actual, re.M): return
diff = list(difflib.Differ().compare(actual.splitlines(1), desired.splitlines(1)))
diff_list = []
while diff:
d1 = diff.pop(0)
if d1.startswith(' '):
continue
if d1.startswith('- '):
l = [d1]
d2 = diff.pop(0)
if d2.startswith('? '):
l.append(d2)
d2 = diff.pop(0)
if not d2.startswith('+ ') :
raise AssertionError(`d2`)
l.append(d2)
d3 = diff.pop(0)
if d3.startswith('? '):
l.append(d3)
else:
diff.insert(0, d3)
if re.match(r'\A'+d2[2:]+r'\Z', d1[2:]):
continue
diff_list.extend(l)
continue
raise AssertionError(`d1`)
if not diff_list:
return
msg = 'Differences in strings:\n%s' % (''.join(diff_list)).rstrip()
if actual != desired :
raise AssertionError(msg)
def rundocs(filename=None, raise_on_error=True):
"""
Run doctests found in the given file.
By default `rundocs` raises an AssertionError on failure.
Parameters
----------
filename : str
The path to the file for which the doctests are run.
raise_on_error : bool
Whether to raise an AssertionError when a doctest fails. Default is
True.
Notes
-----
The doctests can be run by the user/developer by adding the ``doctests``
argument to the ``test()`` call. For example, to run all tests (including
doctests) for `numpy.lib`::
>>> np.lib.test(doctests=True)
"""
import doctest, imp
if filename is None:
f = sys._getframe(1)
filename = f.f_globals['__file__']
name = os.path.splitext(os.path.basename(filename))[0]
path = [os.path.dirname(filename)]
file, pathname, description = imp.find_module(name, path)
try:
m = imp.load_module(name, file, pathname, description)
finally:
file.close()
tests = doctest.DocTestFinder().find(m)
runner = doctest.DocTestRunner(verbose=False)
msg = []
if raise_on_error:
out = lambda s: msg.append(s)
else:
out = None
for test in tests:
runner.run(test, out=out)
if runner.failures > 0 and raise_on_error:
raise AssertionError("Some doctests failed:\n%s" % "\n".join(msg))
def raises(*args,**kwargs):
nose = import_nose()
return nose.tools.raises(*args,**kwargs)
def assert_raises(*args,**kwargs):
"""
assert_raises(exception_class, callable, *args, **kwargs)
Fail unless an exception of class exception_class is thrown
by callable when invoked with arguments args and keyword
arguments kwargs. If a different type of exception is
thrown, it will not be caught, and the test case will be
deemed to have suffered an error, exactly as for an
unexpected exception.
"""
nose = import_nose()
return nose.tools.assert_raises(*args,**kwargs)
def decorate_methods(cls, decorator, testmatch=None):
"""
Apply a decorator to all methods in a class matching a regular expression.
The given decorator is applied to all public methods of `cls` that are
matched by the regular expression `testmatch`
(``testmatch.search(methodname)``). Methods that are private, i.e. start
with an underscore, are ignored.
Parameters
----------
cls : class
Class whose methods to decorate.
decorator : function
Decorator to apply to methods
testmatch : compiled regexp or str, optional
The regular expression. Default value is None, in which case the
nose default (``re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)``)
is used.
If `testmatch` is a string, it is compiled to a regular expression
first.
"""
if testmatch is None:
testmatch = re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)
else:
testmatch = re.compile(testmatch)
cls_attr = cls.__dict__
# delayed import to reduce startup time
from inspect import isfunction
methods = filter(isfunction, cls_attr.values())
for function in methods:
try:
if hasattr(function, 'compat_func_name'):
funcname = function.compat_func_name
else:
funcname = function.__name__
except AttributeError:
# not a function
continue
if testmatch.search(funcname) and not funcname.startswith('_'):
setattr(cls, funcname, decorator(function))
return
def measure(code_str,times=1,label=None):
"""
Return elapsed time for executing code in the namespace of the caller.
The supplied code string is compiled with the Python builtin ``compile``.
The precision of the timing is 10 milli-seconds. If the code will execute
fast on this timescale, it can be executed many times to get reasonable
timing accuracy.
Parameters
----------
code_str : str
The code to be timed.
times : int, optional
The number of times the code is executed. Default is 1. The code is
only compiled once.
label : str, optional
A label to identify `code_str` with. This is passed into ``compile``
as the second argument (for run-time error messages).
Returns
-------
elapsed : float
Total elapsed time in seconds for executing `code_str` `times` times.
Examples
--------
>>> etime = np.testing.measure('for i in range(1000): np.sqrt(i**2)',
... times=times)
>>> print "Time for a single execution : ", etime / times, "s"
Time for a single execution : 0.005 s
"""
frame = sys._getframe(1)
locs,globs = frame.f_locals,frame.f_globals
code = compile(code_str,
'Test name: %s ' % label,
'exec')
i = 0
elapsed = jiffies()
while i < times:
i += 1
exec code in globs,locs
elapsed = jiffies() - elapsed
return 0.01*elapsed
def _assert_valid_refcount(op):
"""
Check that ufuncs don't mishandle refcount of object `1`.
Used in a few regression tests.
"""
import numpy as np
a = np.arange(100 * 100)
b = np.arange(100*100).reshape(100, 100)
c = b
i = 1
rc = sys.getrefcount(i)
for j in range(15):
d = op(b,c)
assert(sys.getrefcount(i) >= rc)
def assert_allclose(actual, desired, rtol=1e-7, atol=0,
err_msg='', verbose=True):
"""
Raise an assertion if two objects are not equal up to desired tolerance.
The test is equivalent to ``allclose(actual, desired, rtol, atol)``
Parameters
----------
actual : array_like
Array obtained.
desired : array_like
Array desired
rtol : float, optional
Relative tolerance
atol : float, optional
Absolute tolerance
err_msg : string
The error message to be printed in case of failure.
verbose : bool
If True, the conflicting values are appended to the error message.
Raises
------
AssertionError
If actual and desired are not equal up to specified precision.
"""
import numpy as np
def compare(x, y):
return np.allclose(x, y, rtol=rtol, atol=atol)
actual, desired = np.asanyarray(actual), np.asanyarray(desired)
header = 'Not equal to tolerance rtol=%g, atol=%g' % (rtol, atol)
assert_array_compare(compare, actual, desired, err_msg=str(err_msg),
verbose=verbose, header=header)
def assert_array_almost_equal_nulp(x, y, nulp=1):
"""
Compare two arrays relatively to their spacing.
This is a relatively robust method to compare two arrays whose amplitude
is variable.
Parameters
----------
x, y : array_like
Input arrays.
nulp : int, optional
The maximum number of unit in the last place for tolerance (see Notes).
Default is 1.
Returns
-------
None
Raises
------
AssertionError
If the spacing between `x` and `y` for one or more elements is larger
than `nulp`.
See Also
--------
assert_array_max_ulp : Check that all items of arrays differ in at most
N Units in the Last Place.
spacing : Return the distance between x and the nearest adjacent number.
Notes
-----
An assertion is raised if the following condition is not met::
abs(x - y) <= nulps * spacing(max(abs(x), abs(y)))
Examples
--------
>>> x = np.array([1., 1e-10, 1e-20])
>>> eps = np.finfo(x.dtype).eps
>>> np.testing.assert_array_almost_equal_nulp(x, x*eps/2 + x)
>>> np.testing.assert_array_almost_equal_nulp(x, x*eps + x)
------------------------------------------------------------
Traceback (most recent call last):
...
AssertionError: X and Y are not equal to 1 ULP (max is 2)
"""
import numpy as np
ax = np.abs(x)
ay = np.abs(y)
ref = nulp * np.spacing(np.where(ax > ay, ax, ay))
if not np.all(np.abs(x-y) <= ref):
if np.iscomplexobj(x) or np.iscomplexobj(y):
msg = "X and Y are not equal to %d ULP" % nulp
else:
max_nulp = np.max(nulp_diff(x, y))
msg = "X and Y are not equal to %d ULP (max is %g)" % (nulp, max_nulp)
raise AssertionError(msg)
def assert_array_max_ulp(a, b, maxulp=1, dtype=None):
"""
Check that all items of arrays differ in at most N Units in the Last Place.
Parameters
----------
a, b : array_like
Input arrays to be compared.
maxulp : int, optional
The maximum number of units in the last place that elements of `a` and
`b` can differ. Default is 1.
dtype : dtype, optional
Data-type to convert `a` and `b` to if given. Default is None.
Returns
-------
ret : ndarray
Array containing number of representable floating point numbers between
items in `a` and `b`.
Raises
------
AssertionError
If one or more elements differ by more than `maxulp`.
See Also
--------
assert_array_almost_equal_nulp : Compare two arrays relatively to their
spacing.
Examples
--------
>>> a = np.linspace(0., 1., 100)
>>> res = np.testing.assert_array_max_ulp(a, np.arcsin(np.sin(a)))
"""
import numpy as np
ret = nulp_diff(a, b, dtype)
if not np.all(ret <= maxulp):
raise AssertionError("Arrays are not almost equal up to %g ULP" % \
maxulp)
return ret
def nulp_diff(x, y, dtype=None):
"""For each item in x and y, eeturn the number of representable floating
points between them.
Parameters
----------
x : array_like
first input array
y : array_like
second input array
Returns
-------
nulp: array_like
number of representable floating point numbers between each item in x
and y.
Examples
--------
    # By definition, epsilon is the smallest number such that 1 + eps != 1, so
# there should be exactly one ULP between 1 and 1 + eps
>>> nulp_diff(1, 1 + np.finfo(x.dtype).eps)
1.0
"""
import numpy as np
if dtype:
x = np.array(x, dtype=dtype)
y = np.array(y, dtype=dtype)
else:
x = np.array(x)
y = np.array(y)
t = np.common_type(x, y)
if np.iscomplexobj(x) or np.iscomplexobj(y):
raise NotImplementedError("_nulp not implemented for complex array")
x = np.array(x, dtype=t)
y = np.array(y, dtype=t)
if not x.shape == y.shape:
raise ValueError("x and y do not have the same shape: %s - %s" % \
(x.shape, y.shape))
def _diff(rx, ry, vdt):
diff = np.array(rx-ry, dtype=vdt)
return np.abs(diff)
rx = integer_repr(x)
ry = integer_repr(y)
return _diff(rx, ry, t)
def _integer_repr(x, vdt, comp):
# Reinterpret binary representation of the float as sign-magnitude:
# take into account two-complement representation
# See also
# http://www.cygnus-software.com/papers/comparingfloats/comparingfloats.htm
rx = x.view(vdt)
if not (rx.size == 1):
rx[rx < 0] = comp - rx[rx<0]
else:
if rx < 0:
rx = comp - rx
return rx
def integer_repr(x):
"""Return the signed-magnitude interpretation of the binary representation of
x."""
import numpy as np
if x.dtype == np.float32:
return _integer_repr(x, np.int32, np.int32(-2**31))
elif x.dtype == np.float64:
return _integer_repr(x, np.int64, np.int64(-2**63))
else:
raise ValueError("Unsupported dtype %s" % x.dtype)
# The following two classes are copied from python 2.6 warnings module (context
# manager)
class WarningMessage(object):
"""
Holds the result of a single showwarning() call.
Notes
-----
`WarningMessage` is copied from the Python 2.6 warnings module,
so it can be used in NumPy with older Python versions.
"""
_WARNING_DETAILS = ("message", "category", "filename", "lineno", "file",
"line")
def __init__(self, message, category, filename, lineno, file=None,
line=None):
local_values = locals()
for attr in self._WARNING_DETAILS:
setattr(self, attr, local_values[attr])
if category:
self._category_name = category.__name__
else:
self._category_name = None
def __str__(self):
return ("{message : %r, category : %r, filename : %r, lineno : %s, "
"line : %r}" % (self.message, self._category_name,
self.filename, self.lineno, self.line))
class WarningManager:
"""
A context manager that copies and restores the warnings filter upon
exiting the context.
The 'record' argument specifies whether warnings should be captured by a
custom implementation of ``warnings.showwarning()`` and be appended to a
list returned by the context manager. Otherwise None is returned by the
context manager. The objects appended to the list are arguments whose
attributes mirror the arguments to ``showwarning()``.
The 'module' argument is to specify an alternative module to the module
named 'warnings' and imported under that name. This argument is only useful
when testing the warnings module itself.
Notes
-----
`WarningManager` is a copy of the ``catch_warnings`` context manager
from the Python 2.6 warnings module, with slight modifications.
It is copied so it can be used in NumPy with older Python versions.
"""
def __init__(self, record=False, module=None):
self._record = record
if module is None:
self._module = sys.modules['warnings']
else:
self._module = module
self._entered = False
def __enter__(self):
if self._entered:
raise RuntimeError("Cannot enter %r twice" % self)
self._entered = True
self._filters = self._module.filters
self._module.filters = self._filters[:]
self._showwarning = self._module.showwarning
if self._record:
log = []
def showwarning(*args, **kwargs):
log.append(WarningMessage(*args, **kwargs))
self._module.showwarning = showwarning
return log
else:
return None
    def __exit__(self, *exc_info):
if not self._entered:
raise RuntimeError("Cannot exit %r without entering first" % self)
self._module.filters = self._filters
self._module.showwarning = self._showwarning
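# Typical use of WarningManager (illustrative sketch, mirroring assert_warns()
# below): enter with record=True to capture warnings, trigger one, inspect the
# returned log, then restore the warnings state on exit.
#
#   ctx = WarningManager(record=True)
#   log = ctx.__enter__()
#   warnings.simplefilter('always')
#   warnings.warn("example", UserWarning)
#   assert log[0].category is UserWarning
#   ctx.__exit__()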
def assert_warns(warning_class, func, *args, **kw):
"""
Fail unless the given callable throws the specified warning.
A warning of class warning_class should be thrown by the callable when
invoked with arguments args and keyword arguments kwargs.
If a different type of warning is thrown, it will not be caught, and the
test case will be deemed to have suffered an error.
Parameters
----------
warning_class : class
The class defining the warning that `func` is expected to throw.
func : callable
The callable to test.
\\*args : Arguments
Arguments passed to `func`.
\\*\\*kwargs : Kwargs
Keyword arguments passed to `func`.
Returns
-------
None
"""
# XXX: once we may depend on python >= 2.6, this can be replaced by the
# warnings module context manager.
ctx = WarningManager(record=True)
l = ctx.__enter__()
warnings.simplefilter('always')
try:
func(*args, **kw)
if not len(l) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
if not l[0].category is warning_class:
raise AssertionError("First warning for %s is not a " \
"%s( is %s)" % (func.__name__, warning_class, l[0]))
finally:
ctx.__exit__()
| gpl-3.0 | 8,794,330,758,132,366,000 | 31.819068 | 88 | 0.590806 | false |
auready/django | tests/gis_tests/tests.py | 22 | 4106 | import unittest
from django.core.exceptions import ImproperlyConfigured
from django.db import ProgrammingError
try:
from django.contrib.gis.db.backends.postgis.operations import PostGISOperations
HAS_POSTGRES = True
except ImportError:
HAS_POSTGRES = False
except ImproperlyConfigured as e:
# If psycopg is installed but not geos, the import path hits
# django.contrib.gis.geometry.backend which will "helpfully" convert
# an ImportError into an ImproperlyConfigured.
# Here, we make sure we're only catching this specific case and not another
# ImproperlyConfigured one.
if e.args and e.args[0].startswith('Could not import user-defined GEOMETRY_BACKEND'):
HAS_POSTGRES = False
else:
raise
if HAS_POSTGRES:
class FakeConnection:
def __init__(self):
self.settings_dict = {
'NAME': 'test',
}
class FakePostGISOperations(PostGISOperations):
def __init__(self, version=None):
self.version = version
self.connection = FakeConnection()
def _get_postgis_func(self, func):
if func == 'postgis_lib_version':
if self.version is None:
raise ProgrammingError
else:
return self.version
elif func == 'version':
pass
else:
raise NotImplementedError('This function was not expected to be called')
@unittest.skipUnless(HAS_POSTGRES, "The psycopg2 driver is needed for these tests")
class TestPostGISVersionCheck(unittest.TestCase):
"""
The PostGIS version check parses correctly the version numbers
"""
def test_get_version(self):
expect = '1.0.0'
ops = FakePostGISOperations(expect)
actual = ops.postgis_lib_version()
self.assertEqual(expect, actual)
def test_version_classic_tuple(self):
expect = ('1.2.3', 1, 2, 3)
ops = FakePostGISOperations(expect[0])
actual = ops.postgis_version_tuple()
self.assertEqual(expect, actual)
def test_version_dev_tuple(self):
expect = ('1.2.3dev', 1, 2, 3)
ops = FakePostGISOperations(expect[0])
actual = ops.postgis_version_tuple()
self.assertEqual(expect, actual)
def test_valid_version_numbers(self):
versions = [
('1.3.0', 1, 3, 0),
('2.1.1', 2, 1, 1),
('2.2.0dev', 2, 2, 0),
]
for version in versions:
ops = FakePostGISOperations(version[0])
actual = ops.spatial_version
self.assertEqual(version[1:], actual)
def test_invalid_version_numbers(self):
versions = ['nope', '123']
for version in versions:
ops = FakePostGISOperations(version)
with self.assertRaises(Exception):
ops.spatial_version
def test_no_version_number(self):
ops = FakePostGISOperations()
with self.assertRaises(ImproperlyConfigured):
ops.spatial_version
def test_version_dependent_funcs(self):
"""
Resolve names of functions renamed and deprecated in PostGIS 2.2.0
depending on PostGIS version.
Remove when dropping support for PostGIS 2.1.
"""
ops = FakePostGISOperations('2.2.0')
self.assertEqual(ops.spatial_function_name('DistanceSphere'), 'ST_DistanceSphere')
self.assertEqual(ops.spatial_function_name('DistanceSpheroid'), 'ST_DistanceSpheroid')
self.assertEqual(ops.spatial_function_name('LengthSpheroid'), 'ST_LengthSpheroid')
self.assertEqual(ops.spatial_function_name('MemSize'), 'ST_MemSize')
ops = FakePostGISOperations('2.1.0')
self.assertEqual(ops.spatial_function_name('DistanceSphere'), 'ST_distance_sphere')
self.assertEqual(ops.spatial_function_name('DistanceSpheroid'), 'ST_distance_spheroid')
self.assertEqual(ops.spatial_function_name('LengthSpheroid'), 'ST_length_spheroid')
self.assertEqual(ops.spatial_function_name('MemSize'), 'ST_mem_size')
| bsd-3-clause | -1,522,743,656,720,012,300 | 35.660714 | 95 | 0.632976 | false |
Nosferatul/coala | tests/results/AbsolutePositionTest.py | 35 | 2511 | import unittest
from coalib.results.AbsolutePosition import AbsolutePosition, calc_line_col
from coalib.misc.Constants import COMPLEX_TEST_STRING
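# Illustrative reminder (not part of the original tests): calc_line_col takes
# the file's lines and an absolute character position and returns a 1-based
# (line, column) pair, e.g. position 0 of ('a\n', 'b\n') maps to (1, 1).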
class AbsolutePositionTest(unittest.TestCase):
def test_calc_line_col_newlines(self):
# no newlines
text = ("find position of 'z'",)
z_pos = text[0].find('z')
self.assertEqual(
calc_line_col(text, z_pos), (1, z_pos + 1))
# newline
text = ('find position of\n', "'z'",)
string_text = ''.join(text)
z_pos = string_text.find('z')
self.assertEqual(calc_line_col(text, z_pos), (2, 2))
def test_calc_line_col_unicode(self):
uni_pos = COMPLEX_TEST_STRING.find('↑')
self.assertEqual(
calc_line_col((COMPLEX_TEST_STRING,), uni_pos),
(1, uni_pos + 1))
def test_calc_line_col_rawstrings(self):
for raw in [(r'a\b',), (r'a\n',), ('a\\n',)]:
pos = raw[0].find(raw[0][-1])
self.assertEqual(calc_line_col(raw, pos), (1, 3))
def test_calc_line_col_extremes(self):
# End of Line
        text = ('First Line\n', 'End of second line z')
string_text = ''.join(text)
z_pos = string_text.find('z')
self.assertEqual(calc_line_col(text, z_pos),
(2, len(text[1])))
# Out of text
with self.assertRaises(ValueError):
text = ('Some line')
calc_line_col(text, 50)
# start of line
        text = ('First Line\n', 'zEnd of second line')
string_text = ''.join(text)
z_pos = string_text.find('z')
self.assertEqual(calc_line_col(text, z_pos), (2, 1))
def test_property(self):
uut = AbsolutePosition(('1', '2'), 1)
self.assertEqual(uut.position, 1)
self.assertEqual(uut.line, 2)
self.assertEqual(uut.column, 1)
uut = AbsolutePosition()
self.assertEqual(uut.position, None)
self.assertEqual(uut.line, None)
self.assertEqual(uut.column, None)
uut = AbsolutePosition(('a\n', 'b\n'), 0)
self.assertEqual(uut.position, 0)
self.assertEqual(uut.line, 1)
self.assertEqual(uut.column, 1)
def test_instantiation(self):
with self.assertRaises(ValueError):
uut = AbsolutePosition((), 0)
uut = AbsolutePosition(position=5)
self.assertEqual(uut.position, 5)
self.assertEqual(uut.line, None)
self.assertEqual(uut.column, None)
| agpl-3.0 | -2,547,792,976,751,387,000 | 32.453333 | 75 | 0.567955 | false |
40223225/2015-cdb_g3-40223225 | static/Brython3.1.1-20150328-091302/Lib/fnmatch.py | 894 | 3163 | """Filename matching with shell patterns.
fnmatch(FILENAME, PATTERN) matches according to the local convention.
fnmatchcase(FILENAME, PATTERN) always takes case in account.
The functions operate by translating the pattern into a regular
expression. They cache the compiled regular expressions for speed.
The function translate(PATTERN) returns a regular expression
corresponding to PATTERN. (It does not compile it.)
"""
import os
import posixpath
import re
import functools
__all__ = ["filter", "fnmatch", "fnmatchcase", "translate"]
def fnmatch(name, pat):
"""Test whether FILENAME matches PATTERN.
Patterns are Unix shell style:
* matches everything
? matches any single character
[seq] matches any character in seq
[!seq] matches any char not in seq
An initial period in FILENAME is not special.
Both FILENAME and PATTERN are first case-normalized
if the operating system requires it.
If you don't want this, use fnmatchcase(FILENAME, PATTERN).
"""
name = os.path.normcase(name)
pat = os.path.normcase(pat)
return fnmatchcase(name, pat)
@functools.lru_cache(maxsize=256, typed=True)
def _compile_pattern(pat):
if isinstance(pat, bytes):
pat_str = str(pat, 'ISO-8859-1')
res_str = translate(pat_str)
res = bytes(res_str, 'ISO-8859-1')
else:
res = translate(pat)
return re.compile(res).match
def filter(names, pat):
"""Return the subset of the list NAMES that match PAT."""
result = []
pat = os.path.normcase(pat)
match = _compile_pattern(pat)
if os.path is posixpath:
# normcase on posix is NOP. Optimize it away from the loop.
for name in names:
if match(name):
result.append(name)
else:
for name in names:
if match(os.path.normcase(name)):
result.append(name)
return result
def fnmatchcase(name, pat):
"""Test whether FILENAME matches PATTERN, including case.
This is a version of fnmatch() which doesn't case-normalize
its arguments.
"""
match = _compile_pattern(pat)
return match(name) is not None
def translate(pat):
"""Translate a shell PATTERN to a regular expression.
There is no way to quote meta-characters.
"""
i, n = 0, len(pat)
res = ''
while i < n:
c = pat[i]
i = i+1
if c == '*':
res = res + '.*'
elif c == '?':
res = res + '.'
elif c == '[':
j = i
if j < n and pat[j] == '!':
j = j+1
if j < n and pat[j] == ']':
j = j+1
while j < n and pat[j] != ']':
j = j+1
if j >= n:
res = res + '\\['
else:
stuff = pat[i:j].replace('\\','\\\\')
i = j+1
if stuff[0] == '!':
stuff = '^' + stuff[1:]
elif stuff[0] == '^':
stuff = '\\' + stuff
res = '%s[%s]' % (res, stuff)
else:
res = res + re.escape(c)
return res + '\Z(?ms)'
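# Hedged usage sketch (added; not part of the upstream module): a tiny self-test
# of the helpers above; file names and patterns are made up for illustration.
if __name__ == "__main__":
    assert fnmatch("data.txt", "*.txt")
    assert not fnmatchcase("data.TXT", "*.txt")       # never case-normalizes
    print(filter(["a.py", "b.txt", "c.py"], "*.py"))  # -> ['a.py', 'c.py']
    print(translate("?[!x].py"))                      # regex source for the pattern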
| gpl-3.0 | 4,629,086,278,621,916,000 | 28.018349 | 69 | 0.555801 | false |
alsrgv/tensorflow | tensorflow/contrib/learn/python/learn/utils/export.py | 28 | 13975 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Export utilities (deprecated).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.session_bundle import exporter
from tensorflow.contrib.session_bundle import gc
from tensorflow.python.client import session as tf_session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import saver as tf_saver
from tensorflow.python.training import training_util
@deprecated('2017-03-25', 'Please use Estimator.export_savedmodel() instead.')
def _get_first_op_from_collection(collection_name):
"""Get first element from the collection."""
elements = ops.get_collection(collection_name)
if elements is not None:
if elements:
return elements[0]
return None
@deprecated('2017-03-25', 'Please use Estimator.export_savedmodel() instead.')
def _get_saver():
"""Lazy init and return saver."""
saver = _get_first_op_from_collection(ops.GraphKeys.SAVERS)
if saver is not None:
if saver:
saver = saver[0]
else:
saver = None
if saver is None and variables.global_variables():
saver = tf_saver.Saver()
ops.add_to_collection(ops.GraphKeys.SAVERS, saver)
return saver
@deprecated('2017-03-25', 'Please use Estimator.export_savedmodel() instead.')
def _export_graph(graph, saver, checkpoint_path, export_dir,
default_graph_signature, named_graph_signatures,
exports_to_keep):
"""Exports graph via session_bundle, by creating a Session."""
with graph.as_default():
with tf_session.Session('') as session:
variables.local_variables_initializer()
lookup_ops.tables_initializer()
saver.restore(session, checkpoint_path)
export = exporter.Exporter(saver)
export.init(
init_op=control_flow_ops.group(
variables.local_variables_initializer(),
lookup_ops.tables_initializer()),
default_graph_signature=default_graph_signature,
named_graph_signatures=named_graph_signatures,
assets_collection=ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS))
return export.export(export_dir, training_util.get_global_step(),
session, exports_to_keep=exports_to_keep)
@deprecated('2017-03-25',
'signature_fns are deprecated. For canned Estimators they are no '
'longer needed. For custom Estimators, please return '
'output_alternatives from your model_fn via ModelFnOps.')
def generic_signature_fn(examples, unused_features, predictions):
"""Creates generic signature from given examples and predictions.
This is needed for backward compatibility with default behavior of
export_estimator.
Args:
examples: `Tensor`.
unused_features: `dict` of `Tensor`s.
predictions: `Tensor` or `dict` of `Tensor`s.
Returns:
Tuple of default signature and empty named signatures.
Raises:
ValueError: If examples is `None`.
"""
if examples is None:
raise ValueError('examples cannot be None when using this signature fn.')
tensors = {'inputs': examples}
if not isinstance(predictions, dict):
predictions = {'outputs': predictions}
tensors.update(predictions)
default_signature = exporter.generic_signature(tensors)
return default_signature, {}
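# Hedged usage sketch (added; not part of TensorFlow): how a caller might build a
# generic signature with the helper above. The placeholder and prediction names
# are assumptions for illustration only.
#
#   examples = array_ops.placeholder(dtypes.string, shape=[None],
#                                    name='input_example_tensor')
#   default_sig, named_sigs = generic_signature_fn(
#       examples, None, {'scores': scores_tensor})
#   # default_sig maps 'inputs' -> examples and 'scores' -> scores_tensor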
@deprecated('2017-03-25',
'signature_fns are deprecated. For canned Estimators they are no '
'longer needed. For custom Estimators, please return '
'output_alternatives from your model_fn via ModelFnOps.')
def classification_signature_fn(examples, unused_features, predictions):
"""Creates classification signature from given examples and predictions.
Args:
examples: `Tensor`.
unused_features: `dict` of `Tensor`s.
predictions: `Tensor` or dict of tensors that contains the classes tensor
as in {'classes': `Tensor`}.
Returns:
Tuple of default classification signature and empty named signatures.
Raises:
ValueError: If examples is `None`.
"""
if examples is None:
raise ValueError('examples cannot be None when using this signature fn.')
if isinstance(predictions, dict):
default_signature = exporter.classification_signature(
examples, classes_tensor=predictions['classes'])
else:
default_signature = exporter.classification_signature(
examples, classes_tensor=predictions)
return default_signature, {}
@deprecated('2017-03-25',
'signature_fns are deprecated. For canned Estimators they are no '
'longer needed. For custom Estimators, please return '
'output_alternatives from your model_fn via ModelFnOps.')
def classification_signature_fn_with_prob(
examples, unused_features, predictions):
"""Classification signature from given examples and predicted probabilities.
Args:
examples: `Tensor`.
unused_features: `dict` of `Tensor`s.
predictions: `Tensor` of predicted probabilities or dict that contains the
probabilities tensor as in {'probabilities', `Tensor`}.
Returns:
Tuple of default classification signature and empty named signatures.
Raises:
ValueError: If examples is `None`.
"""
if examples is None:
raise ValueError('examples cannot be None when using this signature fn.')
if isinstance(predictions, dict):
default_signature = exporter.classification_signature(
examples, scores_tensor=predictions['probabilities'])
else:
default_signature = exporter.classification_signature(
examples, scores_tensor=predictions)
return default_signature, {}
@deprecated('2017-03-25',
'signature_fns are deprecated. For canned Estimators they are no '
'longer needed. For custom Estimators, please return '
'output_alternatives from your model_fn via ModelFnOps.')
def regression_signature_fn(examples, unused_features, predictions):
"""Creates regression signature from given examples and predictions.
Args:
examples: `Tensor`.
unused_features: `dict` of `Tensor`s.
predictions: `Tensor`.
Returns:
Tuple of default regression signature and empty named signatures.
Raises:
ValueError: If examples is `None`.
"""
if examples is None:
raise ValueError('examples cannot be None when using this signature fn.')
default_signature = exporter.regression_signature(
input_tensor=examples, output_tensor=predictions)
return default_signature, {}
@deprecated('2017-03-25',
'signature_fns are deprecated. For canned Estimators they are no '
'longer needed. For custom Estimators, please return '
'output_alternatives from your model_fn via ModelFnOps.')
def logistic_regression_signature_fn(examples, unused_features, predictions):
"""Creates logistic regression signature from given examples and predictions.
Args:
examples: `Tensor`.
unused_features: `dict` of `Tensor`s.
predictions: `Tensor` of shape [batch_size, 2] of predicted probabilities or
dict that contains the probabilities tensor as in
{'probabilities', `Tensor`}.
Returns:
Tuple of default regression signature and named signature.
Raises:
ValueError: If examples is `None`.
"""
if examples is None:
raise ValueError('examples cannot be None when using this signature fn.')
if isinstance(predictions, dict):
predictions_tensor = predictions['probabilities']
else:
predictions_tensor = predictions
# predictions should have shape [batch_size, 2] where first column is P(Y=0|x)
# while second column is P(Y=1|x). We are only interested in the second
# column for inference.
predictions_shape = predictions_tensor.get_shape()
predictions_rank = len(predictions_shape)
if predictions_rank != 2:
logging.fatal(
'Expected predictions to have rank 2, but received predictions with '
'rank: {} and shape: {}'.format(predictions_rank, predictions_shape))
if predictions_shape[1] != 2:
logging.fatal(
'Expected predictions to have 2nd dimension: 2, but received '
'predictions with 2nd dimension: {} and shape: {}. Did you mean to use '
'regression_signature_fn or classification_signature_fn_with_prob '
'instead?'.format(predictions_shape[1], predictions_shape))
positive_predictions = predictions_tensor[:, 1]
default_signature = exporter.regression_signature(
input_tensor=examples, output_tensor=positive_predictions)
return default_signature, {}
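# Hedged note (added): logistic_regression_signature_fn above expects predictions
# of shape [batch_size, 2], where column 0 is P(Y=0|x) and column 1 is P(Y=1|x);
# only column 1 is exported. A minimal sketch (names assumed):
#
#   probabilities = array_ops.placeholder(dtypes.float32, shape=[None, 2])
#   default_sig, _ = logistic_regression_signature_fn(examples, None, probabilities)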
# pylint: disable=protected-access
@deprecated('2017-03-25', 'Please use Estimator.export_savedmodel() instead.')
def _default_input_fn(estimator, examples):
"""Creates default input parsing using Estimator's feature signatures."""
return estimator._get_feature_ops_from_example(examples)
@deprecated('2016-09-23', 'Please use Estimator.export_savedmodel() instead.')
def export_estimator(estimator,
export_dir,
signature_fn=None,
input_fn=_default_input_fn,
default_batch_size=1,
exports_to_keep=None):
"""Deprecated, please use Estimator.export_savedmodel()."""
_export_estimator(estimator=estimator,
export_dir=export_dir,
signature_fn=signature_fn,
input_fn=input_fn,
default_batch_size=default_batch_size,
exports_to_keep=exports_to_keep)
@deprecated('2017-03-25', 'Please use Estimator.export_savedmodel() instead.')
def _export_estimator(estimator,
export_dir,
signature_fn,
input_fn,
default_batch_size,
exports_to_keep,
input_feature_key=None,
use_deprecated_input_fn=True,
prediction_key=None,
checkpoint_path=None):
if use_deprecated_input_fn:
input_fn = input_fn or _default_input_fn
elif input_fn is None:
raise ValueError('input_fn must be defined.')
# If checkpoint_path is specified, use the specified checkpoint path.
checkpoint_path = (checkpoint_path or
checkpoint_management.latest_checkpoint(
estimator._model_dir))
with ops.Graph().as_default() as g:
training_util.create_global_step(g)
if use_deprecated_input_fn:
examples = array_ops.placeholder(dtype=dtypes.string,
shape=[default_batch_size],
name='input_example_tensor')
features = input_fn(estimator, examples)
else:
features, _ = input_fn()
examples = None
if input_feature_key is not None:
examples = features.pop(input_feature_key)
if (not features) and (examples is None):
raise ValueError('Either features or examples must be defined.')
predictions = estimator._get_predict_ops(features).predictions
if prediction_key is not None:
predictions = predictions[prediction_key]
# Explicit signature_fn takes priority
if signature_fn:
default_signature, named_graph_signatures = signature_fn(examples,
features,
predictions)
else:
try:
# Some estimators provide a signature function.
# TODO(zakaria): check if the estimator has this function,
# raise helpful error if not
signature_fn = estimator._create_signature_fn()
default_signature, named_graph_signatures = (
signature_fn(examples, features, predictions))
except AttributeError:
logging.warn(
'Change warning: `signature_fn` will be required after'
'2016-08-01.\n'
'Using generic signatures for now. To maintain this behavior, '
'pass:\n'
' signature_fn=export.generic_signature_fn\n'
'Also consider passing a regression or classification signature; '
'see cl/126430915 for an example.')
default_signature, named_graph_signatures = generic_signature_fn(
examples, features, predictions)
if exports_to_keep is not None:
exports_to_keep = gc.largest_export_versions(exports_to_keep)
return _export_graph(
g,
_get_saver(),
checkpoint_path,
export_dir,
default_graph_signature=default_signature,
named_graph_signatures=named_graph_signatures,
exports_to_keep=exports_to_keep)
# pylint: enable=protected-access
| apache-2.0 | 2,058,942,878,412,650,200 | 37.711911 | 93 | 0.67585 | false |
hauxir/OpenBazaar-Server | daemon.py | 4 | 3692 | __author__ = 'chris'
import sys, os, time, atexit
from signal import SIGTERM
class Daemon(object):
"""
A generic daemon class.
Usage: subclass the Daemon class and override the run() method
"""
# pylint: disable=file-builtin
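    # Hedged usage sketch (added; not part of the original module). The intended
    # subclass/start/stop pattern; the class name and pid-file path below are
    # assumptions, not part of this codebase.
    #
    #   class MyDaemon(Daemon):
    #       def run(self, *args):
    #           while True:
    #               time.sleep(1)
    #
    #   if __name__ == "__main__":
    #       d = MyDaemon('/tmp/mydaemon.pid')
    #       {'start': d.start, 'stop': d.stop, 'restart': d.restart}[sys.argv[1]]()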
def __init__(self, pidfile, stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'):
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.pidfile = pidfile
def daemonize(self):
"""
do the UNIX double-fork magic, see Stevens' "Advanced
Programming in the UNIX Environment" for details (ISBN 0201563177)
http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
"""
try:
pid = os.fork()
if pid > 0:
# exit first parent
sys.exit(0)
except OSError, e:
sys.stderr.write("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# decouple from parent environment
os.chdir("/")
os.setsid()
os.umask(0)
# do second fork
try:
pid = os.fork()
if pid > 0:
# exit from second parent
sys.exit(0)
except OSError, e:
sys.stderr.write("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# redirect standard file descriptors
sys.stdout.flush()
sys.stderr.flush()
si = file(self.stdin, 'r')
so = file(self.stdout, 'a+')
se = file(self.stderr, 'a+', 0)
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
# write pidfile
atexit.register(self.delpid)
pid = str(os.getpid())
file(self.pidfile, 'w+').write("%s\n" % pid)
def delpid(self):
os.remove(self.pidfile)
def start(self, *args):
"""
Start the daemon
"""
# Check for a pidfile to see if the daemon already runs
try:
pf = file(self.pidfile, 'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
if pid:
message = "pidfile %s already exist. Daemon already running?\n"
sys.stderr.write(message % self.pidfile)
sys.exit(1)
# Start the daemon
self.daemonize()
self.run(*args)
def stop(self):
"""
Stop the daemon
"""
# Get the pid from the pidfile
try:
pf = file(self.pidfile, 'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
if not pid:
message = "pidfile %s does not exist. Daemon not running?\n"
sys.stderr.write(message % self.pidfile)
return # not an error in a restart
# Try killing the daemon process
try:
while 1:
os.kill(pid, SIGTERM)
time.sleep(0.1)
except OSError, err:
err = str(err)
if err.find("No such process") > 0:
if os.path.exists(self.pidfile):
os.remove(self.pidfile)
else:
print str(err)
sys.exit(1)
def restart(self):
"""
Restart the daemon
"""
self.stop()
self.start()
def run(self, *args):
"""
You should override this method when you subclass Daemon. It will be called after the process has been
daemonized by start() or restart().
"""
| mit | -2,168,159,017,127,064,800 | 27.620155 | 110 | 0.504875 | false |
kernsuite-debian/lofar | LCU/StationTest/test/hbatest/determinepeak.py | 1 | 4055 | """ script for determing the peak in the spectrum
Andre 10 July 2009
Usage python3 ./determinepeak.py [# of RCUs]
"""
# INIT
import array
import operator
import os
import time
import sys
import math
import numpy
# Read directory with the files to process
def open_dir(dirname) :
files = list(filter(os.path.isfile, os.listdir('.')))
# files.sort(key=lambda x: os.path.getmtime(x))
return files
def rm_files(dir_name, file) :
cmdstr = 'rm ' + file
os.popen3(cmdstr)
return
def rec_stat(dirname, num_rcu) :
os.popen("rspctl --statistics --duration=10 --integration=10 --select=0:" + str(num_rcu - 1) + " 2>/dev/null")
return
# Open file for processing
def open_file(files, file_nr) :
# check if file is data file, no junk
if files[file_nr][-3:] == 'dat':
file_name = files[file_nr]
fileinfo = os.stat(file_name)
size = int(fileinfo.st_size)
f = open(file_name, 'rb')
max_frames = size / (512 * 8)
frames_to_process = max_frames
rcu_nr = int(files[file_nr][-6:-4])
# print 'File nr ' + str(file_nr) + ' RCU nr ' + str(rcu_nr) + ' ' + files[file_nr][-6:-4]
else :
frames_to_process = 0
f = open(files[file_nr], 'rb')
rcu_nr = 0
return f, frames_to_process, rcu_nr
# Read single frame from file
def read_frame(f):
sst_data = array.array('d')
sst_data.fromfile(f, 512)
sst_data = sst_data.tolist()
return sst_data
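# (Added note) Each statistics frame read above holds 512 float64 values, one
# power value per subband, which is why open_file() computes the frame count as
# size / (512 * 8) bytes.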
# switch on HBA tiles gently
def switchon_hba() :
try:
os.popen3("rspctl --rcumode=5 --sel=0:31")
time.sleep(1)
os.popen3("rspctl --rcumode=5 --sel=32:63")
time.sleep(1)
os.popen3("rspctl --rcumode=5 --sel=64:95")
time.sleep(1)
os.popen3("rspctl --rcumode=5 --sel=96:127")
time.sleep(1)
os.popen3("rspctl --rcumode=5 --sel=128:159")
time.sleep(1)
os.popen3("rspctl --rcumode=5 --sel=160:191")
time.sleep(1)
except:
print("NL station")
os.popen("rspctl --rcuenable=1")
return
# Main loop
def main() :
sub_time = []
sub_file = []
dir_name = './hbadatatest/' # Work directory will be cleaned
if not(os.path.exists(dir_name)):
os.mkdir(dir_name)
rmfile = '*.log'
hba_elements = 16
sleeptime = 1
ctrl_string = '='
# read in arguments
if len(sys.argv) < 2 :
num_rcu = 96
else :
        num_rcu = int(sys.argv[1])
print(' Number of RCUs is ' + str(num_rcu))
max_subband = list(range(0, num_rcu))
max_rfi = list(range(0, num_rcu))
os.chdir(dir_name)
# os.popen("rspctl --clock=200")
# print 'Clock is set to 200 MHz'
# time.sleep(10)
#---------------------------------------------
# capture reference data (all HBA elements off)
rm_files(dir_name, '*')
switchon_hba()
# os.popen("rspctl --rcumode=5 2>/dev/null")
# os.popen("rspctl --rcuenable=1 2>/dev/null")
for ind in range(hba_elements) :
ctrl_string = ctrl_string + '128,'
strlength = len(ctrl_string)
ctrl_string = ctrl_string[0:strlength - 1]
cmd_str = 'rspctl --hbadelay' + ctrl_string + ' 2>/dev/null'
os.popen(cmd_str)
print('Setting all HBA elements on (128)')
time.sleep(sleeptime)
print('Capture data')
rec_stat(dir_name, num_rcu)
# rm_files(dir_name,rmfile)
# get list of all files in dir_name
files = open_dir(dir_name)
# start searching for maxima for each RCU
for file_cnt in range(len(files)) :
f, frames_to_process, rcu_nr = open_file(files, file_cnt)
if frames_to_process > 0 :
sst_data = read_frame(f)
[maxval, subband_nr] = max((x, i) for i, x in enumerate(sst_data[1:]))
max_rfi[rcu_nr] = 10 * numpy.log10(maxval)
max_subband[rcu_nr] = subband_nr + 1
        f.close()
for rcuind in range(num_rcu) :
print('RCU ' + str(rcuind) + ' has max. RFI (' + str(round(max_rfi[rcuind], 1)) + ' dB) in subband ' + str(max_subband[rcuind]))
main()
| gpl-3.0 | 3,359,976,794,862,514,700 | 30.192308 | 140 | 0.578052 | false |
lixiangning888/whole_project | modules/signatures_merge_tmp/rat_pcclient.py | 3 | 1675 | # -*- coding: utf-8 -*-
# Copyright (C) 2014 @threatlead
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from lib.cuckoo.common.abstracts import Signature
class PcClientMutexes(Signature):
name = "rat_pcclient"
    description = "Creates known PcClient mutexes or the corresponding file changes"
severity = 3
categories = ["rat"]
families = ["pcclient", "nex"]
authors = ["threatlead"]
references = ["https://malwr.com/analysis/MDIxN2NhMjg4MTg2NDY4MWIyNTE0Zjk5MTY1OGU4YzE/"]
minimum = "0.5"
def run(self):
indicators = [
"BKLANG.*",
"VSLANG.*",
]
for indicator in indicators:
if self.check_mutex(pattern=indicator, regex=True):
return True
indicators = [
".*\\\\syslog.dat",
".*\\\\.*_lang.ini",
".*\\\\[0-9]+_lang.dll",
".*\\\\[0-9]+_res.tmp",
]
for indicator in indicators:
if self.check_file(pattern=indicator, regex=True):
return True
return False
| lgpl-3.0 | 2,899,283,527,154,909,700 | 31.82 | 92 | 0.622791 | false |
40223145c2g18/c2g18 | wsgi/static/Brython2.1.0-20140419-113919/Lib/xml/dom/expatbuilder.py | 733 | 35733 | """Facility to use the Expat parser to load a minidom instance
from a string or file.
This avoids all the overhead of SAX and pulldom to gain performance.
"""
# Warning!
#
# This module is tightly bound to the implementation details of the
# minidom DOM and can't be used with other DOM implementations. This
# is due, in part, to a lack of appropriate methods in the DOM (there is
# no way to create Entity and Notation nodes via the DOM Level 2
# interface), and for performance. The later is the cause of some fairly
# cryptic code.
#
# Performance hacks:
#
# - .character_data_handler() has an extra case in which continuing
# data is appended to an existing Text node; this can be a
# speedup since pyexpat can break up character data into multiple
# callbacks even though we set the buffer_text attribute on the
# parser. This also gives us the advantage that we don't need a
# separate normalization pass.
#
# - Determining that a node exists is done using an identity comparison
# with None rather than a truth test; this avoids searching for and
# calling any methods on the node object if it exists. (A rather
# nice speedup is achieved this way as well!)
from xml.dom import xmlbuilder, minidom, Node
from xml.dom import EMPTY_NAMESPACE, EMPTY_PREFIX, XMLNS_NAMESPACE
from xml.parsers import expat
from xml.dom.minidom import _append_child, _set_attribute_node
from xml.dom.NodeFilter import NodeFilter
TEXT_NODE = Node.TEXT_NODE
CDATA_SECTION_NODE = Node.CDATA_SECTION_NODE
DOCUMENT_NODE = Node.DOCUMENT_NODE
FILTER_ACCEPT = xmlbuilder.DOMBuilderFilter.FILTER_ACCEPT
FILTER_REJECT = xmlbuilder.DOMBuilderFilter.FILTER_REJECT
FILTER_SKIP = xmlbuilder.DOMBuilderFilter.FILTER_SKIP
FILTER_INTERRUPT = xmlbuilder.DOMBuilderFilter.FILTER_INTERRUPT
theDOMImplementation = minidom.getDOMImplementation()
# Expat typename -> TypeInfo
_typeinfo_map = {
"CDATA": minidom.TypeInfo(None, "cdata"),
"ENUM": minidom.TypeInfo(None, "enumeration"),
"ENTITY": minidom.TypeInfo(None, "entity"),
"ENTITIES": minidom.TypeInfo(None, "entities"),
"ID": minidom.TypeInfo(None, "id"),
"IDREF": minidom.TypeInfo(None, "idref"),
"IDREFS": minidom.TypeInfo(None, "idrefs"),
"NMTOKEN": minidom.TypeInfo(None, "nmtoken"),
"NMTOKENS": minidom.TypeInfo(None, "nmtokens"),
}
class ElementInfo(object):
__slots__ = '_attr_info', '_model', 'tagName'
def __init__(self, tagName, model=None):
self.tagName = tagName
self._attr_info = []
self._model = model
def __getstate__(self):
return self._attr_info, self._model, self.tagName
def __setstate__(self, state):
self._attr_info, self._model, self.tagName = state
def getAttributeType(self, aname):
for info in self._attr_info:
if info[1] == aname:
t = info[-2]
if t[0] == "(":
return _typeinfo_map["ENUM"]
else:
return _typeinfo_map[info[-2]]
return minidom._no_type
def getAttributeTypeNS(self, namespaceURI, localName):
return minidom._no_type
def isElementContent(self):
if self._model:
type = self._model[0]
return type not in (expat.model.XML_CTYPE_ANY,
expat.model.XML_CTYPE_MIXED)
else:
return False
def isEmpty(self):
if self._model:
return self._model[0] == expat.model.XML_CTYPE_EMPTY
else:
return False
def isId(self, aname):
for info in self._attr_info:
if info[1] == aname:
return info[-2] == "ID"
return False
def isIdNS(self, euri, ename, auri, aname):
# not sure this is meaningful
return self.isId((auri, aname))
def _intern(builder, s):
return builder._intern_setdefault(s, s)
def _parse_ns_name(builder, name):
assert ' ' in name
parts = name.split(' ')
intern = builder._intern_setdefault
if len(parts) == 3:
uri, localname, prefix = parts
prefix = intern(prefix, prefix)
qname = "%s:%s" % (prefix, localname)
qname = intern(qname, qname)
localname = intern(localname, localname)
else:
uri, localname = parts
prefix = EMPTY_PREFIX
qname = localname = intern(localname, localname)
return intern(uri, uri), localname, prefix, qname
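# (Added note) With namespace_separator=" " and namespace_prefixes enabled, expat
# reports element/attribute names as space-separated strings such as
# "http://example.org/ns para p" (uri, local name, prefix) or just
# "http://example.org/ns para" when no prefix is in scope; _parse_ns_name() above
# unpacks exactly that form. The sample URI is an assumption.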
class ExpatBuilder:
"""Document builder that uses Expat to build a ParsedXML.DOM document
instance."""
def __init__(self, options=None):
if options is None:
options = xmlbuilder.Options()
self._options = options
if self._options.filter is not None:
self._filter = FilterVisibilityController(self._options.filter)
else:
self._filter = None
# This *really* doesn't do anything in this case, so
# override it with something fast & minimal.
self._finish_start_element = id
self._parser = None
self.reset()
def createParser(self):
"""Create a new parser object."""
return expat.ParserCreate()
def getParser(self):
"""Return the parser object, creating a new one if needed."""
if not self._parser:
self._parser = self.createParser()
self._intern_setdefault = self._parser.intern.setdefault
self._parser.buffer_text = True
self._parser.ordered_attributes = True
self._parser.specified_attributes = True
self.install(self._parser)
return self._parser
def reset(self):
"""Free all data structures used during DOM construction."""
self.document = theDOMImplementation.createDocument(
EMPTY_NAMESPACE, None, None)
self.curNode = self.document
self._elem_info = self.document._elem_info
self._cdata = False
def install(self, parser):
"""Install the callbacks needed to build the DOM into the parser."""
# This creates circular references!
parser.StartDoctypeDeclHandler = self.start_doctype_decl_handler
parser.StartElementHandler = self.first_element_handler
parser.EndElementHandler = self.end_element_handler
parser.ProcessingInstructionHandler = self.pi_handler
if self._options.entities:
parser.EntityDeclHandler = self.entity_decl_handler
parser.NotationDeclHandler = self.notation_decl_handler
if self._options.comments:
parser.CommentHandler = self.comment_handler
if self._options.cdata_sections:
parser.StartCdataSectionHandler = self.start_cdata_section_handler
parser.EndCdataSectionHandler = self.end_cdata_section_handler
parser.CharacterDataHandler = self.character_data_handler_cdata
else:
parser.CharacterDataHandler = self.character_data_handler
parser.ExternalEntityRefHandler = self.external_entity_ref_handler
parser.XmlDeclHandler = self.xml_decl_handler
parser.ElementDeclHandler = self.element_decl_handler
parser.AttlistDeclHandler = self.attlist_decl_handler
def parseFile(self, file):
"""Parse a document from a file object, returning the document
node."""
parser = self.getParser()
first_buffer = True
try:
while 1:
buffer = file.read(16*1024)
if not buffer:
break
parser.Parse(buffer, 0)
if first_buffer and self.document.documentElement:
self._setup_subset(buffer)
first_buffer = False
parser.Parse("", True)
except ParseEscape:
pass
doc = self.document
self.reset()
self._parser = None
return doc
def parseString(self, string):
"""Parse a document from a string, returning the document node."""
parser = self.getParser()
try:
parser.Parse(string, True)
self._setup_subset(string)
except ParseEscape:
pass
doc = self.document
self.reset()
self._parser = None
return doc
def _setup_subset(self, buffer):
"""Load the internal subset if there might be one."""
if self.document.doctype:
extractor = InternalSubsetExtractor()
extractor.parseString(buffer)
subset = extractor.getSubset()
self.document.doctype.internalSubset = subset
def start_doctype_decl_handler(self, doctypeName, systemId, publicId,
has_internal_subset):
doctype = self.document.implementation.createDocumentType(
doctypeName, publicId, systemId)
doctype.ownerDocument = self.document
_append_child(self.document, doctype)
self.document.doctype = doctype
if self._filter and self._filter.acceptNode(doctype) == FILTER_REJECT:
self.document.doctype = None
del self.document.childNodes[-1]
doctype = None
self._parser.EntityDeclHandler = None
self._parser.NotationDeclHandler = None
if has_internal_subset:
if doctype is not None:
doctype.entities._seq = []
doctype.notations._seq = []
self._parser.CommentHandler = None
self._parser.ProcessingInstructionHandler = None
self._parser.EndDoctypeDeclHandler = self.end_doctype_decl_handler
def end_doctype_decl_handler(self):
if self._options.comments:
self._parser.CommentHandler = self.comment_handler
self._parser.ProcessingInstructionHandler = self.pi_handler
if not (self._elem_info or self._filter):
self._finish_end_element = id
def pi_handler(self, target, data):
node = self.document.createProcessingInstruction(target, data)
_append_child(self.curNode, node)
if self._filter and self._filter.acceptNode(node) == FILTER_REJECT:
self.curNode.removeChild(node)
def character_data_handler_cdata(self, data):
childNodes = self.curNode.childNodes
if self._cdata:
if ( self._cdata_continue
and childNodes[-1].nodeType == CDATA_SECTION_NODE):
childNodes[-1].appendData(data)
return
node = self.document.createCDATASection(data)
self._cdata_continue = True
elif childNodes and childNodes[-1].nodeType == TEXT_NODE:
node = childNodes[-1]
value = node.data + data
node.data = value
return
else:
node = minidom.Text()
node.data = data
node.ownerDocument = self.document
_append_child(self.curNode, node)
def character_data_handler(self, data):
childNodes = self.curNode.childNodes
if childNodes and childNodes[-1].nodeType == TEXT_NODE:
node = childNodes[-1]
node.data = node.data + data
return
node = minidom.Text()
node.data = node.data + data
node.ownerDocument = self.document
_append_child(self.curNode, node)
def entity_decl_handler(self, entityName, is_parameter_entity, value,
base, systemId, publicId, notationName):
if is_parameter_entity:
# we don't care about parameter entities for the DOM
return
if not self._options.entities:
return
node = self.document._create_entity(entityName, publicId,
systemId, notationName)
if value is not None:
# internal entity
# node *should* be readonly, but we'll cheat
child = self.document.createTextNode(value)
node.childNodes.append(child)
self.document.doctype.entities._seq.append(node)
if self._filter and self._filter.acceptNode(node) == FILTER_REJECT:
del self.document.doctype.entities._seq[-1]
def notation_decl_handler(self, notationName, base, systemId, publicId):
node = self.document._create_notation(notationName, publicId, systemId)
self.document.doctype.notations._seq.append(node)
if self._filter and self._filter.acceptNode(node) == FILTER_ACCEPT:
del self.document.doctype.notations._seq[-1]
def comment_handler(self, data):
node = self.document.createComment(data)
_append_child(self.curNode, node)
if self._filter and self._filter.acceptNode(node) == FILTER_REJECT:
self.curNode.removeChild(node)
def start_cdata_section_handler(self):
self._cdata = True
self._cdata_continue = False
def end_cdata_section_handler(self):
self._cdata = False
self._cdata_continue = False
def external_entity_ref_handler(self, context, base, systemId, publicId):
return 1
def first_element_handler(self, name, attributes):
if self._filter is None and not self._elem_info:
self._finish_end_element = id
self.getParser().StartElementHandler = self.start_element_handler
self.start_element_handler(name, attributes)
def start_element_handler(self, name, attributes):
node = self.document.createElement(name)
_append_child(self.curNode, node)
self.curNode = node
if attributes:
for i in range(0, len(attributes), 2):
a = minidom.Attr(attributes[i], EMPTY_NAMESPACE,
None, EMPTY_PREFIX)
value = attributes[i+1]
a.value = value
a.ownerDocument = self.document
_set_attribute_node(node, a)
if node is not self.document.documentElement:
self._finish_start_element(node)
def _finish_start_element(self, node):
if self._filter:
# To be general, we'd have to call isSameNode(), but this
# is sufficient for minidom:
if node is self.document.documentElement:
return
filt = self._filter.startContainer(node)
if filt == FILTER_REJECT:
# ignore this node & all descendents
Rejecter(self)
elif filt == FILTER_SKIP:
# ignore this node, but make it's children become
# children of the parent node
Skipper(self)
else:
return
self.curNode = node.parentNode
node.parentNode.removeChild(node)
node.unlink()
# If this ever changes, Namespaces.end_element_handler() needs to
# be changed to match.
#
def end_element_handler(self, name):
curNode = self.curNode
self.curNode = curNode.parentNode
self._finish_end_element(curNode)
def _finish_end_element(self, curNode):
info = self._elem_info.get(curNode.tagName)
if info:
self._handle_white_text_nodes(curNode, info)
if self._filter:
if curNode is self.document.documentElement:
return
if self._filter.acceptNode(curNode) == FILTER_REJECT:
self.curNode.removeChild(curNode)
curNode.unlink()
def _handle_white_text_nodes(self, node, info):
if (self._options.whitespace_in_element_content
or not info.isElementContent()):
return
# We have element type information and should remove ignorable
# whitespace; identify for text nodes which contain only
# whitespace.
L = []
for child in node.childNodes:
if child.nodeType == TEXT_NODE and not child.data.strip():
L.append(child)
# Remove ignorable whitespace from the tree.
for child in L:
node.removeChild(child)
def element_decl_handler(self, name, model):
info = self._elem_info.get(name)
if info is None:
self._elem_info[name] = ElementInfo(name, model)
else:
assert info._model is None
info._model = model
def attlist_decl_handler(self, elem, name, type, default, required):
info = self._elem_info.get(elem)
if info is None:
info = ElementInfo(elem)
self._elem_info[elem] = info
info._attr_info.append(
[None, name, None, None, default, 0, type, required])
def xml_decl_handler(self, version, encoding, standalone):
self.document.version = version
self.document.encoding = encoding
# This is still a little ugly, thanks to the pyexpat API. ;-(
if standalone >= 0:
if standalone:
self.document.standalone = True
else:
self.document.standalone = False
# Don't include FILTER_INTERRUPT, since that's checked separately
# where allowed.
_ALLOWED_FILTER_RETURNS = (FILTER_ACCEPT, FILTER_REJECT, FILTER_SKIP)
class FilterVisibilityController(object):
"""Wrapper around a DOMBuilderFilter which implements the checks
to make the whatToShow filter attribute work."""
__slots__ = 'filter',
def __init__(self, filter):
self.filter = filter
def startContainer(self, node):
mask = self._nodetype_mask[node.nodeType]
if self.filter.whatToShow & mask:
val = self.filter.startContainer(node)
if val == FILTER_INTERRUPT:
raise ParseEscape
if val not in _ALLOWED_FILTER_RETURNS:
raise ValueError(
"startContainer() returned illegal value: " + repr(val))
return val
else:
return FILTER_ACCEPT
def acceptNode(self, node):
mask = self._nodetype_mask[node.nodeType]
if self.filter.whatToShow & mask:
val = self.filter.acceptNode(node)
if val == FILTER_INTERRUPT:
raise ParseEscape
if val == FILTER_SKIP:
# move all child nodes to the parent, and remove this node
parent = node.parentNode
for child in node.childNodes[:]:
parent.appendChild(child)
# node is handled by the caller
return FILTER_REJECT
if val not in _ALLOWED_FILTER_RETURNS:
raise ValueError(
"acceptNode() returned illegal value: " + repr(val))
return val
else:
return FILTER_ACCEPT
_nodetype_mask = {
Node.ELEMENT_NODE: NodeFilter.SHOW_ELEMENT,
Node.ATTRIBUTE_NODE: NodeFilter.SHOW_ATTRIBUTE,
Node.TEXT_NODE: NodeFilter.SHOW_TEXT,
Node.CDATA_SECTION_NODE: NodeFilter.SHOW_CDATA_SECTION,
Node.ENTITY_REFERENCE_NODE: NodeFilter.SHOW_ENTITY_REFERENCE,
Node.ENTITY_NODE: NodeFilter.SHOW_ENTITY,
Node.PROCESSING_INSTRUCTION_NODE: NodeFilter.SHOW_PROCESSING_INSTRUCTION,
Node.COMMENT_NODE: NodeFilter.SHOW_COMMENT,
Node.DOCUMENT_NODE: NodeFilter.SHOW_DOCUMENT,
Node.DOCUMENT_TYPE_NODE: NodeFilter.SHOW_DOCUMENT_TYPE,
Node.DOCUMENT_FRAGMENT_NODE: NodeFilter.SHOW_DOCUMENT_FRAGMENT,
Node.NOTATION_NODE: NodeFilter.SHOW_NOTATION,
}
class FilterCrutch(object):
__slots__ = '_builder', '_level', '_old_start', '_old_end'
def __init__(self, builder):
self._level = 0
self._builder = builder
parser = builder._parser
self._old_start = parser.StartElementHandler
self._old_end = parser.EndElementHandler
parser.StartElementHandler = self.start_element_handler
parser.EndElementHandler = self.end_element_handler
class Rejecter(FilterCrutch):
__slots__ = ()
def __init__(self, builder):
FilterCrutch.__init__(self, builder)
parser = builder._parser
for name in ("ProcessingInstructionHandler",
"CommentHandler",
"CharacterDataHandler",
"StartCdataSectionHandler",
"EndCdataSectionHandler",
"ExternalEntityRefHandler",
):
setattr(parser, name, None)
def start_element_handler(self, *args):
self._level = self._level + 1
def end_element_handler(self, *args):
if self._level == 0:
# restore the old handlers
parser = self._builder._parser
self._builder.install(parser)
parser.StartElementHandler = self._old_start
parser.EndElementHandler = self._old_end
else:
self._level = self._level - 1
class Skipper(FilterCrutch):
__slots__ = ()
def start_element_handler(self, *args):
node = self._builder.curNode
self._old_start(*args)
if self._builder.curNode is not node:
self._level = self._level + 1
def end_element_handler(self, *args):
if self._level == 0:
# We're popping back out of the node we're skipping, so we
# shouldn't need to do anything but reset the handlers.
self._builder._parser.StartElementHandler = self._old_start
self._builder._parser.EndElementHandler = self._old_end
self._builder = None
else:
self._level = self._level - 1
self._old_end(*args)
# framework document used by the fragment builder.
# Takes a string for the doctype, subset string, and namespace attrs string.
_FRAGMENT_BUILDER_INTERNAL_SYSTEM_ID = \
"http://xml.python.org/entities/fragment-builder/internal"
_FRAGMENT_BUILDER_TEMPLATE = (
'''\
<!DOCTYPE wrapper
%%s [
<!ENTITY fragment-builder-internal
SYSTEM "%s">
%%s
]>
<wrapper %%s
>&fragment-builder-internal;</wrapper>'''
% _FRAGMENT_BUILDER_INTERNAL_SYSTEM_ID)
class FragmentBuilder(ExpatBuilder):
"""Builder which constructs document fragments given XML source
text and a context node.
The context node is expected to provide information about the
namespace declarations which are in scope at the start of the
fragment.
"""
def __init__(self, context, options=None):
if context.nodeType == DOCUMENT_NODE:
self.originalDocument = context
self.context = context
else:
self.originalDocument = context.ownerDocument
self.context = context
ExpatBuilder.__init__(self, options)
def reset(self):
ExpatBuilder.reset(self)
self.fragment = None
def parseFile(self, file):
"""Parse a document fragment from a file object, returning the
fragment node."""
return self.parseString(file.read())
def parseString(self, string):
"""Parse a document fragment from a string, returning the
fragment node."""
self._source = string
parser = self.getParser()
doctype = self.originalDocument.doctype
ident = ""
if doctype:
subset = doctype.internalSubset or self._getDeclarations()
if doctype.publicId:
ident = ('PUBLIC "%s" "%s"'
% (doctype.publicId, doctype.systemId))
elif doctype.systemId:
ident = 'SYSTEM "%s"' % doctype.systemId
else:
subset = ""
nsattrs = self._getNSattrs() # get ns decls from node's ancestors
document = _FRAGMENT_BUILDER_TEMPLATE % (ident, subset, nsattrs)
try:
parser.Parse(document, 1)
except:
self.reset()
raise
fragment = self.fragment
self.reset()
## self._parser = None
return fragment
def _getDeclarations(self):
"""Re-create the internal subset from the DocumentType node.
This is only needed if we don't already have the
internalSubset as a string.
"""
doctype = self.context.ownerDocument.doctype
s = ""
if doctype:
for i in range(doctype.notations.length):
notation = doctype.notations.item(i)
if s:
s = s + "\n "
s = "%s<!NOTATION %s" % (s, notation.nodeName)
if notation.publicId:
s = '%s PUBLIC "%s"\n "%s">' \
% (s, notation.publicId, notation.systemId)
else:
s = '%s SYSTEM "%s">' % (s, notation.systemId)
for i in range(doctype.entities.length):
entity = doctype.entities.item(i)
if s:
s = s + "\n "
s = "%s<!ENTITY %s" % (s, entity.nodeName)
if entity.publicId:
s = '%s PUBLIC "%s"\n "%s"' \
% (s, entity.publicId, entity.systemId)
elif entity.systemId:
s = '%s SYSTEM "%s"' % (s, entity.systemId)
else:
s = '%s "%s"' % (s, entity.firstChild.data)
if entity.notationName:
s = "%s NOTATION %s" % (s, entity.notationName)
s = s + ">"
return s
def _getNSattrs(self):
return ""
def external_entity_ref_handler(self, context, base, systemId, publicId):
if systemId == _FRAGMENT_BUILDER_INTERNAL_SYSTEM_ID:
# this entref is the one that we made to put the subtree
# in; all of our given input is parsed in here.
old_document = self.document
old_cur_node = self.curNode
parser = self._parser.ExternalEntityParserCreate(context)
# put the real document back, parse into the fragment to return
self.document = self.originalDocument
self.fragment = self.document.createDocumentFragment()
self.curNode = self.fragment
try:
parser.Parse(self._source, 1)
finally:
self.curNode = old_cur_node
self.document = old_document
self._source = None
return -1
else:
return ExpatBuilder.external_entity_ref_handler(
self, context, base, systemId, publicId)
class Namespaces:
"""Mix-in class for builders; adds support for namespaces."""
def _initNamespaces(self):
# list of (prefix, uri) ns declarations. Namespace attrs are
# constructed from this and added to the element's attrs.
self._ns_ordered_prefixes = []
def createParser(self):
"""Create a new namespace-handling parser."""
parser = expat.ParserCreate(namespace_separator=" ")
parser.namespace_prefixes = True
return parser
def install(self, parser):
"""Insert the namespace-handlers onto the parser."""
ExpatBuilder.install(self, parser)
if self._options.namespace_declarations:
parser.StartNamespaceDeclHandler = (
self.start_namespace_decl_handler)
def start_namespace_decl_handler(self, prefix, uri):
"""Push this namespace declaration on our storage."""
self._ns_ordered_prefixes.append((prefix, uri))
def start_element_handler(self, name, attributes):
if ' ' in name:
uri, localname, prefix, qname = _parse_ns_name(self, name)
else:
uri = EMPTY_NAMESPACE
qname = name
localname = None
prefix = EMPTY_PREFIX
node = minidom.Element(qname, uri, prefix, localname)
node.ownerDocument = self.document
_append_child(self.curNode, node)
self.curNode = node
if self._ns_ordered_prefixes:
for prefix, uri in self._ns_ordered_prefixes:
if prefix:
a = minidom.Attr(_intern(self, 'xmlns:' + prefix),
XMLNS_NAMESPACE, prefix, "xmlns")
else:
a = minidom.Attr("xmlns", XMLNS_NAMESPACE,
"xmlns", EMPTY_PREFIX)
a.value = uri
a.ownerDocument = self.document
_set_attribute_node(node, a)
del self._ns_ordered_prefixes[:]
if attributes:
node._ensure_attributes()
_attrs = node._attrs
_attrsNS = node._attrsNS
for i in range(0, len(attributes), 2):
aname = attributes[i]
value = attributes[i+1]
if ' ' in aname:
uri, localname, prefix, qname = _parse_ns_name(self, aname)
a = minidom.Attr(qname, uri, localname, prefix)
_attrs[qname] = a
_attrsNS[(uri, localname)] = a
else:
a = minidom.Attr(aname, EMPTY_NAMESPACE,
aname, EMPTY_PREFIX)
_attrs[aname] = a
_attrsNS[(EMPTY_NAMESPACE, aname)] = a
a.ownerDocument = self.document
a.value = value
a.ownerElement = node
if __debug__:
# This only adds some asserts to the original
# end_element_handler(), so we only define this when -O is not
# used. If changing one, be sure to check the other to see if
# it needs to be changed as well.
#
def end_element_handler(self, name):
curNode = self.curNode
if ' ' in name:
uri, localname, prefix, qname = _parse_ns_name(self, name)
assert (curNode.namespaceURI == uri
and curNode.localName == localname
and curNode.prefix == prefix), \
"element stack messed up! (namespace)"
else:
assert curNode.nodeName == name, \
"element stack messed up - bad nodeName"
assert curNode.namespaceURI == EMPTY_NAMESPACE, \
"element stack messed up - bad namespaceURI"
self.curNode = curNode.parentNode
self._finish_end_element(curNode)
class ExpatBuilderNS(Namespaces, ExpatBuilder):
"""Document builder that supports namespaces."""
def reset(self):
ExpatBuilder.reset(self)
self._initNamespaces()
class FragmentBuilderNS(Namespaces, FragmentBuilder):
"""Fragment builder that supports namespaces."""
def reset(self):
FragmentBuilder.reset(self)
self._initNamespaces()
def _getNSattrs(self):
"""Return string of namespace attributes from this element and
ancestors."""
# XXX This needs to be re-written to walk the ancestors of the
# context to build up the namespace information from
# declarations, elements, and attributes found in context.
# Otherwise we have to store a bunch more data on the DOM
# (though that *might* be more reliable -- not clear).
attrs = ""
context = self.context
L = []
while context:
if hasattr(context, '_ns_prefix_uri'):
for prefix, uri in context._ns_prefix_uri.items():
# add every new NS decl from context to L and attrs string
if prefix in L:
continue
L.append(prefix)
if prefix:
declname = "xmlns:" + prefix
else:
declname = "xmlns"
if attrs:
attrs = "%s\n %s='%s'" % (attrs, declname, uri)
else:
attrs = " %s='%s'" % (declname, uri)
context = context.parentNode
return attrs
class ParseEscape(Exception):
"""Exception raised to short-circuit parsing in InternalSubsetExtractor."""
pass
class InternalSubsetExtractor(ExpatBuilder):
"""XML processor which can rip out the internal document type subset."""
subset = None
def getSubset(self):
"""Return the internal subset as a string."""
return self.subset
def parseFile(self, file):
try:
ExpatBuilder.parseFile(self, file)
except ParseEscape:
pass
def parseString(self, string):
try:
ExpatBuilder.parseString(self, string)
except ParseEscape:
pass
def install(self, parser):
parser.StartDoctypeDeclHandler = self.start_doctype_decl_handler
parser.StartElementHandler = self.start_element_handler
def start_doctype_decl_handler(self, name, publicId, systemId,
has_internal_subset):
if has_internal_subset:
parser = self.getParser()
self.subset = []
parser.DefaultHandler = self.subset.append
parser.EndDoctypeDeclHandler = self.end_doctype_decl_handler
else:
raise ParseEscape()
def end_doctype_decl_handler(self):
s = ''.join(self.subset).replace('\r\n', '\n').replace('\r', '\n')
self.subset = s
raise ParseEscape()
def start_element_handler(self, name, attrs):
raise ParseEscape()
def parse(file, namespaces=True):
"""Parse a document, returning the resulting Document node.
'file' may be either a file name or an open file object.
"""
if namespaces:
builder = ExpatBuilderNS()
else:
builder = ExpatBuilder()
if isinstance(file, str):
fp = open(file, 'rb')
try:
result = builder.parseFile(fp)
finally:
fp.close()
else:
result = builder.parseFile(file)
return result
def parseString(string, namespaces=True):
"""Parse a document from a string, returning the resulting
Document node.
"""
if namespaces:
builder = ExpatBuilderNS()
else:
builder = ExpatBuilder()
return builder.parseString(string)
def parseFragment(file, context, namespaces=True):
"""Parse a fragment of a document, given the context from which it
was originally extracted. context should be the parent of the
node(s) which are in the fragment.
'file' may be either a file name or an open file object.
"""
if namespaces:
builder = FragmentBuilderNS(context)
else:
builder = FragmentBuilder(context)
if isinstance(file, str):
fp = open(file, 'rb')
try:
result = builder.parseFile(fp)
finally:
fp.close()
else:
result = builder.parseFile(file)
return result
def parseFragmentString(string, context, namespaces=True):
"""Parse a fragment of a document from a string, given the context
from which it was originally extracted. context should be the
parent of the node(s) which are in the fragment.
"""
if namespaces:
builder = FragmentBuilderNS(context)
else:
builder = FragmentBuilder(context)
return builder.parseString(string)
def makeBuilder(options):
"""Create a builder based on an Options object."""
if options.namespaces:
return ExpatBuilderNS(options)
else:
return ExpatBuilder(options)
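# Hedged usage sketch (added; not part of the upstream module). Exercises the
# module-level helpers above on a tiny made-up document.
if __name__ == "__main__":
    doc = parseString("<root a='1'><child>text</child></root>")
    print(doc.documentElement.tagName)        # -> 'root'
    frag = parseFragmentString("<extra/>", doc.documentElement)
    print(frag.firstChild.tagName)            # -> 'extra'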
| gpl-2.0 | -7,385,274,793,542,316,000 | 35.876161 | 81 | 0.586293 | false |
kwrobert/heat-templates | hot/software-config/elements/heat-config-ansible/install.d/hook-ansible.py | 6 | 3615 | #!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import logging
import os
import subprocess
import sys
WORKING_DIR = os.environ.get('HEAT_ANSIBLE_WORKING',
'/var/lib/heat-config/heat-config-ansible')
OUTPUTS_DIR = os.environ.get('HEAT_ANSIBLE_OUTPUTS',
'/var/run/heat-config/heat-config-ansible')
def prepare_dir(path):
if not os.path.isdir(path):
os.makedirs(path, 0o700)
def main(argv=sys.argv):
log = logging.getLogger('heat-config')
handler = logging.StreamHandler(sys.stderr)
handler.setFormatter(
logging.Formatter(
'[%(asctime)s] (%(name)s) [%(levelname)s] %(message)s'))
log.addHandler(handler)
log.setLevel('DEBUG')
prepare_dir(OUTPUTS_DIR)
prepare_dir(WORKING_DIR)
os.chdir(WORKING_DIR)
c = json.load(sys.stdin)
variables = {}
for input in c['inputs']:
variables[input['name']] = input.get('value', '')
fn = os.path.join(WORKING_DIR, '%s_playbook.yaml' % c['id'])
vars_filename = os.path.join(WORKING_DIR, '%s_variables.json' % c['id'])
heat_outputs_path = os.path.join(OUTPUTS_DIR, c['id'])
variables['heat_outputs_path'] = heat_outputs_path
config_text = c.get('config', '')
if not config_text:
log.warn("No 'config' input found, nothing to do.")
return
# Write 'variables' to file
with os.fdopen(os.open(
vars_filename, os.O_CREAT | os.O_WRONLY, 0o600), 'w') as var_file:
json.dump(variables, var_file)
# Write the executable, 'config', to file
with os.fdopen(os.open(fn, os.O_CREAT | os.O_WRONLY, 0o600), 'w') as f:
f.write(c.get('config', '').encode('utf-8'))
cmd = [
'ansible-playbook',
'-i',
'localhost,',
fn,
'--extra-vars',
'@%s' % vars_filename
]
log.debug('Running %s' % (' '.join(cmd),))
try:
subproc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except OSError:
log.warn("ansible not installed yet")
return
stdout, stderr = subproc.communicate()
log.info('Return code %s' % subproc.returncode)
if stdout:
log.info(stdout)
if stderr:
log.info(stderr)
# TODO(stevebaker): Test if ansible returns any non-zero
# return codes in success.
if subproc.returncode:
log.error("Error running %s. [%s]\n" % (fn, subproc.returncode))
else:
log.info('Completed %s' % fn)
response = {}
for output in c.get('outputs') or []:
output_name = output['name']
try:
with open('%s.%s' % (heat_outputs_path, output_name)) as out:
response[output_name] = out.read()
except IOError:
pass
response.update({
'deploy_stdout': stdout,
'deploy_stderr': stderr,
'deploy_status_code': subproc.returncode,
})
json.dump(response, sys.stdout)
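# Hedged example (added): the general shape of the JSON document this hook reads
# from stdin; the id, playbook text, inputs and outputs below are made-up values.
#
#   {
#     "id": "abc123",
#     "config": "- hosts: localhost\n  tasks:\n    - debug: msg={{ greeting }}",
#     "inputs": [{"name": "greeting", "value": "hello"}],
#     "outputs": [{"name": "result"}]
#   }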
if __name__ == '__main__':
sys.exit(main(sys.argv))
| apache-2.0 | -5,781,442,539,924,070,000 | 29.897436 | 78 | 0.601107 | false |
palashahuja/myhdl | myhdl/test/core/test_misc.py | 3 | 1922 | # This file is part of the myhdl library, a Python package for using
# Python as a Hardware Description Language.
#
# Copyright (C) 2003-2008 Jan Decaluwe
#
# The myhdl library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of the
# License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
""" Run the unit tests for Signal """
import random
from random import randrange
random.seed(1) # random, but deterministic
from types import GeneratorType
import unittest
from unittest import TestCase
from myhdl import instance, instances
def A(n):
@instance
def logic():
yield None
return logic
def B(n):
@instance
def logic():
yield None
return logic
def C(n):
A_1 = A(1)
A_2 = A(2)
B_1 = B(1)
return A_1, A_2, B_1
g = 3
class InstancesTest(TestCase):
def testInstances(self):
@instance
def D_1():
yield None
d = 1
A_1 = A(1)
a = [1, 2]
B_1 = B(1)
b = "string"
C_1 = C(1)
c = {}
i = instances()
# can't just construct an expected list;
# that would become part of the instances also!
self.assertEqual(len(i), 4)
for e in (D_1, A_1, B_1, C_1):
self.assert_(e in i)
if __name__ == "__main__":
unittest.main()
| lgpl-2.1 | 1,455,729,469,256,915,700 | 23.641026 | 74 | 0.638398 | false |
Azenwrath/codeguild-labs | lab_distance_converstion.py | 1 | 1643 | #Lab: Distance Converter
#Student: Dana Stubkjaer
def convert(distance, unit1, unit2):
if unit1 == "mi":
if unit2 == "mi":
print (distance + " " + unit2)
if unit2 == "km":
print ((float(distance) * 1.60934))
if unit2 == "ft":
print ((float(distance) * 5280))
if unit2 == "m":
print ((float(distance) * 1609.34))
if unit1 == "km":
if unit2 == "km":
print (distance + " " + unit2)
if unit2 == "mi":
print ((float(distance) / 1.60934))
if unit2 == "ft":
            print ((float(distance) * 3280.84))
if unit2 == "m":
            print ((float(distance) * 1000))
if unit1 == "ft":
if unit2 == "ft":
print (distance + " " + unit2)
if unit2 == "mi":
print ((float(distance) * 0.000189394))
if unit2 == "km":
print ((float(distance) * 0.0003048))
if unit2 == "m":
print ((float(distance) * 0.3048))
if unit1 == "m":
if unit2 == "m":
print (distance + " " + unit2)
if unit2 == "mi":
print ((float(distance) * 0.000621371))
if unit2 == "ft":
print ((float(distance) * 3.28084))
if unit2 == "km":
            print ((float(distance) * 0.001))
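# Worked examples (added), using the conversion factors above:
#   convert("10", "mi", "km")  prints 16.0934
#   convert("5", "km", "m")    prints 5000.0
#   convert("250", "m", "km")  prints 0.25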
distance = ""
unit1 = ""
unit2 = ""
distance = input("Please enter a distance: ")
unit1 = input ("Please enter the unit of distance: ")
unit2 = input("Please enter the desired unit of conversion: ")
convert(distance, unit1, unit2)
| gpl-3.0 | -2,776,401,310,696,413,000 | 24.5 | 62 | 0.466829 | false |
spahan/unixdmoain | admin/janitor.py | 1 | 3452 | #!/usr/bin/env python2
# coding: utf-8
# THIS SOFTWARE IS LICENSED UNDER THE BSD LICENSE CONDITIONS.
# FOR LICENCE DETAILS SEE share/LICENSE.TXT
#
# (c) 2005-2009, Marco Hoehle <[email protected]>
# (c) 2010, Hanspeter Spalinger <[email protected]>
"""
housekeeping jobs, run this script as cronjob.
Do not forget to change KEYTAB to the location where
your janitor.keytab file is.
"""
from UniDomain import Classes
import re
import sys
import ldap  # python-ldap; used by detect_bad_hosts() below
def detect_bad_hosts(authen, db, config):
"""Searches for hosts which are missing from ldap or kerberos.
    returns a list of problem descriptions."""
problems = []
krb_result = authen.list_hosts()
ldap_result = db.conn.search_s(config.ldapbase, ldap.SCOPE_SUBTREE, '(ObjectClass=udHost)', ['USID', 'FQDN', 'cn'])
ldap_hosts = set()
for id,atts in ldap_result:
# check primary attributes have single values. multiple ones indicate a unsuccessfull copy.
for at in atts:
if len(atts[at]) != 1:
problems.append( "Warning: Host %s has multiple %s Attributes!" % (id,at) )
if not id.startswith('cn=%s,' % atts['cn'][0]):
problems.append( "Warning: Host id and cn differ for %s!" % id )
if not atts['FQDN'][0].startswith('%s.' % atts['cn'][0]):
problems.append( "Warning: FQDN (%s) does not start with hostname (%s) for %s!" % (atts['FQDN'][0],atts['cn'][0],id) )
if not atts['FQDN'][0].endswith('.unibas.ch'):
problems.append( "Info: Host %s (%s) is not in domain unibas.ch." % (id, atts['FQDN'][0]) )
if not atts['USID'][0].startswith('host/%s@' % atts['FQDN'][0]):
problems.append( "Warning: Host USID (%s) and hostname (%s) different for %s!" % (atts['USID'][0], atts['cn'][0], id) )
if atts['FQDN'][0] in ldap_hosts:
problems.append( "ERROR!!: FQDN of %s (%s) is already taken by another host!" % (id, atts['FQDN'][0]) )
else:
ldap_hosts.add(atts['FQDN'][0])
krb_hosts = set()
for host in krb_result:
mo = re.match(r'host/([a-z0-9-.]*\.unibas\.ch)@UD.UNIBAS.CH', host)
if mo:
krb_hosts.add(mo.group(1))
else:
problems.append( "Warning: bad principal name for %s." % host )
for bad in krb_hosts-ldap_hosts:
problems.append( "Warning: host %s in kerberos but not in ldap!" % bad )
for bad in ldap_hosts-krb_hosts:
problems.append( "Warning: host %s in ldap but not in kerberos!" % bad )
return problems
def main():
config = Classes.Config(krb5keytab="/root/janitor/janitor.keytab",plugin_author='ldapdbadmin')
authen = Classes.Authen(config)
if not authen:
print "bad auth"
return
userid = authen.authenticate(user='janitor/admin')
if not userid: return
authen.kadmin()
author = Classes.Author(config)
db = author.authorize('janitor/admin')
config = Classes.Config(krb5keytab="/root/janitor/janitor.keytab",plugin_author='ldapdbadmin')
authen = Classes.Authen(config)
if not authen:
sys.exit(3)
userid = authen.authenticate()
if not userid:
sys.exit(4)
authen.kadmin()
author = Classes.Author(config)
if not author:
sys.exit(3)
db = author.authorize(userid.split('@')[0])
if not db:
sys.exit(4)
db.update_dnsSOA()
#FIXME: implement this.
#roger.search_expiredHosts()
if __name__ == "__main__":
main()
| bsd-3-clause | -8,016,550,323,606,674,000 | 38.215909 | 131 | 0.611997 | false |
DANA-Laboratory/CoolProp | Web/scripts/fluid_properties.Incompressibles.py | 3 | 6445 | from __future__ import print_function, division
import os.path
import CoolProp
import CoolProp.CoolProp
import subprocess
import sys
import numpy as np
import matplotlib
matplotlib.use('Agg') #Force mpl to use a non-GUI backend
import matplotlib.pyplot as plt
web_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
#plots_path = os.path.join(web_dir,'fluid_properties','incompressibles_consistency')
plots_path = os.path.join(web_dir,'scripts','incompressibles_consistency')
checked = ["TVP1869", "T66"]
N = 50
p = 100e5
Pr = np.empty(N)
la = np.empty(N)
mu = np.empty(N)
cp = np.empty(N)
fig = plt.figure(tight_layout=True)
Pr_axis = fig.add_subplot(221)
la_axis = fig.add_subplot(222)
mu_axis = fig.add_subplot(223)
cp_axis = fig.add_subplot(224)
#Pr_axis = plt.subplot2grid((3,2), (0,0), rowspan=3)
#la_axis = plt.subplot2grid((3,2), (0,1))
#mu_axis = plt.subplot2grid((3,2), (1,1))
#cp_axis = plt.subplot2grid((3,2), (2,1))
Pr_axis.set_xlabel("Temperature $T$ / deg C")
Pr_axis.set_ylabel("Prandtl Number $Pr$")
#Pr_axis.set_ylim([0,10000])
#Pr_axis.set_yscale("log")
la_axis.set_xlabel("Temperature $T$ / deg C")
la_axis.set_ylabel("Thermal Conductivity $\lambda$ / W/m/K")
#la_axis.set_ylim([0,1])
mu_axis.set_xlabel("Temperature $T$ / deg C")
mu_axis.set_ylabel("Dynamic Viscosity $\mu$ / Pa s")
#mu_axis.set_ylim([0,1])
#mu_axis.set_yscale("log")
cp_axis.set_xlabel("Temperature $T$ / deg C")
cp_axis.set_ylabel("Isobaric Heat Capacity $c_p$ / J/kg/K")
#cp_axis.set_ylim([0,5000])
for fluid in CoolProp.__incompressibles_pure__ + CoolProp.__incompressibles_solution__:
#for fluid in CoolProp.__incompressibles_solution__:
#for fluid in CoolProp.__incompressibles_pure__:
skip_fluid = False
for ignored in ["example","iceea","icena","icepg"]:
if ignored in fluid.lower():
skip_fluid = True
if skip_fluid:
continue
state = CoolProp.AbstractState("INCOMP",fluid)
error = ""
for frac in [0.5,0.2,0.8,0.1,0.9]:
error = ""
try:
state.set_mass_fractions([frac])
state.update(CoolProp.PT_INPUTS,p,state.Tmax())
break
except Exception as e:
error = e.message
try:
state.set_volu_fractions([frac])
state.update(CoolProp.PT_INPUTS,p,state.Tmax())
break
except Exception as e:
error = e.message
try:
state.set_mole_fractions([frac])
state.update(CoolProp.PT_INPUTS,p,state.Tmax())
break
except Exception as e:
error = e.message
pass
Tmin = 0.0
try:
Tmin = state.keyed_output(CoolProp.iT_freeze)
except:
pass
Tmin = max(state.Tmin(), Tmin)+1
Tmax = state.Tmax()
T = np.linspace(Tmin,Tmax, N)
for i, Ti in enumerate(T):
state.update(CoolProp.PT_INPUTS, p, Ti)
Pr[i] = state.Prandtl()
la[i] = state.conductivity()
mu[i] = state.viscosity()
cp[i] = state.cpmass()
#print(np.min(Pr), np.max(Pr))
Pr_axis.plot(T-273.15,Pr)
la_axis.plot(T-273.15,la)
mu_axis.plot(T-273.15,mu)
cp_axis.plot(T-273.15,cp)
if np.max(Pr)>10000:
if fluid not in checked:
print("Very high Prandtl number for {0:s} of {1:f}".format(fluid,np.max(Pr)))
if np.min(Pr)<0.0:
if fluid not in checked:
print("Very low Prandtl number for {0:s} of {1:f}".format(fluid,np.min(Pr)))
if np.max(la)>0.8:
if fluid not in checked:
print("Very high thermal conductivity for {0:s} of {1:f}".format(fluid,np.max(la)))
if np.min(la)<0.3:
if fluid not in checked:
print("Very low thermal conductivity for {0:s} of {1:f}".format(fluid,np.min(la)))
if np.max(mu)>0.2:
if fluid not in checked:
print("Very high viscosity for {0:s} of {1:f}".format(fluid,np.max(mu)))
if np.min(mu)<1e-8:
if fluid not in checked:
print("Very low viscosity for {0:s} of {1:f}".format(fluid,np.min(mu)))
if np.max(cp)>5000:
if fluid not in checked:
print("Very high heat capacity for {0:s} of {1:f}".format(fluid,np.max(cp)))
if np.min(cp)<1000:
if fluid not in checked:
print("Very low heat capacity for {0:s} of {1:f}".format(fluid,np.min(cp)))
#for fluid in CoolProp.__fluids__:
for fluid in ["Water"]:
state = CoolProp.AbstractState("HEOS",fluid)
Tmin = max(state.Tmin(), Pr_axis.get_xlim()[0]+273.15)
Tmax = min(state.Tmax(), Pr_axis.get_xlim()[1]+273.15)
T = np.linspace(Tmin, Tmax, N)
for i, Ti in enumerate(T):
try:
state.update(CoolProp.QT_INPUTS, 0, Ti)
p = state.p() + 1e5
except:
p = state.p_critical() + 1e5
Pr[i] = np.nan
la[i] = np.nan
mu[i] = np.nan
cp[i] = np.nan
try:
state.update(CoolProp.PT_INPUTS, p, Ti)
try:
Pr[i] = state.Prandtl()
except Exception as e:
print(e.message)
try:
la[i] = state.conductivity()
except Exception as e:
print(e.message)
try:
mu[i] = state.viscosity()
except Exception as e:
print(e.message)
try:
cp[i] = state.cpmass()
except Exception as e:
print(e.message)
except:
pass
#print(np.min(Pr), np.max(Pr))
if np.sum(np.isnan(Pr)) == 0:
Pr_axis.plot(T-273.15,Pr,alpha=0.5,ls=":")
else:
#print("Error: Prandtl undefined for "+fluid)
pass
if np.sum(np.isnan(la)) == 0:
la_axis.plot(T-273.15,la,alpha=0.5,ls=":")
else:
#print("Error: Conductivuty undefined for "+fluid)
pass
if np.sum(np.isnan(mu)) == 0:
mu_axis.plot(T-273.15,mu,alpha=0.5,ls=":")
else:
#print("Error: Viscosity undefined for "+fluid)
pass
if np.sum(np.isnan(cp)) == 0:
cp_axis.plot(T-273.15,cp,alpha=0.5,ls=":")
else:
#print("Error: Heat capacity undefined for "+fluid)
pass
fig.tight_layout()
fig.savefig(plots_path+'.pdf')
#fig.savefig(plots_path+'.png')
sys.exit(0)
| mit | -5,283,005,536,496,454,000 | 32.051282 | 95 | 0.561365 | false |
Azure/azure-sdk-for-python | sdk/storage/azure-storage-blob/samples/blob_samples_copy_blob.py | 1 | 1906 | # coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
FILE: blob_samples_copy_blob.py
DESCRIPTION:
This sample demos how to copy a blob from a URL.
USAGE: python blob_samples_copy_blob.py
Set the environment variables with your own values before running the sample.
1) AZURE_STORAGE_CONNECTION_STRING - the connection string to your storage account
"""
from __future__ import print_function
import os
import sys
import time
from azure.storage.blob import BlobServiceClient
def main():
try:
CONNECTION_STRING = os.environ['AZURE_STORAGE_CONNECTION_STRING']
except KeyError:
print("AZURE_STORAGE_CONNECTION_STRING must be set.")
sys.exit(1)
status = None
blob_service_client = BlobServiceClient.from_connection_string(CONNECTION_STRING)
source_blob = "https://www.gutenberg.org/files/59466/59466-0.txt"
copied_blob = blob_service_client.get_blob_client("mycontainer", '59466-0.txt')
# Copy started
copied_blob.start_copy_from_url(source_blob)
for i in range(10):
props = copied_blob.get_blob_properties()
status = props.copy.status
print("Copy status: " + status)
if status == "success":
# Copy finished
break
time.sleep(10)
if status != "success":
# if not finished after 100s, cancel the operation
props = copied_blob.get_blob_properties()
print(props.copy.status)
copy_id = props.copy.id
copied_blob.abort_copy(copy_id)
props = copied_blob.get_blob_properties()
print(props.copy.status)
if __name__ == "__main__":
main()
| mit | 8,189,060,452,475,976,000 | 32.438596 | 86 | 0.618048 | false |
davenovak/mtasa-blue | vendor/google-breakpad/src/tools/gyp/test/hard_dependency/gyptest-no-exported-hard-dependency.py | 350 | 1226 | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verify that a hard_dependency that is not exported is not pulled in as a
dependency for a target if the target does not explicitly specify a dependency
and none of its dependencies export the hard_dependency.
"""
import TestGyp
test = TestGyp.TestGyp()
if test.format == 'dump_dependency_json':
test.skip_test('Skipping test; dependency JSON does not adjust ' \
                 'static libraries.\n')
test.run_gyp('hard_dependency.gyp', chdir='src')
chdir = 'relocate/src'
test.relocate('src', chdir)
test.build('hard_dependency.gyp', 'd', chdir=chdir)
# Because 'c' does not export a hard_dependency, only the target 'd' should
# be built. This is because the 'd' target does not need the generated headers
# in order to be compiled.
test.built_file_must_not_exist('a', type=test.STATIC_LIB, chdir=chdir)
test.built_file_must_not_exist('b', type=test.STATIC_LIB, chdir=chdir)
test.built_file_must_not_exist('c', type=test.STATIC_LIB, chdir=chdir)
test.built_file_must_exist('d', type=test.STATIC_LIB, chdir=chdir)
test.pass_test()
| gpl-3.0 | 7,539,759,156,189,733,000 | 33.055556 | 78 | 0.729201 | false |
danakj/chromium | third_party/logilab/logilab/common/interface.py | 137 | 2593 | # copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:[email protected]
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""Bases class for interfaces to provide 'light' interface handling.
TODO:
_ implements a check method which check that an object implements the
interface
_ Attribute objects
This module requires at least python 2.2
"""
__docformat__ = "restructuredtext en"
class Interface(object):
"""Base class for interfaces."""
def is_implemented_by(cls, instance):
return implements(instance, cls)
is_implemented_by = classmethod(is_implemented_by)
def implements(obj, interface):
"""Return true if the give object (maybe an instance or class) implements
the interface.
"""
kimplements = getattr(obj, '__implements__', ())
if not isinstance(kimplements, (list, tuple)):
kimplements = (kimplements,)
for implementedinterface in kimplements:
if issubclass(implementedinterface, interface):
return True
return False
def extend(klass, interface, _recurs=False):
"""Add interface to klass'__implements__ if not already implemented in.
If klass is subclassed, ensure subclasses __implements__ it as well.
NOTE: klass should be e new class.
"""
if not implements(klass, interface):
try:
kimplements = klass.__implements__
kimplementsklass = type(kimplements)
kimplements = list(kimplements)
except AttributeError:
kimplementsklass = tuple
kimplements = []
kimplements.append(interface)
klass.__implements__ = kimplementsklass(kimplements)
for subklass in klass.__subclasses__():
extend(subklass, interface, _recurs=True)
elif _recurs:
for subklass in klass.__subclasses__():
extend(subklass, interface, _recurs=True)
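# Minimal usage sketch of the functions above. The IStorable / IPrintable /
# Document names are illustrative only and not part of logilab-common; the
# demo sits under __main__ so importing this module is unaffected.
if __name__ == '__main__':
    class IStorable(Interface):
        """Example marker interface."""

    class IPrintable(Interface):
        """Another example marker interface."""

    class Document(object):
        __implements__ = (IStorable,)

    print(implements(Document(), IStorable))   # True: declared directly
    print(implements(Document(), IPrintable))  # False: not declared (yet)
    extend(Document, IPrintable)               # adds IPrintable to Document and its subclasses
    print(implements(Document(), IPrintable))  # True after extend()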
| bsd-3-clause | 4,087,415,851,489,420,300 | 35.521127 | 81 | 0.691091 | false |
kingvuplus/BH-SH4 | lib/python/Components/Sources/List.py | 39 | 2840 | from Source import Source
from Components.Element import cached
class List(Source, object):
"""The datasource of a listbox. Currently, the format depends on the used converter. So
if you put a simple string list in here, you need to use a StringList converter, if you are
using a "multi content list styled"-list, you need to use the StaticMultiList converter, and
setup the "fonts".
This has been done so another converter could convert the list to a different format, for example
to generate HTML."""
def __init__(self, list = [ ], enableWrapAround = False, item_height = 25, fonts = [ ]):
Source.__init__(self)
self.__list = list
self.onSelectionChanged = [ ]
self.item_height = item_height
self.fonts = fonts
self.disable_callbacks = False
self.enableWrapAround = enableWrapAround
self.__style = "default" # style might be an optional string which can be used to define different visualisations in the skin
def setList(self, list):
self.__list = list
self.changed((self.CHANGED_ALL,))
list = property(lambda self: self.__list, setList)
def entry_changed(self, index):
if not self.disable_callbacks:
self.downstream_elements.entry_changed(index)
def modifyEntry(self, index, data):
self.__list[index] = data
self.entry_changed(index)
def count(self):
return len(self.__list)
def selectionChanged(self, index):
if self.disable_callbacks:
return
# update all non-master targets
for x in self.downstream_elements:
if x is not self.master:
x.index = index
for x in self.onSelectionChanged:
x()
@cached
def getCurrent(self):
return self.master is not None and self.master.current
current = property(getCurrent)
def setIndex(self, index):
if self.master is not None:
self.master.index = index
self.selectionChanged(index)
@cached
def getIndex(self):
if self.master is not None:
return self.master.index
else:
return None
setCurrentIndex = setIndex
index = property(getIndex, setIndex)
def selectNext(self):
if self.getIndex() + 1 >= self.count():
if self.enableWrapAround:
self.index = 0
else:
self.index += 1
self.setIndex(self.index)
def selectPrevious(self):
if self.getIndex() - 1 < 0:
if self.enableWrapAround:
self.index = self.count() - 1
else:
self.index -= 1
self.setIndex(self.index)
@cached
def getStyle(self):
return self.__style
def setStyle(self, style):
if self.__style != style:
self.__style = style
self.changed((self.CHANGED_SPECIFIC, "style"))
style = property(getStyle, setStyle)
def updateList(self, list):
"""Changes the list without changing the selection or emitting changed Events"""
assert len(list) == len(self.__list)
old_index = self.index
self.disable_callbacks = True
self.list = list
self.index = old_index
self.disable_callbacks = False
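# Usage sketch (illustrative; assumes an enigma2 Screen whose skin attaches a
# StringList converter to this source -- the names below are not from this file):
#
#     self["menu"] = List(["first entry", "second entry"])
#     self["menu"].setList(["replaced", "entries"])   # full replacement, emits CHANGED_ALL
#     current = self["menu"].getCurrent()             # entry under the cursor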
| gpl-2.0 | 2,690,742,583,095,458,300 | 25.542056 | 127 | 0.710563 | false |
SzTk/Get-Mid-Point | get_mid_point/geocoding.py | 1 | 1561 | #coding: UTF-8
import sys
import traceback
from pygmapslib import PyGMaps, PyGMapsError
__all__ = ['GeocodingError', 'Geocoding', 'request']
class GeocodingError(Exception):
def __init__(self, error_status, params):
self.error_status = error_status
self.params = params
def __str__(self):
return self.error_status + '\n' + str(self.params)
def __unicode__(self):
return unicode(self.__str__())
class Geocoding(object):
def __init__(self, data):
self.data = data
def __unicode__(self):
addresses = ''
for result in self.data:
addresses = addresses + result['formatted_address'] + '\n'
return addresses
if sys.version_info[0] >= 3: # Python 3
def __str__(self):
return self.__unicode__()
else: # Python 2
def __str__(self):
return self.__unicode__().encode('utf8')
def request(address, sensor='false', gmaps = None):
query_url = 'https://maps.googleapis.com/maps/api/geocode/json?'
params = {
'address': address,
'sensor' : sensor
}
try:
if gmaps is None:
gmap_result = PyGMaps().get_data(query_url, params)
else:
gmap_result = gmaps.get_data(query_url, params)
except PyGMapsError as e:
print traceback.format_exc()
raise GeocodingError('HTTP STATUS ERROR', params)
if gmap_result['status'] != 'OK':
raise GeocodingError(gmap_result['status'], params)
return Geocoding(gmap_result['results'])
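# Usage sketch (the address is an arbitrary example; requires network access and
# a configured pygmapslib/PyGMaps setup):
#
#     result = request('1600 Amphitheatre Parkway, Mountain View, CA')
#     print unicode(result)   # one formatted address per line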
| lgpl-3.0 | -1,562,113,674,655,537,700 | 25.457627 | 70 | 0.586803 | false |
MakeHer/edx-platform | cms/djangoapps/contentstore/management/commands/tests/test_git_export.py | 66 | 7362 | """
Unittests for exporting to git via management command.
"""
import copy
import os
import shutil
import StringIO
import subprocess
import unittest
from uuid import uuid4
from django.conf import settings
from django.core.management import call_command
from django.core.management.base import CommandError
from django.test.utils import override_settings
from contentstore.tests.utils import CourseTestCase
import contentstore.git_export_utils as git_export_utils
from contentstore.git_export_utils import GitExportError
from opaque_keys.edx.locations import SlashSeparatedCourseKey
FEATURES_WITH_EXPORT_GIT = settings.FEATURES.copy()
FEATURES_WITH_EXPORT_GIT['ENABLE_EXPORT_GIT'] = True
TEST_DATA_CONTENTSTORE = copy.deepcopy(settings.CONTENTSTORE)
TEST_DATA_CONTENTSTORE['DOC_STORE_CONFIG']['db'] = 'test_xcontent_%s' % uuid4().hex
@override_settings(CONTENTSTORE=TEST_DATA_CONTENTSTORE)
@override_settings(FEATURES=FEATURES_WITH_EXPORT_GIT)
class TestGitExport(CourseTestCase):
"""
    Exercise the git_export django management command with various inputs.
"""
def setUp(self):
"""
Create/reinitialize bare repo and folders needed
"""
super(TestGitExport, self).setUp()
if not os.path.isdir(git_export_utils.GIT_REPO_EXPORT_DIR):
os.mkdir(git_export_utils.GIT_REPO_EXPORT_DIR)
self.addCleanup(shutil.rmtree, git_export_utils.GIT_REPO_EXPORT_DIR)
self.bare_repo_dir = '{0}/data/test_bare.git'.format(
os.path.abspath(settings.TEST_ROOT))
if not os.path.isdir(self.bare_repo_dir):
os.mkdir(self.bare_repo_dir)
self.addCleanup(shutil.rmtree, self.bare_repo_dir)
subprocess.check_output(['git', '--bare', 'init'],
cwd=self.bare_repo_dir)
def test_command(self):
"""
Test that the command interface works. Ignore stderr for clean
test output.
"""
with self.assertRaisesRegexp(CommandError, 'This script requires.*'):
call_command('git_export', 'blah', 'blah', 'blah', stderr=StringIO.StringIO())
with self.assertRaisesRegexp(CommandError, 'This script requires.*'):
call_command('git_export', stderr=StringIO.StringIO())
# Send bad url to get course not exported
with self.assertRaisesRegexp(CommandError, unicode(GitExportError.URL_BAD)):
call_command('git_export', 'foo/bar/baz', 'silly', stderr=StringIO.StringIO())
# Send bad course_id to get course not exported
with self.assertRaisesRegexp(CommandError, unicode(GitExportError.BAD_COURSE)):
call_command('git_export', 'foo/bar:baz', 'silly', stderr=StringIO.StringIO())
def test_error_output(self):
"""
Verify that error output is actually resolved as the correct string
"""
with self.assertRaisesRegexp(CommandError, unicode(GitExportError.BAD_COURSE)):
call_command(
'git_export', 'foo/bar:baz', 'silly'
)
with self.assertRaisesRegexp(CommandError, unicode(GitExportError.URL_BAD)):
call_command(
'git_export', 'foo/bar/baz', 'silly'
)
def test_bad_git_url(self):
"""
Test several bad URLs for validation
"""
course_key = SlashSeparatedCourseKey('org', 'course', 'run')
with self.assertRaisesRegexp(GitExportError, unicode(GitExportError.URL_BAD)):
git_export_utils.export_to_git(course_key, 'Sillyness')
with self.assertRaisesRegexp(GitExportError, unicode(GitExportError.URL_BAD)):
git_export_utils.export_to_git(course_key, 'example.com:edx/notreal')
with self.assertRaisesRegexp(GitExportError,
unicode(GitExportError.URL_NO_AUTH)):
git_export_utils.export_to_git(course_key, 'http://blah')
def test_bad_git_repos(self):
"""
Test invalid git repos
"""
test_repo_path = '{}/test_repo'.format(git_export_utils.GIT_REPO_EXPORT_DIR)
self.assertFalse(os.path.isdir(test_repo_path))
course_key = SlashSeparatedCourseKey('foo', 'blah', '100-')
# Test bad clones
with self.assertRaisesRegexp(GitExportError,
unicode(GitExportError.CANNOT_PULL)):
git_export_utils.export_to_git(
course_key,
'https://user:[email protected]/test_repo.git')
self.assertFalse(os.path.isdir(test_repo_path))
# Setup good repo with bad course to test xml export
with self.assertRaisesRegexp(GitExportError,
unicode(GitExportError.XML_EXPORT_FAIL)):
git_export_utils.export_to_git(
course_key,
'file://{0}'.format(self.bare_repo_dir))
# Test bad git remote after successful clone
with self.assertRaisesRegexp(GitExportError,
unicode(GitExportError.CANNOT_PULL)):
git_export_utils.export_to_git(
course_key,
'https://user:[email protected]/r.git')
@unittest.skipIf(os.environ.get('GIT_CONFIG') or
os.environ.get('GIT_AUTHOR_EMAIL') or
os.environ.get('GIT_AUTHOR_NAME') or
os.environ.get('GIT_COMMITTER_EMAIL') or
os.environ.get('GIT_COMMITTER_NAME'),
'Global git override set')
def test_git_ident(self):
"""
Test valid course with and without user specified.
Test skipped if git global config override environment variable GIT_CONFIG
is set.
"""
git_export_utils.export_to_git(
self.course.id,
'file://{0}'.format(self.bare_repo_dir),
'enigma'
)
expect_string = '{0}|{1}\n'.format(
git_export_utils.GIT_EXPORT_DEFAULT_IDENT['name'],
git_export_utils.GIT_EXPORT_DEFAULT_IDENT['email']
)
cwd = os.path.abspath(git_export_utils.GIT_REPO_EXPORT_DIR / 'test_bare')
git_log = subprocess.check_output(['git', 'log', '-1',
'--format=%an|%ae'], cwd=cwd)
self.assertEqual(expect_string, git_log)
# Make changes to course so there is something to commit
self.populate_course()
git_export_utils.export_to_git(
self.course.id,
'file://{0}'.format(self.bare_repo_dir),
self.user.username
)
expect_string = '{0}|{1}\n'.format(
self.user.username,
self.user.email,
)
git_log = subprocess.check_output(
['git', 'log', '-1', '--format=%an|%ae'], cwd=cwd)
self.assertEqual(expect_string, git_log)
def test_no_change(self):
"""
Test response if there are no changes
"""
git_export_utils.export_to_git(
self.course.id,
'file://{0}'.format(self.bare_repo_dir)
)
with self.assertRaisesRegexp(GitExportError,
unicode(GitExportError.CANNOT_COMMIT)):
git_export_utils.export_to_git(
self.course.id, 'file://{0}'.format(self.bare_repo_dir))
| agpl-3.0 | 1,096,287,261,778,393,600 | 38.794595 | 90 | 0.605949 | false |
Endika/c2c-rd-addons | c2c_account_payment_extension/wizard/__init__.py | 4 | 1478 | # -*- coding: utf-8 -*-
##############################################
#
# Swing Entwicklung betrieblicher Informationssysteme GmbH
# (<http://www.swing-system.com>)
# Copyright (C) ChriCar Beteiligungs- und Beratungs- GmbH
# all rights reserved
# 08-JUN-2012 (GK) created
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/> or
# write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
###############################################
import account_payment_order
| agpl-3.0 | 6,203,083,940,809,277,000 | 43.787879 | 74 | 0.715156 | false |
trishnaguha/ansible | lib/ansible/modules/network/checkpoint/checkpoint_run_script.py | 30 | 3057 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: checkpoint_run_script
short_description: Run scripts on Checkpoint devices over Web Services API
description:
- Run scripts on Checkpoint devices.
All operations are performed over Web Services API.
version_added: "2.8"
author: "Ansible by Red Hat (@rcarrillocruz)"
options:
script_name:
description:
- Name of the script.
type: str
required: True
script:
description:
- Script body contents.
type: str
required: True
targets:
description:
- Targets the script should be run against. Can reference either name or UID.
type: list
required: True
"""
EXAMPLES = """
- name: Run script
checkpoint_run_script:
script_name: "List root"
script: ls -l /
targets:
- mycheckpointgw
"""
RETURN = """
checkpoint_run_script:
description: The checkpoint run script output.
returned: always.
type: list
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.checkpoint.checkpoint import publish, install_policy
import json
def run_script(module, connection):
script_name = module.params['script_name']
script = module.params['script']
targets = module.params['targets']
payload = {'script-name': script_name,
'script': script,
'targets': targets}
code, response = connection.send_request('/web_api/run-script', payload)
return code, response
def main():
argument_spec = dict(
script_name=dict(type='str', required=True),
script=dict(type='str', required=True),
targets=dict(type='list', required=True)
)
module = AnsibleModule(argument_spec=argument_spec)
connection = Connection(module._socket_path)
code, response = run_script(module, connection)
result = {'changed': True}
if code == 200:
result['checkpoint_run_script'] = response
else:
module.fail_json(msg='Checkpoint device returned error {0} with message {1}'.format(code, response))
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 | -7,493,008,422,648,412,000 | 26.053097 | 108 | 0.679751 | false |
hpcugent/vsc-ldap | lib/vsc/ldap/filters.py | 1 | 9999 | # -*- coding: latin-1 -*-
#
# Copyright 2009-2021 Ghent University
#
# This file is part of vsc-ldap,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# the Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/hpcugent/vsc-ldap
#
# vsc-ldap is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# vsc-ldap is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with vsc-ldap. If not, see <http://www.gnu.org/licenses/>.
#
"""
This module contains classes that allow constructing filter for an LDAP search in
a straightforward and intuitive manner.
@author: Andy Georges
@author: Stijn De Weirdt
Examples:
from vsc.ldap.filter import LdapFilter
>>> f = LdapFilter("x=4")
>>> g = LdapFilter("y=5")
>>> h = LdapFilter("z=3")
>>> print "f = %s" % f
f = (x=4)
>>> print "g = %s" % g
g = (y=5)
>>> print "h = %s" % h
h = (z=3)
>>> print "f & g -> %s" % (f & g)
f & g -> (&(x=4)(y=5))
>>> print "f -> %s" % f
f -> (x=4)
>>> print "g -> %s" % g
g -> (y=5)
>>> print "(f & g) | h -> %s" % ((f & g) | h)
(f & g) | h -> (|(&(x=4)(y=5))(z=3))
>>> print "f & g | h -> %s" % (f & g | h)
f & g | h -> (|(&(x=4)(y=5))(z=3))
>>> print "f & (g | h) -> %s" % (f & (g | h))
f & (g | h) -> (&(x=4)(|(y=5)(z=3)))
>>> print "f & g & h -> %s" % (f & g & h)
f & g & h -> (&(x=4)(y=5)(z=3))
>>> print "f & g & h | f -> %s" % (f & g & h | f)
f & g & h | f -> (|(&(x=4)(y=5)(z=3))(x=4))
>>> print "! f -> %s" % (f.negate())
! f -> (!(x=4) )
>>> print "fold & [f,g,h] -> %s" % LdapFilter.from_list(lambda x, y: x & y, [f, g, h])
fold & [f,g,h] -> (& (x=4) (y=5) (z=3))
>>> print "fold | [f,g,h] -> %s" % LdapFilter.from_list(lambda x, y: x | y, [f, g, h])
fold | [f,g,h] -> (| (x=4) (y=5) (z=3))
>>> print "fold & [f,g,h, g=1] -> %s" % LdapFilter.from_list(lambda x, y: x & y, [f, g, h, "g=1"])
fold & [f,g,h, g=1] -> (& (x=4) (y=5) (z=3) (g=1))
"""
import copy
from functools import reduce
from vsc.utils.timestamp import convert_timestamp
class LdapFilterError(Exception):
pass
class LdapFilter(object):
"""Representing an LDAP filter with operators between the filter values.
This is implemented as a tree, where the nodes are the operations, e.g.,
and, or, ... and the leaves are the values to finally concatenate to
a single filter when printing out the tree.
    If you have multiple key-value pairs that you wish to concatenate using a single
operator, for example to take the AND of them, the static from_list method will do
just that.
Note that for usage in a search, the resulting filter should be transformed into a
string, if the tools are not doing that automagically :)
Note that all operations are left associative.
"""
def __init__(self, value):
"""Initialises the filter with a single value to filter on."""
self.root = value
self.left = None
self.right = None
@staticmethod
def from_list(operator, ls):
"""Turns the given list into a filter using the given operator as the combinator.
@returns: LdapFilter instance representing the filter.
"""
if ls and len(ls) > 0:
if not isinstance(ls[0], LdapFilter):
initialiser = LdapFilter(ls[0])
else:
initialiser = ls[0]
return reduce(lambda x, y: operator(x, y), ls[1:], initialiser)
else:
raise LdapFilterError()
def __and__(self, value):
"""Return a new filter that is the logical and operator of this filter and the provided value.
        It merges the current filter with the value. The current filter becomes the
left subtree of the new filter, the value becomes the right subtree.
@type value: This can be a string or an LdapFilter instance. In the former case,
first a new LdapFilter instance is made, such that all leaves are
actually LdapFilter instances.
@returns: the new filter instance
"""
if not isinstance(value, LdapFilter):
value = LdapFilter(value)
elif self == value:
value = copy.deepcopy(self)
return self._combine("&", value)
def __or__(self, value):
"""Return a new filter that is the logical or operator of this filter and the provided value.
        It merges the current filter with the value. The current filter becomes the
left subtree of the new filter, the value becomes the right subtree.
@type value: This can be a string or an LdapFilter instance. In the former case,
first a new LdapFilter instance is made, such that all leaves are
actually LdapFilter instances.
@returns: the new filter instance
"""
if not isinstance(value, LdapFilter):
value = LdapFilter(value)
elif self == value:
value = copy.deepcopy(self)
return self._combine("|", value)
def negate(self):
"""Return a new filter that represents the negation of the current filter.
@returns: the new filter instance
"""
return self._combine("!", None)
def __str__(self):
"""Converts the LdapFilter instance to a string."""
return self._to_string()
def _to_string(self, previous_operator=None):
"""Pretty prints the filter, such that it can be used in the calls to the LDAP library."""
if self.left is None:
# single value, self.root should be a string not representing an operator
return "(%s)" % (self.root)
left_string = self.left._to_string(self.root)
if not self.right is None:
right_string = self.right._to_string(self.root)
else:
right_string = ""
if self.root == previous_operator:
return "%s%s" % (left_string, right_string)
else:
return "(%s%s%s)" % (self.root, left_string, right_string)
def _combine(self, operator, value=None):
"""Updates the tree with a new root, i.e., the given operator and
the value.
        The original tree becomes the left child tree, the value the right.
@type value: Either an LdapFilter instance or None (default)
@returns: the updated instance.
"""
new = copy.deepcopy(self)
old = copy.copy(new)
new.root = operator
new.left = old
new.right = value
return new
class TimestampFilter(LdapFilter):
"""Represents a filter that aims to find entries that are compared to a given timestamp."""
def __init__(self, value, timestamp, comparator):
"""Initialise the filter.
@type value: string representing a filter
@type timestamp: string or datetime instance representing a timestamp. This value
will be converted to a format LDAP groks.
@type comparator: string representing a comparison operation, e.g., <=, >=
"""
super(TimestampFilter, self).__init__(value)
self.timestamp = convert_timestamp(timestamp)[1]
if comparator != '>=' and comparator != '<=':
raise LdapFilterError()
self.comparator = comparator
def __str__(self):
"""Converts the filter to an LDAP understood string."""
return "(& (modifyTimestamp%s%s) %s)" % (self.comparator,
self.timestamp,
super(TimestampFilter, self).__str__())
class NewerThanFilter(TimestampFilter):
"""Represents a filter that aims to find entries that are newer than the given timestamp."""
def __init__(self, value, timestamp):
"""Initialise the filter.
@type value: string representing a filter
@type timestamp: string or datetime instance representing a timestamp. This value
will be converted to a format LDAP groks.
"""
super(NewerThanFilter, self).__init__(value, timestamp, '>=')
class OlderThanFilter(TimestampFilter):
"""Represents a filter that aims to find entries that are older than the given timestamp."""
def __init__(self, value, timestamp):
"""Initialise the filter.
@type value: string representing a filter
@type timestamp: string or datetime instance representing a timestamp. This value
will be converted to a format LDAP groks.
"""
super(OlderThanFilter, self).__init__(value, timestamp, '<=')
class CnFilter(LdapFilter):
"""Representa a filter that matches a given common name."""
def __init__(self, cn):
super(CnFilter, self).__init__("cn=%s" % (cn))
class MemberFilter(LdapFilter):
"""Represents a filter that looks if a member is listed in the memberUid."""
def __init__(self, user_id):
super(MemberFilter, self).__init__("memberUid=%s" % (user_id))
class LoginFilter(LdapFilter):
"""Represents a filter that looks up a user based on his institute login name."""
def __init__(self, login):
super(LoginFilter, self).__init__("login=%s" % (login))
class InstituteFilter(LdapFilter):
"""Represents a filter that looks up a user based on his institute login name."""
def __init__(self, institute):
super(InstituteFilter, self).__init__("institute=%s" % (institute))
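# Small self-contained sketch combining the convenience filters defined above
# (the cn/memberUid values are made-up examples, not real VSC identifiers);
# guarded by __main__ so importing the module is unchanged.
if __name__ == "__main__":
    combined = CnFilter("gvo00001") & MemberFilter("vsc40001")
    print(str(combined))  # (&(cn=gvo00001)(memberUid=vsc40001))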
| gpl-2.0 | -6,010,921,779,081,361,000 | 34.710714 | 102 | 0.60466 | false |
drawks/ansible | lib/ansible/modules/storage/netapp/netapp_e_iscsi_target.py | 13 | 10627 | #!/usr/bin/python
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: netapp_e_iscsi_target
short_description: NetApp E-Series manage iSCSI target configuration
description:
- Configure the settings of an E-Series iSCSI target
version_added: '2.7'
author: Michael Price (@lmprice)
extends_documentation_fragment:
- netapp.eseries
options:
name:
description:
- The name/alias to assign to the iSCSI target.
- This alias is often used by the initiator software in order to make an iSCSI target easier to identify.
aliases:
- alias
ping:
description:
- Enable ICMP ping responses from the configured iSCSI ports.
type: bool
default: yes
chap_secret:
description:
- Enable Challenge-Handshake Authentication Protocol (CHAP), utilizing this value as the password.
- When this value is specified, we will always trigger an update (changed=True). We have no way of verifying
whether or not the password has changed.
- The chap secret may only use ascii characters with values between 32 and 126 decimal.
- The chap secret must be no less than 12 characters, but no more than 16 characters in length.
aliases:
- chap
- password
unnamed_discovery:
description:
- When an initiator initiates a discovery session to an initiator port, it is considered an unnamed
discovery session if the iSCSI target iqn is not specified in the request.
- This option may be disabled to increase security if desired.
type: bool
default: yes
log_path:
description:
- A local path (on the Ansible controller), to a file to be used for debug logging.
required: no
notes:
- Check mode is supported.
- Some of the settings are dependent on the settings applied to the iSCSI interfaces. These can be configured using
M(netapp_e_iscsi_interface).
- This module requires a Web Services API version of >= 1.3.
"""
EXAMPLES = """
- name: Enable ping responses and unnamed discovery sessions for all iSCSI ports
netapp_e_iscsi_target:
api_url: "https://localhost:8443/devmgr/v2"
api_username: admin
api_password: myPassword
ssid: "1"
validate_certs: no
name: myTarget
ping: yes
unnamed_discovery: yes
- name: Set the target alias and the CHAP secret
netapp_e_iscsi_target:
ssid: "{{ ssid }}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
name: myTarget
chap: password1234
"""
RETURN = """
msg:
description: Success message
returned: on success
type: str
sample: The iSCSI target settings have been updated.
alias:
description:
- The alias assigned to the iSCSI target.
returned: on success
sample: myArray
type: str
iqn:
description:
- The iqn (iSCSI Qualified Name), assigned to the iSCSI target.
returned: on success
sample: iqn.1992-08.com.netapp:2800.000a132000b006d2000000005a0e8f45
type: str
"""
import json
import logging
from pprint import pformat
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netapp import request, eseries_host_argument_spec
from ansible.module_utils._text import to_native
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json",
}
class IscsiTarget(object):
def __init__(self):
argument_spec = eseries_host_argument_spec()
argument_spec.update(dict(
name=dict(type='str', required=False, aliases=['alias']),
ping=dict(type='bool', required=False, default=True),
chap_secret=dict(type='str', required=False, aliases=['chap', 'password'], no_log=True),
unnamed_discovery=dict(type='bool', required=False, default=True),
log_path=dict(type='str', required=False),
))
self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, )
args = self.module.params
self.name = args['name']
self.ping = args['ping']
self.chap_secret = args['chap_secret']
self.unnamed_discovery = args['unnamed_discovery']
self.ssid = args['ssid']
self.url = args['api_url']
self.creds = dict(url_password=args['api_password'],
validate_certs=args['validate_certs'],
url_username=args['api_username'], )
self.check_mode = self.module.check_mode
self.post_body = dict()
self.controllers = list()
log_path = args['log_path']
# logging setup
self._logger = logging.getLogger(self.__class__.__name__)
if log_path:
logging.basicConfig(
level=logging.DEBUG, filename=log_path, filemode='w',
format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s')
if not self.url.endswith('/'):
self.url += '/'
if self.chap_secret is not None:
if len(self.chap_secret) < 12 or len(self.chap_secret) > 16:
self.module.fail_json(msg="The provided CHAP secret is not valid, it must be between 12 and 16"
" characters in length.")
for c in self.chap_secret:
ordinal = ord(c)
if ordinal < 32 or ordinal > 126:
self.module.fail_json(msg="The provided CHAP secret is not valid, it may only utilize ascii"
" characters with decimal values between 32 and 126.")
@property
def target(self):
"""Provide information on the iSCSI Target configuration
Sample:
{
'alias': 'myCustomName',
'ping': True,
'unnamed_discovery': True,
'chap': False,
'iqn': 'iqn.1992-08.com.netapp:2800.000a132000b006d2000000005a0e8f45',
}
"""
target = dict()
try:
(rc, data) = request(self.url + 'storage-systems/%s/graph/xpath-filter?query=/storagePoolBundle/target'
% self.ssid, headers=HEADERS, **self.creds)
# This likely isn't an iSCSI-enabled system
if not data:
self.module.fail_json(
msg="This storage-system doesn't appear to have iSCSI interfaces. Array Id [%s]." % (self.ssid))
data = data[0]
chap = any(
[auth for auth in data['configuredAuthMethods']['authMethodData'] if auth['authMethod'] == 'chap'])
target.update(dict(alias=data['alias']['iscsiAlias'],
iqn=data['nodeName']['iscsiNodeName'],
chap=chap))
(rc, data) = request(self.url + 'storage-systems/%s/graph/xpath-filter?query=/sa/iscsiEntityData'
% self.ssid, headers=HEADERS, **self.creds)
data = data[0]
target.update(dict(ping=data['icmpPingResponseEnabled'],
unnamed_discovery=data['unnamedDiscoverySessionsEnabled']))
except Exception as err:
self.module.fail_json(
msg="Failed to retrieve the iSCSI target information. Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
return target
def apply_iscsi_settings(self):
"""Update the iSCSI target alias and CHAP settings"""
update = False
target = self.target
body = dict()
if self.name is not None and self.name != target['alias']:
update = True
body['alias'] = self.name
# If the CHAP secret was provided, we trigger an update.
if self.chap_secret is not None:
update = True
body.update(dict(enableChapAuthentication=True,
chapSecret=self.chap_secret))
# If no secret was provided, then we disable chap
elif target['chap']:
update = True
body.update(dict(enableChapAuthentication=False))
if update and not self.check_mode:
try:
request(self.url + 'storage-systems/%s/iscsi/target-settings' % self.ssid, method='POST',
data=json.dumps(body), headers=HEADERS, **self.creds)
except Exception as err:
self.module.fail_json(
msg="Failed to update the iSCSI target settings. Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
return update
def apply_target_changes(self):
update = False
target = self.target
body = dict()
if self.ping != target['ping']:
update = True
body['icmpPingResponseEnabled'] = self.ping
if self.unnamed_discovery != target['unnamed_discovery']:
update = True
body['unnamedDiscoverySessionsEnabled'] = self.unnamed_discovery
self._logger.info(pformat(body))
if update and not self.check_mode:
try:
request(self.url + 'storage-systems/%s/iscsi/entity' % self.ssid, method='POST',
data=json.dumps(body), timeout=60, headers=HEADERS, **self.creds)
except Exception as err:
self.module.fail_json(
msg="Failed to update the iSCSI target settings. Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
return update
def update(self):
update = self.apply_iscsi_settings()
update = self.apply_target_changes() or update
target = self.target
data = dict((key, target[key]) for key in target if key in ['iqn', 'alias'])
self.module.exit_json(msg="The interface settings have been updated.", changed=update, **data)
def __call__(self, *args, **kwargs):
self.update()
def main():
iface = IscsiTarget()
iface()
if __name__ == '__main__':
main()
| gpl-3.0 | 6,369,055,131,591,457,000 | 35.269625 | 120 | 0.585866 | false |
PaulWay/spacewalk | backend/satellite_tools/xmlDiskSource.py | 2 | 9003 | #
# Abstraction for an XML importer with a disk base
#
# Copyright (c) 2008--2014 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import os
import gzip
from spacewalk.common.fileutils import createPath
from spacewalk.common.rhnLib import hash_object_id
class MissingXmlDiskSourceFileError(Exception):
pass
class MissingXmlDiskSourceDirError(Exception):
pass
class DiskSource:
subdir = None
# Allow for compressed files by default
allow_compressed_files = 1
def __init__(self, mountPoint):
self.mountPoint = mountPoint
# Returns a data stream
def load(self):
# Returns a stream
filename = self._getFile()
return self._loadFile(filename)
def _getFile(self, create=0):
# Virtual
# pylint: disable=W0613,R0201
return None
def _loadFile(self, filename):
# Look for a gzip file first
if self.allow_compressed_files:
if filename[-3:] == '.gz' and os.path.exists(filename):
return gzip.open(filename, "rb")
if os.path.exists(filename + '.gz'):
return gzip.open(filename + ".gz", "rb")
if os.path.exists(filename):
return open(filename, "r")
raise MissingXmlDiskSourceFileError("unable to process file %s" % filename)
def _getDir(self, create=0):
dirname = "%s/%s" % (self.mountPoint, self.subdir)
if not create:
return dirname
if not os.path.exists(dirname):
createPath(dirname)
if not os.path.isdir(dirname):
raise MissingXmlDiskSourceDirError("%s is not a directory" % dirname)
return dirname
class ArchesDiskSource(DiskSource):
subdir = 'arches'
filename = 'arches.xml'
def _getFile(self, create=0):
dirname = self._getDir(create)
if create and not os.path.isdir(dirname):
createPath(dirname)
return os.path.join(dirname, self.filename)
class ArchesExtraDiskSource(ArchesDiskSource):
filename = "arches-extra.xml"
class ProductnamesDiskSource(DiskSource):
subdir = 'product_names'
def _getFile(self, create=0):
dirname = self._getDir(create)
if create and not os.path.isdir(dirname):
createPath(dirname)
return "%s/product_names.xml" % dirname
class ChannelFamilyDiskSource(DiskSource):
subdir = 'channel_families'
def _getFile(self, create=0):
dirname = self._getDir(create)
if create and not os.path.isdir(dirname):
createPath(dirname)
return "%s/channel_families.xml" % dirname
class OrgsDiskSource(DiskSource):
subdir = 'orgs'
def _getFile(self, create=0):
dirname = self._getDir(create)
if create and not os.path.isdir(dirname):
createPath(dirname)
return "%s/orgs.xml" % dirname
class ChannelDiskSource(DiskSource):
subdir = 'channels'
def __init__(self, mountPoint):
DiskSource.__init__(self, mountPoint)
self.channel = None
def setChannel(self, channel):
self.channel = channel
def list(self):
# Lists the available channels
dirname = self._getDir(create=0)
if not os.path.isdir(dirname):
# No channels available
return []
return os.listdir(dirname)
def _getFile(self, create=0):
dirname = "%s/%s" % (self._getDir(create), self.channel)
if create and not os.path.isdir(dirname):
createPath(dirname)
return os.path.join(dirname, self._file_name())
@staticmethod
def _file_name():
return "channel.xml"
class ChannelCompsDiskSource(ChannelDiskSource):
@staticmethod
def _file_name():
return "comps.xml"
class ShortPackageDiskSource(DiskSource):
subdir = "packages_short"
def __init__(self, mountPoint):
DiskSource.__init__(self, mountPoint)
# Package ID
self.id = None
self._file_suffix = ".xml"
def setID(self, pid):
self.id = pid
# limited dict behaviour
def has_key(self, pid):
# Save the old id
old_id = self.id
self.id = pid
f = self._getFile()
# Restore the old id
self.id = old_id
if os.path.exists(f + '.gz') or os.path.exists(f):
return 1
return 0
def _getFile(self, create=0):
dirname = "%s/%s" % (self._getDir(create), self._hashID())
        # Create the directory if we have to
if create and not os.path.exists(dirname):
createPath(dirname)
return "%s/%s%s" % (dirname, self.id, self._file_suffix)
def _hashID(self):
# Hashes the package name
return hash_object_id(self.id, 2)
class PackageDiskSource(ShortPackageDiskSource):
subdir = "packages"
class SourcePackageDiskSource(ShortPackageDiskSource):
subdir = "source_packages"
class ErrataDiskSource(ShortPackageDiskSource):
subdir = "errata"
def _hashID(self):
# Hashes the erratum name
return hash_object_id(self.id, 1)
class BlacklistsDiskSource(DiskSource):
subdir = "blacklists"
def _getFile(self, create=0):
dirname = self._getDir(create)
if create and not os.path.isdir(dirname):
createPath(dirname)
return "%s/blacklists.xml" % dirname
class BinaryRPMDiskSource(ShortPackageDiskSource):
subdir = "rpms"
def __init__(self, mountPoint):
ShortPackageDiskSource.__init__(self, mountPoint)
self._file_suffix = '.rpm'
class SourceRPMDiskSource(BinaryRPMDiskSource):
subdir = "srpms"
class KickstartDataDiskSource(DiskSource):
subdir = "kickstart_trees"
def __init__(self, mountPoint):
DiskSource.__init__(self, mountPoint)
self.id = None
def setID(self, ks_label):
self.id = ks_label
def _getFile(self, create=0):
dirname = self._getDir(create)
if create and not os.path.isdir(dirname):
createPath(dirname)
return os.path.join(dirname, self.id) + '.xml'
class KickstartFileDiskSource(KickstartDataDiskSource):
subdir = "kickstart_files"
allow_compressed_files = 0
def __init__(self, mountPoint):
KickstartDataDiskSource.__init__(self, mountPoint)
# the file's relative path
self.relative_path = None
def set_relative_path(self, relative_path):
self.relative_path = relative_path
def _getFile(self, create=0):
path = os.path.join(self._getDir(create), self.id,
self.relative_path)
dirname = os.path.dirname(path)
if create and not os.path.isdir(dirname):
createPath(dirname)
return path
class MetadataDiskSource:
def __init__(self, mountpoint):
self.mountpoint = mountpoint
@staticmethod
def is_disk_loader():
return True
def getArchesXmlStream(self):
return ArchesDiskSource(self.mountpoint).load()
def getArchesExtraXmlStream(self):
return ArchesExtraDiskSource(self.mountpoint).load()
def getChannelFamilyXmlStream(self):
return ChannelFamilyDiskSource(self.mountpoint).load()
def getOrgsXmlStream(self):
return OrgsDiskSource(self.mountpoint).load()
def getProductNamesXmlStream(self):
return ProductnamesDiskSource(self.mountpoint).load()
def getComps(self, label):
sourcer = ChannelCompsDiskSource(self.mountpoint)
sourcer.setChannel(label)
return sourcer.load()
def getChannelXmlStream(self):
sourcer = ChannelDiskSource(self.mountpoint)
channels = sourcer.list()
stream_list = []
for c in channels:
sourcer.setChannel(c)
stream_list.append(sourcer.load())
return stream_list
def getChannelShortPackagesXmlStream(self):
return ShortPackageDiskSource(self.mountpoint)
def getPackageXmlStream(self):
return PackageDiskSource(self.mountpoint)
def getSourcePackageXmlStream(self):
return SourcePackageDiskSource(self.mountpoint)
def getKickstartsXmlStream(self):
return KickstartDataDiskSource(self.mountpoint)
def getErrataXmlStream(self):
return ErrataDiskSource(self.mountpoint)
if __name__ == '__main__':
# TEST CODE
s = ChannelDiskSource("/tmp")
print s.list()
s.setChannel("redhat-linux-i386-7.2")
print s.load()
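    # Additional sketch (the package id below is made up) exercising the hashed
    # packages_short layout; has_key() only checks for file existence, so no
    # actual package file is required.
    p = ShortPackageDiskSource("/tmp")
    p.setID("rhn-package-12345")
    print p.has_key("rhn-package-12345")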
| gpl-2.0 | -3,413,651,539,463,968,000 | 26.53211 | 83 | 0.64223 | false |
Onager/plaso | tests/parsers/esedb_plugins/msie_webcache.py | 1 | 3763 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the Microsoft Internet Explorer WebCache database."""
import unittest
from plaso.lib import definitions
from plaso.parsers.esedb_plugins import msie_webcache
from tests.parsers.esedb_plugins import test_lib
class MsieWebCacheESEDBPluginTest(test_lib.ESEDBPluginTestCase):
"""Tests for the MSIE WebCache ESE database plugin."""
# pylint: disable=protected-access
def testConvertHeadersValues(self):
"""Tests the _ConvertHeadersValues function."""
plugin = msie_webcache.MsieWebCacheESEDBPlugin()
binary_value = (
b'HTTP/1.1 200 OK\r\nContent-Type: image/png\r\n'
b'X-Content-Type-Options: nosniff\r\nContent-Length: 2759\r\n'
b'X-XSS-Protection: 1; mode=block\r\n'
b'Alternate-Protocol: 80:quic\r\n\r\n')
expected_headers_value = (
'[HTTP/1.1 200 OK; Content-Type: image/png; '
'X-Content-Type-Options: nosniff; Content-Length: 2759; '
'X-XSS-Protection: 1; mode=block; '
'Alternate-Protocol: 80:quic]')
headers_value = plugin._ConvertHeadersValues(binary_value)
self.assertEqual(headers_value, expected_headers_value)
def testProcessOnDatabaseWithPartitionsTable(self):
"""Tests the Process function on database with a Partitions table."""
plugin = msie_webcache.MsieWebCacheESEDBPlugin()
storage_writer = self._ParseESEDBFileWithPlugin(['WebCacheV01.dat'], plugin)
self.assertEqual(storage_writer.number_of_warnings, 0)
self.assertEqual(storage_writer.number_of_events, 1354)
# The order in which ESEDBPlugin._GetRecordValues() generates events is
# nondeterministic hence we sort the events.
events = list(storage_writer.GetSortedEvents())
expected_event_values = {
'container_identifier': 1,
'data_type': 'msie:webcache:containers',
'directory': (
'C:\\Users\\test\\AppData\\Local\\Microsoft\\Windows\\'
'INetCache\\IE\\'),
'name': 'Content',
'set_identifier': 0,
'timestamp': '2014-05-12 07:30:25.486199',
'timestamp_desc': definitions.TIME_DESCRIPTION_LAST_ACCESS}
self.CheckEventValues(storage_writer, events[567], expected_event_values)
def testProcessOnDatabaseWithPartitionsExTable(self):
"""Tests the Process function on database with a PartitionsEx table."""
plugin = msie_webcache.MsieWebCacheESEDBPlugin()
storage_writer = self._ParseESEDBFileWithPlugin(
['PartitionsEx-WebCacheV01.dat'], plugin)
self.assertEqual(storage_writer.number_of_warnings, 3)
self.assertEqual(storage_writer.number_of_events, 4014)
# The order in which ESEDBPlugin._GetRecordValues() generates events is
# nondeterministic hence we sort the events.
events = list(storage_writer.GetSortedEvents())
expected_event_values = {
'access_count': 5,
'cache_identifier': 0,
'cached_file_size': 726,
'cached_filename': 'b83d57c0[1].svg',
'container_identifier': 14,
'data_type': 'msie:webcache:container',
'entry_identifier': 63,
'sync_count': 0,
'response_headers': (
'[HTTP/1.1 200; content-length: 726; content-type: image/svg+xml; '
'x-cache: TCP_HIT; x-msedge-ref: Ref A: 3CD5FCBC8EAD4E0A80FA41A62'
'FBC8CCC Ref B: PRAEDGE0910 Ref C: 2019-12-16T20:55:28Z; date: '
'Mon, 16 Dec 2019 20:55:28 GMT]'),
'timestamp': '2019-03-20 17:22:14.000000',
'timestamp_desc': definitions.TIME_DESCRIPTION_MODIFICATION,
'url': 'https://www.bing.com/rs/3R/kD/ic/878ca0cd/b83d57c0.svg'}
self.CheckEventValues(storage_writer, events[100], expected_event_values)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -909,776,396,206,203,600 | 37.793814 | 80 | 0.674728 | false |
jeffreylu9/django-cms | cms/test_utils/project/sampleapp/migrations/0002_auto_20141015_1057.py | 60 | 1264 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('sampleapp', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='category',
name='level',
),
migrations.RemoveField(
model_name='category',
name='lft',
),
migrations.RemoveField(
model_name='category',
name='rght',
),
migrations.RemoveField(
model_name='category',
name='tree_id',
),
migrations.AddField(
model_name='category',
name='depth',
field=models.PositiveIntegerField(default=1),
preserve_default=False,
),
migrations.AddField(
model_name='category',
name='numchild',
field=models.PositiveIntegerField(default=0),
preserve_default=True,
),
migrations.AddField(
model_name='category',
name='path',
field=models.CharField(default='0001', unique=True, max_length=255),
preserve_default=False,
),
]
| bsd-3-clause | 898,873,369,542,191,000 | 25.333333 | 80 | 0.522152 | false |
vrsys/avango | examples/stereo_example/main.py | 1 | 3087 | import avango
import avango.script
from avango.script import field_has_changed
import avango.gua
from examples_common.GuaVE import GuaVE
STEREO_MODE = avango.gua.StereoMode.ANAGLYPH_RED_CYAN
# STEREO_MODE = avango.gua.StereoMode.ANAGLYPH_RED_GREEN
# STEREO_MODE = avango.gua.StereoMode.SIDE_BY_SIDE
# STEREO_MODE = avango.gua.StereoMode.CHECKERBOARD
# STEREO_MODE = avango.gua.StereoMode.NVIDIA_3D_VISION
# STEREO_MODE = avango.gua.StereoMode.QUAD_BUFFERED
class TimedRotate(avango.script.Script):
TimeIn = avango.SFFloat()
MatrixOut = avango.gua.SFMatrix4()
@field_has_changed(TimeIn)
def update(self):
self.MatrixOut.value = avango.gua.make_rot_mat(self.TimeIn.value*2.0, 0.0, 1.0, 0.0)
def start():
# setup scenegraph
graph = avango.gua.nodes.SceneGraph(Name = "scenegraph")
loader = avango.gua.nodes.TriMeshLoader()
monkey = loader.create_geometry_from_file("monkey", "data/objects/monkey.obj", avango.gua.LoaderFlags.DEFAULTS)
light = avango.gua.nodes.LightNode(
Type=avango.gua.LightType.POINT,
Name = "light",
Color = avango.gua.Color(1.0, 1.0, 1.0),
Brightness = 10
)
light.Transform.value = avango.gua.make_trans_mat(1, 1, 2) * avango.gua.make_scale_mat(15, 15, 15)
# setup viewing
width = 1024
height = 768
eye_size = avango.gua.Vec2ui(width, height)
window_size = avango.gua.Vec2ui(width, height)
left_pos = avango.gua.Vec2ui(0, 0)
right_pos = avango.gua.Vec2ui(0, 0)
if STEREO_MODE == avango.gua.StereoMode.SIDE_BY_SIDE:
right_pos.x = width + 1
window_size.x *= 2
#window = avango.gua.nodes.GlfwWindow(Size = window_size,
window = avango.gua.nodes.Window(Size = window_size,
LeftPosition = left_pos,
LeftResolution = eye_size,
RightPosition = right_pos,
RightResolution = eye_size,
StereoMode = STEREO_MODE)
avango.gua.register_window("window", window)
cam = avango.gua.nodes.CameraNode(
Name = "cam",
LeftScreenPath = "/screen",
RightScreenPath = "/screen",
SceneGraph = "scenegraph",
Resolution = eye_size,
EyeDistance = 0.06,
EnableStereo = True,
OutputWindowName = "window",
Transform = avango.gua.make_trans_mat(0.0, 0.0, 0.5)
# NearClip =
)
screen = avango.gua.nodes.ScreenNode(Name = "screen", Width = 0.5, Height = 0.5 * 0.3 / 0.4)
screen.Transform.value = avango.gua.make_trans_mat(0.0, 0.0, 2.5)
screen.Children.value = [cam]
graph.Root.value.Children.value = [monkey, light, screen]
#setup viewer
viewer = avango.gua.nodes.Viewer()
viewer.SceneGraphs.value = [graph]
viewer.Windows.value = [window]
viewer.DesiredFPS.value = 500.0
monkey_updater = TimedRotate()
timer = avango.nodes.TimeSensor()
monkey_updater.TimeIn.connect_from(timer.Time)
monkey.Transform.connect_from(monkey_updater.MatrixOut)
guaVE = GuaVE()
guaVE.start(locals(), globals())
viewer.run()
if __name__ == '__main__':
start()
| lgpl-3.0 | -4,250,012,594,367,639,600 | 29.87 | 113 | 0.656301 | false |
yencarnacion/jaikuengine | .google_appengine/lib/django-1.4/django/contrib/gis/db/backends/oracle/operations.py | 52 | 12903 | """
This module contains the spatial lookup types, and the `get_geo_where_clause`
routine for Oracle Spatial.
Please note that WKT support is broken on the XE version, and thus
this backend will not work on such platforms. Specifically, XE lacks
support for an internal JVM, and Java libraries are required to use
the WKT constructors.
"""
import re
from decimal import Decimal
from itertools import izip
from django.db.backends.oracle.base import DatabaseOperations
from django.contrib.gis.db.backends.base import BaseSpatialOperations
from django.contrib.gis.db.backends.oracle.adapter import OracleSpatialAdapter
from django.contrib.gis.db.backends.util import SpatialFunction
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.measure import Distance
class SDOOperation(SpatialFunction):
"Base class for SDO* Oracle operations."
sql_template = "%(function)s(%(geo_col)s, %(geometry)s) %(operator)s '%(result)s'"
def __init__(self, func, **kwargs):
kwargs.setdefault('operator', '=')
kwargs.setdefault('result', 'TRUE')
super(SDOOperation, self).__init__(func, **kwargs)
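# Editor's sketch, not part of the original module: with the template defined on
# SDOOperation above, a 'contains' lookup against a hypothetical column
# "geoapp_city"."point" and the placeholder produced by get_geom_placeholder()
# would render roughly as:
#   SDO_CONTAINS("geoapp_city"."point", SDO_GEOMETRY(%s, 4326)) = 'TRUE'
# The operator and result default to '=' and 'TRUE' as set in __init__ above.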
class SDODistance(SpatialFunction):
"Class for Distance queries."
sql_template = ('%(function)s(%(geo_col)s, %(geometry)s, %(tolerance)s) '
'%(operator)s %(result)s')
dist_func = 'SDO_GEOM.SDO_DISTANCE'
def __init__(self, op, tolerance=0.05):
super(SDODistance, self).__init__(self.dist_func,
tolerance=tolerance,
operator=op, result='%s')
class SDODWithin(SpatialFunction):
dwithin_func = 'SDO_WITHIN_DISTANCE'
sql_template = "%(function)s(%(geo_col)s, %(geometry)s, %%s) = 'TRUE'"
def __init__(self):
super(SDODWithin, self).__init__(self.dwithin_func)
class SDOGeomRelate(SpatialFunction):
"Class for using SDO_GEOM.RELATE."
relate_func = 'SDO_GEOM.RELATE'
sql_template = ("%(function)s(%(geo_col)s, '%(mask)s', %(geometry)s, "
"%(tolerance)s) %(operator)s '%(mask)s'")
def __init__(self, mask, tolerance=0.05):
# SDO_GEOM.RELATE(...) has a peculiar argument order: column, mask, geom, tolerance.
        # Moreover, the function result is the mask (e.g., 'DISJOINT' instead of 'TRUE').
super(SDOGeomRelate, self).__init__(self.relate_func, operator='=',
mask=mask, tolerance=tolerance)
class SDORelate(SpatialFunction):
"Class for using SDO_RELATE."
masks = 'TOUCH|OVERLAPBDYDISJOINT|OVERLAPBDYINTERSECT|EQUAL|INSIDE|COVEREDBY|CONTAINS|COVERS|ANYINTERACT|ON'
mask_regex = re.compile(r'^(%s)(\+(%s))*$' % (masks, masks), re.I)
sql_template = "%(function)s(%(geo_col)s, %(geometry)s, 'mask=%(mask)s') = 'TRUE'"
relate_func = 'SDO_RELATE'
def __init__(self, mask):
if not self.mask_regex.match(mask):
raise ValueError('Invalid %s mask: "%s"' % (self.relate_func, mask))
super(SDORelate, self).__init__(self.relate_func, mask=mask)
# Valid distance types and substitutions
dtypes = (Decimal, Distance, float, int, long)
class OracleOperations(DatabaseOperations, BaseSpatialOperations):
compiler_module = "django.contrib.gis.db.backends.oracle.compiler"
name = 'oracle'
oracle = True
valid_aggregates = dict([(a, None) for a in ('Union', 'Extent')])
Adapter = OracleSpatialAdapter
Adaptor = Adapter # Backwards-compatibility alias.
area = 'SDO_GEOM.SDO_AREA'
    gml = 'SDO_UTIL.TO_GMLGEOMETRY'
centroid = 'SDO_GEOM.SDO_CENTROID'
difference = 'SDO_GEOM.SDO_DIFFERENCE'
distance = 'SDO_GEOM.SDO_DISTANCE'
    extent = 'SDO_AGGR_MBR'
    intersection = 'SDO_GEOM.SDO_INTERSECTION'
length = 'SDO_GEOM.SDO_LENGTH'
num_geom = 'SDO_UTIL.GETNUMELEM'
num_points = 'SDO_UTIL.GETNUMVERTICES'
perimeter = length
point_on_surface = 'SDO_GEOM.SDO_POINTONSURFACE'
reverse = 'SDO_UTIL.REVERSE_LINESTRING'
sym_difference = 'SDO_GEOM.SDO_XOR'
transform = 'SDO_CS.TRANSFORM'
union = 'SDO_GEOM.SDO_UNION'
unionagg = 'SDO_AGGR_UNION'
# We want to get SDO Geometries as WKT because it is much easier to
# instantiate GEOS proxies from WKT than SDO_GEOMETRY(...) strings.
# However, this adversely affects performance (i.e., Java is called
# to convert to WKT on every query). If someone wishes to write a
# SDO_GEOMETRY(...) parser in Python, let me know =)
select = 'SDO_UTIL.TO_WKTGEOMETRY(%s)'
distance_functions = {
'distance_gt' : (SDODistance('>'), dtypes),
'distance_gte' : (SDODistance('>='), dtypes),
'distance_lt' : (SDODistance('<'), dtypes),
'distance_lte' : (SDODistance('<='), dtypes),
'dwithin' : (SDODWithin(), dtypes),
}
geometry_functions = {
'contains' : SDOOperation('SDO_CONTAINS'),
'coveredby' : SDOOperation('SDO_COVEREDBY'),
'covers' : SDOOperation('SDO_COVERS'),
'disjoint' : SDOGeomRelate('DISJOINT'),
'intersects' : SDOOperation('SDO_OVERLAPBDYINTERSECT'), # TODO: Is this really the same as ST_Intersects()?
'equals' : SDOOperation('SDO_EQUAL'),
'exact' : SDOOperation('SDO_EQUAL'),
'overlaps' : SDOOperation('SDO_OVERLAPS'),
'same_as' : SDOOperation('SDO_EQUAL'),
'relate' : (SDORelate, basestring), # Oracle uses a different syntax, e.g., 'mask=inside+touch'
'touches' : SDOOperation('SDO_TOUCH'),
'within' : SDOOperation('SDO_INSIDE'),
}
geometry_functions.update(distance_functions)
gis_terms = ['isnull']
gis_terms += geometry_functions.keys()
gis_terms = dict([(term, None) for term in gis_terms])
truncate_params = {'relate' : None}
def convert_extent(self, clob):
if clob:
# Generally, Oracle returns a polygon for the extent -- however,
# it can return a single point if there's only one Point in the
# table.
ext_geom = Geometry(clob.read())
gtype = str(ext_geom.geom_type)
if gtype == 'Polygon':
# Construct the 4-tuple from the coordinates in the polygon.
shell = ext_geom.shell
ll, ur = shell[0][:2], shell[2][:2]
elif gtype == 'Point':
ll = ext_geom.coords[:2]
ur = ll
else:
raise Exception('Unexpected geometry type returned for extent: %s' % gtype)
xmin, ymin = ll
xmax, ymax = ur
return (xmin, ymin, xmax, ymax)
else:
return None
def convert_geom(self, clob, geo_field):
if clob:
return Geometry(clob.read(), geo_field.srid)
else:
return None
def geo_db_type(self, f):
"""
Returns the geometry database type for Oracle. Unlike other spatial
backends, no stored procedure is necessary and it's the same for all
geometry types.
"""
return 'MDSYS.SDO_GEOMETRY'
def get_distance(self, f, value, lookup_type):
"""
Returns the distance parameters given the value and the lookup type.
On Oracle, geometry columns with a geodetic coordinate system behave
implicitly like a geography column, and thus meters will be used as
the distance parameter on them.
"""
if not value:
return []
value = value[0]
if isinstance(value, Distance):
if f.geodetic(self.connection):
dist_param = value.m
else:
dist_param = getattr(value, Distance.unit_attname(f.units_name(self.connection)))
else:
dist_param = value
# dwithin lookups on oracle require a special string parameter
# that starts with "distance=".
if lookup_type == 'dwithin':
dist_param = 'distance=%s' % dist_param
return [dist_param]
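    # Editor's sketch, not part of the original source: assuming a geodetic geometry
    # field `f`, the method above returns, for example:
    #   get_distance(f, [Distance(km=1)], 'distance_lte') -> [1000.0]
    #   get_distance(f, [Distance(km=1)], 'dwithin')      -> ['distance=1000.0']
    # For projected fields the Distance is converted to the field's own units instead.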
def get_geom_placeholder(self, f, value):
"""
Provides a proper substitution value for Geometries that are not in the
SRID of the field. Specifically, this routine will substitute in the
SDO_CS.TRANSFORM() function call.
"""
if value is None:
return 'NULL'
def transform_value(val, srid):
return val.srid != srid
if hasattr(value, 'expression'):
if transform_value(value, f.srid):
placeholder = '%s(%%s, %s)' % (self.transform, f.srid)
else:
placeholder = '%s'
            # No geometry value used for F expression, substitute in
# the column name instead.
return placeholder % '%s.%s' % tuple(map(self.quote_name, value.cols[value.expression]))
else:
if transform_value(value, f.srid):
return '%s(SDO_GEOMETRY(%%s, %s), %s)' % (self.transform, value.srid, f.srid)
else:
return 'SDO_GEOMETRY(%%s, %s)' % f.srid
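    # Editor's sketch, not part of the original source: for a field with SRID 4326,
    # the placeholders produced above look roughly like:
    #   geometry value already in SRID 4326 -> 'SDO_GEOMETRY(%s, 4326)'
    #   geometry value in SRID 32140        -> 'SDO_CS.TRANSFORM(SDO_GEOMETRY(%s, 32140), 4326)'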
def spatial_lookup_sql(self, lvalue, lookup_type, value, field, qn):
"Returns the SQL WHERE clause for use in Oracle spatial SQL construction."
alias, col, db_type = lvalue
# Getting the quoted table name as `geo_col`.
geo_col = '%s.%s' % (qn(alias), qn(col))
        # See if an Oracle Geometry function matches the lookup type next
lookup_info = self.geometry_functions.get(lookup_type, False)
if lookup_info:
# Lookup types that are tuples take tuple arguments, e.g., 'relate' and
# 'dwithin' lookup types.
if isinstance(lookup_info, tuple):
# First element of tuple is lookup type, second element is the type
# of the expected argument (e.g., str, float)
sdo_op, arg_type = lookup_info
geom = value[0]
# Ensuring that a tuple _value_ was passed in from the user
if not isinstance(value, tuple):
raise ValueError('Tuple required for `%s` lookup type.' % lookup_type)
if len(value) != 2:
raise ValueError('2-element tuple required for %s lookup type.' % lookup_type)
# Ensuring the argument type matches what we expect.
if not isinstance(value[1], arg_type):
raise ValueError('Argument type should be %s, got %s instead.' % (arg_type, type(value[1])))
if lookup_type == 'relate':
# The SDORelate class handles construction for these queries,
# and verifies the mask argument.
return sdo_op(value[1]).as_sql(geo_col, self.get_geom_placeholder(field, geom))
else:
# Otherwise, just call the `as_sql` method on the SDOOperation instance.
return sdo_op.as_sql(geo_col, self.get_geom_placeholder(field, geom))
else:
            # Lookup info is an SDOOperation instance, whose `as_sql` method returns
            # the SQL necessary for the geometry function call. For example:
            # SDO_CONTAINS("geoapp_country"."poly", SDO_GEOMETRY('POINT(5 23)', 4326)) = 'TRUE'
return lookup_info.as_sql(geo_col, self.get_geom_placeholder(field, value))
elif lookup_type == 'isnull':
# Handling 'isnull' lookup type
return "%s IS %sNULL" % (geo_col, (not value and 'NOT ' or ''))
raise TypeError("Got invalid lookup_type: %s" % repr(lookup_type))
def spatial_aggregate_sql(self, agg):
"""
Returns the spatial aggregate SQL template and function for the
given Aggregate instance.
"""
agg_name = agg.__class__.__name__.lower()
if agg_name == 'union' : agg_name += 'agg'
if agg.is_extent:
sql_template = '%(function)s(%(field)s)'
else:
sql_template = '%(function)s(SDOAGGRTYPE(%(field)s,%(tolerance)s))'
sql_function = getattr(self, agg_name)
return self.select % sql_template, sql_function
# Routines for getting the OGC-compliant models.
def geometry_columns(self):
from django.contrib.gis.db.backends.oracle.models import GeometryColumns
return GeometryColumns
def spatial_ref_sys(self):
from django.contrib.gis.db.backends.oracle.models import SpatialRefSys
return SpatialRefSys
def modify_insert_params(self, placeholders, params):
"""Drop out insert parameters for NULL placeholder. Needed for Oracle Spatial
backend due to #10888
"""
# This code doesn't work for bulk insert cases.
assert len(placeholders) == 1
return [[param for pholder,param
in izip(placeholders[0], params[0]) if pholder != 'NULL'], ]
| apache-2.0 | 8,105,194,732,784,969,000 | 42.153846 | 115 | 0.603658 | false |
gromacs/copernicus | cpc/server/message/state.py | 2 | 4443 | # This file is part of Copernicus
# http://www.copernicus-computing.org/
#
# Copyright (C) 2011, Sander Pronk, Iman Pouya, Erik Lindahl, and others.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as published
# by the Free Software Foundation
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import logging
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from cpc.util.conf.server_conf import ServerConf, ServerIdNotFoundException
from cpc.util.version import __version__
from server_command import ServerCommand
from server_command import ServerCommandError
from cpc.server.state.user_handler import UserLevel, UserHandler, UserError
from cpc.dataflow.lib import getModulesList
log=logging.getLogger(__name__)
class SCStop(ServerCommand):
"""Stop server command"""
def __init__(self):
ServerCommand.__init__(self, "stop")
def run(self, serverState, request, response):
log.info("Stop request received")
serverState.doQuit()
response.add('Quitting.')
class SCSaveState(ServerCommand):
"""Save the server state"""
def __init__(self):
ServerCommand.__init__(self, "save-state")
def run(self, serverState, request, response):
serverState.write()
response.add('Saved state.')
log.info("Save-state request received")
class SCPingServer(ServerCommand):
"""Test server command"""
def __init__(self):
ServerCommand.__init__(self, "ping")
def run(self, serverState, request, response):
response.add("OK")
class SCServerInfo(ServerCommand):
def __init__(self):
ServerCommand.__init__(self, "server-info")
def run(self, serverState, request, response):
conf = ServerConf()
info = dict()
info['fqdn'] = conf.getFqdn()
info['version'] = __version__
try:
conf.getServerId()
info['serverId'] = conf.getServerId()
info['server_secure_port'] = conf.getServerSecurePort()
info['client_secure_port'] = conf.getClientSecurePort()
except ServerIdNotFoundException as e:
info['serverId'] = "ERROR: %s"%e.str
response.add("",info)
class SCListServerItems(ServerCommand):
"""queue/running/heartbeat list command """
def __init__(self):
ServerCommand.__init__(self, "list")
def run(self, serverState, request, response):
toList = request.getParam('type')
retstr = ""
if toList == "queue":
list = serverState.getCmdQueue().list()
queue = []
for cmd in list:
queue.append(cmd.toJSON())
running = []
cmds = serverState.getRunningCmdList().getCmdList()
for cmd in cmds:
running.append(cmd.toJSON())
retstr = {"queue": queue, "running": running}
elif toList == "running":
running = []
cmds = serverState.getRunningCmdList().getCmdList()
for cmd in cmds:
running.append(cmd.toJSON())
retstr = running
elif toList == "heartbeats":
heartbeats = serverState.getRunningCmdList().toJSON() #.list()
retstr = heartbeats
elif toList == "users":
retstr = UserHandler().getUsersAsList()
elif toList == "modules":
retstr = getModulesList()
else:
raise ServerCommandError("Unknown item to list: '%s'" % toList)
response.add(retstr)
log.info("Listed %s" % toList)
class SCReadConf(ServerCommand):
"""Update the configuration based on new settings."""
def __init__(self):
ServerCommand.__init__(self, "readconf")
def run(self, serverState, request, response):
conf = ServerConf()
conf.reread()
response.add("Reread configuration.")
log.info("Reread configuration done")
| gpl-2.0 | 7,820,634,948,040,680,000 | 30.288732 | 78 | 0.634031 | false |
elancom/storm | storm-core/src/dev/resources/tester_bolt.py | 16 | 1272 | # -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http:# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This Python file uses the following encoding: utf-8
import storm
from random import random
class TesterBolt(storm.Bolt):
def initialize(self, conf, context):
storm.emit(['bolt initializing'])
def process(self, tup):
word = tup.values[0];
if (random() < 0.75):
storm.emit([word + 'lalala'], anchors=[tup])
storm.ack(tup)
else:
storm.log(word + ' randomly skipped!')
TesterBolt().run()
| apache-2.0 | 9,166,864,236,404,631,000 | 34.333333 | 74 | 0.703616 | false |
dneg/gaffer | python/GafferImageTest/SelectTest.py | 5 | 3240 | ##########################################################################
#
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import os
import IECore
import GafferImage
class SelectTest( unittest.TestCase ) :
rPath = os.path.expandvars( "$GAFFER_ROOT/python/GafferTest/images/redWithDataWindow.100x100.exr" )
gPath = os.path.expandvars( "$GAFFER_ROOT/python/GafferTest/images/greenWithDataWindow.100x100.exr" )
bPath = os.path.expandvars( "$GAFFER_ROOT/python/GafferTest/images/blueWithDataWindow.100x100.exr" )
# Do several tests to check the cache is working correctly:
def testHashPassThrough( self ) :
r1 = GafferImage.ImageReader()
r1["fileName"].setValue( self.rPath )
r2 = GafferImage.ImageReader()
r2["fileName"].setValue( self.gPath )
r3 = GafferImage.ImageReader()
r3["fileName"].setValue( self.bPath )
##########################################
# Test to see if the hash changes when we set the select plug.
##########################################
s = GafferImage.Select()
s["select"].setValue(1)
s["in"].setInput(r1["out"])
s["in1"].setInput(r2["out"])
s["in2"].setInput(r3["out"])
h1 = s["out"].image().hash()
h2 = r2["out"].image().hash()
self.assertEqual( h1, h2 )
s["select"].setValue(0)
h1 = s["out"].image().hash()
h2 = r1["out"].image().hash()
self.assertEqual( h1, h2 )
s["select"].setValue(2)
h1 = s["out"].image().hash()
h2 = r3["out"].image().hash()
self.assertEqual( h1, h2 )
if __name__ == "__main__":
unittest.main()
| bsd-3-clause | 4,554,445,117,706,349,600 | 37.571429 | 102 | 0.652469 | false |
kphillisjr/burg | util/import_gcry.py | 6 | 17863 | #*
#* GRUB -- GRand Unified Bootloader
#* Copyright (C) 2009 Free Software Foundation, Inc.
#*
#* GRUB is free software: you can redistribute it and/or modify
#* it under the terms of the GNU General Public License as published by
#* the Free Software Foundation, either version 3 of the License, or
#* (at your option) any later version.
#*
#* GRUB is distributed in the hope that it will be useful,
#* but WITHOUT ANY WARRANTY; without even the implied warranty of
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#* GNU General Public License for more details.
#*
#* You should have received a copy of the GNU General Public License
#* along with GRUB. If not, see <http://www.gnu.org/licenses/>.
#*
import re
import sys
import os
import datetime
import codecs
if len (sys.argv) < 3:
print ("Usage: %s SOURCE DESTINATION" % sys.argv[0])
exit (0)
indir = sys.argv[1]
outdir = sys.argv[2]
basedir = os.path.join (outdir, "lib/libgcrypt-grub")
try:
os.makedirs (basedir)
except:
print ("WARNING: %s already exists" % basedir)
cipher_dir_in = os.path.join (indir, "cipher")
cipher_dir_out = os.path.join (basedir, "cipher")
try:
os.makedirs (cipher_dir_out)
except:
print ("WARNING: %s already exists" % cipher_dir_out)
cipher_files = os.listdir (cipher_dir_in)
conf = codecs.open (os.path.join ("grub-core", "Makefile.gcry.def"), "w", "utf-8")
conf.write ("AutoGen definitions Makefile.tpl;\n\n")
confutil = codecs.open ("Makefile.utilgcry.def", "w", "utf-8")
confutil.write ("AutoGen definitions Makefile.tpl;\n\n")
confutil.write ("library = {\n");
confutil.write (" name = libgrubgcry.a;\n");
confutil.write (" cflags = '$(CFLAGS_GCRY)';\n");
confutil.write (" cppflags = '$(CPPFLAGS_GCRY)';\n");
confutil.write (" extra_dist = grub-core/lib/libgcrypt-grub/cipher/ChangeLog;\n");
confutil.write ("\n");
chlog = ""
modules = []
# Strictly speaking CRC32/CRC24 work on bytes so this value should be 1
# But libgcrypt uses 64. Let's keep the value for compatibility. Since
# no one uses CRC24/CRC32 for HMAC, this is no problem
mdblocksizes = {"_gcry_digest_spec_crc32" : 64,
"_gcry_digest_spec_crc32_rfc1510" : 64,
"_gcry_digest_spec_crc24_rfc2440" : 64,
"_gcry_digest_spec_md4" : 64,
"_gcry_digest_spec_md5" : 64,
"_gcry_digest_spec_rmd160" : 64,
"_gcry_digest_spec_sha1" : 64,
"_gcry_digest_spec_sha224" : 64,
"_gcry_digest_spec_sha256" : 64,
"_gcry_digest_spec_sha384" : 128,
"_gcry_digest_spec_sha512" : 128,
"_gcry_digest_spec_tiger" : 64,
"_gcry_digest_spec_whirlpool" : 64}
cryptolist = codecs.open (os.path.join (cipher_dir_out, "crypto.lst"), "w", "utf-8")
# rijndael is the only cipher using aliases. So no need for mangling, just
# hardcode it
cryptolist.write ("RIJNDAEL: gcry_rijndael\n");
cryptolist.write ("RIJNDAEL192: gcry_rijndael\n");
cryptolist.write ("RIJNDAEL256: gcry_rijndael\n");
cryptolist.write ("AES128: gcry_rijndael\n");
cryptolist.write ("AES-128: gcry_rijndael\n");
cryptolist.write ("AES-192: gcry_rijndael\n");
cryptolist.write ("AES-256: gcry_rijndael\n");
cryptolist.write ("ADLER32: adler32\n");
cryptolist.write ("CRC64: crc64\n");
for cipher_file in cipher_files:
infile = os.path.join (cipher_dir_in, cipher_file)
outfile = os.path.join (cipher_dir_out, cipher_file)
if cipher_file == "ChangeLog":
continue
chlognew = " * %s" % cipher_file
if re.match ("(Manifest|Makefile\.am|ac\.c|cipher\.c|hash-common\.c|hmac-tests\.c|md\.c|pubkey\.c)$", cipher_file):
chlog = "%s%s: Removed\n" % (chlog, chlognew)
continue
    # Autogenerated files. Not even worth mentioning in ChangeLog
if re.match ("Makefile\.in$", cipher_file):
continue
nch = False
if re.match (".*\.[ch]$", cipher_file):
isc = re.match (".*\.c$", cipher_file)
f = codecs.open (infile, "r", "utf-8")
fw = codecs.open (outfile, "w", "utf-8")
fw.write ("/* This file was automatically imported with \n")
fw.write (" import_gcry.py. Please don't modify it */\n")
fw.write ("#include <grub/dl.h>\n")
if cipher_file == "camellia.h":
fw.write ("#include <grub/misc.h>\n")
fw.write ("void camellia_setup128(const unsigned char *key, grub_uint32_t *subkey);\n")
fw.write ("void camellia_setup192(const unsigned char *key, grub_uint32_t *subkey);\n")
fw.write ("void camellia_setup256(const unsigned char *key, grub_uint32_t *subkey);\n")
fw.write ("void camellia_encrypt128(const grub_uint32_t *subkey, grub_uint32_t *io);\n")
fw.write ("void camellia_encrypt192(const grub_uint32_t *subkey, grub_uint32_t *io);\n")
fw.write ("void camellia_encrypt256(const grub_uint32_t *subkey, grub_uint32_t *io);\n")
fw.write ("void camellia_decrypt128(const grub_uint32_t *subkey, grub_uint32_t *io);\n")
fw.write ("void camellia_decrypt192(const grub_uint32_t *subkey, grub_uint32_t *io);\n")
fw.write ("void camellia_decrypt256(const grub_uint32_t *subkey, grub_uint32_t *io);\n")
fw.write ("#define memcpy grub_memcpy\n")
# Whole libgcrypt is distributed under GPLv3+ or compatible
if isc:
fw.write ("GRUB_MOD_LICENSE (\"GPLv3+\");\n")
ciphernames = []
mdnames = []
hold = False
skip = False
skip2 = False
ismd = False
iscipher = False
iscryptostart = False
iscomma = False
isglue = False
skip_statement = False
if isc:
modname = cipher_file [0:len(cipher_file) - 2]
if re.match (".*-glue$", modname):
modname = modname.replace ("-glue", "")
isglue = True
modname = "gcry_%s" % modname
for line in f:
line = line
if skip_statement:
if not re.search (";", line) is None:
skip_statement = False
continue
if skip:
if line[0] == "}":
skip = False
continue
if skip2:
if not re.search (" *};", line) is None:
skip2 = False
continue
if iscryptostart:
s = re.search (" *\"([A-Z0-9_a-z]*)\"", line)
if not s is None:
sg = s.groups()[0]
cryptolist.write (("%s: %s\n") % (sg, modname))
iscryptostart = False
if ismd or iscipher:
if not re.search (" *};", line) is None:
if not iscomma:
fw.write (" ,\n")
fw.write ("#ifdef GRUB_UTIL\n");
fw.write (" .modname = \"%s\",\n" % modname);
fw.write ("#endif\n");
if ismd:
if not (mdname in mdblocksizes):
print ("ERROR: Unknown digest blocksize: %s\n"
% mdname)
exit (1)
fw.write (" .blocksize = %s\n"
% mdblocksizes [mdname])
ismd = False
iscipher = False
iscomma = not re.search (",$", line) is None
# Used only for selftests.
m = re.match ("(static byte|static unsigned char) (weak_keys_chksum)\[[0-9]*\] =", line)
if not m is None:
skip = True
fname = m.groups ()[1]
chmsg = "(%s): Removed." % fname
if nch:
chlognew = "%s\n %s" % (chlognew, chmsg)
else:
chlognew = "%s %s" % (chlognew, chmsg)
nch = True
continue
if hold:
hold = False
# We're optimising for size.
if not re.match ("(run_selftests|selftest|_gcry_aes_c.._..c|_gcry_[a-z0-9]*_hash_buffer|tripledes_set2keys|do_tripledes_set_extra_info|_gcry_rmd160_mixblock|serpent_test)", line) is None:
skip = True
if not re.match ("serpent_test", line) is None:
fw.write ("static const char *serpent_test (void) { return 0; }\n");
fname = re.match ("[a-zA-Z0-9_]*", line).group ()
chmsg = "(%s): Removed." % fname
if nch:
chlognew = "%s\n %s" % (chlognew, chmsg)
else:
chlognew = "%s %s" % (chlognew, chmsg)
nch = True
continue
else:
fw.write (holdline)
m = re.match ("# *include <(.*)>", line)
if not m is None:
chmsg = "Removed including of %s" % m.groups ()[0]
if nch:
chlognew = "%s\n %s" % (chlognew, chmsg)
else:
chlognew = "%s: %s" % (chlognew, chmsg)
nch = True
continue
m = re.match ("gcry_cipher_spec_t", line)
if isc and not m is None:
assert (not iscryptostart)
assert (not iscipher)
assert (not iscryptostart)
ciphername = line [len ("gcry_cipher_spec_t"):].strip ()
ciphername = re.match("[a-zA-Z0-9_]*",ciphername).group ()
ciphernames.append (ciphername)
iscipher = True
iscryptostart = True
m = re.match ("gcry_md_spec_t", line)
if isc and not m is None:
assert (not ismd)
assert (not iscipher)
assert (not iscryptostart)
mdname = line [len ("gcry_md_spec_t"):].strip ()
mdname = re.match("[a-zA-Z0-9_]*",mdname).group ()
mdnames.append (mdname)
ismd = True
iscryptostart = True
m = re.match ("static const char \*selftest.*;$", line)
if not m is None:
fname = line[len ("static const char \*"):]
fname = re.match ("[a-zA-Z0-9_]*", fname).group ()
chmsg = "(%s): Removed declaration." % fname
if nch:
chlognew = "%s\n %s" % (chlognew, chmsg)
else:
chlognew = "%s %s" % (chlognew, chmsg)
nch = True
continue
m = re.match ("(static const char( |)\*|static gpg_err_code_t|void|static int|static gcry_err_code_t)$", line)
if not m is None:
hold = True
holdline = line
continue
m = re.match ("static int tripledes_set2keys \(.*\);", line)
if not m is None:
continue
m = re.match ("static int tripledes_set2keys \(", line)
if not m is None:
skip_statement = True
continue
m = re.match ("cipher_extra_spec_t", line)
if isc and not m is None:
skip2 = True
fname = line[len ("cipher_extra_spec_t "):]
fname = re.match ("[a-zA-Z0-9_]*", fname).group ()
chmsg = "(%s): Removed." % fname
if nch:
chlognew = "%s\n %s" % (chlognew, chmsg)
else:
chlognew = "%s %s" % (chlognew, chmsg)
nch = True
continue
m = re.match ("md_extra_spec_t", line)
if isc and not m is None:
skip2 = True
fname = line[len ("md_extra_spec_t "):]
fname = re.match ("[a-zA-Z0-9_]*", fname).group ()
chmsg = "(%s): Removed." % fname
if nch:
chlognew = "%s\n %s" % (chlognew, chmsg)
else:
chlognew = "%s %s" % (chlognew, chmsg)
nch = True
continue
fw.write (line)
if len (ciphernames) > 0 or len (mdnames) > 0:
if isglue:
modfiles = "lib/libgcrypt-grub/cipher/%s lib/libgcrypt-grub/cipher/%s" \
% (cipher_file, cipher_file.replace ("-glue.c", ".c"))
else:
modfiles = "lib/libgcrypt-grub/cipher/%s" % cipher_file
modules.append (modname)
chmsg = "(GRUB_MOD_INIT(%s)): New function\n" % modname
if nch:
chlognew = "%s\n %s" % (chlognew, chmsg)
else:
chlognew = "%s%s" % (chlognew, chmsg)
nch = True
fw.write ("\n\nGRUB_MOD_INIT(%s)\n" % modname)
fw.write ("{\n")
for ciphername in ciphernames:
chmsg = "Register cipher %s" % ciphername
chlognew = "%s\n %s" % (chlognew, chmsg)
fw.write (" grub_cipher_register (&%s);\n" % ciphername)
for mdname in mdnames:
chmsg = "Register digest %s" % mdname
chlognew = "%s\n %s" % (chlognew, chmsg)
fw.write (" grub_md_register (&%s);\n" % mdname)
fw.write ("}")
chmsg = "(GRUB_MOD_FINI(%s)): New function\n" % modname
chlognew = "%s\n %s" % (chlognew, chmsg)
fw.write ("\n\nGRUB_MOD_FINI(%s)\n" % modname)
fw.write ("{\n")
for ciphername in ciphernames:
chmsg = "Unregister cipher %s" % ciphername
chlognew = "%s\n %s" % (chlognew, chmsg)
fw.write (" grub_cipher_unregister (&%s);\n" % ciphername)
for mdname in mdnames:
chmsg = "Unregister MD %s" % mdname
chlognew = "%s\n %s" % (chlognew, chmsg)
fw.write (" grub_md_unregister (&%s);\n" % mdname)
fw.write ("}\n")
conf.write ("module = {\n")
conf.write (" name = %s;\n" % modname)
for src in modfiles.split():
conf.write (" common = %s;\n" % src)
confutil.write (" common = grub-core/%s;\n" % src)
if modname == "gcry_rijndael" or modname == "gcry_md4" or modname == "gcry_md5" or modname == "gcry_rmd160" or modname == "gcry_sha1" or modname == "gcry_sha256" or modname == "gcry_sha512" or modname == "gcry_tiger":
# Alignment checked by hand
conf.write (" cflags = '$(CFLAGS_GCRY) -Wno-cast-align -Wno-strict-aliasing';\n");
else:
conf.write (" cflags = '$(CFLAGS_GCRY)';\n");
conf.write (" cppflags = '$(CPPFLAGS_GCRY)';\n");
conf.write ("};\n\n")
f.close ()
fw.close ()
if nch:
chlog = "%s%s\n" % (chlog, chlognew)
elif isc and cipher_file != "camellia.c":
print ("WARNING: C file isn't a module: %s" % cipher_file)
f.close ()
fw.close ()
os.remove (outfile)
chlog = "%s\n * %s: Removed" % (chlog, cipher_file)
continue
chlog = "%s%sSkipped unknown file\n" % (chlog, chlognew)
print ("WARNING: unknown file %s" % cipher_file)
cryptolist.close ()
chlog = "%s * crypto.lst: New file.\n" % chlog
outfile = os.path.join (cipher_dir_out, "types.h")
fw=codecs.open (outfile, "w", "utf-8")
fw.write ("#include <grub/types.h>\n")
fw.write ("#include <cipher_wrap.h>\n")
chlog = "%s * types.h: New file.\n" % chlog
fw.close ()
outfile = os.path.join (cipher_dir_out, "memory.h")
fw=codecs.open (outfile, "w", "utf-8")
fw.write ("#include <cipher_wrap.h>\n")
chlog = "%s * memory.h: New file.\n" % chlog
fw.close ()
outfile = os.path.join (cipher_dir_out, "cipher.h")
fw=codecs.open (outfile, "w", "utf-8")
fw.write ("#include <grub/crypto.h>\n")
fw.write ("#include <cipher_wrap.h>\n")
chlog = "%s * cipher.h: Likewise.\n" % chlog
fw.close ()
outfile = os.path.join (cipher_dir_out, "g10lib.h")
fw=codecs.open (outfile, "w", "utf-8")
fw.write ("#include <cipher_wrap.h>\n")
chlog = "%s * g10lib.h: Likewise.\n" % chlog
fw.close ()
infile = os.path.join (cipher_dir_in, "ChangeLog")
outfile = os.path.join (cipher_dir_out, "ChangeLog")
conf.close ();
initfile = codecs.open (os.path.join (cipher_dir_out, "init.c"), "w", "utf-8")
initfile.write ("#include <grub/crypto.h>\n")
for module in modules:
initfile.write ("extern void grub_%s_init (void);\n" % module)
initfile.write ("extern void grub_%s_fini (void);\n" % module)
initfile.write ("\n")
initfile.write ("void\n")
initfile.write ("grub_gcry_init_all (void)\n")
initfile.write ("{\n")
for module in modules:
initfile.write (" grub_%s_init ();\n" % module)
initfile.write ("}\n")
initfile.write ("\n")
initfile.write ("void\n")
initfile.write ("grub_gcry_fini_all (void)\n")
initfile.write ("{\n")
for module in modules:
initfile.write (" grub_%s_fini ();\n" % module)
initfile.write ("}\n")
initfile.close ()
confutil.write (" common = grub-core/lib/libgcrypt-grub/cipher/init.c;\n")
confutil.write ("};\n");
confutil.close ();
f=codecs.open (infile, "r", "utf-8")
fw=codecs.open (outfile, "w", "utf-8")
dt = datetime.date.today ()
fw.write ("%04d-%02d-%02d Automatic import tool\n" % \
(dt.year,dt.month, dt.day))
fw.write ("\n")
fw.write (" Imported ciphers to GRUB\n")
fw.write ("\n")
fw.write (chlog)
fw.write ("\n")
for line in f:
fw.write (line)
f.close ()
fw.close ()
| gpl-3.0 | 8,338,329,081,663,220,000 | 41.329384 | 229 | 0.516822 | false |
ptisserand/ansible | lib/ansible/utils/module_docs_fragments/vyos.py | 58 | 3118 | #
# (c) 2015, Peter Sprygada <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
# Standard files documentation fragment
DOCUMENTATION = """
options:
provider:
description:
- B(Deprecated)
- "Starting with Ansible 2.5 we recommend using C(connection: network_cli)."
- For more information please see the L(Network Guide, ../network/getting_started/network_differences.html#multiple-communication-protocols).
- HORIZONTALLINE
- A dict object containing connection details.
suboptions:
host:
description:
- Specifies the DNS host name or address for connecting to the remote
device over the specified transport. The value of host is used as
the destination address for the transport.
required: true
port:
description:
- Specifies the port to use when building the connection to the remote
device.
default: 22
username:
description:
- Configures the username to use to authenticate the connection to
the remote device. This value is used to authenticate
the SSH session. If the value is not specified in the task, the
value of environment variable C(ANSIBLE_NET_USERNAME) will be used instead.
password:
description:
- Specifies the password to use to authenticate the connection to
the remote device. This value is used to authenticate
the SSH session. If the value is not specified in the task, the
value of environment variable C(ANSIBLE_NET_PASSWORD) will be used instead.
timeout:
description:
- Specifies the timeout in seconds for communicating with the network device
for either connecting or sending commands. If the timeout is
exceeded before the operation is completed, the module will error.
default: 10
ssh_keyfile:
description:
- Specifies the SSH key to use to authenticate the connection to
the remote device. This value is the path to the
key used to authenticate the SSH session. If the value is not specified
in the task, the value of environment variable C(ANSIBLE_NET_SSH_KEYFILE)
will be used instead.
notes:
- For more information on using Ansible to manage network devices see the :ref:`Ansible Network Guide <network_guide>`
"""
| gpl-3.0 | -2,810,341,904,596,070,000 | 42.915493 | 147 | 0.685696 | false |
zhinaonet/sqlmap-z | lib/controller/handler.py | 1 | 4543 | #!/usr/bin/env python
"""
Copyright (c) 2006-2017 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
from lib.core.common import Backend
from lib.core.data import conf
from lib.core.data import kb
from lib.core.dicts import DBMS_DICT
from lib.core.enums import DBMS
from lib.core.settings import MSSQL_ALIASES
from lib.core.settings import MYSQL_ALIASES
from lib.core.settings import ORACLE_ALIASES
from lib.core.settings import PGSQL_ALIASES
from lib.core.settings import SQLITE_ALIASES
from lib.core.settings import ACCESS_ALIASES
from lib.core.settings import FIREBIRD_ALIASES
from lib.core.settings import MAXDB_ALIASES
from lib.core.settings import SYBASE_ALIASES
from lib.core.settings import DB2_ALIASES
from lib.core.settings import HSQLDB_ALIASES
from lib.core.settings import INFORMIX_ALIASES
from lib.utils.sqlalchemy import SQLAlchemy
from plugins.dbms.mssqlserver import MSSQLServerMap
from plugins.dbms.mssqlserver.connector import Connector as MSSQLServerConn
from plugins.dbms.mysql import MySQLMap
from plugins.dbms.mysql.connector import Connector as MySQLConn
from plugins.dbms.oracle import OracleMap
from plugins.dbms.oracle.connector import Connector as OracleConn
from plugins.dbms.postgresql import PostgreSQLMap
from plugins.dbms.postgresql.connector import Connector as PostgreSQLConn
from plugins.dbms.sqlite import SQLiteMap
from plugins.dbms.sqlite.connector import Connector as SQLiteConn
from plugins.dbms.access import AccessMap
from plugins.dbms.access.connector import Connector as AccessConn
from plugins.dbms.firebird import FirebirdMap
from plugins.dbms.firebird.connector import Connector as FirebirdConn
from plugins.dbms.maxdb import MaxDBMap
from plugins.dbms.maxdb.connector import Connector as MaxDBConn
from plugins.dbms.sybase import SybaseMap
from plugins.dbms.sybase.connector import Connector as SybaseConn
from plugins.dbms.db2 import DB2Map
from plugins.dbms.db2.connector import Connector as DB2Conn
from plugins.dbms.hsqldb import HSQLDBMap
from plugins.dbms.hsqldb.connector import Connector as HSQLDBConn
from plugins.dbms.informix import InformixMap
from plugins.dbms.informix.connector import Connector as InformixConn
def setHandler():
"""
Detect which is the target web application back-end database
management system.
"""
items = [
(DBMS.MYSQL, MYSQL_ALIASES, MySQLMap, MySQLConn),
(DBMS.ORACLE, ORACLE_ALIASES, OracleMap, OracleConn),
(DBMS.PGSQL, PGSQL_ALIASES, PostgreSQLMap, PostgreSQLConn),
(DBMS.MSSQL, MSSQL_ALIASES, MSSQLServerMap, MSSQLServerConn),
(DBMS.SQLITE, SQLITE_ALIASES, SQLiteMap, SQLiteConn),
(DBMS.ACCESS, ACCESS_ALIASES, AccessMap, AccessConn),
(DBMS.FIREBIRD, FIREBIRD_ALIASES, FirebirdMap, FirebirdConn),
(DBMS.MAXDB, MAXDB_ALIASES, MaxDBMap, MaxDBConn),
(DBMS.SYBASE, SYBASE_ALIASES, SybaseMap, SybaseConn),
(DBMS.DB2, DB2_ALIASES, DB2Map, DB2Conn),
(DBMS.HSQLDB, HSQLDB_ALIASES, HSQLDBMap, HSQLDBConn),
(DBMS.INFORMIX, INFORMIX_ALIASES, InformixMap, InformixConn),
]
_ = max(_ if (conf.get("dbms") or Backend.getIdentifiedDbms() or kb.heuristicExtendedDbms or "").lower() in _[1] else None for _ in items)
if _:
items.remove(_)
items.insert(0, _)
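    # Editor's note, not in the original source: the two lines above move an already
    # forced or already identified DBMS to the front of the list, so that (for example)
    # with --dbms=postgresql the PostgreSQL handler is tried first while the remaining
    # handlers keep their original order.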
for dbms, aliases, Handler, Connector in items:
handler = Handler()
conf.dbmsConnector = Connector()
if conf.direct:
dialect = DBMS_DICT[dbms][3]
if dialect:
sqlalchemy = SQLAlchemy(dialect=dialect)
sqlalchemy.connect()
if sqlalchemy.connector:
conf.dbmsConnector = sqlalchemy
else:
try:
conf.dbmsConnector.connect()
except NameError:
pass
else:
conf.dbmsConnector.connect()
if handler.checkDbms():
if kb.resolutionDbms:
conf.dbmsHandler = max(_ for _ in items if _[0] == kb.resolutionDbms)[2]()
else:
conf.dbmsHandler = handler
conf.dbmsHandler._dbms = dbms
break
else:
conf.dbmsConnector = None
# At this point back-end DBMS is correctly fingerprinted, no need
# to enforce it anymore
Backend.flushForcedDbms()
| gpl-3.0 | -1,020,827,596,377,140,900 | 39.5625 | 142 | 0.692054 | false |
robotlinker/robotlinker_core | src/rosbridge_suite/rosbridge_server/src/tornado/test/simple_httpclient_test.py | 19 | 22731 | from __future__ import absolute_import, division, print_function, with_statement
import collections
from contextlib import closing
import errno
import gzip
import logging
import os
import re
import socket
import sys
from tornado import gen
from tornado.httpclient import AsyncHTTPClient
from tornado.httputil import HTTPHeaders
from tornado.ioloop import IOLoop
from tornado.log import gen_log, app_log
from tornado.netutil import Resolver, bind_sockets
from tornado.simple_httpclient import SimpleAsyncHTTPClient, _default_ca_certs
from tornado.test.httpclient_test import ChunkHandler, CountdownHandler, HelloWorldHandler
from tornado.test import httpclient_test
from tornado.testing import AsyncHTTPTestCase, AsyncHTTPSTestCase, AsyncTestCase, bind_unused_port, ExpectLog
from tornado.test.util import skipOnTravis, skipIfNoIPv6
from tornado.web import RequestHandler, Application, asynchronous, url, stream_request_body
class SimpleHTTPClientCommonTestCase(httpclient_test.HTTPClientCommonTestCase):
def get_http_client(self):
client = SimpleAsyncHTTPClient(io_loop=self.io_loop,
force_instance=True)
self.assertTrue(isinstance(client, SimpleAsyncHTTPClient))
return client
class TriggerHandler(RequestHandler):
def initialize(self, queue, wake_callback):
self.queue = queue
self.wake_callback = wake_callback
@asynchronous
def get(self):
logging.debug("queuing trigger")
self.queue.append(self.finish)
if self.get_argument("wake", "true") == "true":
self.wake_callback()
class HangHandler(RequestHandler):
@asynchronous
def get(self):
pass
class ContentLengthHandler(RequestHandler):
def get(self):
self.set_header("Content-Length", self.get_argument("value"))
self.write("ok")
class HeadHandler(RequestHandler):
def head(self):
self.set_header("Content-Length", "7")
class OptionsHandler(RequestHandler):
def options(self):
self.set_header("Access-Control-Allow-Origin", "*")
self.write("ok")
class NoContentHandler(RequestHandler):
def get(self):
if self.get_argument("error", None):
self.set_header("Content-Length", "5")
self.write("hello")
self.set_status(204)
class SeeOtherPostHandler(RequestHandler):
def post(self):
redirect_code = int(self.request.body)
assert redirect_code in (302, 303), "unexpected body %r" % self.request.body
self.set_header("Location", "/see_other_get")
self.set_status(redirect_code)
class SeeOtherGetHandler(RequestHandler):
def get(self):
if self.request.body:
raise Exception("unexpected body %r" % self.request.body)
self.write("ok")
class HostEchoHandler(RequestHandler):
def get(self):
self.write(self.request.headers["Host"])
class NoContentLengthHandler(RequestHandler):
@gen.coroutine
def get(self):
# Emulate the old HTTP/1.0 behavior of returning a body with no
# content-length. Tornado handles content-length at the framework
# level so we have to go around it.
stream = self.request.connection.stream
yield stream.write(b"HTTP/1.0 200 OK\r\n\r\n"
b"hello")
stream.close()
class EchoPostHandler(RequestHandler):
def post(self):
self.write(self.request.body)
@stream_request_body
class RespondInPrepareHandler(RequestHandler):
def prepare(self):
self.set_status(403)
self.finish("forbidden")
class SimpleHTTPClientTestMixin(object):
def get_app(self):
# callable objects to finish pending /trigger requests
self.triggers = collections.deque()
return Application([
url("/trigger", TriggerHandler, dict(queue=self.triggers,
wake_callback=self.stop)),
url("/chunk", ChunkHandler),
url("/countdown/([0-9]+)", CountdownHandler, name="countdown"),
url("/hang", HangHandler),
url("/hello", HelloWorldHandler),
url("/content_length", ContentLengthHandler),
url("/head", HeadHandler),
url("/options", OptionsHandler),
url("/no_content", NoContentHandler),
url("/see_other_post", SeeOtherPostHandler),
url("/see_other_get", SeeOtherGetHandler),
url("/host_echo", HostEchoHandler),
url("/no_content_length", NoContentLengthHandler),
url("/echo_post", EchoPostHandler),
url("/respond_in_prepare", RespondInPrepareHandler),
], gzip=True)
def test_singleton(self):
# Class "constructor" reuses objects on the same IOLoop
self.assertTrue(SimpleAsyncHTTPClient(self.io_loop) is
SimpleAsyncHTTPClient(self.io_loop))
# unless force_instance is used
self.assertTrue(SimpleAsyncHTTPClient(self.io_loop) is not
SimpleAsyncHTTPClient(self.io_loop,
force_instance=True))
# different IOLoops use different objects
with closing(IOLoop()) as io_loop2:
self.assertTrue(SimpleAsyncHTTPClient(self.io_loop) is not
SimpleAsyncHTTPClient(io_loop2))
def test_connection_limit(self):
with closing(self.create_client(max_clients=2)) as client:
self.assertEqual(client.max_clients, 2)
seen = []
# Send 4 requests. Two can be sent immediately, while the others
# will be queued
for i in range(4):
client.fetch(self.get_url("/trigger"),
lambda response, i=i: (seen.append(i), self.stop()))
self.wait(condition=lambda: len(self.triggers) == 2)
self.assertEqual(len(client.queue), 2)
# Finish the first two requests and let the next two through
self.triggers.popleft()()
self.triggers.popleft()()
self.wait(condition=lambda: (len(self.triggers) == 2 and
len(seen) == 2))
self.assertEqual(set(seen), set([0, 1]))
self.assertEqual(len(client.queue), 0)
# Finish all the pending requests
self.triggers.popleft()()
self.triggers.popleft()()
self.wait(condition=lambda: len(seen) == 4)
self.assertEqual(set(seen), set([0, 1, 2, 3]))
self.assertEqual(len(self.triggers), 0)
def test_redirect_connection_limit(self):
# following redirects should not consume additional connections
with closing(self.create_client(max_clients=1)) as client:
client.fetch(self.get_url('/countdown/3'), self.stop,
max_redirects=3)
response = self.wait()
response.rethrow()
def test_default_certificates_exist(self):
open(_default_ca_certs()).close()
def test_gzip(self):
# All the tests in this file should be using gzip, but this test
# ensures that it is in fact getting compressed.
# Setting Accept-Encoding manually bypasses the client's
# decompression so we can see the raw data.
response = self.fetch("/chunk", use_gzip=False,
headers={"Accept-Encoding": "gzip"})
self.assertEqual(response.headers["Content-Encoding"], "gzip")
self.assertNotEqual(response.body, b"asdfqwer")
# Our test data gets bigger when gzipped. Oops. :)
self.assertEqual(len(response.body), 34)
f = gzip.GzipFile(mode="r", fileobj=response.buffer)
self.assertEqual(f.read(), b"asdfqwer")
def test_max_redirects(self):
response = self.fetch("/countdown/5", max_redirects=3)
self.assertEqual(302, response.code)
# We requested 5, followed three redirects for 4, 3, 2, then the last
# unfollowed redirect is to 1.
self.assertTrue(response.request.url.endswith("/countdown/5"))
self.assertTrue(response.effective_url.endswith("/countdown/2"))
self.assertTrue(response.headers["Location"].endswith("/countdown/1"))
def test_header_reuse(self):
# Apps may reuse a headers object if they are only passing in constant
# headers like user-agent. The header object should not be modified.
headers = HTTPHeaders({'User-Agent': 'Foo'})
self.fetch("/hello", headers=headers)
self.assertEqual(list(headers.get_all()), [('User-Agent', 'Foo')])
def test_see_other_redirect(self):
for code in (302, 303):
response = self.fetch("/see_other_post", method="POST", body="%d" % code)
self.assertEqual(200, response.code)
self.assertTrue(response.request.url.endswith("/see_other_post"))
self.assertTrue(response.effective_url.endswith("/see_other_get"))
# request is the original request, is a POST still
self.assertEqual("POST", response.request.method)
@skipOnTravis
def test_request_timeout(self):
response = self.fetch('/trigger?wake=false', request_timeout=0.1)
self.assertEqual(response.code, 599)
self.assertTrue(0.099 < response.request_time < 0.15, response.request_time)
self.assertEqual(str(response.error), "HTTP 599: Timeout")
# trigger the hanging request to let it clean up after itself
self.triggers.popleft()()
@skipIfNoIPv6
def test_ipv6(self):
try:
[sock] = bind_sockets(None, '::1', family=socket.AF_INET6)
port = sock.getsockname()[1]
self.http_server.add_socket(sock)
except socket.gaierror as e:
if e.args[0] == socket.EAI_ADDRFAMILY:
# python supports ipv6, but it's not configured on the network
# interface, so skip this test.
return
raise
url = '%s://[::1]:%d/hello' % (self.get_protocol(), port)
# ipv6 is currently enabled by default but can be disabled
self.http_client.fetch(url, self.stop, allow_ipv6=False)
response = self.wait()
self.assertEqual(response.code, 599)
self.http_client.fetch(url, self.stop)
response = self.wait()
self.assertEqual(response.body, b"Hello world!")
def xtest_multiple_content_length_accepted(self):
response = self.fetch("/content_length?value=2,2")
self.assertEqual(response.body, b"ok")
response = self.fetch("/content_length?value=2,%202,2")
self.assertEqual(response.body, b"ok")
response = self.fetch("/content_length?value=2,4")
self.assertEqual(response.code, 599)
response = self.fetch("/content_length?value=2,%202,3")
self.assertEqual(response.code, 599)
def test_head_request(self):
response = self.fetch("/head", method="HEAD")
self.assertEqual(response.code, 200)
self.assertEqual(response.headers["content-length"], "7")
self.assertFalse(response.body)
def test_options_request(self):
response = self.fetch("/options", method="OPTIONS")
self.assertEqual(response.code, 200)
self.assertEqual(response.headers["content-length"], "2")
self.assertEqual(response.headers["access-control-allow-origin"], "*")
self.assertEqual(response.body, b"ok")
def test_no_content(self):
response = self.fetch("/no_content")
self.assertEqual(response.code, 204)
# 204 status doesn't need a content-length, but tornado will
# add a zero content-length anyway.
#
# A test without a content-length header is included below
# in HTTP204NoContentTestCase.
self.assertEqual(response.headers["Content-length"], "0")
# 204 status with non-zero content length is malformed
with ExpectLog(gen_log, "Malformed HTTP message"):
response = self.fetch("/no_content?error=1")
self.assertEqual(response.code, 599)
def test_host_header(self):
host_re = re.compile(b"^localhost:[0-9]+$")
response = self.fetch("/host_echo")
self.assertTrue(host_re.match(response.body))
url = self.get_url("/host_echo").replace("http://", "http://me:secret@")
self.http_client.fetch(url, self.stop)
response = self.wait()
self.assertTrue(host_re.match(response.body), response.body)
def test_connection_refused(self):
server_socket, port = bind_unused_port()
server_socket.close()
with ExpectLog(gen_log, ".*", required=False):
self.http_client.fetch("http://localhost:%d/" % port, self.stop)
response = self.wait()
self.assertEqual(599, response.code)
if sys.platform != 'cygwin':
# cygwin returns EPERM instead of ECONNREFUSED here
contains_errno = str(errno.ECONNREFUSED) in str(response.error)
if not contains_errno and hasattr(errno, "WSAECONNREFUSED"):
contains_errno = str(errno.WSAECONNREFUSED) in str(response.error)
self.assertTrue(contains_errno, response.error)
# This is usually "Connection refused".
# On windows, strerror is broken and returns "Unknown error".
expected_message = os.strerror(errno.ECONNREFUSED)
self.assertTrue(expected_message in str(response.error),
response.error)
def test_queue_timeout(self):
with closing(self.create_client(max_clients=1)) as client:
client.fetch(self.get_url('/trigger'), self.stop,
request_timeout=10)
# Wait for the trigger request to block, not complete.
self.wait()
client.fetch(self.get_url('/hello'), self.stop,
connect_timeout=0.1)
response = self.wait()
self.assertEqual(response.code, 599)
self.assertTrue(response.request_time < 1, response.request_time)
self.assertEqual(str(response.error), "HTTP 599: Timeout")
self.triggers.popleft()()
self.wait()
def test_no_content_length(self):
response = self.fetch("/no_content_length")
self.assertEquals(b"hello", response.body)
def sync_body_producer(self, write):
write(b'1234')
write(b'5678')
@gen.coroutine
def async_body_producer(self, write):
yield write(b'1234')
yield gen.Task(IOLoop.current().add_callback)
yield write(b'5678')
def test_sync_body_producer_chunked(self):
response = self.fetch("/echo_post", method="POST",
body_producer=self.sync_body_producer)
response.rethrow()
self.assertEqual(response.body, b"12345678")
def test_sync_body_producer_content_length(self):
response = self.fetch("/echo_post", method="POST",
body_producer=self.sync_body_producer,
headers={'Content-Length': '8'})
response.rethrow()
self.assertEqual(response.body, b"12345678")
def test_async_body_producer_chunked(self):
response = self.fetch("/echo_post", method="POST",
body_producer=self.async_body_producer)
response.rethrow()
self.assertEqual(response.body, b"12345678")
def test_async_body_producer_content_length(self):
response = self.fetch("/echo_post", method="POST",
body_producer=self.async_body_producer,
headers={'Content-Length': '8'})
response.rethrow()
self.assertEqual(response.body, b"12345678")
def test_100_continue(self):
response = self.fetch("/echo_post", method="POST",
body=b"1234",
expect_100_continue=True)
self.assertEqual(response.body, b"1234")
def test_100_continue_early_response(self):
def body_producer(write):
raise Exception("should not be called")
response = self.fetch("/respond_in_prepare", method="POST",
body_producer=body_producer,
expect_100_continue=True)
self.assertEqual(response.code, 403)
class SimpleHTTPClientTestCase(SimpleHTTPClientTestMixin, AsyncHTTPTestCase):
def setUp(self):
super(SimpleHTTPClientTestCase, self).setUp()
self.http_client = self.create_client()
def create_client(self, **kwargs):
return SimpleAsyncHTTPClient(self.io_loop, force_instance=True,
**kwargs)
class SimpleHTTPSClientTestCase(SimpleHTTPClientTestMixin, AsyncHTTPSTestCase):
def setUp(self):
super(SimpleHTTPSClientTestCase, self).setUp()
self.http_client = self.create_client()
def create_client(self, **kwargs):
return SimpleAsyncHTTPClient(self.io_loop, force_instance=True,
defaults=dict(validate_cert=False),
**kwargs)
class CreateAsyncHTTPClientTestCase(AsyncTestCase):
def setUp(self):
super(CreateAsyncHTTPClientTestCase, self).setUp()
self.saved = AsyncHTTPClient._save_configuration()
def tearDown(self):
AsyncHTTPClient._restore_configuration(self.saved)
super(CreateAsyncHTTPClientTestCase, self).tearDown()
def test_max_clients(self):
AsyncHTTPClient.configure(SimpleAsyncHTTPClient)
with closing(AsyncHTTPClient(
self.io_loop, force_instance=True)) as client:
self.assertEqual(client.max_clients, 10)
with closing(AsyncHTTPClient(
self.io_loop, max_clients=11, force_instance=True)) as client:
self.assertEqual(client.max_clients, 11)
# Now configure max_clients statically and try overriding it
# with each way max_clients can be passed
AsyncHTTPClient.configure(SimpleAsyncHTTPClient, max_clients=12)
with closing(AsyncHTTPClient(
self.io_loop, force_instance=True)) as client:
self.assertEqual(client.max_clients, 12)
with closing(AsyncHTTPClient(
self.io_loop, max_clients=13, force_instance=True)) as client:
self.assertEqual(client.max_clients, 13)
with closing(AsyncHTTPClient(
self.io_loop, max_clients=14, force_instance=True)) as client:
self.assertEqual(client.max_clients, 14)
class HTTP100ContinueTestCase(AsyncHTTPTestCase):
def respond_100(self, request):
self.request = request
self.request.connection.stream.write(
b"HTTP/1.1 100 CONTINUE\r\n\r\n",
self.respond_200)
def respond_200(self):
self.request.connection.stream.write(
b"HTTP/1.1 200 OK\r\nContent-Length: 1\r\n\r\nA",
self.request.connection.stream.close)
def get_app(self):
# Not a full Application, but works as an HTTPServer callback
return self.respond_100
def test_100_continue(self):
res = self.fetch('/')
self.assertEqual(res.body, b'A')
class HTTP204NoContentTestCase(AsyncHTTPTestCase):
def respond_204(self, request):
        # A 204 response never has a body, even if it doesn't have a content-length
# (which would otherwise mean read-until-close). Tornado always
# sends a content-length, so we simulate here a server that sends
# no content length and does not close the connection.
#
# Tests of a 204 response with a Content-Length header are included
# in SimpleHTTPClientTestMixin.
request.connection.stream.write(
b"HTTP/1.1 204 No content\r\n\r\n")
def get_app(self):
return self.respond_204
def test_204_no_content(self):
resp = self.fetch('/')
self.assertEqual(resp.code, 204)
self.assertEqual(resp.body, b'')
class HostnameMappingTestCase(AsyncHTTPTestCase):
def setUp(self):
super(HostnameMappingTestCase, self).setUp()
self.http_client = SimpleAsyncHTTPClient(
self.io_loop,
hostname_mapping={
'www.example.com': '127.0.0.1',
('foo.example.com', 8000): ('127.0.0.1', self.get_http_port()),
})
def get_app(self):
return Application([url("/hello", HelloWorldHandler), ])
def test_hostname_mapping(self):
self.http_client.fetch(
'http://www.example.com:%d/hello' % self.get_http_port(), self.stop)
response = self.wait()
response.rethrow()
self.assertEqual(response.body, b'Hello world!')
def test_port_mapping(self):
self.http_client.fetch('http://foo.example.com:8000/hello', self.stop)
response = self.wait()
response.rethrow()
self.assertEqual(response.body, b'Hello world!')
class ResolveTimeoutTestCase(AsyncHTTPTestCase):
def setUp(self):
# Dummy Resolver subclass that never invokes its callback.
class BadResolver(Resolver):
def resolve(self, *args, **kwargs):
pass
super(ResolveTimeoutTestCase, self).setUp()
self.http_client = SimpleAsyncHTTPClient(
self.io_loop,
resolver=BadResolver())
def get_app(self):
return Application([url("/hello", HelloWorldHandler), ])
def test_resolve_timeout(self):
response = self.fetch('/hello', connect_timeout=0.1)
self.assertEqual(response.code, 599)
class MaxHeaderSizeTest(AsyncHTTPTestCase):
def get_app(self):
class SmallHeaders(RequestHandler):
def get(self):
self.set_header("X-Filler", "a" * 100)
self.write("ok")
class LargeHeaders(RequestHandler):
def get(self):
self.set_header("X-Filler", "a" * 1000)
self.write("ok")
return Application([('/small', SmallHeaders),
('/large', LargeHeaders)])
def get_http_client(self):
return SimpleAsyncHTTPClient(io_loop=self.io_loop, max_header_size=1024)
def test_small_headers(self):
response = self.fetch('/small')
response.rethrow()
self.assertEqual(response.body, b'ok')
def test_large_headers(self):
with ExpectLog(gen_log, "Unsatisfiable read"):
response = self.fetch('/large')
self.assertEqual(response.code, 599)
| apache-2.0 | 1,399,773,377,362,358,500 | 38.463542 | 109 | 0.620826 | false |
abhiQmar/servo | tests/wpt/css-tests/tools/pytest/testing/test_capture.py | 171 | 32410 | # note: py.io capture tests were copied from
# pylib 1.4.20.dev2 (rev 13d9af95547e)
from __future__ import with_statement
import pickle
import os
import sys
import _pytest._code
import py
import pytest
import contextlib
from _pytest import capture
from _pytest.capture import CaptureManager
from _pytest.main import EXIT_NOTESTSCOLLECTED
from py.builtin import print_
needsosdup = pytest.mark.xfail("not hasattr(os, 'dup')")
if sys.version_info >= (3, 0):
def tobytes(obj):
if isinstance(obj, str):
obj = obj.encode('UTF-8')
assert isinstance(obj, bytes)
return obj
def totext(obj):
if isinstance(obj, bytes):
obj = str(obj, 'UTF-8')
assert isinstance(obj, str)
return obj
else:
def tobytes(obj):
if isinstance(obj, unicode):
obj = obj.encode('UTF-8')
assert isinstance(obj, str)
return obj
def totext(obj):
if isinstance(obj, str):
obj = unicode(obj, 'UTF-8')
assert isinstance(obj, unicode)
return obj
def oswritebytes(fd, obj):
os.write(fd, tobytes(obj))
def StdCaptureFD(out=True, err=True, in_=True):
return capture.MultiCapture(out, err, in_, Capture=capture.FDCapture)
def StdCapture(out=True, err=True, in_=True):
return capture.MultiCapture(out, err, in_, Capture=capture.SysCapture)
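# Illustrative sketch (not part of the original suite) of how these helpers are
# used by the tests below; the printed text is just an example value.
#   cap = StdCapture()
#   cap.start_capturing()
#   print ("hello")
#   out, err = cap.readouterr()   # out == "hello\n", err == ""
#   cap.stop_capturing()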
class TestCaptureManager:
def test_getmethod_default_no_fd(self, monkeypatch):
from _pytest.capture import pytest_addoption
from _pytest.config import Parser
parser = Parser()
pytest_addoption(parser)
default = parser._groups[0].options[0].default
assert default == "fd" if hasattr(os, "dup") else "sys"
parser = Parser()
monkeypatch.delattr(os, 'dup', raising=False)
pytest_addoption(parser)
assert parser._groups[0].options[0].default == "sys"
@needsosdup
@pytest.mark.parametrize("method",
['no', 'sys', pytest.mark.skipif('not hasattr(os, "dup")', 'fd')])
def test_capturing_basic_api(self, method):
capouter = StdCaptureFD()
old = sys.stdout, sys.stderr, sys.stdin
try:
capman = CaptureManager(method)
capman.init_capturings()
outerr = capman.suspendcapture()
assert outerr == ("", "")
outerr = capman.suspendcapture()
assert outerr == ("", "")
print ("hello")
out, err = capman.suspendcapture()
if method == "no":
assert old == (sys.stdout, sys.stderr, sys.stdin)
else:
assert not out
capman.resumecapture()
print ("hello")
out, err = capman.suspendcapture()
if method != "no":
assert out == "hello\n"
capman.reset_capturings()
finally:
capouter.stop_capturing()
@needsosdup
def test_init_capturing(self):
capouter = StdCaptureFD()
try:
capman = CaptureManager("fd")
capman.init_capturings()
pytest.raises(AssertionError, "capman.init_capturings()")
capman.reset_capturings()
finally:
capouter.stop_capturing()
@pytest.mark.parametrize("method", ['fd', 'sys'])
def test_capturing_unicode(testdir, method):
if hasattr(sys, "pypy_version_info") and sys.pypy_version_info < (2,2):
pytest.xfail("does not work on pypy < 2.2")
if sys.version_info >= (3, 0):
obj = "'b\u00f6y'"
else:
obj = "u'\u00f6y'"
testdir.makepyfile("""
# coding=utf8
# taken from issue 227 from nosetests
def test_unicode():
import sys
print (sys.stdout)
print (%s)
""" % obj)
result = testdir.runpytest("--capture=%s" % method)
result.stdout.fnmatch_lines([
"*1 passed*"
])
@pytest.mark.parametrize("method", ['fd', 'sys'])
def test_capturing_bytes_in_utf8_encoding(testdir, method):
testdir.makepyfile("""
def test_unicode():
print ('b\\u00f6y')
""")
result = testdir.runpytest("--capture=%s" % method)
result.stdout.fnmatch_lines([
"*1 passed*"
])
def test_collect_capturing(testdir):
p = testdir.makepyfile("""
print ("collect %s failure" % 13)
import xyz42123
""")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
"*Captured stdout*",
"*collect 13 failure*",
])
class TestPerTestCapturing:
def test_capture_and_fixtures(self, testdir):
p = testdir.makepyfile("""
def setup_module(mod):
print ("setup module")
def setup_function(function):
print ("setup " + function.__name__)
def test_func1():
print ("in func1")
assert 0
def test_func2():
print ("in func2")
assert 0
""")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
"setup module*",
"setup test_func1*",
"in func1*",
"setup test_func2*",
"in func2*",
])
@pytest.mark.xfail(reason="unimplemented feature")
def test_capture_scope_cache(self, testdir):
p = testdir.makepyfile("""
import sys
def setup_module(func):
print ("module-setup")
def setup_function(func):
print ("function-setup")
def test_func():
print ("in function")
assert 0
def teardown_function(func):
print ("in teardown")
""")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
"*test_func():*",
"*Captured stdout during setup*",
"module-setup*",
"function-setup*",
"*Captured stdout*",
"in teardown*",
])
def test_no_carry_over(self, testdir):
p = testdir.makepyfile("""
def test_func1():
print ("in func1")
def test_func2():
print ("in func2")
assert 0
""")
result = testdir.runpytest(p)
s = result.stdout.str()
assert "in func1" not in s
assert "in func2" in s
def test_teardown_capturing(self, testdir):
p = testdir.makepyfile("""
def setup_function(function):
print ("setup func1")
def teardown_function(function):
print ("teardown func1")
assert 0
def test_func1():
print ("in func1")
pass
""")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
'*teardown_function*',
'*Captured stdout*',
"setup func1*",
"in func1*",
"teardown func1*",
#"*1 fixture failure*"
])
def test_teardown_capturing_final(self, testdir):
p = testdir.makepyfile("""
def teardown_module(mod):
print ("teardown module")
assert 0
def test_func():
pass
""")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
"*def teardown_module(mod):*",
"*Captured stdout*",
"*teardown module*",
"*1 error*",
])
def test_capturing_outerr(self, testdir):
p1 = testdir.makepyfile("""
import sys
def test_capturing():
print (42)
sys.stderr.write(str(23))
def test_capturing_error():
print (1)
sys.stderr.write(str(2))
raise ValueError
""")
result = testdir.runpytest(p1)
result.stdout.fnmatch_lines([
"*test_capturing_outerr.py .F",
"====* FAILURES *====",
"____*____",
"*test_capturing_outerr.py:8: ValueError",
"*--- Captured stdout *call*",
"1",
"*--- Captured stderr *call*",
"2",
])
class TestLoggingInteraction:
def test_logging_stream_ownership(self, testdir):
p = testdir.makepyfile("""
def test_logging():
import logging
import pytest
stream = capture.TextIO()
logging.basicConfig(stream=stream)
stream.close() # to free memory/release resources
""")
result = testdir.runpytest_subprocess(p)
        assert result.stderr.str().find("atexit") == -1
def test_logging_and_immediate_setupteardown(self, testdir):
p = testdir.makepyfile("""
import logging
def setup_function(function):
logging.warn("hello1")
def test_logging():
logging.warn("hello2")
assert 0
def teardown_function(function):
logging.warn("hello3")
assert 0
""")
for optargs in (('--capture=sys',), ('--capture=fd',)):
print (optargs)
result = testdir.runpytest_subprocess(p, *optargs)
s = result.stdout.str()
result.stdout.fnmatch_lines([
"*WARN*hello3", # errors show first!
"*WARN*hello1",
"*WARN*hello2",
])
# verify proper termination
assert "closed" not in s
def test_logging_and_crossscope_fixtures(self, testdir):
p = testdir.makepyfile("""
import logging
def setup_module(function):
logging.warn("hello1")
def test_logging():
logging.warn("hello2")
assert 0
def teardown_module(function):
logging.warn("hello3")
assert 0
""")
for optargs in (('--capture=sys',), ('--capture=fd',)):
print (optargs)
result = testdir.runpytest_subprocess(p, *optargs)
s = result.stdout.str()
result.stdout.fnmatch_lines([
"*WARN*hello3", # errors come first
"*WARN*hello1",
"*WARN*hello2",
])
# verify proper termination
assert "closed" not in s
def test_logging_initialized_in_test(self, testdir):
p = testdir.makepyfile("""
import sys
def test_something():
# pytest does not import logging
assert 'logging' not in sys.modules
import logging
logging.basicConfig()
logging.warn("hello432")
assert 0
""")
result = testdir.runpytest_subprocess(
p, "--traceconfig",
"-p", "no:capturelog")
assert result.ret != 0
result.stdout.fnmatch_lines([
"*hello432*",
])
assert 'operation on closed file' not in result.stderr.str()
def test_conftestlogging_is_shown(self, testdir):
testdir.makeconftest("""
import logging
logging.basicConfig()
logging.warn("hello435")
""")
# make sure that logging is still captured in tests
result = testdir.runpytest_subprocess("-s", "-p", "no:capturelog")
assert result.ret == EXIT_NOTESTSCOLLECTED
result.stderr.fnmatch_lines([
"WARNING*hello435*",
])
assert 'operation on closed file' not in result.stderr.str()
def test_conftestlogging_and_test_logging(self, testdir):
testdir.makeconftest("""
import logging
logging.basicConfig()
""")
# make sure that logging is still captured in tests
p = testdir.makepyfile("""
def test_hello():
import logging
logging.warn("hello433")
assert 0
""")
result = testdir.runpytest_subprocess(p, "-p", "no:capturelog")
assert result.ret != 0
result.stdout.fnmatch_lines([
"WARNING*hello433*",
])
assert 'something' not in result.stderr.str()
assert 'operation on closed file' not in result.stderr.str()
class TestCaptureFixture:
@pytest.mark.parametrize("opt", [[], ["-s"]])
def test_std_functional(self, testdir, opt):
reprec = testdir.inline_runsource("""
def test_hello(capsys):
print (42)
out, err = capsys.readouterr()
assert out.startswith("42")
""", *opt)
reprec.assertoutcome(passed=1)
def test_capsyscapfd(self, testdir):
p = testdir.makepyfile("""
def test_one(capsys, capfd):
pass
def test_two(capfd, capsys):
pass
""")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
"*ERROR*setup*test_one*",
"*capsys*capfd*same*time*",
"*ERROR*setup*test_two*",
"*capsys*capfd*same*time*",
"*2 error*"])
@pytest.mark.parametrize("method", ["sys", "fd"])
def test_capture_is_represented_on_failure_issue128(self, testdir, method):
p = testdir.makepyfile("""
def test_hello(cap%s):
print ("xxx42xxx")
assert 0
""" % method)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
"xxx42xxx",
])
@needsosdup
def test_stdfd_functional(self, testdir):
reprec = testdir.inline_runsource("""
def test_hello(capfd):
import os
os.write(1, "42".encode('ascii'))
out, err = capfd.readouterr()
assert out.startswith("42")
capfd.close()
""")
reprec.assertoutcome(passed=1)
def test_partial_setup_failure(self, testdir):
p = testdir.makepyfile("""
def test_hello(capsys, missingarg):
pass
""")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
"*test_partial_setup_failure*",
"*1 error*",
])
@needsosdup
def test_keyboardinterrupt_disables_capturing(self, testdir):
p = testdir.makepyfile("""
def test_hello(capfd):
import os
os.write(1, str(42).encode('ascii'))
raise KeyboardInterrupt()
""")
result = testdir.runpytest_subprocess(p)
result.stdout.fnmatch_lines([
"*KeyboardInterrupt*"
])
assert result.ret == 2
@pytest.mark.issue14
def test_capture_and_logging(self, testdir):
p = testdir.makepyfile("""
import logging
def test_log(capsys):
logging.error('x')
""")
result = testdir.runpytest_subprocess(p)
assert 'closed' not in result.stderr.str()
def test_setup_failure_does_not_kill_capturing(testdir):
sub1 = testdir.mkpydir("sub1")
sub1.join("conftest.py").write(_pytest._code.Source("""
def pytest_runtest_setup(item):
raise ValueError(42)
"""))
sub1.join("test_mod.py").write("def test_func1(): pass")
result = testdir.runpytest(testdir.tmpdir, '--traceconfig')
result.stdout.fnmatch_lines([
"*ValueError(42)*",
"*1 error*"
])
def test_fdfuncarg_skips_on_no_osdup(testdir):
testdir.makepyfile("""
import os
if hasattr(os, 'dup'):
del os.dup
def test_hello(capfd):
pass
""")
result = testdir.runpytest_subprocess("--capture=no")
result.stdout.fnmatch_lines([
"*1 skipped*"
])
def test_capture_conftest_runtest_setup(testdir):
testdir.makeconftest("""
def pytest_runtest_setup():
print ("hello19")
""")
testdir.makepyfile("def test_func(): pass")
result = testdir.runpytest()
assert result.ret == 0
assert 'hello19' not in result.stdout.str()
def test_capture_badoutput_issue412(testdir):
testdir.makepyfile("""
import os
def test_func():
omg = bytearray([1,129,1])
os.write(1, omg)
assert 0
""")
result = testdir.runpytest('--cap=fd')
result.stdout.fnmatch_lines('''
*def test_func*
*assert 0*
*Captured*
*1 failed*
''')
def test_capture_early_option_parsing(testdir):
testdir.makeconftest("""
def pytest_runtest_setup():
print ("hello19")
""")
testdir.makepyfile("def test_func(): pass")
result = testdir.runpytest("-vs")
assert result.ret == 0
assert 'hello19' in result.stdout.str()
def test_capture_binary_output(testdir):
testdir.makepyfile(r"""
import pytest
def test_a():
import sys
import subprocess
subprocess.call([sys.executable, __file__])
def test_foo():
import os;os.write(1, b'\xc3')
if __name__ == '__main__':
test_foo()
""")
result = testdir.runpytest('--assert=plain')
result.assert_outcomes(passed=2)
def test_error_during_readouterr(testdir):
"""Make sure we suspend capturing if errors occurr during readouterr"""
testdir.makepyfile(pytest_xyz="""
from _pytest.capture import FDCapture
def bad_snap(self):
raise Exception('boom')
assert FDCapture.snap
FDCapture.snap = bad_snap
""")
result = testdir.runpytest_subprocess(
"-p", "pytest_xyz", "--version", syspathinsert=True
)
result.stderr.fnmatch_lines([
"*in bad_snap",
" raise Exception('boom')",
"Exception: boom",
])
class TestTextIO:
def test_text(self):
f = capture.TextIO()
f.write("hello")
s = f.getvalue()
assert s == "hello"
f.close()
def test_unicode_and_str_mixture(self):
f = capture.TextIO()
if sys.version_info >= (3, 0):
f.write("\u00f6")
pytest.raises(TypeError, "f.write(bytes('hello', 'UTF-8'))")
else:
f.write(unicode("\u00f6", 'UTF-8'))
f.write("hello") # bytes
s = f.getvalue()
f.close()
assert isinstance(s, unicode)
def test_bytes_io():
f = py.io.BytesIO()
f.write(tobytes("hello"))
pytest.raises(TypeError, "f.write(totext('hello'))")
s = f.getvalue()
assert s == tobytes("hello")
def test_dontreadfrominput():
from _pytest.capture import DontReadFromInput
f = DontReadFromInput()
assert not f.isatty()
pytest.raises(IOError, f.read)
pytest.raises(IOError, f.readlines)
pytest.raises(IOError, iter, f)
pytest.raises(ValueError, f.fileno)
f.close() # just for completeness
@pytest.yield_fixture
def tmpfile(testdir):
f = testdir.makepyfile("").open('wb+')
yield f
if not f.closed:
f.close()
@needsosdup
def test_dupfile(tmpfile):
flist = []
for i in range(5):
nf = capture.safe_text_dupfile(tmpfile, "wb")
assert nf != tmpfile
assert nf.fileno() != tmpfile.fileno()
assert nf not in flist
print_(i, end="", file=nf)
flist.append(nf)
for i in range(5):
f = flist[i]
f.close()
tmpfile.seek(0)
s = tmpfile.read()
assert "01234" in repr(s)
tmpfile.close()
def test_dupfile_on_bytesio():
io = py.io.BytesIO()
f = capture.safe_text_dupfile(io, "wb")
f.write("hello")
assert io.getvalue() == b"hello"
def test_dupfile_on_textio():
io = py.io.TextIO()
f = capture.safe_text_dupfile(io, "wb")
f.write("hello")
assert io.getvalue() == "hello"
@contextlib.contextmanager
def lsof_check():
pid = os.getpid()
try:
out = py.process.cmdexec("lsof -p %d" % pid)
except (py.process.cmdexec.Error, UnicodeDecodeError):
# about UnicodeDecodeError, see note on pytester
pytest.skip("could not run 'lsof'")
yield
out2 = py.process.cmdexec("lsof -p %d" % pid)
len1 = len([x for x in out.split("\n") if "REG" in x])
len2 = len([x for x in out2.split("\n") if "REG" in x])
assert len2 < len1 + 3, out2
class TestFDCapture:
pytestmark = needsosdup
def test_simple(self, tmpfile):
fd = tmpfile.fileno()
cap = capture.FDCapture(fd)
data = tobytes("hello")
os.write(fd, data)
s = cap.snap()
cap.done()
assert not s
cap = capture.FDCapture(fd)
cap.start()
os.write(fd, data)
s = cap.snap()
cap.done()
assert s == "hello"
def test_simple_many(self, tmpfile):
for i in range(10):
self.test_simple(tmpfile)
def test_simple_many_check_open_files(self, testdir):
with lsof_check():
with testdir.makepyfile("").open('wb+') as tmpfile:
self.test_simple_many(tmpfile)
def test_simple_fail_second_start(self, tmpfile):
fd = tmpfile.fileno()
cap = capture.FDCapture(fd)
cap.done()
pytest.raises(ValueError, cap.start)
def test_stderr(self):
cap = capture.FDCapture(2)
cap.start()
print_("hello", file=sys.stderr)
s = cap.snap()
cap.done()
assert s == "hello\n"
def test_stdin(self, tmpfile):
cap = capture.FDCapture(0)
cap.start()
x = os.read(0, 100).strip()
cap.done()
assert x == tobytes('')
def test_writeorg(self, tmpfile):
data1, data2 = tobytes("foo"), tobytes("bar")
cap = capture.FDCapture(tmpfile.fileno())
cap.start()
tmpfile.write(data1)
tmpfile.flush()
cap.writeorg(data2)
scap = cap.snap()
cap.done()
assert scap == totext(data1)
with open(tmpfile.name, 'rb') as stmp_file:
stmp = stmp_file.read()
assert stmp == data2
def test_simple_resume_suspend(self, tmpfile):
with saved_fd(1):
cap = capture.FDCapture(1)
cap.start()
data = tobytes("hello")
os.write(1, data)
sys.stdout.write("whatever")
s = cap.snap()
assert s == "hellowhatever"
cap.suspend()
os.write(1, tobytes("world"))
sys.stdout.write("qlwkej")
assert not cap.snap()
cap.resume()
os.write(1, tobytes("but now"))
sys.stdout.write(" yes\n")
s = cap.snap()
assert s == "but now yes\n"
cap.suspend()
cap.done()
pytest.raises(AttributeError, cap.suspend)
@contextlib.contextmanager
def saved_fd(fd):
new_fd = os.dup(fd)
try:
yield
finally:
os.dup2(new_fd, fd)
os.close(new_fd)
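# Rough usage sketch (illustrative only): keep fd 1 safe across a temporary
# redirection; `some_other_fd` below is hypothetical.
#   with saved_fd(1):
#       os.dup2(some_other_fd, 1)   # redirect stdout at the fd level
#       ...                         # original fd 1 is restored on exit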
class TestStdCapture:
captureclass = staticmethod(StdCapture)
@contextlib.contextmanager
def getcapture(self, **kw):
cap = self.__class__.captureclass(**kw)
cap.start_capturing()
try:
yield cap
finally:
cap.stop_capturing()
def test_capturing_done_simple(self):
with self.getcapture() as cap:
sys.stdout.write("hello")
sys.stderr.write("world")
out, err = cap.readouterr()
assert out == "hello"
assert err == "world"
def test_capturing_reset_simple(self):
with self.getcapture() as cap:
print("hello world")
sys.stderr.write("hello error\n")
out, err = cap.readouterr()
assert out == "hello world\n"
assert err == "hello error\n"
def test_capturing_readouterr(self):
with self.getcapture() as cap:
print ("hello world")
sys.stderr.write("hello error\n")
out, err = cap.readouterr()
assert out == "hello world\n"
assert err == "hello error\n"
sys.stderr.write("error2")
out, err = cap.readouterr()
assert err == "error2"
def test_capturing_readouterr_unicode(self):
with self.getcapture() as cap:
print ("hx\xc4\x85\xc4\x87")
out, err = cap.readouterr()
assert out == py.builtin._totext("hx\xc4\x85\xc4\x87\n", "utf8")
@pytest.mark.skipif('sys.version_info >= (3,)',
reason='text output different for bytes on python3')
def test_capturing_readouterr_decode_error_handling(self):
with self.getcapture() as cap:
            # triggered an internal error in pytest
print('\xa6')
out, err = cap.readouterr()
assert out == py.builtin._totext('\ufffd\n', 'unicode-escape')
def test_reset_twice_error(self):
with self.getcapture() as cap:
print ("hello")
out, err = cap.readouterr()
pytest.raises(ValueError, cap.stop_capturing)
assert out == "hello\n"
assert not err
def test_capturing_modify_sysouterr_in_between(self):
oldout = sys.stdout
olderr = sys.stderr
with self.getcapture() as cap:
sys.stdout.write("hello")
sys.stderr.write("world")
sys.stdout = capture.TextIO()
sys.stderr = capture.TextIO()
print ("not seen")
sys.stderr.write("not seen\n")
out, err = cap.readouterr()
assert out == "hello"
assert err == "world"
assert sys.stdout == oldout
assert sys.stderr == olderr
def test_capturing_error_recursive(self):
with self.getcapture() as cap1:
print ("cap1")
with self.getcapture() as cap2:
print ("cap2")
out2, err2 = cap2.readouterr()
out1, err1 = cap1.readouterr()
assert out1 == "cap1\n"
assert out2 == "cap2\n"
def test_just_out_capture(self):
with self.getcapture(out=True, err=False) as cap:
sys.stdout.write("hello")
sys.stderr.write("world")
out, err = cap.readouterr()
assert out == "hello"
assert not err
def test_just_err_capture(self):
with self.getcapture(out=False, err=True) as cap:
sys.stdout.write("hello")
sys.stderr.write("world")
out, err = cap.readouterr()
assert err == "world"
assert not out
def test_stdin_restored(self):
old = sys.stdin
with self.getcapture(in_=True):
newstdin = sys.stdin
assert newstdin != sys.stdin
assert sys.stdin is old
def test_stdin_nulled_by_default(self):
print ("XXX this test may well hang instead of crashing")
print ("XXX which indicates an error in the underlying capturing")
print ("XXX mechanisms")
with self.getcapture():
pytest.raises(IOError, "sys.stdin.read()")
class TestStdCaptureFD(TestStdCapture):
pytestmark = needsosdup
captureclass = staticmethod(StdCaptureFD)
def test_simple_only_fd(self, testdir):
testdir.makepyfile("""
import os
def test_x():
os.write(1, "hello\\n".encode("ascii"))
assert 0
""")
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines("""
*test_x*
*assert 0*
*Captured stdout*
""")
def test_intermingling(self):
with self.getcapture() as cap:
oswritebytes(1, "1")
sys.stdout.write(str(2))
sys.stdout.flush()
oswritebytes(1, "3")
oswritebytes(2, "a")
sys.stderr.write("b")
sys.stderr.flush()
oswritebytes(2, "c")
out, err = cap.readouterr()
assert out == "123"
assert err == "abc"
def test_many(self, capfd):
with lsof_check():
for i in range(10):
cap = StdCaptureFD()
cap.stop_capturing()
class TestStdCaptureFDinvalidFD:
pytestmark = needsosdup
def test_stdcapture_fd_invalid_fd(self, testdir):
testdir.makepyfile("""
import os
from _pytest import capture
def StdCaptureFD(out=True, err=True, in_=True):
return capture.MultiCapture(out, err, in_,
Capture=capture.FDCapture)
def test_stdout():
os.close(1)
cap = StdCaptureFD(out=True, err=False, in_=False)
cap.stop_capturing()
def test_stderr():
os.close(2)
cap = StdCaptureFD(out=False, err=True, in_=False)
cap.stop_capturing()
def test_stdin():
os.close(0)
cap = StdCaptureFD(out=False, err=False, in_=True)
cap.stop_capturing()
""")
result = testdir.runpytest_subprocess("--capture=fd")
assert result.ret == 0
assert result.parseoutcomes()['passed'] == 3
def test_capture_not_started_but_reset():
capsys = StdCapture()
capsys.stop_capturing()
@needsosdup
@pytest.mark.parametrize('use', [True, False])
def test_fdcapture_tmpfile_remains_the_same(tmpfile, use):
if not use:
tmpfile = True
cap = StdCaptureFD(out=False, err=tmpfile)
try:
cap.start_capturing()
capfile = cap.err.tmpfile
cap.readouterr()
finally:
cap.stop_capturing()
capfile2 = cap.err.tmpfile
assert capfile2 == capfile
@needsosdup
def test_close_and_capture_again(testdir):
testdir.makepyfile("""
import os
def test_close():
os.close(1)
def test_capture_again():
os.write(1, b"hello\\n")
assert 0
""")
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines("""
*test_capture_again*
*assert 0*
*stdout*
*hello*
""")
@pytest.mark.parametrize('method', ['SysCapture', 'FDCapture'])
def test_capturing_and_logging_fundamentals(testdir, method):
if method == "StdCaptureFD" and not hasattr(os, 'dup'):
pytest.skip("need os.dup")
# here we check a fundamental feature
p = testdir.makepyfile("""
import sys, os
import py, logging
from _pytest import capture
cap = capture.MultiCapture(out=False, in_=False,
Capture=capture.%s)
cap.start_capturing()
logging.warn("hello1")
outerr = cap.readouterr()
print ("suspend, captured %%s" %%(outerr,))
logging.warn("hello2")
cap.pop_outerr_to_orig()
logging.warn("hello3")
outerr = cap.readouterr()
print ("suspend2, captured %%s" %% (outerr,))
""" % (method,))
result = testdir.runpython(p)
result.stdout.fnmatch_lines("""
suspend, captured*hello1*
suspend2, captured*WARNING:root:hello3*
""")
result.stderr.fnmatch_lines("""
WARNING:root:hello2
""")
assert "atexit" not in result.stderr.str()
def test_error_attribute_issue555(testdir):
testdir.makepyfile("""
import sys
def test_capattr():
assert sys.stdout.errors == "strict"
assert sys.stderr.errors == "strict"
""")
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_dontreadfrominput_has_encoding(testdir):
testdir.makepyfile("""
import sys
def test_capattr():
# should not raise AttributeError
assert sys.stdout.encoding
assert sys.stderr.encoding
""")
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_pickling_and_unpickling_enocded_file():
# See https://bitbucket.org/pytest-dev/pytest/pull-request/194
    # pickle.loads() runs into infinite recursion if
# EncodedFile.__getattr__ is not implemented properly
ef = capture.EncodedFile(None, None)
ef_as_str = pickle.dumps(ef)
pickle.loads(ef_as_str)
| mpl-2.0 | -3,719,405,192,336,998,000 | 29.346442 | 79 | 0.541376 | false |
NumberZeroSoftware/PDFINVEST | pdfapp/migrations/0010_auto_20170225_0034.py | 1 | 2473 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-25 00:34
from __future__ import unicode_literals
from django.db import migrations, models
import pdfapp.validators
class Migration(migrations.Migration):
dependencies = [
('pdfapp', '0009_auto_20170223_2247'),
]
operations = [
migrations.AlterField(
model_name='program',
name='credits',
field=models.IntegerField(blank=True, null=True, validators=[pdfapp.validators.validate_credits]),
),
migrations.AlterField(
model_name='program',
name='evaluation_strategies',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='program',
name='laboratory_hours',
field=models.IntegerField(blank=True, null=True, validators=[pdfapp.validators.validate_positive_integer]),
),
migrations.AlterField(
model_name='program',
name='methodological_strategies',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='program',
name='objectives',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='program',
name='practice_hours',
field=models.IntegerField(blank=True, null=True, validators=[pdfapp.validators.validate_positive_integer]),
),
migrations.AlterField(
model_name='program',
name='recommended_sources',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='program',
name='requirements',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='program',
name='synoptic_content',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='program',
name='theory_hours',
field=models.IntegerField(blank=True, null=True, validators=[pdfapp.validators.validate_positive_integer]),
),
migrations.AlterField(
model_name='program',
name='validity_year',
field=models.IntegerField(validators=[pdfapp.validators.validate_program_years]),
),
]
| mit | -8,271,191,923,686,082,000 | 33.830986 | 119 | 0.585928 | false |
alfredhq/djlint | djlint/analyzers/base.py | 3 | 8412 | import ast
import os
from .context import Context
class BaseAnalyzer(object):
"""
Base code analyzer class. Takes dict `file path => ast node` as first
param and path to repository as second.
Subclass this class and implement `analyze_file` method if you want to
create new code analyzer.
"""
surround_by = 2
def __init__(self, code_dict, repo_path):
self._file_lines = None
self.code_dict = code_dict
self.repo_path = repo_path
def get_file_lines(self, filepath, start, stop):
"""
        Return a code snippet from file `filepath` for lines `start`..`stop`
        as tuples `(<line number>, <importance>, <text>)`, extending it by
        `surround_by` lines up and down if possible.
        If the important part has blank lines at the bottom, they will be removed.
"""
if self._file_lines is None:
with open(os.path.join(self.repo_path, filepath)) as f:
self._file_lines = f.readlines()
if stop is None:
lines = self._file_lines[start - 1:]
else:
lines = self._file_lines[start - 1:stop]
for i, line in enumerate(lines):
lines[i] = [start + i, True, line.rstrip()]
while lines and self.is_empty_line(lines[-1][-1]):
lines.pop()
if not lines:
return []
stop = lines[0][0]
start = max(1, stop - self.surround_by)
prefix_lines = []
for i, line in enumerate(self._file_lines[start - 1:stop - 1], start=start):
prefix_lines.append([i, False, line.rstrip()])
start = lines[-1][0] + 1
stop = start + self.surround_by
suffix_lines = []
for i, line in enumerate(self._file_lines[start - 1:stop - 1], start=start):
suffix_lines.append([i, False, line.rstrip()])
return prefix_lines + lines + suffix_lines
def is_empty_line(self, line):
return not line.split('#')[0].strip()
def clear_file_lines_cache(self):
self._file_lines = None
def analyze_file(self, filepath, code):
raise NotImplementedError
def analyze(self):
"""
Iterate over `code_dict` and yield all results from every file.
"""
for filepath, code in self.code_dict.items():
for result in self.analyze_file(filepath, code):
yield result
self.clear_file_lines_cache()
class CodeSnippet(list):
"""
Represents code snippet as list of tuples `(<line number>, <importance>,
<text>)`.
Use `add_line` method to add new lines to the snippet.
"""
def add_line(self, lineno, text, important=True):
"""
Add new line to the end of snippet.
"""
self.append((lineno, important, text))
class Result(object):
"""
Represents the result of code analysis.
"""
def __init__(self, description, path, line):
self.description = description
self.path = path
self.line = line
self.source = CodeSnippet()
self.solution = CodeSnippet()
class AttributeVisitor(ast.NodeVisitor):
"""
Process attribute node and build the name of the attribute if possible.
Currently only simple expressions are supported (like `foo.bar.baz`).
    If it is not possible to get the attribute name as a string, `is_usable`
    is set to `False`.
After `visit()` method call `get_name()` method can be used to get
attribute name if `is_usable` == `True`.
"""
def __init__(self):
self.is_usable = True
self.name = []
def get_name(self):
"""
Get the name of the visited attribute.
"""
return '.'.join(self.name)
def visit_Attribute(self, node):
self.generic_visit(node)
self.name.append(node.attr)
def visit_Name(self, node):
self.name.append(node.id)
def visit_Load(self, node):
pass
def generic_visit(self, node):
        # If the attribute node consists of anything other than `Attribute`
        # and `Name` nodes, mark it as unusable.
if not isinstance(node, ast.Attribute):
self.is_usable = False
ast.NodeVisitor.generic_visit(self, node)
def set_lineno(meth):
def decorator(self, node):
self.push_lineno(node.lineno)
result = meth(self, node)
self.pop_lineno()
return result
decorator.__name__ = meth.__name__
return decorator
class ModuleVisitor(ast.NodeVisitor):
"""
Collect interesting imported names during module nodes visiting.
"""
interesting = {}
def __init__(self):
self.names = Context()
self.lineno = []
self.found = {}
def add_found(self, name, node):
lineno_level = self.get_lineno_level()
if lineno_level not in self.found:
self.found[lineno_level] = []
self.found[lineno_level].append([name, node, self.get_lineno(), None])
def get_found(self):
for level in self.found.values():
for found in level:
yield found
def push_lineno(self, lineno):
self.lineno.append(lineno)
lineno_level = self.get_lineno_level()
for level in self.found.keys():
if level < lineno_level:
return
for found in self.found[level]:
if found[-1] is None and lineno >= found[-2]:
found[-1] = max(lineno - 1, found[-2])
def pop_lineno(self):
return self.lineno.pop()
def get_lineno(self):
return self.lineno[-1]
def get_lineno_level(self):
return len(self.lineno)
def update_names(self, aliases, get_path):
"""
Update `names` context with interesting imported `aliases` using
`get_path` function to get full path to the object by object name.
"""
for alias in aliases:
path = get_path(alias.name)
if path not in self.interesting:
continue
if self.interesting[path]:
for attr in self.interesting[path]:
name = '.'.join((alias.asname or alias.name, attr))
self.names[name] = '.'.join((path, attr))
else:
name = alias.asname or alias.name
self.names[name] = path
@set_lineno
def visit_Import(self, node):
self.update_names(node.names, lambda x: x)
@set_lineno
def visit_ImportFrom(self, node):
self.update_names(node.names, lambda x: '.'.join((node.module, x)))
@set_lineno
def visit_FunctionDef(self, node):
        # Create new scope in `names` context when entering a function body
self.names.push()
self.generic_visit(node)
self.names.pop()
@set_lineno
def visit_Assign(self, node):
        # Some assignments attach interesting imports to new names.
# Trying to parse it.
visitor = AttributeVisitor()
visitor.visit(node.value)
if not visitor.is_usable:
# Seems on the right side is not an attribute. Let's visit
# assignment as it also can contain interesting code.
self.generic_visit(node)
return
name = visitor.get_name()
# skipping assignment if value is not interesting
if name not in self.names:
return
# trying to parse the left-side attribute name
for target in node.targets:
visitor = AttributeVisitor()
visitor.visit(target)
if not visitor.is_usable:
continue
target = visitor.get_name()
self.names[target] = self.names[name]
@set_lineno
def visit_Call(self, node):
self.generic_visit(node)
@set_lineno
def visit_List(self, node):
self.generic_visit(node)
@set_lineno
def visit_Tuple(self, node):
self.generic_visit(node)
class DeprecatedCodeVisitor(ModuleVisitor):
def visit_Attribute(self, node):
visitor = AttributeVisitor()
visitor.visit(node)
if visitor.is_usable:
name = visitor.get_name()
if name in self.names:
self.add_found(self.names[name], node)
def visit_Name(self, node):
if node.id in self.names:
self.add_found(self.names[node.id], node)
| isc | 7,375,841,228,670,127,000 | 28.724382 | 84 | 0.58048 | false |
eriwoon/ShellScriptCollect | Python/replaceAVP.py | 1 | 3204 | #! /usr/bin/python
#coding: utf-8
import sys
import os
import re
def findAllFile(dir):
folder = [dir]
file = []
while len(folder) > 0:
curDir = folder[0]
folder.pop(0)
lst = os.listdir(curDir)
for i in lst:
name = curDir + '\\' + i
if os.path.isfile(name) == True:
file.append(name)
else:
folder.append(name)
return file
def filterExtension(files, extension):
outputFiles = []
for file in files:
if os.path.splitext(file)[1] == extension:
outputFiles.append(file)
return outputFiles
def avpReplaceDefination():
return {
re.compile('^\s*avp\s+264\s*$') : '''ASSIGN_RULE = crl_begin
shadow0;
} crl_end;
''',
re.compile('^\s*avp\s+296\s*$') : '''ASSIGN_RULE = crl_begin
shadow1;
} crl_end;
'''
}
def replaceFilesWithAvp(files, avpReplace):
log = open("replaceAVP.py.log", "a+")
for file in files:
log.write("open file:" + file + "\n")
f = open(file, 'r')
new_content = ""
reAssignmentModeA = re.compile("^\s*ASSIGNMENT_MODE\s*=\s*A\s*;.*")
reAssignmentModeR = re.compile("^\s*ASSIGNMENT_MODE\s*=\s*R\s*;.*")
reEvtAttrId = re.compile("^\s*EVT_ATTR_ID\s*=.*;.*")
reLiftBrace = re.compile("^\s*{.*")
reRightBrace = re.compile("^\s*}.*")
lineNo = 1
line = f.readline()
while line != "":
patternMatched = False
for pattern in avpReplace:
if pattern.match(line) :
#print("line matched pattern : " + line)
log.write(str(lineNo) + ":line matched pattern : " + line)
patternMatched = True
new_content += line
lineNo += 1
line = f.readline()
while line != "":
#This is the place to find all content enclosed by { }
if reRightBrace.match(line):
#print("reRightBrace.matched" + line)
log.write(str(lineNo) + ":reRightBrace.matched : " + line + '\n')
new_content += line
break
elif reAssignmentModeA.match(line):
#print("reAssignmentModeA.matched" + line)
log.write(str(lineNo) + ":reAssignmentModeA.matched : " + line)
split = line.split("=")
newline = split[0] + "=" + split[1].replace("A","R")
new_content += newline
elif reAssignmentModeR.match(line):
#print("reAssignmentModeR.matched" + line)
log.write(str(lineNo) + ":reAssignmentModeR.matched : " + line)
pass
elif reEvtAttrId.match(line):
#print("reEvtAttrId.matched" + line)
log.write(str(lineNo) + ":reEvtAttrId.matched : " + line)
split = line.split("EVT_ATTR_ID")
newline = split[0] + avpReplace[pattern]
new_content += newline
else:
new_content += line
lineNo += 1
line = f.readline()
if patternMatched == False:
new_content += line
lineNo += 1
line = f.readline()
f.close()
fout = open(file, "w")
fout.write(new_content)
fout.close()
log.write("close file:" + file + " Finished\n")
log.close()
if __name__ == '__main__':
cwd = os.getcwd()
files = findAllFile(cwd)
files = filterExtension(files, '.diamEncoding')
#print(files)
avpReplace = avpReplaceDefination()
replaceFilesWithAvp(files, avpReplace)
print("Replace finished, please refer to replaceAVP.py.log")
| mit | 2,863,149,763,808,397,000 | 23.272727 | 72 | 0.602996 | false |
lochiiconnectivity/boto | boto/connection.py | 1 | 44959 | # Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# Copyright (c) 2010 Google
# Copyright (c) 2008 rPath, Inc.
# Copyright (c) 2009 The Echo Nest Corporation
# Copyright (c) 2010, Eucalyptus Systems, Inc.
# Copyright (c) 2011, Nexenta Systems Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# Parts of this code were copied or derived from sample code supplied by AWS.
# The following notice applies to that code.
#
# This software code is made available "AS IS" without warranties of any
# kind. You may copy, display, modify and redistribute the software
# code either by itself or as incorporated into your code; provided that
# you do not remove any proprietary notices. Your use of this software
# code is at your own risk and you waive any claim against Amazon
# Digital Services, Inc. or its affiliates with respect to your use of
# this software code. (c) 2006 Amazon Digital Services, Inc. or its
# affiliates.
"""
Handles basic connections to AWS
"""
from __future__ import with_statement
import base64
import errno
import httplib
import os
import Queue
import random
import re
import socket
import sys
import time
import urllib
import urlparse
import xml.sax
import copy
import auth
import auth_handler
import boto
import boto.utils
import boto.handler
import boto.cacerts
from boto import config, UserAgent
from boto.exception import AWSConnectionError
from boto.exception import BotoClientError
from boto.exception import BotoServerError
from boto.exception import PleaseRetryException
from boto.provider import Provider
from boto.resultset import ResultSet
HAVE_HTTPS_CONNECTION = False
try:
import ssl
from boto import https_connection
    # Google App Engine runs on Python 2.5, so it doesn't have ssl.SSLError.
if hasattr(ssl, 'SSLError'):
HAVE_HTTPS_CONNECTION = True
except ImportError:
pass
try:
import threading
except ImportError:
import dummy_threading as threading
ON_APP_ENGINE = all(key in os.environ for key in (
'USER_IS_ADMIN', 'CURRENT_VERSION_ID', 'APPLICATION_ID'))
PORTS_BY_SECURITY = {True: 443,
False: 80}
DEFAULT_CA_CERTS_FILE = os.path.join(os.path.dirname(os.path.abspath(boto.cacerts.__file__ )), "cacerts.txt")
class HostConnectionPool(object):
"""
A pool of connections for one remote (host,is_secure).
When connections are added to the pool, they are put into a
pending queue. The _mexe method returns connections to the pool
    before the response body has been read, so the connections aren't
ready to send another request yet. They stay in the pending queue
until they are ready for another request, at which point they are
returned to the pool of ready connections.
The pool of ready connections is an ordered list of
(connection,time) pairs, where the time is the time the connection
was returned from _mexe. After a certain period of time,
connections are considered stale, and discarded rather than being
reused. This saves having to wait for the connection to time out
if AWS has decided to close it on the other end because of
inactivity.
Thread Safety:
        This class is used only from ConnectionPool while its mutex
is held.
"""
def __init__(self):
self.queue = []
def size(self):
"""
Returns the number of connections in the pool for this host.
Some of the connections may still be in use, and may not be
ready to be returned by get().
"""
return len(self.queue)
def put(self, conn):
"""
Adds a connection to the pool, along with the time it was
added.
"""
self.queue.append((conn, time.time()))
def get(self):
"""
Returns the next connection in this pool that is ready to be
        reused. Returns None if there aren't any.
"""
# Discard ready connections that are too old.
self.clean()
# Return the first connection that is ready, and remove it
# from the queue. Connections that aren't ready are returned
# to the end of the queue with an updated time, on the
# assumption that somebody is actively reading the response.
for _ in range(len(self.queue)):
(conn, _) = self.queue.pop(0)
if self._conn_ready(conn):
return conn
else:
self.put(conn)
return None
def _conn_ready(self, conn):
"""
There is a nice state diagram at the top of httplib.py. It
indicates that once the response headers have been read (which
_mexe does before adding the connection to the pool), a
response is attached to the connection, and it stays there
until it's done reading. This isn't entirely true: even after
the client is done reading, the response may be closed, but
not removed from the connection yet.
This is ugly, reading a private instance variable, but the
state we care about isn't available in any public methods.
"""
if ON_APP_ENGINE:
# Google AppEngine implementation of HTTPConnection doesn't contain
# _HTTPConnection__response attribute. Moreover, it's not possible
# to determine if given connection is ready. Reusing connections
# simply doesn't make sense with App Engine urlfetch service.
return False
else:
response = getattr(conn, '_HTTPConnection__response', None)
return (response is None) or response.isclosed()
def clean(self):
"""
Get rid of stale connections.
"""
# Note that we do not close the connection here -- somebody
# may still be reading from it.
while len(self.queue) > 0 and self._pair_stale(self.queue[0]):
self.queue.pop(0)
def _pair_stale(self, pair):
"""
        Returns true if the (connection,time) pair is too old to be
used.
"""
(_conn, return_time) = pair
now = time.time()
return return_time + ConnectionPool.STALE_DURATION < now
class ConnectionPool(object):
"""
A connection pool that expires connections after a fixed period of
time. This saves time spent waiting for a connection that AWS has
timed out on the other end.
This class is thread-safe.
"""
#
    # The amount of time between calls to clean.
#
CLEAN_INTERVAL = 5.0
#
# How long before a connection becomes "stale" and won't be reused
# again. The intention is that this time is less that the timeout
# period that AWS uses, so we'll never try to reuse a connection
# and find that AWS is timing it out.
#
# Experimentation in July 2011 shows that AWS starts timing things
# out after three minutes. The 60 seconds here is conservative so
    # we should never hit that 3-minute timeout.
#
STALE_DURATION = 60.0
def __init__(self):
# Mapping from (host,is_secure) to HostConnectionPool.
# If a pool becomes empty, it is removed.
self.host_to_pool = {}
# The last time the pool was cleaned.
self.last_clean_time = 0.0
self.mutex = threading.Lock()
ConnectionPool.STALE_DURATION = \
config.getfloat('Boto', 'connection_stale_duration',
ConnectionPool.STALE_DURATION)
def __getstate__(self):
pickled_dict = copy.copy(self.__dict__)
pickled_dict['host_to_pool'] = {}
del pickled_dict['mutex']
return pickled_dict
def __setstate__(self, dct):
self.__init__()
def size(self):
"""
Returns the number of connections in the pool.
"""
return sum(pool.size() for pool in self.host_to_pool.values())
def get_http_connection(self, host, is_secure):
"""
Gets a connection from the pool for the named host. Returns
None if there is no connection that can be reused. It's the caller's
responsibility to call close() on the connection when it's no longer
needed.
"""
self.clean()
with self.mutex:
key = (host, is_secure)
if key not in self.host_to_pool:
return None
return self.host_to_pool[key].get()
def put_http_connection(self, host, is_secure, conn):
"""
Adds a connection to the pool of connections that can be
reused for the named host.
"""
with self.mutex:
key = (host, is_secure)
if key not in self.host_to_pool:
self.host_to_pool[key] = HostConnectionPool()
self.host_to_pool[key].put(conn)
def clean(self):
"""
Clean up the stale connections in all of the pools, and then
get rid of empty pools. Pools clean themselves every time a
connection is fetched; this cleaning takes care of pools that
aren't being used any more, so nothing is being gotten from
them.
"""
with self.mutex:
now = time.time()
if self.last_clean_time + self.CLEAN_INTERVAL < now:
to_remove = []
for (host, pool) in self.host_to_pool.items():
pool.clean()
if pool.size() == 0:
to_remove.append(host)
for host in to_remove:
del self.host_to_pool[host]
self.last_clean_time = now
class HTTPRequest(object):
def __init__(self, method, protocol, host, port, path, auth_path,
params, headers, body):
"""Represents an HTTP request.
:type method: string
:param method: The HTTP method name, 'GET', 'POST', 'PUT' etc.
:type protocol: string
:param protocol: The http protocol used, 'http' or 'https'.
:type host: string
:param host: Host to which the request is addressed. eg. abc.com
:type port: int
:param port: port on which the request is being sent. Zero means unset,
in which case default port will be chosen.
:type path: string
:param path: URL path that is being accessed.
:type auth_path: string
        :param auth_path: The part of the URL path used when creating the
authentication string.
:type params: dict
:param params: HTTP url query parameters, with key as name of
the param, and value as value of param.
:type headers: dict
:param headers: HTTP headers, with key as name of the header and value
as value of header.
:type body: string
:param body: Body of the HTTP request. If not present, will be None or
empty string ('').
"""
self.method = method
self.protocol = protocol
self.host = host
self.port = port
self.path = path
if auth_path is None:
auth_path = path
self.auth_path = auth_path
self.params = params
# chunked Transfer-Encoding should act only on PUT request.
if headers and 'Transfer-Encoding' in headers and \
headers['Transfer-Encoding'] == 'chunked' and \
self.method != 'PUT':
self.headers = headers.copy()
del self.headers['Transfer-Encoding']
else:
self.headers = headers
self.body = body
def __str__(self):
return (('method:(%s) protocol:(%s) host(%s) port(%s) path(%s) '
'params(%s) headers(%s) body(%s)') % (self.method,
self.protocol, self.host, self.port, self.path, self.params,
self.headers, self.body))
def authorize(self, connection, **kwargs):
for key in self.headers:
val = self.headers[key]
if isinstance(val, unicode):
self.headers[key] = urllib.quote_plus(val.encode('utf-8'))
connection._auth_handler.add_auth(self, **kwargs)
self.headers['User-Agent'] = UserAgent
# I'm not sure if this is still needed, now that add_auth is
# setting the content-length for POST requests.
if 'Content-Length' not in self.headers:
if 'Transfer-Encoding' not in self.headers or \
self.headers['Transfer-Encoding'] != 'chunked':
self.headers['Content-Length'] = str(len(self.body))
class HTTPResponse(httplib.HTTPResponse):
def __init__(self, *args, **kwargs):
httplib.HTTPResponse.__init__(self, *args, **kwargs)
self._cached_response = ''
def read(self, amt=None):
"""Read the response.
This method does not have the same behavior as
httplib.HTTPResponse.read. Instead, if this method is called with
no ``amt`` arg, then the response body will be cached. Subsequent
calls to ``read()`` with no args **will return the cached response**.
"""
if amt is None:
# The reason for doing this is that many places in boto call
            # response.read() and expect to get the response body that they
# can then process. To make sure this always works as they expect
# we're caching the response so that multiple calls to read()
# will return the full body. Note that this behavior only
# happens if the amt arg is not specified.
if not self._cached_response:
self._cached_response = httplib.HTTPResponse.read(self)
return self._cached_response
else:
return httplib.HTTPResponse.read(self, amt)
class AWSAuthConnection(object):
def __init__(self, host, aws_access_key_id=None,
aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, debug=0,
https_connection_factory=None, path='/',
provider='aws', security_token=None,
suppress_consec_slashes=True,
validate_certs=True):
"""
:type host: str
:param host: The host to make the connection to
:keyword str aws_access_key_id: Your AWS Access Key ID (provided by
Amazon). If none is specified, the value in your
``AWS_ACCESS_KEY_ID`` environmental variable is used.
:keyword str aws_secret_access_key: Your AWS Secret Access Key
(provided by Amazon). If none is specified, the value in your
``AWS_SECRET_ACCESS_KEY`` environmental variable is used.
:type is_secure: boolean
:param is_secure: Whether the connection is over SSL
:type https_connection_factory: list or tuple
:param https_connection_factory: A pair of an HTTP connection
factory and the exceptions to catch. The factory should have
a similar interface to L{httplib.HTTPSConnection}.
:param str proxy: Address/hostname for a proxy server
:type proxy_port: int
:param proxy_port: The port to use when connecting over a proxy
:type proxy_user: str
:param proxy_user: The username to connect with on the proxy
:type proxy_pass: str
:param proxy_pass: The password to use when connection over a proxy.
:type port: int
:param port: The port to use to connect
:type suppress_consec_slashes: bool
:param suppress_consec_slashes: If provided, controls whether
consecutive slashes will be suppressed in key paths.
:type validate_certs: bool
:param validate_certs: Controls whether SSL certificates
will be validated or not. Defaults to True.
"""
self.suppress_consec_slashes = suppress_consec_slashes
self.num_retries = 6
# Override passed-in is_secure setting if value was defined in config.
if config.has_option('Boto', 'is_secure'):
is_secure = config.getboolean('Boto', 'is_secure')
self.is_secure = is_secure
# Whether or not to validate server certificates.
# The default is now to validate certificates. This can be
# overridden in the boto config file are by passing an
# explicit validate_certs parameter to the class constructor.
self.https_validate_certificates = config.getbool(
'Boto', 'https_validate_certificates',
validate_certs)
if self.https_validate_certificates and not HAVE_HTTPS_CONNECTION:
raise BotoClientError(
"SSL server certificate validation is enabled in boto "
"configuration, but Python dependencies required to "
"support this feature are not available. Certificate "
"validation is only supported when running under Python "
"2.6 or later.")
self.ca_certificates_file = config.get_value(
'Boto', 'ca_certificates_file', DEFAULT_CA_CERTS_FILE)
self.handle_proxy(proxy, proxy_port, proxy_user, proxy_pass)
# define exceptions from httplib that we want to catch and retry
self.http_exceptions = (httplib.HTTPException, socket.error,
socket.gaierror, httplib.BadStatusLine)
# define subclasses of the above that are not retryable.
self.http_unretryable_exceptions = []
if HAVE_HTTPS_CONNECTION:
self.http_unretryable_exceptions.append(
https_connection.InvalidCertificateException)
# define values in socket exceptions we don't want to catch
self.socket_exception_values = (errno.EINTR,)
if https_connection_factory is not None:
self.https_connection_factory = https_connection_factory[0]
self.http_exceptions += https_connection_factory[1]
else:
self.https_connection_factory = None
if (is_secure):
self.protocol = 'https'
else:
self.protocol = 'http'
self.host = host
self.path = path
# if the value passed in for debug
if not isinstance(debug, (int, long)):
debug = 0
self.debug = config.getint('Boto', 'debug', debug)
if port:
self.port = port
else:
self.port = PORTS_BY_SECURITY[is_secure]
# Timeout used to tell httplib how long to wait for socket timeouts.
# Default is to leave timeout unchanged, which will in turn result in
# the socket's default global timeout being used. To specify a
# timeout, set http_socket_timeout in Boto config. Regardless,
# timeouts will only be applied if Python is 2.6 or greater.
self.http_connection_kwargs = {}
if (sys.version_info[0], sys.version_info[1]) >= (2, 6):
# If timeout isn't defined in boto config file, use 70 second
# default as recommended by
# http://docs.aws.amazon.com/amazonswf/latest/apireference/API_PollForActivityTask.html
self.http_connection_kwargs['timeout'] = config.getint(
'Boto', 'http_socket_timeout', 70)
if isinstance(provider, Provider):
# Allow overriding Provider
self.provider = provider
else:
self._provider_type = provider
self.provider = Provider(self._provider_type,
aws_access_key_id,
aws_secret_access_key,
security_token)
# Allow config file to override default host and port.
if self.provider.host:
self.host = self.provider.host
if self.provider.port:
self.port = self.provider.port
self._pool = ConnectionPool()
self._connection = (self.server_name(), self.is_secure)
self._last_rs = None
self._auth_handler = auth.get_auth_handler(
host, config, self.provider, self._required_auth_capability())
if getattr(self, 'AuthServiceName', None) is not None:
self.auth_service_name = self.AuthServiceName
def __repr__(self):
return '%s:%s' % (self.__class__.__name__, self.host)
def _required_auth_capability(self):
return []
def _get_auth_service_name(self):
return getattr(self._auth_handler, 'service_name')
# For Sigv4, the auth_service_name/auth_region_name properties allow
# the service_name/region_name to be explicitly set instead of being
# derived from the endpoint url.
def _set_auth_service_name(self, value):
self._auth_handler.service_name = value
auth_service_name = property(_get_auth_service_name, _set_auth_service_name)
def _get_auth_region_name(self):
return getattr(self._auth_handler, 'region_name')
def _set_auth_region_name(self, value):
self._auth_handler.region_name = value
auth_region_name = property(_get_auth_region_name, _set_auth_region_name)
def connection(self):
return self.get_http_connection(*self._connection)
connection = property(connection)
def aws_access_key_id(self):
return self.provider.access_key
aws_access_key_id = property(aws_access_key_id)
gs_access_key_id = aws_access_key_id
access_key = aws_access_key_id
def aws_secret_access_key(self):
return self.provider.secret_key
aws_secret_access_key = property(aws_secret_access_key)
gs_secret_access_key = aws_secret_access_key
secret_key = aws_secret_access_key
def get_path(self, path='/'):
# The default behavior is to suppress consecutive slashes for reasons
# discussed at
# https://groups.google.com/forum/#!topic/boto-dev/-ft0XPUy0y8
# You can override that behavior with the suppress_consec_slashes param.
if not self.suppress_consec_slashes:
return self.path + re.sub('^(/*)/', "\\1", path)
pos = path.find('?')
if pos >= 0:
params = path[pos:]
path = path[:pos]
else:
params = None
if path[-1] == '/':
need_trailing = True
else:
need_trailing = False
path_elements = self.path.split('/')
path_elements.extend(path.split('/'))
path_elements = [p for p in path_elements if p]
path = '/' + '/'.join(path_elements)
if path[-1] != '/' and need_trailing:
path += '/'
if params:
path = path + params
return path
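    # Illustrative sketch (not part of the original code; base path and key are
    # invented): with self.path == '/base/' and suppress_consec_slashes enabled,
    # a call like
    #   conn.get_path('//svc//op/?x=1')
    # splits off the query string, drops the empty path elements and restores
    # the trailing slash, returning '/base/svc/op/?x=1'.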
def server_name(self, port=None):
if not port:
port = self.port
if port == 80:
signature_host = self.host
else:
# This unfortunate little hack can be attributed to
# a difference in the 2.6 version of httplib. In old
# versions, it would append ":443" to the hostname sent
# in the Host header and so we needed to make sure we
# did the same when calculating the V2 signature. In 2.6
# (and higher!)
# it no longer does that. Hence, this kludge.
if ((ON_APP_ENGINE and sys.version[:3] == '2.5') or
sys.version[:3] in ('2.6', '2.7')) and port == 443:
signature_host = self.host
else:
signature_host = '%s:%d' % (self.host, port)
return signature_host
def handle_proxy(self, proxy, proxy_port, proxy_user, proxy_pass):
self.proxy = proxy
self.proxy_port = proxy_port
self.proxy_user = proxy_user
self.proxy_pass = proxy_pass
if 'http_proxy' in os.environ and not self.proxy:
pattern = re.compile(
'(?:http://)?' \
'(?:(?P<user>\w+):(?P<pass>.*)@)?' \
'(?P<host>[\w\-\.]+)' \
'(?::(?P<port>\d+))?'
)
match = pattern.match(os.environ['http_proxy'])
if match:
self.proxy = match.group('host')
self.proxy_port = match.group('port')
self.proxy_user = match.group('user')
self.proxy_pass = match.group('pass')
else:
if not self.proxy:
self.proxy = config.get_value('Boto', 'proxy', None)
if not self.proxy_port:
self.proxy_port = config.get_value('Boto', 'proxy_port', None)
if not self.proxy_user:
self.proxy_user = config.get_value('Boto', 'proxy_user', None)
if not self.proxy_pass:
self.proxy_pass = config.get_value('Boto', 'proxy_pass', None)
if not self.proxy_port and self.proxy:
print "http_proxy environment variable does not specify " \
"a port, using default"
self.proxy_port = self.port
        self.use_proxy = (self.proxy is not None)
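    # Hypothetical example of the parsing above (values invented): with
    #   http_proxy=http://alice:s3cret@proxy.example.com:3128
    # in the environment, the regex yields proxy='proxy.example.com',
    # proxy_port='3128', proxy_user='alice' and proxy_pass='s3cret'; if the URL
    # omits the port, proxy_port falls back to self.port with a warning.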
def get_http_connection(self, host, is_secure):
conn = self._pool.get_http_connection(host, is_secure)
if conn is not None:
return conn
else:
return self.new_http_connection(host, is_secure)
def new_http_connection(self, host, is_secure):
if self.use_proxy and not is_secure:
host = '%s:%d' % (self.proxy, int(self.proxy_port))
if host is None:
host = self.server_name()
if is_secure:
boto.log.debug(
'establishing HTTPS connection: host=%s, kwargs=%s',
host, self.http_connection_kwargs)
if self.use_proxy:
connection = self.proxy_ssl(host, is_secure and 443 or 80)
elif self.https_connection_factory:
connection = self.https_connection_factory(host)
elif self.https_validate_certificates and HAVE_HTTPS_CONNECTION:
connection = https_connection.CertValidatingHTTPSConnection(
host, ca_certs=self.ca_certificates_file,
**self.http_connection_kwargs)
else:
connection = httplib.HTTPSConnection(host,
**self.http_connection_kwargs)
else:
boto.log.debug('establishing HTTP connection: kwargs=%s' %
self.http_connection_kwargs)
if self.https_connection_factory:
                # even though the factory is nominally for https, it is too
                # handy not to allow overriding it for plain http as well.
connection = self.https_connection_factory(host,
**self.http_connection_kwargs)
else:
connection = httplib.HTTPConnection(host,
**self.http_connection_kwargs)
if self.debug > 1:
connection.set_debuglevel(self.debug)
# self.connection must be maintained for backwards-compatibility
# however, it must be dynamically pulled from the connection pool
# set a private variable which will enable that
if host.split(':')[0] == self.host and is_secure == self.is_secure:
self._connection = (host, is_secure)
# Set the response class of the http connection to use our custom
# class.
connection.response_class = HTTPResponse
return connection
def put_http_connection(self, host, is_secure, connection):
self._pool.put_http_connection(host, is_secure, connection)
def proxy_ssl(self, host=None, port=None):
if host and port:
host = '%s:%d' % (host, port)
else:
host = '%s:%d' % (self.host, self.port)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.connect((self.proxy, int(self.proxy_port)))
if "timeout" in self.http_connection_kwargs:
sock.settimeout(self.http_connection_kwargs["timeout"])
except:
raise
boto.log.debug("Proxy connection: CONNECT %s HTTP/1.0\r\n", host)
sock.sendall("CONNECT %s HTTP/1.0\r\n" % host)
sock.sendall("User-Agent: %s\r\n" % UserAgent)
if self.proxy_user and self.proxy_pass:
for k, v in self.get_proxy_auth_header().items():
sock.sendall("%s: %s\r\n" % (k, v))
# See discussion about this config option at
# https://groups.google.com/forum/?fromgroups#!topic/boto-dev/teenFvOq2Cc
if config.getbool('Boto', 'send_crlf_after_proxy_auth_headers', False):
sock.sendall("\r\n")
else:
sock.sendall("\r\n")
resp = httplib.HTTPResponse(sock, strict=True, debuglevel=self.debug)
resp.begin()
if resp.status != 200:
            # Fake a socket error, use a code that makes it obvious it hasn't
# been generated by the socket library
raise socket.error(-71,
"Error talking to HTTP proxy %s:%s: %s (%s)" %
(self.proxy, self.proxy_port,
resp.status, resp.reason))
# We can safely close the response, it duped the original socket
resp.close()
h = httplib.HTTPConnection(host)
if self.https_validate_certificates and HAVE_HTTPS_CONNECTION:
boto.log.debug("wrapping ssl socket for proxied connection; "
"CA certificate file=%s",
self.ca_certificates_file)
key_file = self.http_connection_kwargs.get('key_file', None)
cert_file = self.http_connection_kwargs.get('cert_file', None)
sslSock = ssl.wrap_socket(sock, keyfile=key_file,
certfile=cert_file,
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=self.ca_certificates_file)
cert = sslSock.getpeercert()
            hostname = self.host.split(':')[0]
if not https_connection.ValidateCertificateHostname(cert, hostname):
raise https_connection.InvalidCertificateException(
hostname, cert, 'hostname mismatch')
else:
# Fallback for old Python without ssl.wrap_socket
if hasattr(httplib, 'ssl'):
sslSock = httplib.ssl.SSLSocket(sock)
else:
sslSock = socket.ssl(sock, None, None)
sslSock = httplib.FakeSocket(sock, sslSock)
# This is a bit unclean
h.sock = sslSock
return h
def prefix_proxy_to_path(self, path, host=None):
path = self.protocol + '://' + (host or self.server_name()) + path
return path
def get_proxy_auth_header(self):
auth = base64.encodestring(self.proxy_user + ':' + self.proxy_pass)
return {'Proxy-Authorization': 'Basic %s' % auth}
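    # Sketch of the header produced above (credentials invented for the
    # example): with proxy_user='alice' and proxy_pass='s3cret',
    # base64.encodestring('alice:s3cret') returns 'YWxpY2U6czNjcmV0\n', so the
    # CONNECT request carries
    #   Proxy-Authorization: Basic YWxpY2U6czNjcmV0
    # (encodestring() appends a trailing newline).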
def _mexe(self, request, sender=None, override_num_retries=None,
retry_handler=None):
"""
mexe - Multi-execute inside a loop, retrying multiple times to handle
transient Internet errors by simply trying again.
Also handles redirects.
This code was inspired by the S3Utils classes posted to the boto-users
Google group by Larry Bates. Thanks!
"""
boto.log.debug('Method: %s' % request.method)
boto.log.debug('Path: %s' % request.path)
boto.log.debug('Data: %s' % request.body)
boto.log.debug('Headers: %s' % request.headers)
boto.log.debug('Host: %s' % request.host)
boto.log.debug('Params: %s' % request.params)
response = None
body = None
e = None
if override_num_retries is None:
num_retries = config.getint('Boto', 'num_retries', self.num_retries)
else:
num_retries = override_num_retries
i = 0
connection = self.get_http_connection(request.host, self.is_secure)
while i <= num_retries:
# Use binary exponential backoff to desynchronize client requests.
next_sleep = random.random() * (2 ** i)
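            # For example, attempt 0 sleeps up to 1 second before retrying,
            # attempt 1 up to 2 seconds, attempt 2 up to 4 seconds, and so on
            # (the sleep is skipped when a redirect is followed or the request
            # returns successfully).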
try:
# we now re-sign each request before it is retried
boto.log.debug('Token: %s' % self.provider.security_token)
request.authorize(connection=self)
if callable(sender):
response = sender(connection, request.method, request.path,
request.body, request.headers)
else:
connection.request(request.method, request.path,
request.body, request.headers)
response = connection.getresponse()
location = response.getheader('location')
# -- gross hack --
# httplib gets confused with chunked responses to HEAD requests
# so I have to fake it out
if request.method == 'HEAD' and getattr(response,
'chunked', False):
response.chunked = 0
if callable(retry_handler):
status = retry_handler(response, i, next_sleep)
if status:
msg, i, next_sleep = status
if msg:
boto.log.debug(msg)
time.sleep(next_sleep)
continue
if response.status == 500 or response.status == 503:
msg = 'Received %d response. ' % response.status
msg += 'Retrying in %3.1f seconds' % next_sleep
boto.log.debug(msg)
body = response.read()
elif response.status < 300 or response.status >= 400 or \
not location:
self.put_http_connection(request.host, self.is_secure,
connection)
return response
else:
scheme, request.host, request.path, \
params, query, fragment = urlparse.urlparse(location)
if query:
request.path += '?' + query
msg = 'Redirecting: %s' % scheme + '://'
msg += request.host + request.path
boto.log.debug(msg)
connection = self.get_http_connection(request.host,
scheme == 'https')
response = None
continue
except PleaseRetryException, e:
boto.log.debug('encountered a retry exception: %s' % e)
connection = self.new_http_connection(request.host,
self.is_secure)
response = e.response
except self.http_exceptions, e:
for unretryable in self.http_unretryable_exceptions:
if isinstance(e, unretryable):
boto.log.debug(
'encountered unretryable %s exception, re-raising' %
e.__class__.__name__)
raise e
boto.log.debug('encountered %s exception, reconnecting' % \
e.__class__.__name__)
connection = self.new_http_connection(request.host,
self.is_secure)
time.sleep(next_sleep)
i += 1
# If we made it here, it's because we have exhausted our retries
        # and still haven't succeeded. So, if we have a response object,
# use it to raise an exception.
# Otherwise, raise the exception that must have already happened.
if response:
raise BotoServerError(response.status, response.reason, body)
elif e:
raise e
else:
msg = 'Please report this exception as a Boto Issue!'
raise BotoClientError(msg)
def build_base_http_request(self, method, path, auth_path,
params=None, headers=None, data='', host=None):
path = self.get_path(path)
if auth_path is not None:
auth_path = self.get_path(auth_path)
        if params is None:
params = {}
else:
params = params.copy()
        if headers is None:
headers = {}
else:
headers = headers.copy()
host = host or self.host
if self.use_proxy:
if not auth_path:
auth_path = path
path = self.prefix_proxy_to_path(path, host)
if self.proxy_user and self.proxy_pass and not self.is_secure:
# If is_secure, we don't have to set the proxy authentication
# header here, we did that in the CONNECT to the proxy.
headers.update(self.get_proxy_auth_header())
return HTTPRequest(method, self.protocol, host, self.port,
path, auth_path, params, headers, data)
def make_request(self, method, path, headers=None, data='', host=None,
auth_path=None, sender=None, override_num_retries=None,
params=None, retry_handler=None):
"""Makes a request to the server, with stock multiple-retry logic."""
if params is None:
params = {}
http_request = self.build_base_http_request(method, path, auth_path,
params, headers, data, host)
return self._mexe(http_request, sender, override_num_retries,
retry_handler=retry_handler)
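    # Minimal usage sketch (endpoint and path are hypothetical; credentials are
    # assumed to come from the environment or the boto config as described in
    # the constructor docstring):
    #   conn = AWSAuthConnection('service.example.com')
    #   response = conn.make_request('GET', '/some/key')
    #   body = response.read()
    # make_request() only assembles the HTTPRequest; the retry/redirect logic
    # lives in _mexe() above.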
def close(self):
"""(Optional) Close any open HTTP connections. This is non-destructive,
and making a new request will open a connection again."""
boto.log.debug('closing all HTTP connections')
self._connection = None # compat field
class AWSQueryConnection(AWSAuthConnection):
APIVersion = ''
ResponseError = BotoServerError
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, host=None, debug=0,
https_connection_factory=None, path='/', security_token=None,
validate_certs=True):
AWSAuthConnection.__init__(self, host, aws_access_key_id,
aws_secret_access_key,
is_secure, port, proxy,
proxy_port, proxy_user, proxy_pass,
debug, https_connection_factory, path,
security_token=security_token,
validate_certs=validate_certs)
def _required_auth_capability(self):
return []
def get_utf8_value(self, value):
return boto.utils.get_utf8_value(value)
def make_request(self, action, params=None, path='/', verb='GET'):
http_request = self.build_base_http_request(verb, path, None,
params, {}, '',
self.server_name())
if action:
http_request.params['Action'] = action
if self.APIVersion:
http_request.params['Version'] = self.APIVersion
return self._mexe(http_request)
def build_list_params(self, params, items, label):
if isinstance(items, basestring):
items = [items]
for i in range(1, len(items) + 1):
params['%s.%d' % (label, i)] = items[i - 1]
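    # Illustrative example (label and values invented): calling
    #   self.build_list_params(params, ['i-111', 'i-222'], 'InstanceId')
    # adds the numbered query parameters
    #   params['InstanceId.1'] = 'i-111'
    #   params['InstanceId.2'] = 'i-222'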
def build_complex_list_params(self, params, items, label, names):
"""Serialize a list of structures.
For example::
items = [('foo', 'bar', 'baz'), ('foo2', 'bar2', 'baz2')]
label = 'ParamName.member'
names = ('One', 'Two', 'Three')
self.build_complex_list_params(params, items, label, names)
would result in the params dict being updated with these params::
ParamName.member.1.One = foo
ParamName.member.1.Two = bar
ParamName.member.1.Three = baz
ParamName.member.2.One = foo2
ParamName.member.2.Two = bar2
ParamName.member.2.Three = baz2
:type params: dict
:param params: The params dict. The complex list params
will be added to this dict.
:type items: list of tuples
:param items: The list to serialize.
:type label: string
:param label: The prefix to apply to the parameter.
:type names: tuple of strings
:param names: The names associated with each tuple element.
"""
for i, item in enumerate(items, 1):
current_prefix = '%s.%s' % (label, i)
for key, value in zip(names, item):
full_key = '%s.%s' % (current_prefix, key)
params[full_key] = value
# generics
def get_list(self, action, params, markers, path='/',
parent=None, verb='GET'):
if not parent:
parent = self
response = self.make_request(action, params, path, verb)
body = response.read()
boto.log.debug(body)
if not body:
boto.log.error('Null body %s' % body)
raise self.ResponseError(response.status, response.reason, body)
elif response.status == 200:
rs = ResultSet(markers)
h = boto.handler.XmlHandler(rs, parent)
xml.sax.parseString(body, h)
return rs
else:
boto.log.error('%s %s' % (response.status, response.reason))
boto.log.error('%s' % body)
raise self.ResponseError(response.status, response.reason, body)
def get_object(self, action, params, cls, path='/',
parent=None, verb='GET'):
if not parent:
parent = self
response = self.make_request(action, params, path, verb)
body = response.read()
boto.log.debug(body)
if not body:
boto.log.error('Null body %s' % body)
raise self.ResponseError(response.status, response.reason, body)
elif response.status == 200:
obj = cls(parent)
h = boto.handler.XmlHandler(obj, parent)
xml.sax.parseString(body, h)
return obj
else:
boto.log.error('%s %s' % (response.status, response.reason))
boto.log.error('%s' % body)
raise self.ResponseError(response.status, response.reason, body)
def get_status(self, action, params, path='/', parent=None, verb='GET'):
if not parent:
parent = self
response = self.make_request(action, params, path, verb)
body = response.read()
boto.log.debug(body)
if not body:
boto.log.error('Null body %s' % body)
raise self.ResponseError(response.status, response.reason, body)
elif response.status == 200:
rs = ResultSet()
h = boto.handler.XmlHandler(rs, parent)
xml.sax.parseString(body, h)
return rs.status
else:
boto.log.error('%s %s' % (response.status, response.reason))
boto.log.error('%s' % body)
raise self.ResponseError(response.status, response.reason, body)
| mit | -80,022,250,089,414,130 | 40.020985 | 109 | 0.57975 | false |
psavery/avogadro | libavogadro/src/python/unittest/camera.py | 9 | 2353 | from PyQt4.Qt import *
from numpy import *
import Avogadro
import sys
import unittest
from util import *
class TestCamera(unittest.TestCase):
def setUp(self):
# create the GLWidget and load the default engines
self.glwidget = Avogadro.GLWidget()
self.glwidget.loadDefaultEngines()
self.molecule = Avogadro.molecules.addMolecule()
self.molecule.addAtom()
self.glwidget.molecule = self.molecule
self.assertNotEqual(self.glwidget.camera, None)
def tearDown(self):
    # nothing to clean up here
None
def test_parent(self):
self.assertNotEqual(self.glwidget.camera.parent, None)
def test_angleOfViewY(self):
self.assert_(self.glwidget.camera.angleOfViewY)
testReadWriteProperty(self, self.glwidget.camera.angleOfViewY, 40.0, 60.0)
def test_modelview(self):
self.glwidget.camera.modelview
m = self.glwidget.camera.modelview
self.glwidget.camera.modelview = m
def test_various(self):
self.glwidget.camera.applyPerspective()
self.glwidget.camera.applyModelview()
self.glwidget.camera.initializeViewPoint()
dist = self.glwidget.camera.distance(array([0., 0., 0.]))
self.glwidget.camera.translate(array([0., 0., 0.]))
self.glwidget.camera.pretranslate(array([0., 0., 0.]))
self.glwidget.camera.rotate(3.14, array([0., 0., 0.]))
self.glwidget.camera.prerotate(3.14, array([0., 0., 0.]))
self.glwidget.camera.normalize()
def test_axes(self):
self.glwidget.camera.transformedXAxis
self.glwidget.camera.transformedYAxis
self.glwidget.camera.transformedZAxis
self.glwidget.camera.backTransformedXAxis
self.glwidget.camera.backTransformedYAxis
self.glwidget.camera.backTransformedZAxis
def test_project(self):
point = QPoint(10,20)
self.assertEqual(len(self.glwidget.camera.unProject(point)), 3)
self.assertEqual(len(self.glwidget.camera.unProject(point, array([1., 0., 0.]))), 3)
    # unProjectWithZ is used to avoid a name conflict with unProject
self.assertEqual(len(self.glwidget.camera.unProjectWithZ(array([1., 2., 0.]))), 3)
self.assertEqual(len(self.glwidget.camera.project(array([1., 2., 3.]))), 3)
if __name__ == "__main__":
# create a new application
# (must be done before creating a GLWidget)
app = QApplication(sys.argv)
unittest.main()
sys.exit(app.exec_())
| gpl-2.0 | -8,479,210,946,714,413,000 | 30.373333 | 88 | 0.706332 | false |
google/grr | grr/server/grr_response_server/flows/general/registry.py | 1 | 6530 | #!/usr/bin/env python
"""Gather information from the registry on windows."""
from grr_response_core import config
from grr_response_core.lib import artifact_utils
from grr_response_core.lib.rdfvalues import file_finder as rdf_file_finder
from grr_response_core.lib.rdfvalues import paths as rdf_paths
from grr_response_core.lib.rdfvalues import structs as rdf_structs
from grr_response_core.lib.util import compatibility
from grr_response_core.path_detection import windows as path_detection_windows
from grr_response_proto import flows_pb2
from grr_response_server import data_store
from grr_response_server import flow_base
from grr_response_server.flows.general import collectors
from grr_response_server.flows.general import file_finder
from grr_response_server.flows.general import transfer
class RegistryFinderCondition(rdf_structs.RDFProtoStruct):
protobuf = flows_pb2.RegistryFinderCondition
rdf_deps = [
rdf_file_finder.FileFinderContentsLiteralMatchCondition,
rdf_file_finder.FileFinderContentsRegexMatchCondition,
rdf_file_finder.FileFinderModificationTimeCondition,
rdf_file_finder.FileFinderSizeCondition,
]
class RegistryFinderArgs(rdf_structs.RDFProtoStruct):
protobuf = flows_pb2.RegistryFinderArgs
rdf_deps = [
rdf_paths.GlobExpression,
RegistryFinderCondition,
]
def _ConditionsToFileFinderConditions(conditions):
"""Converts FileFinderSizeConditions to RegistryFinderConditions."""
ff_condition_type_cls = rdf_file_finder.FileFinderCondition.Type
result = []
for c in conditions:
if c.condition_type == RegistryFinderCondition.Type.MODIFICATION_TIME:
result.append(
rdf_file_finder.FileFinderCondition(
condition_type=ff_condition_type_cls.MODIFICATION_TIME,
modification_time=c.modification_time))
elif c.condition_type == RegistryFinderCondition.Type.VALUE_LITERAL_MATCH:
result.append(
rdf_file_finder.FileFinderCondition(
condition_type=ff_condition_type_cls.CONTENTS_LITERAL_MATCH,
contents_literal_match=c.value_literal_match))
elif c.condition_type == RegistryFinderCondition.Type.VALUE_REGEX_MATCH:
result.append(
rdf_file_finder.FileFinderCondition(
condition_type=ff_condition_type_cls.CONTENTS_REGEX_MATCH,
contents_regex_match=c.value_regex_match))
elif c.condition_type == RegistryFinderCondition.Type.SIZE:
result.append(
rdf_file_finder.FileFinderCondition(
condition_type=ff_condition_type_cls.SIZE, size=c.size))
else:
raise ValueError("Unknown condition type: %s" % c.condition_type)
return result
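# Sketch of the mapping above: a RegistryFinderCondition of type
# VALUE_LITERAL_MATCH, for example, becomes a FileFinderCondition with
# condition_type CONTENTS_LITERAL_MATCH carrying the same literal match
# payload, so the registry search can be delegated to the FileFinder flows.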
class RegistryFinder(flow_base.FlowBase):
"""This flow looks for registry items matching given criteria."""
friendly_name = "Registry Finder"
category = "/Registry/"
args_type = RegistryFinderArgs
behaviours = flow_base.BEHAVIOUR_BASIC
@classmethod
def GetDefaultArgs(cls, username=None):
del username
return cls.args_type(keys_paths=[
"HKEY_USERS/%%users.sid%%/Software/"
"Microsoft/Windows/CurrentVersion/Run/*"
])
def Start(self):
self.CallFlow(
compatibility.GetName(file_finder.FileFinder),
paths=self.args.keys_paths,
pathtype=rdf_paths.PathSpec.PathType.REGISTRY,
conditions=_ConditionsToFileFinderConditions(self.args.conditions),
action=rdf_file_finder.FileFinderAction.Stat(),
next_state=compatibility.GetName(self.Done))
def Done(self, responses):
if not responses.success:
raise flow_base.FlowError("Registry search failed %s" % responses.status)
for response in responses:
self.SendReply(response)
class ClientRegistryFinder(flow_base.FlowBase):
"""This flow looks for registry items matching given criteria."""
friendly_name = "Client Side Registry Finder"
category = "/Registry/"
args_type = RegistryFinderArgs
behaviours = flow_base.BEHAVIOUR_DEBUG
@classmethod
def GetDefaultArgs(cls, username=None):
del username
return cls.args_type(
keys_paths=["HKEY_LOCAL_MACHINE/SOFTWARE/Microsoft/Windows NT/*"])
def Start(self):
self.CallFlow(
compatibility.GetName(file_finder.ClientFileFinder),
paths=self.args.keys_paths,
pathtype=rdf_paths.PathSpec.PathType.REGISTRY,
conditions=_ConditionsToFileFinderConditions(self.args.conditions),
action=rdf_file_finder.FileFinderAction.Stat(),
next_state=compatibility.GetName(self.Done))
def Done(self, responses):
if not responses.success:
raise flow_base.FlowError("Registry search failed %s" % responses.status)
for response in responses:
self.SendReply(response)
class CollectRunKeyBinaries(flow_base.FlowBase):
"""Collect the binaries used by Run and RunOnce keys on the system.
We use the RunKeys artifact to get RunKey command strings for all users and
System. This flow guesses file paths from the strings, expands any
  Windows system environment variables, and attempts to retrieve the files.
"""
category = "/Registry/"
behaviours = flow_base.BEHAVIOUR_BASIC
def Start(self):
"""Get runkeys via the ArtifactCollectorFlow."""
self.CallFlow(
collectors.ArtifactCollectorFlow.__name__,
artifact_list=["WindowsRunKeys"],
use_raw_filesystem_access=True,
next_state=compatibility.GetName(self.ParseRunKeys))
def ParseRunKeys(self, responses):
"""Get filenames from the RunKeys and download the files."""
filenames = []
client = data_store.REL_DB.ReadClientSnapshot(self.client_id)
kb = client.knowledge_base
for response in responses:
runkey = response.registry_data.string
environ_vars = artifact_utils.GetWindowsEnvironmentVariablesMap(kb)
path_guesses = path_detection_windows.DetectExecutablePaths([runkey],
environ_vars)
if not path_guesses:
self.Log("Couldn't guess path for %s", runkey)
for path in path_guesses:
filenames.append(
rdf_paths.PathSpec(
path=path,
pathtype=config.CONFIG["Server.raw_filesystem_access_pathtype"])
)
if filenames:
self.CallFlow(
transfer.MultiGetFile.__name__,
pathspecs=filenames,
next_state=compatibility.GetName(self.Done))
def Done(self, responses):
for response in responses:
self.SendReply(response)
| apache-2.0 | -2,931,743,398,361,208,300 | 35.277778 | 80 | 0.713323 | false |
Galexrt/zulip | zerver/views/pointer.py | 5 | 1267 |
from django.http import HttpRequest, HttpResponse
from django.utils.translation import ugettext as _
from typing import Text
from zerver.decorator import to_non_negative_int
from zerver.lib.actions import do_update_pointer
from zerver.lib.request import has_request_variables, JsonableError, REQ
from zerver.lib.response import json_success
from zerver.models import UserProfile, UserMessage
def get_pointer_backend(request: HttpRequest, user_profile: UserProfile) -> HttpResponse:
return json_success({'pointer': user_profile.pointer})
@has_request_variables
def update_pointer_backend(request, user_profile,
pointer=REQ(converter=to_non_negative_int)):
# type: (HttpRequest, UserProfile, int) -> HttpResponse
if pointer <= user_profile.pointer:
return json_success()
try:
UserMessage.objects.get(
user_profile=user_profile,
message__id=pointer
)
except UserMessage.DoesNotExist:
raise JsonableError(_("Invalid message ID"))
request._log_data["extra"] = "[%s]" % (pointer,)
update_flags = (request.client.name.lower() in ['android', "zulipandroid"])
do_update_pointer(user_profile, pointer, update_flags=update_flags)
return json_success()
| apache-2.0 | -2,045,797,681,912,640,000 | 36.264706 | 89 | 0.711918 | false |
arrabito/DIRAC | Core/Utilities/DErrno.py | 1 | 11716 | """ :mod: DErrno
==========================
.. module: DErrno
:synopsis: Error list and utilities for handling errors in DIRAC
This module contains list of errors that can be encountered in DIRAC.
It complements the errno module of python.
It also contains utilities to manipulate these errors.
Finally, it contains a DErrno class that contains an error number
as well as a low level error message. It behaves like a string for
compatibility reasons
In order to add extension specific error, you need to create in your extension the file
Core/Utilities/DErrno.py, which will contain the following dictionary:
* extra_dErrName: keys are the error name, values the number of it
* extra_dErrorCode: same as dErrorCode. keys are the error code, values the name
(we don't simply revert the previous dict in case we do not
have a one to one mapping)
* extra_dStrError: same as dStrError, Keys are the error code, values the error description
* extra_compatErrorString: same as compatErrorString. The compatible error strings are
added to the existing one, and not replacing them.
Example of extension file :
* extra_dErrName = { 'ELHCBSPE' : 3001 }
* extra_dErrorCode = { 3001 : 'ELHCBSPE'}
* extra_dStrError = { 3001 : "This is a description text of the specific LHCb error" }
* extra_compatErrorString = { 3001 : ["living easy, living free"],
DErrno.ERRX : ['An error message for ERRX that is specific to LHCb']}
"""
import os
import imp
import sys
# pylint: disable=bad-continuation
# To avoid conflict, the error numbers should be greater than 1000
# We decided to group the by range of 100 per system
# 1000: Generic
# 1100: Core
# 1200: Framework
# 1300: Interfaces
# 1400: Config
# 1500: WMS / Workflow
# 1600: DMS/StorageManagement
# 1700: RMS
# 1800: Accounting
# 1900: TS
# 2000: Resources and RSS
# ## Generic (10XX)
# Python related: 0X
ETYPE = 1000
EIMPERR = 1001
ENOMETH = 1002
ECONF = 1003
EVALUE = 1004
EEEXCEPTION = 1005
# Files manipulation: 1X
ECTMPF = 1010
EOF = 1011
ERF = 1012
EWF = 1013
ESPF = 1014
# ## Core (11XX)
# Certificates and Proxy: 0X
EX509 = 1100
EPROXYFIND = 1101
EPROXYREAD = 1102
ECERTFIND = 1103
ECERTREAD = 1104
ENOCERT = 1105
ENOCHAIN = 1106
ENOPKEY = 1107
ENOGROUP = 1108
# DISET: 1X
EDISET = 1110
ENOAUTH = 1111
# 3rd party security: 2X
E3RDPARTY = 1120
EVOMS = 1121
# Databases : 3X
EDB = 1130
EMYSQL = 1131
ESQLA = 1132
# Message Queues: 4X
EMQUKN = 1140
EMQNOM = 1141
EMQCONN = 1142
# Elasticsearch
EELNOFOUND = 1146
# Config
ESECTION = 1400
# Processes
EEZOMBIE = 1147
EENOPID = 1148
# ## WMS/Workflow
EWMSUKN = 1500
EWMSJDL = 1501
EWMSRESC = 1502
EWMSSUBM = 1503
# ## DMS/StorageManagement (16XX)
EFILESIZE = 1601
EGFAL = 1602
EBADCKS = 1603
EFCERR = 1604
# ## RMS (17XX)
ERMSUKN = 1700
# ## TS (19XX)
ETSUKN = 1900
ETSDATA = 1901
# ## Resources and RSS (20XX)
ERESGEN = 2000
ERESUNA = 2001
ERESUNK = 2002
# This translates the integer number into the name of the variable
dErrorCode = {
# ## Generic (10XX)
# 100X: Python related
1000 : 'ETYPE',
1001 : 'EIMPERR',
1002 : 'ENOMETH',
1003 : 'ECONF',
1004 : 'EVALUE',
1005 : 'EEEXCEPTION',
# 101X: Files manipulation
1010 : 'ECTMPF',
1011 : 'EOF',
1012 : 'ERF',
1013 : 'EWF',
1014 : 'ESPF',
# ## Core
# 110X: Certificates and Proxy
1100 : 'EX509',
1101 : 'EPROXYFIND',
1102 : 'EPROXYREAD',
1103 : 'ECERTFIND',
1104 : 'ECERTREAD',
1105 : 'ENOCERT',
1106 : 'ENOCHAIN',
1107 : 'ENOPKEY',
1108 : 'ENOGROUP',
# 111X: DISET
1110 : 'EDISET',
1111 : 'ENOAUTH',
# 112X: 3rd party security
1120 : 'E3RDPARTY',
1121 : 'EVOMS',
# 113X: Databases
1130 : 'EDB',
1131 : 'EMYSQL',
1132 : 'ESQLA',
# 114X: Message Queues
1140 : 'EMQUKN',
1141 : 'EMQNOM',
1142 : 'EMQCONN',
# Elasticsearch
1146 : 'EELNOFOUND',
# Config
1400 : "ESECTION",
  # Processes
1147 : 'EEZOMBIE',
1148 : 'EENOPID',
# WMS/Workflow
1500 : 'EWMSUKN',
1501 : 'EWMSJDL',
1502 : 'EWMSRESC',
1503: 'EWMSSUBM',
# DMS/StorageManagement
1601 : 'EFILESIZE',
1602 : 'EGFAL',
1603 : 'EBADCKS',
1604 : "EFCERR",
# RMS
1700 : 'ERMSUKN',
# Resources and RSS
2000 : 'ERESGEN',
2001 : 'ERESUNA',
2002 : 'ERESUNK',
# TS
1900 : "ETSUKN",
1901 : "ETSDATA"}
dStrError = {
# ## Generic (10XX)
# 100X: Python related
ETYPE : "Object Type Error",
EIMPERR : "Failed to import library",
ENOMETH : "No such method or function",
ECONF : "Configuration error",
EVALUE: "Wrong value passed",
EEEXCEPTION: "runtime general exception",
# 101X: Files manipulation
ECTMPF : "Failed to create temporary file",
EOF : "Cannot open file",
ERF : "Cannot read from file",
EWF : "Cannot write to file",
ESPF : "Cannot set permissions to file",
# ## Core
# 110X: Certificates and Proxy
EX509 : "Generic Error with X509",
EPROXYFIND : "Can't find proxy",
EPROXYREAD : "Can't read proxy",
ECERTFIND : "Can't find certificate",
ECERTREAD : "Can't read certificate",
ENOCERT : "No certificate loaded",
ENOCHAIN : "No chain loaded",
ENOPKEY : "No private key loaded",
ENOGROUP: "No DIRAC group",
# 111X: DISET
EDISET : "DISET Error",
ENOAUTH : "Unauthorized query",
# 112X: 3rd party security
E3RDPARTY: "3rd party security service error",
EVOMS : "VOMS Error",
# 113X: Databases
EDB : "Database Error",
EMYSQL : "MySQL Error",
ESQLA : "SQLAlchemy Error",
# 114X: Message Queues
EMQUKN : "Unknown MQ Error",
EMQNOM : "No messages",
EMQCONN : "MQ connection failure",
# 114X Elasticsearch
EELNOFOUND: "Index not found",
# Config
ESECTION : "Section is not found",
  # Processes
EEZOMBIE: "Zombie process",
EENOPID: "No PID of process",
# WMS/Workflow
EWMSUKN : "Unknown WMS error",
EWMSJDL : "Invalid job description",
EWMSRESC: "Job to reschedule",
EWMSSUBM: "Job submission error",
# DMS/StorageManagement
EFILESIZE : "Bad file size",
EGFAL : "Error with the gfal call",
EBADCKS : "Bad checksum",
EFCERR : "FileCatalog error",
# RMS
ERMSUKN : "Unknown RMS error",
# Resources and RSS
ERESGEN: "Unknown Resource Failure",
ERESUNA: "Resource not available",
ERESUNK: "Unknown Resource",
# TS
ETSUKN : "Unknown Transformation System Error",
ETSDATA : "Invalid Input Data definition"}
def strerror(code):
""" This method wraps up os.strerror, and behave the same way.
It completes it with the DIRAC specific errors.
"""
  if code == 0:
return "Undefined error"
errMsg = "Unknown error %s" % code
try:
errMsg = dStrError[code]
except KeyError:
# It is not a DIRAC specific error, try the os one
try:
errMsg = os.strerror( code )
# On some system, os.strerror raises an exception with unknown code,
# on others, it returns a message...
except ValueError:
pass
return errMsg
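# For illustration: strerror(EPROXYREAD) returns "Can't read proxy"; a code
# unknown to both the DIRAC table and os.strerror() falls through to the
# generic "Unknown error <code>" message.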
def cmpError( inErr, candidate ):
""" This function compares an error (in its old form (a string or dictionary) or in its int form
with a candidate error code.
:param inErr: a string, an integer, a S_ERROR dictionary
:type inErr: str or int or S_ERROR
:param int candidate: error code to compare with
:return: True or False
If an S_ERROR instance is passed, we compare the code with S_ERROR['Errno']
If it is a Integer, we do a direct comparison
If it is a String, we use strerror to check the error string
"""
if isinstance(inErr, basestring): # old style
# Compare error message strings
errMsg = strerror(candidate)
return errMsg in inErr
elif isinstance(inErr, dict): # if the S_ERROR structure is given
# Check if Errno defined in the dict
errorNumber = inErr.get('Errno')
if errorNumber:
return errorNumber == candidate
errMsg = strerror(candidate)
return errMsg in inErr.get('Message', '')
elif isinstance(inErr, int):
return inErr == candidate
else:
raise TypeError("Unknown input error type %s" % type(inErr))
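# Illustrative usage (the dictionary stands in for DIRAC's usual S_ERROR
# structure):
#   cmpError({'Errno': ENOAUTH, 'Message': 'rejected'}, ENOAUTH)  # True
#   cmpError("Unauthorized query", ENOAUTH)                       # True
#   cmpError(1111, ENOAUTH)                                       # True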
def includeExtensionErrors():
""" Merge all the errors of all the extensions into the errors of these modules
Should be called only at the initialization of DIRAC, so by the parseCommandLine,
dirac-agent.py, dirac-service.py, dirac-executor.py
"""
def __recurseImport( modName, parentModule = None, fullName = False ):
""" Internal function to load modules
"""
if isinstance( modName, basestring ):
modName = modName.split( "." )
if not fullName:
fullName = ".".join( modName )
try:
if parentModule:
impData = imp.find_module( modName[0], parentModule.__path__ )
else:
impData = imp.find_module( modName[0] )
impModule = imp.load_module( modName[0], *impData )
if impData[0]:
impData[0].close()
except ImportError:
return None
if len( modName ) == 1:
return impModule
return __recurseImport( modName[1:], impModule, fullName = fullName )
from DIRAC.ConfigurationSystem.Client.Helpers import CSGlobals
allExtensions = CSGlobals.getCSExtensions()
for extension in allExtensions:
ext_derrno = None
try:
ext_derrno = __recurseImport( '%sDIRAC.Core.Utilities.DErrno' % extension )
if ext_derrno:
# The next 3 dictionary MUST be present for consistency
# Global name of errors
sys.modules[__name__].__dict__.update( ext_derrno.extra_dErrName )
# Dictionary with the error codes
sys.modules[__name__].dErrorCode.update( ext_derrno.extra_dErrorCode )
# Error description string
sys.modules[__name__].dStrError.update( ext_derrno.extra_dStrError )
# extra_compatErrorString is optional
for err in getattr( ext_derrno, 'extra_compatErrorString', [] ) :
sys.modules[__name__].compatErrorString.setdefault( err, [] ).extend( ext_derrno.extra_compatErrorString[err] )
except:
pass
| gpl-3.0 | -747,050,061,043,167,000 | 29.195876 | 121 | 0.567002 | false |
JaviMerino/trappy | trappy/thermal.py | 2 | 9812 | # Copyright 2015-2016 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Process the output of the power allocator trace in the current
directory's trace.dat"""
from collections import OrderedDict
import pandas as pd
import re
from trappy.base import Base
from trappy.dynamic import register_ftrace_parser
class Thermal(Base):
"""Process the thermal framework data in a FTrace dump"""
unique_word = "thermal_temperature:"
"""The unique word that will be matched in a trace line"""
name = "thermal"
"""The name of the :mod:`pandas.DataFrame` member that will be created in a
:mod:`trappy.ftrace.FTrace` object"""
pivot = "id"
"""The Pivot along which the data is orthogonal"""
def plot_temperature(self, control_temperature=None, title="", width=None,
height=None, ylim="range", ax=None, legend_label=""):
"""Plot the temperature.
:param ax: Axis instance
:type ax: :mod:`matplotlib.Axis`
:param legend_label: Label for the legend
:type legend_label: str
:param title: The title of the plot
:type title: str
:param control_temperature: If control_temp is a
:mod:`pd.Series` representing the (possible)
variation of :code:`control_temp` during the
run, draw it using a dashed yellow line.
Otherwise, only the temperature is plotted.
:type control_temperature: :mod:`pandas.Series`
:param width: The width of the plot
:type width: int
:param height: The height of the plot
        :type height: int
"""
from matplotlib import pyplot as plt
from trappy.plot_utils import normalize_title, pre_plot_setup, post_plot_setup
title = normalize_title("Temperature", title)
if len(self.data_frame) == 0:
raise ValueError("Empty DataFrame")
setup_plot = False
if not ax:
ax = pre_plot_setup(width, height)
setup_plot = True
temp_label = normalize_title("Temperature", legend_label)
(self.data_frame["temp"] / 1000).plot(ax=ax, label=temp_label)
if control_temperature is not None:
ct_label = normalize_title("Control", legend_label)
control_temperature.plot(ax=ax, color="y", linestyle="--",
label=ct_label)
if setup_plot:
post_plot_setup(ax, title=title, ylim=ylim)
plt.legend()
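    # Hypothetical usage sketch (the trace path is invented; trappy exposes the
    # parsed events as ftrace.thermal):
    #   ftrace = trappy.FTrace("/path/to/trace_dir")
    #   ftrace.thermal.plot_temperature(title="board temperature")
    # Passing an existing matplotlib axis via ax= lets several runs share one
    # plot.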
def plot_temperature_hist(self, ax, title):
"""Plot a temperature histogram
:param ax: Axis instance
:type ax: :mod:`matplotlib.Axis`
:param title: The title of the plot
:type title: str
"""
from trappy.plot_utils import normalize_title, plot_hist
temps = self.data_frame["temp"] / 1000
title = normalize_title("Temperature", title)
xlim = (0, temps.max())
plot_hist(temps, ax, title, "C", 30, "Temperature", xlim, "default")
register_ftrace_parser(Thermal, "thermal")
class ThermalGovernor(Base):
"""Process the power allocator data in a ftrace dump"""
unique_word = "thermal_power_allocator:"
"""The unique word that will be matched in a trace line"""
name = "thermal_governor"
"""The name of the :mod:`pandas.DataFrame` member that will be created in a
:mod:`trappy.ftrace.FTrace` object"""
pivot = "thermal_zone_id"
"""The Pivot along which the data is orthogonal"""
def plot_temperature(self, title="", width=None, height=None, ylim="range",
ax=None, legend_label=""):
"""Plot the temperature"""
from matplotlib import pyplot as plt
from trappy.plot_utils import normalize_title, pre_plot_setup, post_plot_setup
dfr = self.data_frame
curr_temp = dfr["current_temperature"]
control_temp_series = (curr_temp + dfr["delta_temperature"]) / 1000
title = normalize_title("Temperature", title)
setup_plot = False
if not ax:
ax = pre_plot_setup(width, height)
setup_plot = True
temp_label = normalize_title("Temperature", legend_label)
(curr_temp / 1000).plot(ax=ax, label=temp_label)
control_temp_series.plot(ax=ax, color="y", linestyle="--",
label="control temperature")
if setup_plot:
post_plot_setup(ax, title=title, ylim=ylim)
plt.legend()
def plot_input_power(self, actor_order, title="", width=None, height=None,
ax=None):
"""Plot input power
:param ax: Axis instance
:type ax: :mod:`matplotlib.Axis`
:param title: The title of the plot
:type title: str
:param width: The width of the plot
:type width: int
:param height: The height of the plot
        :type height: int
:param actor_order: An array showing the order in which the actors
were registered. The array values are the labels that
will be used in the input and output power plots.
For Example:
::
["GPU", "A15", "A7"]
:type actor_order: list
"""
from trappy.plot_utils import normalize_title, pre_plot_setup, post_plot_setup
dfr = self.data_frame
in_cols = [s for s in dfr.columns if re.match("req_power[0-9]+", s)]
plot_dfr = dfr[in_cols]
# Rename the columns from "req_power0" to "A15" or whatever is
# in actor_order. Note that we can do it just with an
# assignment because the columns are already sorted (i.e.:
# req_power0, req_power1...)
plot_dfr.columns = actor_order
title = normalize_title("Input Power", title)
if not ax:
ax = pre_plot_setup(width, height)
plot_dfr.plot(ax=ax)
post_plot_setup(ax, title=title)
def plot_weighted_input_power(self, actor_weights, title="", width=None,
height=None, ax=None):
"""Plot weighted input power
:param actor_weights: An array of tuples. First element of the
tuple is the name of the actor, the second is the weight. The
array is in the same order as the :code:`req_power` appear in the
trace.
:type actor_weights: list
:param ax: Axis instance
:type ax: :mod:`matplotlib.Axis`
:param title: The title of the plot
:type title: str
:param width: The width of the plot
:type width: int
:param height: The height of the plot
        :type height: int
"""
from trappy.plot_utils import normalize_title, pre_plot_setup, post_plot_setup
dfr = self.data_frame
in_cols = [s for s in dfr.columns if re.match(r"req_power\d+", s)]
plot_dfr_dict = OrderedDict()
for in_col, (name, weight) in zip(in_cols, actor_weights):
plot_dfr_dict[name] = dfr[in_col] * weight / 1024
plot_dfr = pd.DataFrame(plot_dfr_dict)
title = normalize_title("Weighted Input Power", title)
if not ax:
ax = pre_plot_setup(width, height)
plot_dfr.plot(ax=ax)
post_plot_setup(ax, title=title)
def plot_output_power(self, actor_order, title="", width=None, height=None,
ax=None):
"""Plot output power
:param ax: Axis instance
:type ax: :mod:`matplotlib.Axis`
:param title: The title of the plot
:type title: str
:param width: The width of the plot
:type width: int
:param height: The height of the plot
        :type height: int
:param actor_order: An array showing the order in which the actors
were registered. The array values are the labels that
will be used in the input and output power plots.
For Example:
::
["GPU", "A15", "A7"]
:type actor_order: list
"""
from trappy.plot_utils import normalize_title, pre_plot_setup, post_plot_setup
out_cols = [s for s in self.data_frame.columns
if re.match("granted_power[0-9]+", s)]
# See the note in plot_input_power()
plot_dfr = self.data_frame[out_cols]
plot_dfr.columns = actor_order
title = normalize_title("Output Power", title)
if not ax:
ax = pre_plot_setup(width, height)
plot_dfr.plot(ax=ax)
post_plot_setup(ax, title=title)
def plot_inout_power(self, title=""):
"""Make multiple plots showing input and output power for each actor
:param title: The title of the plot
:type title: str
"""
from trappy.plot_utils import normalize_title
dfr = self.data_frame
actors = []
for col in dfr.columns:
match = re.match("P(.*)_in", col)
if match and col != "Ptot_in":
actors.append(match.group(1))
for actor in actors:
cols = ["P" + actor + "_in", "P" + actor + "_out"]
this_title = normalize_title(actor, title)
dfr[cols].plot(title=this_title)
register_ftrace_parser(ThermalGovernor, "thermal")
| apache-2.0 | -2,251,892,719,779,163,100 | 31.926174 | 86 | 0.596616 | false |
Godiyos/python-for-android | python-modules/twisted/twisted/internet/pollreactor.py | 56 | 6856 | # Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
A poll() based implementation of the twisted main loop.
To install the event loop (and you should do this before any connections,
listeners or connectors are added)::
from twisted.internet import pollreactor
pollreactor.install()
"""
# System imports
import errno, sys
from select import error as SelectError, poll
from select import POLLIN, POLLOUT, POLLHUP, POLLERR, POLLNVAL
from zope.interface import implements
# Twisted imports
from twisted.python import log
from twisted.internet import main, posixbase, error
from twisted.internet.interfaces import IReactorFDSet
POLL_DISCONNECTED = (POLLHUP | POLLERR | POLLNVAL)
class PollReactor(posixbase.PosixReactorBase):
"""
A reactor that uses poll(2).
@ivar _poller: A L{poll} which will be used to check for I/O
readiness.
@ivar _selectables: A dictionary mapping integer file descriptors to
instances of L{FileDescriptor} which have been registered with the
reactor. All L{FileDescriptors} which are currently receiving read or
write readiness notifications will be present as values in this
dictionary.
@ivar _reads: A dictionary mapping integer file descriptors to arbitrary
values (this is essentially a set). Keys in this dictionary will be
registered with C{_poller} for read readiness notifications which will
be dispatched to the corresponding L{FileDescriptor} instances in
C{_selectables}.
@ivar _writes: A dictionary mapping integer file descriptors to arbitrary
values (this is essentially a set). Keys in this dictionary will be
registered with C{_poller} for write readiness notifications which will
be dispatched to the corresponding L{FileDescriptor} instances in
C{_selectables}.
"""
implements(IReactorFDSet)
def __init__(self):
"""
Initialize polling object, file descriptor tracking dictionaries, and
the base class.
"""
self._poller = poll()
self._selectables = {}
self._reads = {}
self._writes = {}
posixbase.PosixReactorBase.__init__(self)
def _updateRegistration(self, fd):
"""Register/unregister an fd with the poller."""
try:
self._poller.unregister(fd)
except KeyError:
pass
mask = 0
if fd in self._reads:
mask = mask | POLLIN
if fd in self._writes:
mask = mask | POLLOUT
if mask != 0:
self._poller.register(fd, mask)
else:
if fd in self._selectables:
del self._selectables[fd]
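    # Net effect of the registration above: a descriptor present in both
    # self._reads and self._writes is registered with POLLIN | POLLOUT, one
    # present in only one of them gets the corresponding single flag, and one
    # present in neither is unregistered and dropped from self._selectables.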
def _dictRemove(self, selectable, mdict):
try:
# the easy way
fd = selectable.fileno()
# make sure the fd is actually real. In some situations we can get
# -1 here.
mdict[fd]
except:
# the hard way: necessary because fileno() may disappear at any
# moment, thanks to python's underlying sockets impl
for fd, fdes in self._selectables.items():
if selectable is fdes:
break
else:
# Hmm, maybe not the right course of action? This method can't
# fail, because it happens inside error detection...
return
if fd in mdict:
del mdict[fd]
self._updateRegistration(fd)
def addReader(self, reader):
"""Add a FileDescriptor for notification of data available to read.
"""
fd = reader.fileno()
if fd not in self._reads:
self._selectables[fd] = reader
self._reads[fd] = 1
self._updateRegistration(fd)
def addWriter(self, writer):
"""Add a FileDescriptor for notification of data available to write.
"""
fd = writer.fileno()
if fd not in self._writes:
self._selectables[fd] = writer
self._writes[fd] = 1
self._updateRegistration(fd)
def removeReader(self, reader):
"""Remove a Selectable for notification of data available to read.
"""
return self._dictRemove(reader, self._reads)
def removeWriter(self, writer):
"""Remove a Selectable for notification of data available to write.
"""
return self._dictRemove(writer, self._writes)
def removeAll(self):
"""
Remove all selectables, and return a list of them.
"""
return self._removeAll(
[self._selectables[fd] for fd in self._reads],
[self._selectables[fd] for fd in self._writes])
def doPoll(self, timeout):
"""Poll the poller for new events."""
if timeout is not None:
timeout = int(timeout * 1000) # convert seconds to milliseconds
try:
l = self._poller.poll(timeout)
except SelectError, e:
if e[0] == errno.EINTR:
return
else:
raise
_drdw = self._doReadOrWrite
for fd, event in l:
try:
selectable = self._selectables[fd]
except KeyError:
# Handles the infrequent case where one selectable's
# handler disconnects another.
continue
log.callWithLogger(selectable, _drdw, selectable, fd, event)
doIteration = doPoll
def _doReadOrWrite(self, selectable, fd, event):
why = None
inRead = False
if event & POLL_DISCONNECTED and not (event & POLLIN):
if fd in self._reads:
why = main.CONNECTION_DONE
inRead = True
else:
why = main.CONNECTION_LOST
else:
try:
if event & POLLIN:
why = selectable.doRead()
inRead = True
if not why and event & POLLOUT:
why = selectable.doWrite()
inRead = False
if not selectable.fileno() == fd:
why = error.ConnectionFdescWentAway('Filedescriptor went away')
inRead = False
except:
log.deferr()
why = sys.exc_info()[1]
if why:
self._disconnectSelectable(selectable, why, inRead)
def getReaders(self):
return [self._selectables[fd] for fd in self._reads]
def getWriters(self):
return [self._selectables[fd] for fd in self._writes]
def install():
"""Install the poll() reactor."""
p = PollReactor()
from twisted.internet.main import installReactor
installReactor(p)
__all__ = ["PollReactor", "install"]
| apache-2.0 | 7,158,869,929,387,656,000 | 31.339623 | 83 | 0.587515 | false |
shengqh/ngsperl | lib/Visualization/plotGene.py | 1 | 7060 | import os,sys,inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
cqsdir = os.path.abspath(os.path.dirname(currentdir) + "/CQS")
sys.path.insert(0,cqsdir)
import logging
import argparse
import string
import subprocess
from LocusItem import LocusItem, readBedFile
from FileListUtils import readUniqueHashMap
def main():
DEBUG = False
NOT_DEBUG = not DEBUG
parser = argparse.ArgumentParser(description="Draw bam plot based on peak list.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-i', '--input', action='store', nargs='?', required=NOT_DEBUG, help="Input bed file")
parser.add_argument('-b', '--bamListFile', action='store', nargs='?', required=NOT_DEBUG, help="Sample bam file list")
parser.add_argument('-s', '--sizeFactorFile', action='store', nargs='?', required=NOT_DEBUG, help="Sample chromosome size factor file")
parser.add_argument('-e', '--extend_bases', action='store', type=int, default=0, nargs='?', help="Extending X bases before and after coordinates")
parser.add_argument('-g', '--plot_gene', action='store_true', help="Plot hg38 gene track")
parser.add_argument('-o', '--output', action='store', nargs='?', required=NOT_DEBUG, help="Output folder")
args = parser.parse_args()
if(DEBUG):
# args.input = "/scratch/cqs/shengq2/vickers/20190504_smallRNA_as_chipseq_GCF_000005845.2_ASM584v2/plotPeak/result/20190504_smallRNA_as_chipseq__fileList1.list"
# args.groupsFile = "/scratch/cqs/shengq2/vickers/20190504_smallRNA_as_chipseq_GCF_000005845.2_ASM584v2/plotPeak/result/20190504_smallRNA_as_chipseq__fileList2.list"
# args.bamListFile = "/scratch/cqs/shengq2/vickers/20190504_smallRNA_as_chipseq_GCF_000005845.2_ASM584v2/plotPeak/result/20190504_smallRNA_as_chipseq__fileList3.list"
# args.output = "/scratch/cqs/shengq2/vickers/20190504_smallRNA_as_chipseq_GCF_000005845.2_ASM584v2/plotPeak/result/Control.pdf"
args.input = "/scratch/cqs/shengq2/macrae_linton/20190517_linton_exomeseq_3321_human/annotation_genes_locus/result/linton_exomeseq_3321.bed"
args.bamListFile = "/scratch/cqs/shengq2/macrae_linton/20190517_linton_exomeseq_3321_human/GATK4_CNV_Germline_8_PlotGeneCNV/result/linton_exomeseq_3321__fileList3.list"
args.output = "/scratch/cqs/shengq2/macrae_linton/20190517_linton_exomeseq_3321_human/GATK4_CNV_Germline_8_PlotGeneCNV/result/linton_exomeseq_3321.position.txt"
args.sizeFactorFile = "/scratch/cqs/shengq2/macrae_linton/20190517_linton_exomeseq_3321_human/background/linton_exomeseq_3321.excluded.bed.sizefactor"
logger = logging.getLogger('plotGene')
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)-8s - %(message)s')
print(args)
bamMap = readUniqueHashMap(args.bamListFile)
sampleNames = sorted(bamMap.keys())
sampleFiles = [bamMap[sampleName] for sampleName in sampleNames]
outputFolder = os.path.dirname(args.output)
bedFile = args.input
logger.info("processing " + bedFile + "...")
bedResultFile = args.output
bamListFile = bedResultFile + ".bam.list"
with open(bamListFile, "w") as flist:
for sampleFile in sampleFiles:
flist.write(sampleFile + "\n")
chrMap = {}
with open(args.sizeFactorFile, "rt") as fin:
for line in fin:
parts = line.rstrip().split('\t')
chrom = parts[0]
chromKey = chrom.replace("chr","")
chrMap[chromKey] = chrom
chrMap[chrom] = chrom
#print(chrMap)
bedResultTmpFile = bedResultFile + ".tmp"
with open(bedResultTmpFile, "wt") as fout:
fout.write("File\tFeature\tLocus\tPosition\tPositionCount\tMaxCount\tPercentage\n")
posData = []
locusList = readBedFile(bedFile)
for locus in locusList:
locus.Chromosome = chrMap[locus.Chromosome]
locusName = locus.getName()
locusString = locus.getLocusString(args.extend_bases)
logger.info(" processing " + locus.getLocusString() + " ...")
locusData = []
locusData.append([]) #add position from depth
for sampleName in sampleNames:
locusData.append([])
posData.append([locus, locusData])
proc = subprocess.Popen(["samtools", "depth", "-f", bamListFile, "-r", locusString, "-d", "0"], stdout=subprocess.PIPE)
for pline in proc.stdout:
pparts = pline.rstrip().decode("utf-8").split("\t")
position = int(pparts[1])
locusData[0].append(position)
for idx in range(len(sampleNames)):
locusData[idx+1].append(int(pparts[idx+2]))
positions = locusData[0]
for idx in range(len(sampleNames)):
sampleCount = locusData[idx+1]
if len(sampleCount) == 0:
maxCount = 0
else:
maxCount = max(sampleCount)
if maxCount == 0:
fout.write("%s\t%s\t%s\t%d\t%d\t%d\t%lf\n" % (sampleNames[idx], locusName, locusString, locus.Start, 0, 0, 0))
continue
lastZero = True
lastPosition = positions[0] - 1
for cIdx in range(len(positions)):
curPosition = positions[cIdx]
if curPosition != lastPosition + 1:
if not lastZero:
fout.write("%s\t%s\t%s\t%d\t%d\t%d\t%lf\n" % (sampleNames[idx], locusName, locusString, lastPosition + 1, 0, maxCount, 0))
lastZero = True
if sampleCount[cIdx] != 0:
if lastZero:
fout.write("%s\t%s\t%s\t%d\t%d\t%d\t%lf\n" % (sampleNames[idx], locusName, locusString, positions[cIdx] - 1, 0, maxCount, 0))
fout.write("%s\t%s\t%s\t%d\t%d\t%d\t%lf\n" % (sampleNames[idx], locusName, locusString, positions[cIdx], sampleCount[cIdx], maxCount, sampleCount[cIdx] * 1.0 / maxCount))
lastZero = False
else:
if not lastZero:
fout.write("%s\t%s\t%s\t%d\t%d\t%d\t%lf\n" % (sampleNames[idx], locusName, locusString, positions[cIdx], 0, maxCount, 0))
lastZero = True
lastPosition = curPosition
fout.write("%s\t%s\t%s\t%d\t%d\t%d\t%lf\n" % (sampleNames[idx], locusName, locusString, positions[len(positions)-1] + 1, 0, maxCount, 0))
if os.path.exists(bedResultFile):
os.remove(bedResultFile)
os.remove(bamListFile)
os.rename(bedResultTmpFile, bedResultFile)
realpath = os.path.dirname(os.path.realpath(__file__))
#rPath = realpath + "/plotGeneHuman.r" if args.plot_gene else realpath + "/plotGene.r"
#plotGeneHuman is still under development
rPath = realpath + "/plotGene.r" if args.plot_gene else realpath + "/plotGene.r"
targetR = bedResultFile + ".r"
with open(targetR, "wt") as fout:
fout.write("inputFile<-\"%s\"\n" % bedResultFile)
fout.write("outputPrefix<-\"%s\"\n" % bedResultFile)
fout.write("sizeFactorFile<-\"%s\"\n\n" % args.sizeFactorFile)
with open(rPath, "r") as fin:
for line in fin:
line = line.rstrip()
fout.write(line + "\n")
cmd = "R --vanilla -f " + targetR
logger.info(cmd)
os.system(cmd)
logger.info("done.")
main()
| apache-2.0 | 3,913,873,010,884,401,000 | 42.312883 | 183 | 0.666289 | false |
faircloth-lab/uce-probe-design | run_lastz.py | 1 | 6837 | #!/usr/bin/env python
# encoding: utf-8
"""
run_lastz.py
Created by Brant Faircloth on 2010-02-24.
Copyright (c) 2010 Brant Faircloth. All rights reserved.
# Description
A helper script to run lastz.
"""
import pdb
import sys
import os
import time
import optparse
import tempfile
import subprocess
import bx.seq.twobit
import multiprocessing
def interface():
'''Get the starting parameters from a configuration file'''
usage = "usage: %prog [options]"
p = optparse.OptionParser(usage)
p.add_option('--target', dest = 'target', action='store', \
type='string', default = None, help='The path to the target file (2bit)', \
metavar='FILE')
p.add_option('--query', dest = 'query', action='store', \
type='string', default = None, help='The path to the query file (2bit)', \
metavar='FILE')
p.add_option('--output', dest = 'output', action='store', \
type='string', default = None, help='The path to the output file', \
metavar='FILE')
p.add_option('--nprocs', dest = 'nprocs', action='store', \
type='int', default = 1, help='The number of processors to use')
p.add_option('--huge', dest = 'huge', action='store_true', default=False, \
help='Deal with poorly assembled (many scaffolds) genome sequences')
p.add_option('--size', dest = 'size', action='store', \
type='int', default = 10000000, help='The chunk size (in bp) to stick in a \
file while using the --huge option')
(options,arg) = p.parse_args()
for f in [options.target, options.query, options.output]:
if not f:
p.print_help()
sys.exit(2)
if f != options.output and not os.path.isfile(f):
print "You must provide a valid path to the query/target file."
p.print_help()
sys.exit(2)
return options, arg
def q_runner(n_procs, list_item, function, *args):
'''generic function used to start worker processes'''
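    # Flow: push every work item onto task_queue, start up to n_procs worker
    # processes running `function`, collect one result per work item from
    # results_queue, then send a 'STOP' sentinel to shut the workers down.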
task_queue = multiprocessing.Queue()
results_queue = multiprocessing.JoinableQueue()
if args:
arguments = (task_queue, results_queue,) + args
else:
arguments = (task_queue, results_queue,)
results = []
    # reduce processor count if proc count > files
if len(list_item) < n_procs:
n_procs = len(list_item)
for l in list_item:
task_queue.put(l)
for _ in range(n_procs):
        p = multiprocessing.Process(target=function, args=arguments)
        p.start()
#print 'Starting %s' % function
for _ in range(len(list_item)):
# indicated done results processing
results.append(results_queue.get())
results_queue.task_done()
#tell child processes to stop
for _ in range(n_procs):
task_queue.put('STOP')
# join the queue until we're finished processing results
results_queue.join()
# not closing the Queues caused me untold heartache and suffering
task_queue.close()
results_queue.close()
return results
def lastzParams(query, target, temp_out):
cli = \
'lastz {0}[nameparse=full] {1}[nameparse=full]\
--hspthresh=3000 \
--gappedthresh=3000 \
--ydrop=9400 \
--inner=0 \
--gap=400,30 \
--output={2} \
--format=lav'.format(query, target, temp_out)
return cli
def lastz(input, output):
    '''Queue worker: run lastz on each (target, query) pair taken from the input queue.'''
for chromo, probe in iter(input.get, 'STOP'):
print '\t%s' % chromo
temp_fd, temp_out = tempfile.mkstemp(suffix='.lastz')
os.close(temp_fd)
cli = lastzParams(chromo, probe, temp_out)
lzstdout, lztstderr = subprocess.Popen(cli, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate(None)
if lztstderr:
output.put(lztstderr)
else:
output.put(temp_out)
def SingleProcLastz(input, output):
    '''Run lastz once for a single (target, query) pair without multiprocessing.'''
#pdb.set_trace()
chromo, probe = input
temp_fd, temp_out = tempfile.mkstemp(suffix='.lastz')
os.close(temp_fd)
cli = lastzParams(chromo, probe, temp_out)
#pdb.set_trace()
lzstdout, lztstderr = subprocess.Popen(cli, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate(None)
if lztstderr:
output.append(lztstderr)
else:
        output.append(temp_out)
return output
def main():
start_time = time.time()
print 'Started: ', time.strftime("%a %b %d, %Y %H:%M:%S", time.localtime(start_time))
options, arg = interface()
if not options.huge:
# get individual records from the 2bit file
chromos = [os.path.join(options.target, c) for c in bx.seq.twobit.TwoBitFile(file(options.target)).keys()]
else:
chromos = []
# split target file into `options.size` (~10 Mbp) chunks
temp_fd, temp_out = tempfile.mkstemp(suffix='.fasta')
os.close(temp_fd)
temp_out_handle = open(temp_out, 'w')
tb = bx.seq.twobit.TwoBitFile(file(options.target))
sequence_length = 0
print 'Running with the --huge option. Chunking files into {0} bp...'.format(options.size)
for seq in tb.keys():
sequence = tb[seq][0:]
sequence_length += len(sequence)
# write it to the outfile
temp_out_handle.write('>{0}\n{1}\n'.format(seq, sequence))
if sequence_length > options.size:
temp_out_handle.close()
# put tempfile name on stack
chromos.append(temp_out + '[multiple]')
# open a new temp file
temp_fd, temp_out = tempfile.mkstemp(suffix='.fasta')
os.close(temp_fd)
temp_out_handle = open(temp_out, 'w')
# reset sequence length
sequence_length = 0
probes = (options.query,) * len(chromos)
cp = zip(chromos, probes)
# put those record names on the stack
print "Running the targets against %s queries..." % len(chromos)
if options.nprocs == 1:
results = []
for each in cp:
print each
print results
results = SingleProcLastz(each, results)
else:
results = q_runner(options.nprocs, cp, lastz)
outp = open(options.output, 'wb')
print "Writing the results file..."
#pdb.set_trace()
for f in results:
print '\t%s' % f
# read the file
outp.write(open(f, 'rb').read())
# cleanup the lastz output files
os.remove(f)
outp.close()
print 'Cleaning up the chunked files...'
if options.huge:
for f in chromos:
# cleanup the chunked files
os.remove(f.strip('[multiple]'))
# stats
end_time = time.time()
print 'Ended: ', time.strftime("%a %b %d, %Y %H:%M:%S", time.localtime(end_time))
print 'Time for execution: ', (end_time - start_time) / 60, 'minutes'
if __name__ == '__main__':
main()
| bsd-3-clause | 7,238,165,040,286,243,000 | 33.014925 | 129 | 0.6086 | false |
CingHu/neutron-ustack | neutron/plugins/cisco/cfg_agent/device_drivers/devicedriver_api.py | 5 | 5750 | # Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Hareesh Puthalath, Cisco Systems, Inc.
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class RoutingDriverBase(object):
"""Base class that defines an abstract interface for the Routing Driver.
This class defines the abstract interface/API for the Routing and
NAT related operations. Driver class corresponding to a hosting device
should inherit this base driver and implement its methods.
RouterInfo object (neutron.plugins.cisco.cfg_agent.router_info.RouterInfo)
is a wrapper around the router dictionary, with attributes for easy access
to parameters.
"""
@abc.abstractmethod
def router_added(self, router_info):
"""A logical router was assigned to the hosting device.
:param router_info: RouterInfo object for this router
:return None
"""
pass
@abc.abstractmethod
def router_removed(self, router_info):
"""A logical router was de-assigned from the hosting device.
:param router_info: RouterInfo object for this router
:return None
"""
pass
@abc.abstractmethod
def internal_network_added(self, router_info, port):
"""An internal network was connected to a router.
:param router_info: RouterInfo object for this router
:param port : port dictionary for the port where the internal
network is connected
:return None
"""
pass
@abc.abstractmethod
def internal_network_removed(self, router_info, port):
"""An internal network was removed from a router.
:param router_info: RouterInfo object for this router
:param port : port dictionary for the port where the internal
network was connected
:return None
"""
pass
@abc.abstractmethod
def external_gateway_added(self, router_info, ex_gw_port):
"""An external network was added to a router.
:param router_info: RouterInfo object of the router
:param ex_gw_port : port dictionary for the port where the external
gateway network is connected
:return None
"""
pass
@abc.abstractmethod
def external_gateway_removed(self, router_info, ex_gw_port):
"""An external network was removed from the router.
:param router_info: RouterInfo object of the router
:param ex_gw_port : port dictionary for the port where the external
gateway network was connected
:return None
"""
pass
@abc.abstractmethod
def enable_internal_network_NAT(self, router_info, port, ex_gw_port):
"""Enable NAT on an internal network.
:param router_info: RouterInfo object for this router
:param port : port dictionary for the port where the internal
network is connected
:param ex_gw_port : port dictionary for the port where the external
gateway network is connected
:return None
"""
pass
@abc.abstractmethod
def disable_internal_network_NAT(self, router_info, port, ex_gw_port):
"""Disable NAT on an internal network.
:param router_info: RouterInfo object for this router
:param port : port dictionary for the port where the internal
network is connected
:param ex_gw_port : port dictionary for the port where the external
gateway network is connected
:return None
"""
pass
@abc.abstractmethod
def floating_ip_added(self, router_info, ex_gw_port,
floating_ip, fixed_ip):
"""A floating IP was added.
:param router_info: RouterInfo object for this router
:param ex_gw_port : port dictionary for the port where the external
gateway network is connected
:param floating_ip: Floating IP as a string
        :param fixed_ip : Fixed IP of the internal interface as
a string
:return None
"""
pass
@abc.abstractmethod
def floating_ip_removed(self, router_info, ex_gw_port,
floating_ip, fixed_ip):
"""A floating IP was removed.
:param router_info: RouterInfo object for this router
:param ex_gw_port : port dictionary for the port where the external
gateway network is connected
:param floating_ip: Floating IP as a string
        :param fixed_ip: Fixed IP of the internal interface as a string
:return None
"""
pass
@abc.abstractmethod
def routes_updated(self, router_info, action, route):
"""Routes were updated for router.
:param router_info: RouterInfo object for this router
:param action : Action on the route , either 'replace' or 'delete'
:param route: route dictionary with keys 'destination' & 'next_hop'
:return None
"""
pass
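
# Illustrative sketch (not part of the original file): a concrete driver is
# expected to subclass RoutingDriverBase and implement every abstract method
# above, for example:
#
#   class MyRoutingDriver(RoutingDriverBase):
#       def router_added(self, router_info):
#           pass  # push the new router's config to the hosting device
#       # ...and likewise for the remaining abstract methods.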
| apache-2.0 | 3,515,366,804,328,147,500 | 34.9375 | 78 | 0.630435 | false |
wavesoft/CCLib | Python/cc_write_flash.py | 1 | 2943 | #!/usr/bin/python
#
# CCLib_proxy Utilities
# Copyright (c) 2014 Ioannis Charalampidis
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function
from cclib import CCHEXFile, getOptions, openCCDebugger
import sys
# Get serial port either form environment or from arguments
opts = getOptions("Generic CCDebugger Flash Writer Tool", hexIn=True,
erase="Full chip erase before write", offset=":Offset the addresses in the .hex file by this value")
# Open debugger
try:
dbg = openCCDebugger(opts['port'], enterDebug=opts['enter'])
except Exception as e:
print("ERROR: %s" % str(e))
sys.exit(1)
# Get offset
offset = 0
if opts['offset']:
if opts['offset'][0:2] == "0x":
offset = int(opts['offset'], 16)
else:
offset = int(opts['offset'])
print("NOTE: The memory addresses are offset by %i bytes!" % offset)
# Get bluegiga-specific info
serial = dbg.getSerial()
# Parse the HEX file
hexFile = CCHEXFile( opts['in'] )
hexFile.load()
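# hexFile now exposes the parsed data as hexFile.memBlocks, a list of
# contiguous memory blocks with .addr, .size and .bytes (used below).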
# Display sections & calculate max memory usage
maxMem = 0
print("Sections in %s:\n" % opts['in'])
print(" Addr. Size")
print("-------- -------------")
for mb in hexFile.memBlocks:
# Calculate top position
memTop = mb.addr + mb.size
if memTop > maxMem:
maxMem = memTop
# Print portion
print(" 0x%04x %i B " % (mb.addr + offset, mb.size))
print("")
# Check for oversize data
if maxMem > (dbg.chipInfo['flash'] * 1024):
print("ERROR: Data too bit to fit in chip's memory!")
sys.exit(4)
# Confirm
erasePrompt = "OVERWRITE"
if opts['erase']:
erasePrompt = "ERASE and REPROGRAM"
print("This is going to %s the chip. Are you sure? <y/N>: " % erasePrompt, end=' ')
ans = sys.stdin.readline()[0:-1]
if (ans != "y") and (ans != "Y"):
print("Aborted")
sys.exit(2)
# Flashing messages
print("\nFlashing:")
# Send chip erase
if opts['erase']:
print(" - Chip erase...")
try:
dbg.chipErase()
except Exception as e:
print("ERROR: %s" % str(e))
sys.exit(3)
# Flash memory
dbg.pauseDMA(False)
print(" - Flashing %i memory blocks..." % len(hexFile.memBlocks))
for mb in hexFile.memBlocks:
# Flash memory block
print(" -> 0x%04x : %i bytes " % (mb.addr + offset, mb.size))
try:
dbg.writeCODE( mb.addr + offset, mb.bytes, verify=True, showProgress=True )
except Exception as e:
print("ERROR: %s" % str(e))
sys.exit(3)
# Done
print("\nCompleted")
print("")
| gpl-3.0 | 8,938,638,814,940,296,000 | 26 | 101 | 0.687734 | false |
orioncoin-dev/orioncoin | contrib/testgen/gen_base58_test_vectors.py | 1064 | 4344 | #!/usr/bin/env python
'''
Generate valid and invalid base58 address and private key test vectors.
Usage:
gen_base58_test_vectors.py valid 50 > ../../src/test/data/base58_keys_valid.json
gen_base58_test_vectors.py invalid 50 > ../../src/test/data/base58_keys_invalid.json
'''
# 2012 Wladimir J. van der Laan
# Released under MIT License
import os
from itertools import islice
from base58 import b58encode, b58decode, b58encode_chk, b58decode_chk, b58chars
import random
from binascii import b2a_hex
# key types
PUBKEY_ADDRESS = 48
SCRIPT_ADDRESS = 5
PUBKEY_ADDRESS_TEST = 111
SCRIPT_ADDRESS_TEST = 196
PRIVKEY = 176
PRIVKEY_TEST = 239
metadata_keys = ['isPrivkey', 'isTestnet', 'addrType', 'isCompressed']
# templates for valid sequences
templates = [
# prefix, payload_size, suffix, metadata
# None = N/A
((PUBKEY_ADDRESS,), 20, (), (False, False, 'pubkey', None)),
((SCRIPT_ADDRESS,), 20, (), (False, False, 'script', None)),
((PUBKEY_ADDRESS_TEST,), 20, (), (False, True, 'pubkey', None)),
((SCRIPT_ADDRESS_TEST,), 20, (), (False, True, 'script', None)),
((PRIVKEY,), 32, (), (True, False, None, False)),
((PRIVKEY,), 32, (1,), (True, False, None, True)),
((PRIVKEY_TEST,), 32, (), (True, True, None, False)),
((PRIVKEY_TEST,), 32, (1,), (True, True, None, True))
]
def is_valid(v):
'''Check vector v for validity'''
result = b58decode_chk(v)
if result is None:
return False
valid = False
for template in templates:
prefix = str(bytearray(template[0]))
suffix = str(bytearray(template[2]))
if result.startswith(prefix) and result.endswith(suffix):
if (len(result) - len(prefix) - len(suffix)) == template[1]:
return True
return False
def gen_valid_vectors():
'''Generate valid test vectors'''
while True:
for template in templates:
prefix = str(bytearray(template[0]))
payload = os.urandom(template[1])
suffix = str(bytearray(template[2]))
rv = b58encode_chk(prefix + payload + suffix)
assert is_valid(rv)
metadata = dict([(x,y) for (x,y) in zip(metadata_keys,template[3]) if y is not None])
yield (rv, b2a_hex(payload), metadata)
def gen_invalid_vector(template, corrupt_prefix, randomize_payload_size, corrupt_suffix):
'''Generate possibly invalid vector'''
if corrupt_prefix:
prefix = os.urandom(1)
else:
prefix = str(bytearray(template[0]))
if randomize_payload_size:
payload = os.urandom(max(int(random.expovariate(0.5)), 50))
else:
payload = os.urandom(template[1])
if corrupt_suffix:
suffix = os.urandom(len(template[2]))
else:
suffix = str(bytearray(template[2]))
return b58encode_chk(prefix + payload + suffix)
def randbool(p = 0.5):
'''Return True with P(p)'''
return random.random() < p
def gen_invalid_vectors():
'''Generate invalid test vectors'''
# start with some manual edge-cases
yield "",
yield "x",
while True:
# kinds of invalid vectors:
# invalid prefix
# invalid payload length
# invalid (randomized) suffix (add random data)
# corrupt checksum
for template in templates:
val = gen_invalid_vector(template, randbool(0.2), randbool(0.2), randbool(0.2))
if random.randint(0,10)<1: # line corruption
if randbool(): # add random character to end
val += random.choice(b58chars)
else: # replace random character in the middle
n = random.randint(0, len(val))
val = val[0:n] + random.choice(b58chars) + val[n+1:]
if not is_valid(val):
yield val,
if __name__ == '__main__':
import sys, json
iters = {'valid':gen_valid_vectors, 'invalid':gen_invalid_vectors}
try:
uiter = iters[sys.argv[1]]
except IndexError:
uiter = gen_valid_vectors
try:
count = int(sys.argv[2])
except IndexError:
count = 0
data = list(islice(uiter(), count))
json.dump(data, sys.stdout, sort_keys=True, indent=4)
sys.stdout.write('\n')
| mit | 4,101,568,194,970,254,000 | 33.47619 | 97 | 0.59116 | false |
DepthDeluxe/ansible | lib/ansible/plugins/action/copy.py | 18 | 15161 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import os
import stat
import tempfile
from ansible.constants import mk_boolean as boolean
from ansible.errors import AnsibleError, AnsibleFileNotFound
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.plugins.action import ActionBase
from ansible.utils.hashing import checksum
class ActionModule(ActionBase):
def run(self, tmp=None, task_vars=None):
''' handler for file transfer operations '''
if task_vars is None:
task_vars = dict()
result = super(ActionModule, self).run(tmp, task_vars)
source = self._task.args.get('src', None)
content = self._task.args.get('content', None)
dest = self._task.args.get('dest', None)
raw = boolean(self._task.args.get('raw', 'no'))
force = boolean(self._task.args.get('force', 'yes'))
remote_src = boolean(self._task.args.get('remote_src', False))
follow = boolean(self._task.args.get('follow', False))
decrypt = boolean(self._task.args.get('decrypt', True))
result['failed'] = True
if (source is None and content is None) or dest is None:
result['msg'] = "src (or content) and dest are required"
elif source is not None and content is not None:
result['msg'] = "src and content are mutually exclusive"
elif content is not None and dest is not None and dest.endswith("/"):
result['msg'] = "dest must be a file if content is defined"
else:
del result['failed']
if result.get('failed'):
return result
# Check if the source ends with a "/"
source_trailing_slash = False
if source:
source_trailing_slash = self._connection._shell.path_has_trailing_slash(source)
# Define content_tempfile in case we set it after finding content populated.
content_tempfile = None
# If content is defined make a temp file and write the content into it.
if content is not None:
try:
# If content comes to us as a dict it should be decoded json.
# We need to encode it back into a string to write it out.
if isinstance(content, dict) or isinstance(content, list):
content_tempfile = self._create_content_tempfile(json.dumps(content))
else:
content_tempfile = self._create_content_tempfile(content)
source = content_tempfile
except Exception as err:
result['failed'] = True
result['msg'] = "could not write content temp file: %s" % to_native(err)
return result
# if we have first_available_file in our vars
# look up the files and use the first one we find as src
elif remote_src:
result.update(self._execute_module(task_vars=task_vars))
return result
else: # find in expected paths
try:
source = self._find_needle('files', source)
except AnsibleError as e:
result['failed'] = True
result['msg'] = to_text(e)
return result
# A list of source file tuples (full_path, relative_path) which will try to copy to the destination
source_files = []
# If source is a directory populate our list else source is a file and translate it to a tuple.
if os.path.isdir(to_bytes(source, errors='surrogate_or_strict')):
# Get the amount of spaces to remove to get the relative path.
if source_trailing_slash:
sz = len(source)
else:
sz = len(source.rsplit('/', 1)[0]) + 1
# Walk the directory and append the file tuples to source_files.
for base_path, sub_folders, files in os.walk(to_bytes(source), followlinks=True):
for file in files:
full_path = to_text(os.path.join(base_path, file), errors='surrogate_or_strict')
rel_path = full_path[sz:]
if rel_path.startswith('/'):
rel_path = rel_path[1:]
source_files.append((full_path, rel_path))
# If it's recursive copy, destination is always a dir,
# explicitly mark it so (note - copy module relies on this).
if not self._connection._shell.path_has_trailing_slash(dest):
dest = self._connection._shell.join_path(dest, '')
else:
source_files.append((source, os.path.basename(source)))
changed = False
module_return = dict(changed=False)
# A register for if we executed a module.
# Used to cut down on command calls when not recursive.
module_executed = False
# Tell _execute_module to delete the file if there is one file.
delete_remote_tmp = (len(source_files) == 1)
# If this is a recursive action create a tmp path that we can share as the _exec_module create is too late.
if not delete_remote_tmp:
if tmp is None or "-tmp-" not in tmp:
tmp = self._make_tmp_path()
# expand any user home dir specifier
dest = self._remote_expand_user(dest)
# Keep original value for mode parameter
mode_value = self._task.args.get('mode', None)
diffs = []
for source_full, source_rel in source_files:
# If the local file does not exist, get_real_file() raises AnsibleFileNotFound
try:
source_full = self._loader.get_real_file(source_full, decrypt=decrypt)
except AnsibleFileNotFound as e:
result['failed'] = True
result['msg'] = "could not find src=%s, %s" % (source_full, e)
self._remove_tmp_path(tmp)
return result
# Get the local mode and set if user wanted it preserved
# https://github.com/ansible/ansible-modules-core/issues/1124
if self._task.args.get('mode', None) == 'preserve':
lmode = '0%03o' % stat.S_IMODE(os.stat(source_full).st_mode)
self._task.args['mode'] = lmode
            # This is kind of an optimization - if the user told us the destination is
# dir, do path manipulation right away, otherwise we still check
# for dest being a dir via remote call below.
if self._connection._shell.path_has_trailing_slash(dest):
dest_file = self._connection._shell.join_path(dest, source_rel)
else:
dest_file = self._connection._shell.join_path(dest)
# Attempt to get remote file info
dest_status = self._execute_remote_stat(dest_file, all_vars=task_vars, follow=follow, tmp=tmp, checksum=force)
if dest_status['exists'] and dest_status['isdir']:
# The dest is a directory.
if content is not None:
# If source was defined as content remove the temporary file and fail out.
self._remove_tempfile_if_content_defined(content, content_tempfile)
self._remove_tmp_path(tmp)
result['failed'] = True
result['msg'] = "can not use content with a dir as dest"
return result
else:
# Append the relative source location to the destination and get remote stats again
dest_file = self._connection._shell.join_path(dest, source_rel)
dest_status = self._execute_remote_stat(dest_file, all_vars=task_vars, follow=follow, tmp=tmp, checksum=force)
if dest_status['exists'] and not force:
# remote_file exists so continue to next iteration.
continue
# Generate a hash of the local file.
local_checksum = checksum(source_full)
if local_checksum != dest_status['checksum']:
# The checksums don't match and we will change or error out.
changed = True
# Create a tmp path if missing only if this is not recursive.
# If this is recursive we already have a tmp path.
if delete_remote_tmp:
if tmp is None or "-tmp-" not in tmp:
tmp = self._make_tmp_path()
if self._play_context.diff and not raw:
diffs.append(self._get_diff_data(dest_file, source_full, task_vars))
if self._play_context.check_mode:
self._remove_tempfile_if_content_defined(content, content_tempfile)
changed = True
module_return = dict(changed=True)
continue
# Define a remote directory that we will copy the file to.
tmp_src = self._connection._shell.join_path(tmp, 'source')
remote_path = None
if not raw:
remote_path = self._transfer_file(source_full, tmp_src)
else:
self._transfer_file(source_full, dest_file)
# We have copied the file remotely and no longer require our content_tempfile
self._remove_tempfile_if_content_defined(content, content_tempfile)
self._loader.cleanup_tmp_file(source_full)
# fix file permissions when the copy is done as a different user
if remote_path:
self._fixup_perms2((tmp, remote_path))
if raw:
# Continue to next iteration if raw is defined.
continue
# Run the copy module
# src and dest here come after original and override them
# we pass dest only to make sure it includes trailing slash in case of recursive copy
new_module_args = self._task.args.copy()
new_module_args.update(
dict(
src=tmp_src,
dest=dest,
original_basename=source_rel,
)
)
# remove action plugin only keys
for key in ('content', 'decrypt'):
if key in new_module_args:
del new_module_args[key]
module_return = self._execute_module(module_name='copy',
module_args=new_module_args, task_vars=task_vars,
tmp=tmp, delete_remote_tmp=delete_remote_tmp)
module_executed = True
else:
# no need to transfer the file, already correct hash, but still need to call
# the file module in case we want to change attributes
self._remove_tempfile_if_content_defined(content, content_tempfile)
self._loader.cleanup_tmp_file(source_full)
if raw:
# Continue to next iteration if raw is defined.
self._remove_tmp_path(tmp)
continue
# Fix for https://github.com/ansible/ansible-modules-core/issues/1568.
# If checksums match, and follow = True, find out if 'dest' is a link. If so,
# change it to point to the source of the link.
if follow:
dest_status_nofollow = self._execute_remote_stat(dest_file, all_vars=task_vars, follow=False)
if dest_status_nofollow['islnk'] and 'lnk_source' in dest_status_nofollow.keys():
dest = dest_status_nofollow['lnk_source']
# Build temporary module_args.
new_module_args = self._task.args.copy()
new_module_args.update(
dict(
src=source_rel,
dest=dest,
original_basename=source_rel
)
)
# Execute the file module.
module_return = self._execute_module(module_name='file',
module_args=new_module_args, task_vars=task_vars,
tmp=tmp, delete_remote_tmp=delete_remote_tmp)
module_executed = True
if not module_return.get('checksum'):
module_return['checksum'] = local_checksum
if module_return.get('failed'):
result.update(module_return)
if not delete_remote_tmp:
self._remove_tmp_path(tmp)
return result
if module_return.get('changed'):
changed = True
# the file module returns the file path as 'path', but
# the copy module uses 'dest', so add it if it's not there
if 'path' in module_return and 'dest' not in module_return:
module_return['dest'] = module_return['path']
# reset the mode
self._task.args['mode'] = mode_value
# Delete tmp path if we were recursive or if we did not execute a module.
if not delete_remote_tmp or (delete_remote_tmp and not module_executed):
self._remove_tmp_path(tmp)
if module_executed and len(source_files) == 1:
result.update(module_return)
else:
result.update(dict(dest=dest, src=source, changed=changed))
if diffs:
result['diff'] = diffs
return result
def _create_content_tempfile(self, content):
''' Create a tempfile containing defined content '''
fd, content_tempfile = tempfile.mkstemp()
f = os.fdopen(fd, 'wb')
content = to_bytes(content)
try:
f.write(content)
except Exception as err:
os.remove(content_tempfile)
raise Exception(err)
finally:
f.close()
return content_tempfile
def _remove_tempfile_if_content_defined(self, content, content_tempfile):
if content is not None:
os.remove(content_tempfile)
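
# Illustrative playbook usage of this action plugin (argument names taken from
# the parsing in run() above; the paths are made up):
#
#   - name: Deploy a config file
#     copy:
#       src: files/app.conf
#       dest: /etc/app/app.conf
#       mode: preserve
#       follow: yes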
| gpl-3.0 | -2,902,739,737,089,247,000 | 42.817919 | 130 | 0.563287 | false |
lowitty/server | libsDarwin/twisted/trial/_dist/test/test_disttrial.py | 10 | 13156 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.trial._dist.disttrial}.
"""
import os
import sys
from cStringIO import StringIO
from twisted.internet.protocol import ProcessProtocol
from twisted.internet.defer import fail, succeed
from twisted.internet.task import Cooperator, deferLater
from twisted.internet.main import CONNECTION_DONE
from twisted.internet import reactor
from twisted.python.failure import Failure
from twisted.python.lockfile import FilesystemLock
from twisted.test.test_cooperator import FakeScheduler
from twisted.trial.unittest import TestCase
from twisted.trial.reporter import Reporter, TreeReporter
from twisted.trial.reporter import UncleanWarningsReporterWrapper
from twisted.trial.runner import TrialSuite, ErrorHolder
from twisted.trial._dist.disttrial import DistTrialRunner
from twisted.trial._dist.distreporter import DistReporter
from twisted.trial._dist.worker import LocalWorker
class FakeTransport(object):
"""
A simple fake process transport.
"""
def writeToChild(self, fd, data):
"""
Ignore write calls.
"""
class FakeReactor(object):
"""
A simple fake reactor for testing purposes.
"""
spawnCount = 0
stopCount = 0
runCount = 0
def spawnProcess(self, worker, *args, **kwargs):
worker.makeConnection(FakeTransport())
self.spawnCount += 1
def stop(self):
self.stopCount += 1
def run(self):
self.runCount += 1
def addSystemEventTrigger(self, *args, **kw):
pass
class EternalTerminationPredicateFactory(object):
"""
A rigged terminationPredicateFactory for which time never pass.
"""
def __call__(self):
"""
See: L{task._Timer}
"""
return False
class DistTrialRunnerTests(TestCase):
"""
Tests for L{DistTrialRunner}.
"""
def setUp(self):
"""
Create a runner for testing.
"""
self.runner = DistTrialRunner(TreeReporter, 4, [],
workingDirectory=self.mktemp())
self.runner._stream = StringIO()
def getFakeSchedulerAndEternalCooperator(self):
"""
Helper to create fake scheduler and cooperator in tests.
The cooperator has a termination timer which will never inform
the scheduler that the task needs to be terminated.
@return: L{tuple} of (scheduler, cooperator)
"""
scheduler = FakeScheduler()
cooperator = Cooperator(
scheduler=scheduler,
terminationPredicateFactory=EternalTerminationPredicateFactory,
)
return scheduler, cooperator
def test_writeResults(self):
"""
L{DistTrialRunner.writeResults} writes to the stream specified in the
init.
"""
stringIO = StringIO()
result = DistReporter(Reporter(stringIO))
self.runner.writeResults(result)
self.assertTrue(stringIO.tell() > 0)
def test_createLocalWorkers(self):
"""
C{createLocalWorkers} iterates the list of protocols and create one
L{LocalWorker} for each.
"""
protocols = [object() for x in xrange(4)]
workers = self.runner.createLocalWorkers(protocols, "path")
for s in workers:
self.assertIsInstance(s, LocalWorker)
self.assertEqual(4, len(workers))
def test_launchWorkerProcesses(self):
"""
Given a C{spawnProcess} function, C{launchWorkerProcess} launches a
python process with a existing path as its argument.
"""
protocols = [ProcessProtocol() for i in range(4)]
arguments = []
environment = {}
def fakeSpawnProcess(processProtocol, executable, args=(), env={},
path=None, uid=None, gid=None, usePTY=0,
childFDs=None):
arguments.append(executable)
arguments.extend(args)
environment.update(env)
self.runner.launchWorkerProcesses(
fakeSpawnProcess, protocols, ["foo"])
self.assertEqual(arguments[0], arguments[1])
self.assertTrue(os.path.exists(arguments[2]))
self.assertEqual("foo", arguments[3])
self.assertEqual(os.pathsep.join(sys.path),
environment["TRIAL_PYTHONPATH"])
def test_run(self):
"""
C{run} starts the reactor exactly once and spawns each of the workers
exactly once.
"""
fakeReactor = FakeReactor()
suite = TrialSuite()
for i in xrange(10):
suite.addTest(TestCase())
self.runner.run(suite, fakeReactor)
self.assertEqual(fakeReactor.runCount, 1)
self.assertEqual(fakeReactor.spawnCount, self.runner._workerNumber)
def test_runUsedDirectory(self):
"""
L{DistTrialRunner} checks if the test directory is already locked, and
if it is generates a name based on it.
"""
class FakeReactorWithLock(FakeReactor):
def spawnProcess(oself, worker, *args, **kwargs):
self.assertEqual(os.path.abspath(worker._logDirectory),
os.path.abspath(
os.path.join(workingDirectory + "-1",
str(oself.spawnCount))))
localLock = FilesystemLock(workingDirectory + "-1.lock")
self.assertFalse(localLock.lock())
oself.spawnCount += 1
worker.makeConnection(FakeTransport())
worker._ampProtocol.run = lambda *args: succeed(None)
newDirectory = self.mktemp()
os.mkdir(newDirectory)
workingDirectory = os.path.join(newDirectory, "_trial_temp")
lock = FilesystemLock(workingDirectory + ".lock")
lock.lock()
self.addCleanup(lock.unlock)
self.runner._workingDirectory = workingDirectory
fakeReactor = FakeReactorWithLock()
suite = TrialSuite()
for i in xrange(10):
suite.addTest(TestCase())
self.runner.run(suite, fakeReactor)
def test_minimalWorker(self):
"""
L{DistTrialRunner} doesn't try to start more workers than the number of
tests.
"""
fakeReactor = FakeReactor()
self.runner.run(TestCase(), fakeReactor)
self.assertEqual(fakeReactor.runCount, 1)
self.assertEqual(fakeReactor.spawnCount, 1)
def test_runUncleanWarnings(self):
"""
Running with the C{unclean-warnings} option makes L{DistTrialRunner}
        use the L{UncleanWarningsReporterWrapper}.
"""
fakeReactor = FakeReactor()
self.runner._uncleanWarnings = True
result = self.runner.run(TestCase(), fakeReactor)
self.assertIsInstance(result, DistReporter)
self.assertIsInstance(result.original,
UncleanWarningsReporterWrapper)
def test_runWithoutTest(self):
"""
When the suite contains no test, L{DistTrialRunner} takes a shortcut
path without launching any process or starting the reactor.
"""
fakeReactor = object()
suite = TrialSuite()
result = self.runner.run(suite, fakeReactor)
self.assertIsInstance(result, DistReporter)
output = self.runner._stream.getvalue()
self.assertIn("Running 0 test", output)
self.assertIn("PASSED", output)
def test_runWithoutTestButWithAnError(self):
"""
Even if there is no test, the suite can contain an error (most likely,
an import error): this should make the run fail, and the error should
be printed.
"""
fakeReactor = object()
error = ErrorHolder("an error", Failure(RuntimeError("foo bar")))
result = self.runner.run(error, fakeReactor)
self.assertIsInstance(result, DistReporter)
output = self.runner._stream.getvalue()
self.assertIn("Running 0 test", output)
self.assertIn("foo bar", output)
self.assertIn("an error", output)
self.assertIn("errors=1", output)
self.assertIn("FAILED", output)
def test_runUnexpectedError(self):
"""
If for some reasons we can't connect to the worker process, the test
suite catches and fails.
"""
class FakeReactorWithFail(FakeReactor):
def spawnProcess(self, worker, *args, **kwargs):
worker.makeConnection(FakeTransport())
self.spawnCount += 1
worker._ampProtocol.run = self.failingRun
def failingRun(self, case, result):
return fail(RuntimeError("oops"))
scheduler, cooperator = self.getFakeSchedulerAndEternalCooperator()
fakeReactor = FakeReactorWithFail()
result = self.runner.run(TestCase(), fakeReactor,
cooperator.cooperate)
self.assertEqual(fakeReactor.runCount, 1)
self.assertEqual(fakeReactor.spawnCount, 1)
scheduler.pump()
self.assertEqual(1, len(result.original.failures))
def test_runStopAfterTests(self):
"""
L{DistTrialRunner} calls C{reactor.stop} and unlocks the test directory
once the tests have run.
"""
functions = []
class FakeReactorWithSuccess(FakeReactor):
def spawnProcess(self, worker, *args, **kwargs):
worker.makeConnection(FakeTransport())
self.spawnCount += 1
worker._ampProtocol.run = self.succeedingRun
def succeedingRun(self, case, result):
return succeed(None)
def addSystemEventTrigger(oself, phase, event, function):
self.assertEqual('before', phase)
self.assertEqual('shutdown', event)
functions.append(function)
workingDirectory = self.runner._workingDirectory
fakeReactor = FakeReactorWithSuccess()
self.runner.run(TestCase(), fakeReactor)
def check():
localLock = FilesystemLock(workingDirectory + ".lock")
self.assertTrue(localLock.lock())
self.assertEqual(1, fakeReactor.stopCount)
# We don't wait for the process deferreds here, so nothing is
# returned by the function before shutdown
self.assertIdentical(None, functions[0]())
return deferLater(reactor, 0, check)
def test_runWaitForProcessesDeferreds(self):
"""
L{DistTrialRunner} waits for the worker processes to stop when the
reactor is stopping, and then unlocks the test directory, not trying to
stop the reactor again.
"""
functions = []
workers = []
class FakeReactorWithEvent(FakeReactor):
def spawnProcess(self, worker, *args, **kwargs):
worker.makeConnection(FakeTransport())
workers.append(worker)
def addSystemEventTrigger(oself, phase, event, function):
self.assertEqual('before', phase)
self.assertEqual('shutdown', event)
functions.append(function)
workingDirectory = self.runner._workingDirectory
fakeReactor = FakeReactorWithEvent()
self.runner.run(TestCase(), fakeReactor)
def check(ign):
# Let the AMP deferreds fire
return deferLater(reactor, 0, realCheck)
def realCheck():
localLock = FilesystemLock(workingDirectory + ".lock")
self.assertTrue(localLock.lock())
# Stop is not called, as it ought to have been called before
self.assertEqual(0, fakeReactor.stopCount)
workers[0].processEnded(Failure(CONNECTION_DONE))
return functions[0]().addCallback(check)
def test_runUntilFailure(self):
"""
L{DistTrialRunner} can run in C{untilFailure} mode where it will run
the given tests until they fail.
"""
called = []
class FakeReactorWithSuccess(FakeReactor):
def spawnProcess(self, worker, *args, **kwargs):
worker.makeConnection(FakeTransport())
self.spawnCount += 1
worker._ampProtocol.run = self.succeedingRun
def succeedingRun(self, case, result):
called.append(None)
if len(called) == 5:
return fail(RuntimeError("oops"))
return succeed(None)
fakeReactor = FakeReactorWithSuccess()
scheduler, cooperator = self.getFakeSchedulerAndEternalCooperator()
result = self.runner.run(
TestCase(), fakeReactor, cooperate=cooperator.cooperate,
untilFailure=True)
scheduler.pump()
self.assertEqual(5, len(called))
self.assertFalse(result.wasSuccessful())
output = self.runner._stream.getvalue()
self.assertIn("PASSED", output)
self.assertIn("FAIL", output)
| mit | 2,705,487,625,181,921,000 | 31.645161 | 79 | 0.616221 | false |
rxuriguera/bibtexIndexMaker | src/bibim/references/format/formatter.py | 1 | 1717 |
# Copyright 2010 Ramon Xuriguera
#
# This file is part of BibtexIndexMaker.
#
# BibtexIndexMaker is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# BibtexIndexMaker is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BibtexIndexMaker. If not, see <http://www.gnu.org/licenses/>.
class ReferenceFormatter(object):
def __init__(self):
pass
def format_reference(self, reference, format_generator):
"""
Sets the 'entry' attribute of 'reference'
"""
format_generator.setup_new_reference()
format_generator.generate_header()
fields = reference.get_fields()
for field in fields:
field = reference.get_field(field)
if not field.value:
continue
generate_method = 'generate_' + field.name
try:
generate_method = getattr(format_generator, generate_method)
generate_method(field.value)
except AttributeError:
format_generator.generate_default(field.name, field.value)
format_generator.generate_footer()
reference.entry = format_generator.get_generated_reference()
reference.format = format_generator.format
| gpl-3.0 | 2,981,039,575,606,235,600 | 34.040816 | 76 | 0.655213 | false |
bgilbert/scanvark | scanvark/config.py | 1 | 1818 | #
# Scanvark -- a Gtk-based batch scanning program
#
# Copyright (c) 2012 Benjamin Gilbert
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of version 2 of the GNU General Public License as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from __future__ import division
import yaml
class ScanvarkConfig(object):
def __init__(self, conffile):
with open(conffile) as fh:
config = yaml.safe_load(fh)
self.device = config['device']
self.device_config = config.get('scan-settings', {})
self.source_single = config.get('single-source', None)
self.source_double = config.get('double-source', None)
self.prepend_new_pages = config.get('page-order') == 'reverse'
def get_rotation(key):
val = config.get('rotate', 0)
return config.get(key, val)
self.rotate_odd = get_rotation('rotate-odd')
self.rotate_even = get_rotation('rotate-even')
self.jpeg_quality = config.get('jpeg-quality', 95)
self.thumbnail_size = config.get('thumbnail-size', (200, 150))
defaults = config.get('defaults', {})
self.default_color = defaults.get('color', True)
self.default_double_sided = defaults.get('double-sided', False)
self.default_resolution = defaults.get('resolution', 150)
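
# Illustrative config file for ScanvarkConfig (YAML; key names taken from the
# lookups above, values are only examples):
#
#   device: "some-sane-device"
#   scan-settings: {}
#   single-source: "ADF Front"
#   double-source: "ADF Duplex"
#   page-order: reverse
#   rotate: 0
#   jpeg-quality: 95
#   thumbnail-size: [200, 150]
#   defaults:
#     color: true
#     double-sided: false
#     resolution: 150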
| gpl-2.0 | 6,266,500,709,855,719,000 | 36.875 | 73 | 0.676018 | false |
LingJiJian/LangTransUtil | lk.py | 1 | 4280 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# author B-y [email protected]
import os
import re
import json
import sys
class Singleton(type):
"""docstring for Singleton"""
def __init__(self, name,bases,dic):
super(Singleton, self).__init__(name,bases,dic)
self.instance = None
def __call__(self,*args,**kwargs):
if self.instance is None:
self.instance = super(Singleton,self).__call__(*args,**kwargs)
return self.instance
class FileManager:
""" 文件管理器 """
__metaclass__ = Singleton
def __init__(self):
super(FileManager, self).__init__()
		self.__scanFilePaths = {} # file contents, keyed by file path
		self.__tranWordArrs = {} # matched strings found in each file
		self.__tranWordDic = {} # matched strings keyed by generated i18n key
def setInDir(self,path):
self.__inDir = path
def setOutDir(self,path):
self.__outDir = path
def setLogCallFuc(self,func):
self.__logCallFunc = func
def run(self):
self.__preload()
self.__scanInDir(self.__inDir)
self.__progressFiles()
self.__exportFile()
	# Preload config.json and any previously exported key/value pairs
def __preload(self):
path = sys.path[0]
if os.path.isfile(path):
path = os.path.dirname(path)
pFile = open(os.path.join(path,"config.json"),"r")
self.__config = json.loads(pFile.read())
keyArr = []
valArr = []
self._tmpkeyValFlag = False
if os.path.exists(self.__outDir):
def onHandle(tmpStr):
if self._tmpkeyValFlag:
valArr.append(tmpStr);
else:
keyArr.append(tmpStr);
self._tmpkeyValFlag = not self._tmpkeyValFlag;
pFile = open(self.__outDir,"r")
self.__scanWordInContent(pFile.read(),onHandle)
for i,v in enumerate(keyArr):
self.__tranWordDic[ keyArr[i] ] = valArr[i]
else:
self.__tranWordDic = {}
	# Recursively scan the input directory for source files
def __scanInDir(self,path):
arr = os.listdir(path)
for line in arr:
if self.__isIgnoreScan(line):
pass
else:
filepath = os.path.join(path,line)
if os.path.isdir(filepath):
self.__scanInDir(filepath)
else:
if os.path.splitext(filepath)[1] in self.__config["scan_suffix"]:
pFile = open(filepath,"r")
try:
self.__scanFilePaths[filepath] = pFile.read()
self.__tranWordArrs[filepath] = []
finally:
pFile.close()
	# Scan the collected file contents for strings to translate
def __progressFiles(self):
for path,content in self.__scanFilePaths.items():
def onHandle(tmpStr):
if self.has_zh(tmpStr.decode('utf-8')):
key = "\"a"+self.__getWordIdx()+"\"";
if not self.__tranWordDic.has_key(key) :
self.__tranWordDic[ key ] = tmpStr
self.__tranWordArrs[path].append({"key":key,"val":tmpStr})
self.__scanWordInContent(content,onHandle)
self.__logCallFunc({"isFinish":True})
	# Scan file content for double-quoted strings (potential Chinese text)
def __scanWordInContent(self,content,func):
tmpStr = ""
markFlag = False
for i,ch in enumerate(content):
if ch == "\"":
if content[i-1] == "\\":
if markFlag:
tmpStr += "\""
continue;
markFlag = not markFlag;
if markFlag == False :
tmpStr += "\""
func(tmpStr)
tmpStr = ""
if markFlag :
tmpStr += ch
def has_zh(self,txt):
zhPattern = re.compile(u'[\u4e00-\u9fa5]+')
ret = False
if zhPattern.search(txt):
ret = True
else:
ret = False
return ret
	# Whether this path should be skipped during scanning
def __isIgnoreScan(self,path):
ret = False
for ignore_path in self.__config["ignore_path"]:
# print(os.path.join(self.__inDir,ignore_path), os.path.join(self.__inDir,path))
if os.path.join(self.__inDir,ignore_path) == os.path.join(self.__inDir,path):
ret = True
break
return ret
def __getWordIdx(self):
idx = 10000;
while True:
if self.__tranWordDic.has_key("\"a"+str(idx)+"\""):
idx += 1
continue;
else:
return str(idx);
	# Write the i18n file and rewrite the scanned sources
def __exportFile(self):
content = "i18n = {} \n";
for k,v in self.__tranWordDic.items():
content += "i18n[" + k + "] = " + self.__tranWordDic[k] + "\n";
pFile = open(self.__outDir,"w")
pFile.write(content)
pFile.close()
for path,content in self.__scanFilePaths.items():
if len(self.__tranWordArrs[path]) > 0 :
for param in self.__tranWordArrs[path]:
content = content.replace(param.get("val"),"i18n["+param.get("key")+"]")
self.__scanFilePaths[path] = content
pFile = open(path,"w")
pFile.write(content)
pFile.close()
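
# Illustrative usage (assumes a config.json with "scan_suffix" and
# "ignore_path" entries next to this script, as read in __preload):
#   fm = FileManager()
#   fm.setInDir("./src")
#   fm.setOutDir("./src/i18n.lua")
#   fm.setLogCallFuc(lambda info: None)
#   fm.run()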
| mit | 7,191,918,083,643,452,000 | 23.60355 | 83 | 0.619769 | false |
leethargo/geonet | geonet/network.py | 1 | 3958 | '''
Data structures for (Steiner) tree networks
'''
import networkx as nx
class Net(object):
'''Network'''
def __init__(self, nodes, arcs):
'''
nodes: node IDs
arcs: tuples of node IDs (tail, head)
'''
self.dg = nx.DiGraph()
self.dg.add_nodes_from(nodes)
self.dg.add_edges_from(arcs)
def get_nodes(self):
return self.dg.nodes()
def get_arcs(self):
return self.dg.edges()
def get_degree(self, n):
return self.dg.degree(n)
def get_neighbors(self, n):
return self.dg.predecessors(n) + self.dg.successors(n)
def __repr__(self):
_nodes = ', '.join([repr(n) for n in self.get_nodes()])
_arcs = ', '.join([repr(a) for a in self.get_arcs()])
return 'Net([%s], [%s])' % (_nodes, _arcs)
# http://stackoverflow.com/questions/390250/
def __eq__(self, other):
if isinstance(other, self.__class__):
# unfortunately, networkx.DiGraph does not implement __eq__
return all([
other.dg.node == self.dg.node,
other.dg.edge == self.dg.edge,
])
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
class SteinerTree(Net):
'''Steiner tree with some node positions fixed'''
def __init__(self, nodes, arcs, pos):
'''
nodes: node IDs
arcs: tuples of node IDs (tail, head)
pos: map from (terminal) node IDs to position tuple
'''
super(SteinerTree, self).__init__(nodes, arcs)
for k,v in pos.items():
self.dg.node[k]['pos'] = v
def is_steiner(self, n):
return not self.is_terminal(n)
def is_terminal(self, n):
return 'pos' in self.dg.node[n]
def get_terminal_nodes(self):
return [n for n in self.get_nodes() if self.is_terminal(n)]
def get_steiner_nodes(self):
return [n for n in self.get_nodes() if self.is_steiner(n)]
def get_position(self, t):
if not self.is_terminal(t):
raise KeyError("Not a terminal: %s" % t)
return self.dg.node[t]['pos']
def get_terminal_positions(self):
return {t: self.get_position(t) for t in self.get_terminal_nodes()}
def is_full_steiner_topology(self):
'''or is the tree degenerate?
three criteria are applied:
1. number of Steiner nodes equals the number of terminals - 2
2. Steiner nodes have degree 3
3. Terminals have degree 1 and are connected to Steiner nodes
'''
terms = self.get_terminal_nodes()
steins = self.get_steiner_nodes()
# special cases for n < 3
if len(terms) < 3 and len(steins) == 0:
return True
# general case
if len(steins) != len(terms) - 2:
return False
if any(self.get_degree(s) != 3 for s in steins):
return False
if any(self.get_degree(t) != 1 for t in terms):
return False
for t in terms:
neighbors = self.get_neighbors(t)
assert len(neighbors) == 1
n = neighbors[0]
if self.is_terminal(n):
return False
return True
def __repr__(self):
_nodes = ', '.join([repr(n) for n in self.get_nodes()])
_arcs = ', '.join([repr(a) for a in self.get_arcs()])
_pos = ', '.join('%s:%s' % (t, self.get_position(t))
for t in self.get_terminal_nodes())
return 'SteinerTree([%s], [%s], {%s})' % (_nodes, _arcs, _pos)
def __eq__(self, other):
return super(SteinerTree, self).__eq__(other) and \
other.get_terminal_positions() == self.get_terminal_positions()
def merge_pos(tree, steiner_pos):
'''build dict as union from terminal and steiner positions'''
pos = dict(tree.get_terminal_positions())
pos.update(steiner_pos)
return pos
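

# Illustrative usage sketch (not part of the original module): the smallest
# full Steiner topology, three terminals joined through one Steiner node.
if __name__ == '__main__':
    example = SteinerTree(
        nodes=['t1', 't2', 't3', 's'],
        arcs=[('t1', 's'), ('t2', 's'), ('s', 't3')],
        pos={'t1': (0.0, 0.0), 't2': (0.0, 2.0), 't3': (2.0, 1.0)})
    print(example.is_full_steiner_topology())            # expected: True
    print(merge_pos(example, {'s': (1.0, 1.0)}))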
| mit | 1,535,741,604,945,658,400 | 29.921875 | 75 | 0.54952 | false |
chiviak/headphones | lib/unidecode/x021.py | 62 | 3964 | data = (
'', # 0x00
'', # 0x01
'C', # 0x02
'', # 0x03
'', # 0x04
'', # 0x05
'', # 0x06
'', # 0x07
'', # 0x08
'', # 0x09
'', # 0x0a
'', # 0x0b
'', # 0x0c
'H', # 0x0d
'', # 0x0e
'', # 0x0f
'', # 0x10
'', # 0x11
'', # 0x12
'', # 0x13
'', # 0x14
'N', # 0x15
'', # 0x16
'', # 0x17
'', # 0x18
'P', # 0x19
'Q', # 0x1a
'', # 0x1b
'', # 0x1c
'R', # 0x1d
'', # 0x1e
'', # 0x1f
'(sm)', # 0x20
'TEL', # 0x21
'(tm)', # 0x22
'', # 0x23
'Z', # 0x24
'', # 0x25
'', # 0x26
'', # 0x27
'', # 0x28
'', # 0x29
'K', # 0x2a
'A', # 0x2b
'', # 0x2c
'', # 0x2d
'e', # 0x2e
'e', # 0x2f
'E', # 0x30
'F', # 0x31
'F', # 0x32
'M', # 0x33
'', # 0x34
'', # 0x35
'', # 0x36
'', # 0x37
'', # 0x38
'', # 0x39
'', # 0x3a
'FAX', # 0x3b
'', # 0x3c
'', # 0x3d
'', # 0x3e
'', # 0x3f
'[?]', # 0x40
'[?]', # 0x41
'[?]', # 0x42
'[?]', # 0x43
'[?]', # 0x44
'D', # 0x45
'd', # 0x46
'e', # 0x47
'i', # 0x48
'j', # 0x49
'[?]', # 0x4a
'[?]', # 0x4b
'[?]', # 0x4c
'[?]', # 0x4d
'F', # 0x4e
'[?]', # 0x4f
'[?]', # 0x50
'[?]', # 0x51
'[?]', # 0x52
' 1/3 ', # 0x53
' 2/3 ', # 0x54
' 1/5 ', # 0x55
' 2/5 ', # 0x56
' 3/5 ', # 0x57
' 4/5 ', # 0x58
' 1/6 ', # 0x59
' 5/6 ', # 0x5a
' 1/8 ', # 0x5b
' 3/8 ', # 0x5c
' 5/8 ', # 0x5d
' 7/8 ', # 0x5e
' 1/', # 0x5f
'I', # 0x60
'II', # 0x61
'III', # 0x62
'IV', # 0x63
'V', # 0x64
'VI', # 0x65
'VII', # 0x66
'VIII', # 0x67
'IX', # 0x68
'X', # 0x69
'XI', # 0x6a
'XII', # 0x6b
'L', # 0x6c
'C', # 0x6d
'D', # 0x6e
'M', # 0x6f
'i', # 0x70
'ii', # 0x71
'iii', # 0x72
'iv', # 0x73
'v', # 0x74
'vi', # 0x75
'vii', # 0x76
'viii', # 0x77
'ix', # 0x78
'x', # 0x79
'xi', # 0x7a
'xii', # 0x7b
'l', # 0x7c
'c', # 0x7d
'd', # 0x7e
'm', # 0x7f
'(D', # 0x80
'D)', # 0x81
'((|))', # 0x82
')', # 0x83
'[?]', # 0x84
'[?]', # 0x85
'[?]', # 0x86
'[?]', # 0x87
'[?]', # 0x88
'[?]', # 0x89
'[?]', # 0x8a
'[?]', # 0x8b
'[?]', # 0x8c
'[?]', # 0x8d
'[?]', # 0x8e
'[?]', # 0x8f
'-', # 0x90
'|', # 0x91
'-', # 0x92
'|', # 0x93
'-', # 0x94
'|', # 0x95
'\\', # 0x96
'/', # 0x97
'\\', # 0x98
'/', # 0x99
'-', # 0x9a
'-', # 0x9b
'~', # 0x9c
'~', # 0x9d
'-', # 0x9e
'|', # 0x9f
'-', # 0xa0
'|', # 0xa1
'-', # 0xa2
'-', # 0xa3
'-', # 0xa4
'|', # 0xa5
'-', # 0xa6
'|', # 0xa7
'|', # 0xa8
'-', # 0xa9
'-', # 0xaa
'-', # 0xab
'-', # 0xac
'-', # 0xad
'-', # 0xae
'|', # 0xaf
'|', # 0xb0
'|', # 0xb1
'|', # 0xb2
'|', # 0xb3
'|', # 0xb4
'|', # 0xb5
'^', # 0xb6
'V', # 0xb7
'\\', # 0xb8
'=', # 0xb9
'V', # 0xba
'^', # 0xbb
'-', # 0xbc
'-', # 0xbd
'|', # 0xbe
'|', # 0xbf
'-', # 0xc0
'-', # 0xc1
'|', # 0xc2
'|', # 0xc3
'=', # 0xc4
'|', # 0xc5
'=', # 0xc6
'=', # 0xc7
'|', # 0xc8
'=', # 0xc9
'|', # 0xca
'=', # 0xcb
'=', # 0xcc
'=', # 0xcd
'=', # 0xce
'=', # 0xcf
'=', # 0xd0
'|', # 0xd1
'=', # 0xd2
'|', # 0xd3
'=', # 0xd4
'|', # 0xd5
'\\', # 0xd6
'/', # 0xd7
'\\', # 0xd8
'/', # 0xd9
'=', # 0xda
'=', # 0xdb
'~', # 0xdc
'~', # 0xdd
'|', # 0xde
'|', # 0xdf
'-', # 0xe0
'|', # 0xe1
'-', # 0xe2
'|', # 0xe3
'-', # 0xe4
'-', # 0xe5
'-', # 0xe6
'|', # 0xe7
'-', # 0xe8
'|', # 0xe9
'|', # 0xea
'|', # 0xeb
'|', # 0xec
'|', # 0xed
'|', # 0xee
'|', # 0xef
'-', # 0xf0
'\\', # 0xf1
'\\', # 0xf2
'|', # 0xf3
'[?]', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)
| gpl-3.0 | -2,967,146,598,459,825,700 | 14.424125 | 18 | 0.288597 | false |
dav94/plastex | plasTeX/Packages/float.py | 8 | 1426 | #!/usr/bin/env python
import new
from plasTeX import Command, Environment
from plasTeX.Base.LaTeX.Floats import Float, Caption
class newfloat(Command):
args = 'name:str pos:str capfile:str [ reset:str ]'
def invoke(self, tex):
Command.invoke(self, tex)
name = str(self.attributes['name'])
# Create the float class and the caption class
floatcls = new.classobj(name, (Float,), {})
captioncls = new.classobj('caption', (Caption,),
{'macroName':'caption', 'counter':name})
floatcls.caption = captioncls
c = self.ownerDocument.context
c.addGlobal(name, floatcls)
# Create a counter
resetby = self.attributes['reset'] or 'chapter'
c.newcounter(name, resetby, 0, format='${the%s}.${%s}' % (resetby,name))
# Create the float name macro
c.newcommand(name+'name', 0, name)
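
# LaTeX-side usage matching the argument spec above (illustrative):
#   \newfloat{program}{tbp}{lop}[chapter]
# i.e. float name, default placement, caption-file extension and an optional
# counter to reset by.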
class floatstyle(Command):
args = 'style:str'
class restylefloat(Command):
args = 'float:str'
class floatname(Command):
args = 'float:str name:str'
def invoke(self, tex):
Command.invoke(self, tex)
float = str(self.attributes['float'])
name = self.attributes['name']
c = self.ownerDocument.context
c.newcommand(float+'name', 0, name)
class floatplacement(Command):
args = 'float:str pos:str'
class listof(Command):
args = 'float:str title'
| mit | -2,980,041,299,201,317,000 | 28.708333 | 80 | 0.619215 | false |
fengzhyuan/scikit-learn | examples/semi_supervised/plot_label_propagation_versus_svm_iris.py | 286 | 2378 | """
=====================================================================
Decision boundary of label propagation versus SVM on the Iris dataset
=====================================================================
Comparison for decision boundary generated on iris dataset
between Label Propagation and SVM.
This demonstrates Label Propagation learning a good boundary
even with a small amount of labeled data.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn import svm
from sklearn.semi_supervised import label_propagation
rng = np.random.RandomState(0)
iris = datasets.load_iris()
X = iris.data[:, :2]
y = iris.target
# step size in the mesh
h = .02
y_30 = np.copy(y)
y_30[rng.rand(len(y)) < 0.3] = -1
y_50 = np.copy(y)
y_50[rng.rand(len(y)) < 0.5] = -1
# we create an instance of SVM and fit out data. We do not scale our
# data since we want to plot the support vectors
ls30 = (label_propagation.LabelSpreading().fit(X, y_30),
y_30)
ls50 = (label_propagation.LabelSpreading().fit(X, y_50),
y_50)
ls100 = (label_propagation.LabelSpreading().fit(X, y), y)
rbf_svc = (svm.SVC(kernel='rbf').fit(X, y), y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['Label Spreading 30% data',
'Label Spreading 50% data',
'Label Spreading 100% data',
'SVC with rbf kernel']
color_map = {-1: (1, 1, 1), 0: (0, 0, .9), 1: (1, 0, 0), 2: (.8, .6, 0)}
for i, (clf, y_train) in enumerate((ls30, ls50, ls100, rbf_svc)):
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(2, 2, i + 1)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('off')
# Plot also the training points
colors = [color_map[y] for y in y_train]
plt.scatter(X[:, 0], X[:, 1], c=colors, cmap=plt.cm.Paired)
plt.title(titles[i])
plt.text(.90, 0, "Unlabeled points are colored white")
plt.show()
| bsd-3-clause | 8,661,456,473,112,505,000 | 29.101266 | 74 | 0.598402 | false |
tuomas777/parkkihubi | parkings/tests/api/utils.py | 1 | 3212 | import json
import uuid
from rest_framework.authtoken.models import Token
ALL_METHODS = ('get', 'post', 'put', 'patch', 'delete')
def token_authenticate(api_client, user):
token, _ = Token.objects.get_or_create(user=user)
api_client.credentials(HTTP_AUTHORIZATION='ApiKey ' + token.key)
return api_client
def get(api_client, url, status_code=200):
response = api_client.get(url)
assert response.status_code == status_code, '%s %s' % (response.status_code, response.data)
return json.loads(response.content.decode('utf-8'))
def post(api_client, url, data=None, status_code=201):
response = api_client.post(url, data)
assert response.status_code == status_code, '%s %s' % (response.status_code, response.data)
return json.loads(response.content.decode('utf-8'))
def put(api_client, url, data=None, status_code=200):
response = api_client.put(url, data)
assert response.status_code == status_code, '%s %s' % (response.status_code, response.data)
return json.loads(response.content.decode('utf-8'))
def patch(api_client, url, data=None, status_code=200):
response = api_client.patch(url, data)
assert response.status_code == status_code, '%s %s' % (response.status_code, response.data)
return json.loads(response.content.decode('utf-8'))
def delete(api_client, url, status_code=204):
response = api_client.delete(url)
assert response.status_code == status_code, '%s %s' % (response.status_code, response.data)
def check_method_status_codes(api_client, urls, methods, status_code, **kwargs):
# accept also a single url as a string
if isinstance(urls, str):
urls = (urls,)
for url in urls:
for method in methods:
response = getattr(api_client, method)(url)
assert response.status_code == status_code, (
'%s %s expected %s, got %s %s' % (method, url, status_code, response.status_code, response.data)
)
error_code = kwargs.get('error_code')
if error_code:
assert response.data['code'] == error_code, (
'%s %s expected error_code %s, got %s' % (method, url, error_code, response.data['code'])
)
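# Typical use in a test module (illustrative only; the URL and error code are
# hypothetical, not taken from this repository):
#     check_method_status_codes(api_client, '/v1/parkings/', ALL_METHODS, 401,
#                               error_code='not_authenticated')
# asserts that every HTTP method on the endpoint is rejected with 401 and the
# expected error code.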
def check_list_endpoint_base_fields(data):
assert set(data.keys()) == {'next', 'previous', 'count', 'results'}
def check_required_fields(api_client, url, expected_required_fields, detail_endpoint=False):
method = put if detail_endpoint else post
# send empty data to get all required fields in an error message, they will be in form
# { "<field name>": ["This field is required"], "<field name 2>": ["This field is required"], ...}
response_data = method(api_client, url, {}, 400)
required_fields = set()
for field in response_data:
if isinstance(response_data[field], list) and 'This field is required.' in response_data[field]:
required_fields.add(field)
assert required_fields == expected_required_fields, '%s != %s' % (required_fields, expected_required_fields)
def get_ids_from_results(results, as_set=True):
id_list = [uuid.UUID(result['id']) for result in results]
return set(id_list) if as_set else id_list
| mit | -7,520,562,445,394,078,000 | 37.698795 | 112 | 0.655666 | false |
michaelhush/M-LOOP | docs/conf.py | 1 | 10253 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# M-LOOP documentation build configuration file, created by
# sphinx-quickstart on Wed Aug 24 11:34:47 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.mathjax'
]
# Napoleon settings
napoleon_google_docstring = True
napoleon_include_private_with_doc = True
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'M-LOOP'
copyright = '2016, Michael R Hush'
author = 'Michael R Hush'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '3.2'
# The full version, including alpha/beta/rc tags.
release = '3.2.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
# exclude_patterns = ['_templates']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Custom sidebar templates, maps document names to template names.
html_sidebars = { '**': ['about.html','navigation.html','relations.html', 'searchbox.html'], }
#'globaltoc.html',
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'logo':'M-LOOP_logo.png',
'logo_name':True,
'description':'Machine-Learning Online Optimization Package',
'github_user':'michaelhush',
'github_repo':'M-LOOP',
'github_banner':True,
'font_family':"Arial, Helvetica, sans-serif",
'head_font_family':"Arial, Helvetica, sans-serif",
'analytics_id':'UA-83520804-1'}
#'github_button':True,
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#html_title = 'M-LOOP v3.2.1'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = '_static/M-LOOP_logo.png'
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '_static/M-LOOP_logo.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr', 'zh'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'M-LOOPdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'M-LOOP.tex', 'M-LOOP Documentation',
'Michael R Hush', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = 'M-LOOP_logo.pdf'
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'm-loop', 'M-LOOP Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'M-LOOP', 'M-LOOP Documentation',
author, 'M-LOOP', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| mit | -4,014,117,061,047,745,000 | 31.549206 | 95 | 0.7075 | false |
rouxcode/django-admin-sort | admin_sort/tests/testapp/models.py | 1 | 5140 | # -*- coding: utf-8 -*-
from django.db import models
from admin_sort.models import SortableModelMixin
class Author(SortableModelMixin, models.Model):
"""
    SortableModelMixin: on save, intercept the call, first update the other
    instances whose ordering is affected, then save this instance
"""
name = models.CharField('Name', null=True, blank=True, max_length=255)
my_order = models.PositiveIntegerField(default=0, blank=False, null=False)
position_field = 'my_order'
insert_position = 'last'
class Meta:
ordering = ('my_order', )
def __unicode__(self):
return self.name
class SortableBook(models.Model):
"""
    the classic sortable change list: drag & drop sorting, using SortableAdminMixin
"""
title = models.CharField('Title', null=True, blank=True, max_length=255)
my_order = models.PositiveIntegerField(default=0, blank=False, null=False)
author = models.ForeignKey(Author, null=True, on_delete=models.SET_NULL)
class Meta(object):
ordering = ('my_order',)
def __unicode__(self):
return self.title
class AnotherSortableBook(models.Model):
"""
    the other sortable change list: dropdown sorting,
using DropdownSortableAdminMixin
"""
title = models.CharField('Title', null=True, blank=True, max_length=255)
my_order = models.PositiveIntegerField(default=0, blank=False, null=False)
author = models.ForeignKey(Author, null=True, on_delete=models.SET_NULL)
class Meta(object):
ordering = ('my_order',)
def __unicode__(self):
return self.title
class Chapter(models.Model):
"""
    various SortableInlineMixin modes
"""
title = models.CharField('Title', null=True, blank=True, max_length=255)
book = models.ForeignKey(SortableBook, null=True,
on_delete=models.SET_NULL)
another_book = models.ForeignKey(
AnotherSortableBook, null=True, on_delete=models.SET_NULL)
my_order = models.PositiveIntegerField(blank=False, null=True)
another_order = models.PositiveIntegerField(blank=False, null=True)
class Meta(object):
ordering = ('my_order', 'another_order', )
def __unicode__(self):
return 'Chapter: {0}'.format(self.title)
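# Note: Chapter carries two order fields because it is (presumably) inlined
# under both SortableBook and AnotherSortableBook in the test admin, so each
# parent can keep its own ordering column.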
class Notes(models.Model):
"""
    various SortableInlineMixin modes
"""
book = models.ForeignKey(SortableBook, null=True,
on_delete=models.SET_NULL)
another_book = models.ForeignKey(
AnotherSortableBook, null=True, on_delete=models.SET_NULL)
note = models.CharField('Note', null=True, blank=True, max_length=255)
another_field = models.CharField(
'Note2', null=True, blank=True, max_length=255)
one_more = models.CharField(
'Note3 (simulating tabular inlines)',
null=True, blank=True, max_length=255)
my_order = models.PositiveIntegerField(blank=False, null=True)
another_order = models.PositiveIntegerField(blank=False, null=True)
class Meta(object):
ordering = ('my_order', 'another_order', )
def __unicode__(self):
return 'Note: {0}'.format(self.note)
class ChapterExtraZero(models.Model):
"""
    various SortableInlineMixin modes (testing "extra" on admin.Meta)
"""
title = models.CharField('Title', null=True, blank=True, max_length=255)
book = models.ForeignKey(SortableBook, null=True,
on_delete=models.SET_NULL)
my_order = models.PositiveIntegerField(blank=False, null=True)
class Meta(object):
ordering = ('my_order', '-title')
def __unicode__(self):
return 'ChapterExtraZero: {0}'.format(self.title)
class NotesExtraZero(models.Model):
"""
    various SortableInlineMixin modes (testing "extra" on admin.Meta)
"""
another_field = models.CharField(
'Note2', null=True, blank=True, max_length=255)
book = models.ForeignKey(SortableBook, null=True,
on_delete=models.SET_NULL)
my_order = models.PositiveIntegerField(blank=False, null=True)
class Meta(object):
ordering = ('my_order', 'another_field')
def __unicode__(self):
return 'NotesExtraZero: {0}'.format(self.another_field)
class Another(models.Model):
"""
normal inline - affected in any way!?
"""
title = models.CharField('Title', null=True, blank=True, max_length=255)
book = models.ForeignKey(SortableBook, null=True,
on_delete=models.SET_NULL)
my_order = models.PositiveIntegerField(blank=False, null=True)
class Meta(object):
ordering = ('my_order', '-title')
def __unicode__(self):
return 'Another: {0}'.format(self.title)
class AnotherOne(models.Model):
"""
normal inline - affected in any way!?
"""
another_field = models.CharField(
'Note2', null=True, blank=True, max_length=255)
book = models.ForeignKey(SortableBook, null=True,
on_delete=models.SET_NULL)
my_order = models.PositiveIntegerField(blank=False, null=True)
def __unicode__(self):
return 'AnotherOne: {0}'.format(self.another_field)
| mit | -3,920,569,112,322,875,400 | 31.531646 | 78 | 0.647665 | false |
ngoix/OCRF | sklearn/linear_model/ransac.py | 14 | 17163 | # coding: utf-8
# Author: Johannes Schönberger
#
# License: BSD 3 clause
import numpy as np
import warnings
from ..base import BaseEstimator, MetaEstimatorMixin, RegressorMixin, clone
from ..utils import check_random_state, check_array, check_consistent_length
from ..utils.random import sample_without_replacement
from ..utils.validation import check_is_fitted
from .base import LinearRegression
from ..utils.validation import has_fit_parameter
_EPSILON = np.spacing(1)
def _dynamic_max_trials(n_inliers, n_samples, min_samples, probability):
    """Determine the number of trials such that at least one outlier-free subset is
sampled for the given inlier/outlier ratio.
Parameters
----------
n_inliers : int
Number of inliers in the data.
n_samples : int
Total number of samples in the data.
min_samples : int
Minimum number of samples chosen randomly from original data.
probability : float
Probability (confidence) that one outlier-free sample is generated.
Returns
-------
trials : int
Number of trials.
"""
inlier_ratio = n_inliers / float(n_samples)
nom = max(_EPSILON, 1 - probability)
denom = max(_EPSILON, 1 - inlier_ratio ** min_samples)
if nom == 1:
return 0
if denom == 1:
return float('inf')
return abs(float(np.ceil(np.log(nom) / np.log(denom))))
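# Illustrative check of the formula above: with n_inliers=50, n_samples=100,
# min_samples=2 and probability=0.99 the inlier ratio is 0.5, so
# log(1 - 0.99) / log(1 - 0.5 ** 2) = log(0.01) / log(0.75) ~= 16.01,
# which np.ceil rounds up to 17 trials.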
class RANSACRegressor(BaseEstimator, MetaEstimatorMixin, RegressorMixin):
"""RANSAC (RANdom SAmple Consensus) algorithm.
RANSAC is an iterative algorithm for the robust estimation of parameters
from a subset of inliers from the complete data set. More information can
be found in the general documentation of linear models.
A detailed description of the algorithm can be found in the documentation
of the ``linear_model`` sub-package.
Read more in the :ref:`User Guide <ransac_regression>`.
Parameters
----------
base_estimator : object, optional
Base estimator object which implements the following methods:
* `fit(X, y)`: Fit model to given training data and target values.
* `score(X, y)`: Returns the mean accuracy on the given test data,
which is used for the stop criterion defined by `stop_score`.
Additionally, the score is used to decide which of two equally
large consensus sets is chosen as the better one.
If `base_estimator` is None, then
``base_estimator=sklearn.linear_model.LinearRegression()`` is used for
target values of dtype float.
Note that the current implementation only supports regression
estimators.
min_samples : int (>= 1) or float ([0, 1]), optional
Minimum number of samples chosen randomly from original data. Treated
as an absolute number of samples for `min_samples >= 1`, treated as a
relative number `ceil(min_samples * X.shape[0]`) for
`min_samples < 1`. This is typically chosen as the minimal number of
samples necessary to estimate the given `base_estimator`. By default a
``sklearn.linear_model.LinearRegression()`` estimator is assumed and
`min_samples` is chosen as ``X.shape[1] + 1``.
residual_threshold : float, optional
Maximum residual for a data sample to be classified as an inlier.
By default the threshold is chosen as the MAD (median absolute
deviation) of the target values `y`.
is_data_valid : callable, optional
This function is called with the randomly selected data before the
model is fitted to it: `is_data_valid(X, y)`. If its return value is
False the current randomly chosen sub-sample is skipped.
is_model_valid : callable, optional
This function is called with the estimated model and the randomly
selected data: `is_model_valid(model, X, y)`. If its return value is
False the current randomly chosen sub-sample is skipped.
Rejecting samples with this function is computationally costlier than
with `is_data_valid`. `is_model_valid` should therefore only be used if
the estimated model is needed for making the rejection decision.
max_trials : int, optional
Maximum number of iterations for random sample selection.
stop_n_inliers : int, optional
Stop iteration if at least this number of inliers are found.
stop_score : float, optional
Stop iteration if score is greater equal than this threshold.
stop_probability : float in range [0, 1], optional
RANSAC iteration stops if at least one outlier-free set of the training
        data is sampled in RANSAC. This requires generating at least N
samples (iterations)::
N >= log(1 - probability) / log(1 - e**m)
        where the probability (confidence) is typically set to a high value such
as 0.99 (the default) and e is the current fraction of inliers w.r.t.
the total number of samples.
residual_metric : callable, optional
Metric to reduce the dimensionality of the residuals to 1 for
multi-dimensional target values ``y.shape[1] > 1``. By default the sum
of absolute differences is used::
lambda dy: np.sum(np.abs(dy), axis=1)
NOTE: residual_metric is deprecated from 0.18 and will be removed in 0.20
Use ``loss`` instead.
    loss : string, callable, optional, default "absolute_loss"
String inputs, "absolute_loss" and "squared_loss" are supported which
find the absolute loss and squared loss per sample
respectively.
If ``loss`` is a callable, then it should be a function that takes
two arrays as inputs, the true and predicted value and returns a 1-D
array with the ``i``th value of the array corresponding to the loss
on `X[i]`.
If the loss on a sample is greater than the ``residual_threshold``, then
this sample is classified as an outlier.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Attributes
----------
estimator_ : object
Best fitted model (copy of the `base_estimator` object).
n_trials_ : int
Number of random selection trials until one of the stop criteria is
met. It is always ``<= max_trials``.
inlier_mask_ : bool array of shape [n_samples]
Boolean mask of inliers classified as ``True``.
References
----------
.. [1] http://en.wikipedia.org/wiki/RANSAC
.. [2] http://www.cs.columbia.edu/~belhumeur/courses/compPhoto/ransac.pdf
.. [3] http://www.bmva.org/bmvc/2009/Papers/Paper355/Paper355.pdf
"""
def __init__(self, base_estimator=None, min_samples=None,
residual_threshold=None, is_data_valid=None,
is_model_valid=None, max_trials=100,
stop_n_inliers=np.inf, stop_score=np.inf,
stop_probability=0.99, residual_metric=None,
loss='absolute_loss', random_state=None):
self.base_estimator = base_estimator
self.min_samples = min_samples
self.residual_threshold = residual_threshold
self.is_data_valid = is_data_valid
self.is_model_valid = is_model_valid
self.max_trials = max_trials
self.stop_n_inliers = stop_n_inliers
self.stop_score = stop_score
self.stop_probability = stop_probability
self.residual_metric = residual_metric
self.random_state = random_state
self.loss = loss
def fit(self, X, y, sample_weight=None):
"""Fit estimator using RANSAC algorithm.
Parameters
----------
X : array-like or sparse matrix, shape [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values.
        sample_weight : array-like, shape = [n_samples]
Individual weights for each sample
raises error if sample_weight is passed and base_estimator
fit method does not support it.
Raises
------
ValueError
If no valid consensus set could be found. This occurs if
`is_data_valid` and `is_model_valid` return False for all
`max_trials` randomly chosen sub-samples.
"""
X = check_array(X, accept_sparse='csr')
y = check_array(y, ensure_2d=False)
check_consistent_length(X, y)
if self.base_estimator is not None:
base_estimator = clone(self.base_estimator)
else:
base_estimator = LinearRegression()
if self.min_samples is None:
# assume linear model by default
min_samples = X.shape[1] + 1
elif 0 < self.min_samples < 1:
min_samples = np.ceil(self.min_samples * X.shape[0])
elif self.min_samples >= 1:
if self.min_samples % 1 != 0:
raise ValueError("Absolute number of samples must be an "
"integer value.")
min_samples = self.min_samples
else:
raise ValueError("Value for `min_samples` must be scalar and "
"positive.")
if min_samples > X.shape[0]:
raise ValueError("`min_samples` may not be larger than number "
"of samples ``X.shape[0]``.")
if self.stop_probability < 0 or self.stop_probability > 1:
raise ValueError("`stop_probability` must be in range [0, 1].")
if self.residual_threshold is None:
# MAD (median absolute deviation)
residual_threshold = np.median(np.abs(y - np.median(y)))
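            # e.g. for y = [1, 2, 2, 3, 10]: median(y) = 2, |y - 2| = [1, 0, 0, 1, 8],
            # so the default threshold is median([1, 0, 0, 1, 8]) = 1.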
else:
residual_threshold = self.residual_threshold
if self.residual_metric is not None:
warnings.warn(
"'residual_metric' will be removed in version 0.20. Use "
"'loss' instead.", DeprecationWarning)
if self.loss == "absolute_loss":
if y.ndim == 1:
loss_function = lambda y_true, y_pred: np.abs(y_true - y_pred)
else:
loss_function = lambda \
y_true, y_pred: np.sum(np.abs(y_true - y_pred), axis=1)
elif self.loss == "squared_loss":
if y.ndim == 1:
loss_function = lambda y_true, y_pred: (y_true - y_pred) ** 2
else:
loss_function = lambda \
y_true, y_pred: np.sum((y_true - y_pred) ** 2, axis=1)
elif callable(self.loss):
loss_function = self.loss
else:
raise ValueError(
"loss should be 'absolute_loss', 'squared_loss' or a callable."
"Got %s. " % self.loss)
random_state = check_random_state(self.random_state)
try: # Not all estimator accept a random_state
base_estimator.set_params(random_state=random_state)
except ValueError:
pass
estimator_fit_has_sample_weight = has_fit_parameter(base_estimator,
"sample_weight")
estimator_name = type(base_estimator).__name__
if (sample_weight is not None and not
estimator_fit_has_sample_weight):
raise ValueError("%s does not support sample_weight. Samples"
" weights are only used for the calibration"
" itself." % estimator_name)
if sample_weight is not None:
sample_weight = np.asarray(sample_weight)
n_inliers_best = 0
score_best = np.inf
inlier_mask_best = None
X_inlier_best = None
y_inlier_best = None
# number of data samples
n_samples = X.shape[0]
sample_idxs = np.arange(n_samples)
n_samples, _ = X.shape
for self.n_trials_ in range(1, self.max_trials + 1):
# choose random sample set
subset_idxs = sample_without_replacement(n_samples, min_samples,
random_state=random_state)
X_subset = X[subset_idxs]
y_subset = y[subset_idxs]
# check if random sample set is valid
if (self.is_data_valid is not None
and not self.is_data_valid(X_subset, y_subset)):
continue
# fit model for current random sample set
if sample_weight is None:
base_estimator.fit(X_subset, y_subset)
else:
base_estimator.fit(X_subset, y_subset,
sample_weight=sample_weight[subset_idxs])
# check if estimated model is valid
if (self.is_model_valid is not None and not
self.is_model_valid(base_estimator, X_subset, y_subset)):
continue
# residuals of all data for current random sample model
y_pred = base_estimator.predict(X)
# XXX: Deprecation: Remove this if block in 0.20
if self.residual_metric is not None:
diff = y_pred - y
if diff.ndim == 1:
diff = diff.reshape(-1, 1)
residuals_subset = self.residual_metric(diff)
else:
residuals_subset = loss_function(y, y_pred)
# classify data into inliers and outliers
inlier_mask_subset = residuals_subset < residual_threshold
n_inliers_subset = np.sum(inlier_mask_subset)
# less inliers -> skip current random sample
if n_inliers_subset < n_inliers_best:
continue
if n_inliers_subset == 0:
raise ValueError("No inliers found, possible cause is "
"setting residual_threshold ({0}) too low.".format(
self.residual_threshold))
# extract inlier data set
inlier_idxs_subset = sample_idxs[inlier_mask_subset]
X_inlier_subset = X[inlier_idxs_subset]
y_inlier_subset = y[inlier_idxs_subset]
# score of inlier data set
score_subset = base_estimator.score(X_inlier_subset,
y_inlier_subset)
# same number of inliers but worse score -> skip current random
# sample
if (n_inliers_subset == n_inliers_best
and score_subset < score_best):
continue
# save current random sample as best sample
n_inliers_best = n_inliers_subset
score_best = score_subset
inlier_mask_best = inlier_mask_subset
X_inlier_best = X_inlier_subset
y_inlier_best = y_inlier_subset
# break if sufficient number of inliers or score is reached
if (n_inliers_best >= self.stop_n_inliers
or score_best >= self.stop_score
or self.n_trials_
>= _dynamic_max_trials(n_inliers_best, n_samples,
min_samples,
self.stop_probability)):
break
# if none of the iterations met the required criteria
if inlier_mask_best is None:
raise ValueError(
"RANSAC could not find valid consensus set, because"
" either the `residual_threshold` rejected all the samples or"
" `is_data_valid` and `is_model_valid` returned False for all"
" `max_trials` randomly ""chosen sub-samples. Consider "
"relaxing the ""constraints.")
# estimate final model using all inliers
base_estimator.fit(X_inlier_best, y_inlier_best)
self.estimator_ = base_estimator
self.inlier_mask_ = inlier_mask_best
return self
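    # Minimal usage sketch (illustrative, with made-up data; not taken from the
    # scikit-learn docs):
    #     X = np.random.rand(100, 1)
    #     y = 3 * X.ravel() + np.random.randn(100) * 0.1
    #     y[::10] += 10  # inject some outliers
    #     reg = RANSACRegressor().fit(X, y)
    #     reg.predict(X[:5]); reg.inlier_mask_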
def predict(self, X):
"""Predict using the estimated model.
This is a wrapper for `estimator_.predict(X)`.
Parameters
----------
X : numpy array of shape [n_samples, n_features]
Returns
-------
y : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self, 'estimator_')
return self.estimator_.predict(X)
def score(self, X, y):
"""Returns the score of the prediction.
This is a wrapper for `estimator_.score(X, y)`.
Parameters
----------
X : numpy array or sparse matrix of shape [n_samples, n_features]
Training data.
y : array, shape = [n_samples] or [n_samples, n_targets]
Target values.
Returns
-------
z : float
Score of the prediction.
"""
check_is_fitted(self, 'estimator_')
return self.estimator_.score(X, y)
| bsd-3-clause | 4,517,217,463,929,067,500 | 37.222717 | 81 | 0.590782 | false |
openhatch/new-mini-tasks | vendor/packages/Django/django/core/management/sql.py | 104 | 7942 | from __future__ import unicode_literals
import codecs
import os
import re
from django.conf import settings
from django.core.management.base import CommandError
from django.db import models
from django.db.models import get_models
from django.utils._os import upath
def sql_create(app, style, connection):
"Returns a list of the CREATE TABLE SQL statements for the given app."
if connection.settings_dict['ENGINE'] == 'django.db.backends.dummy':
# This must be the "dummy" database backend, which means the user
# hasn't set ENGINE for the database.
raise CommandError("Django doesn't know which syntax to use for your SQL statements,\n" +
"because you haven't properly specified the ENGINE setting for the database.\n" +
"see: https://docs.djangoproject.com/en/dev/ref/settings/#databases")
# Get installed models, so we generate REFERENCES right.
# We trim models from the current app so that the sqlreset command does not
# generate invalid SQL (leaving models out of known_models is harmless, so
# we can be conservative).
app_models = models.get_models(app, include_auto_created=True)
final_output = []
tables = connection.introspection.table_names()
known_models = set([model for model in connection.introspection.installed_models(tables) if model not in app_models])
pending_references = {}
for model in app_models:
output, references = connection.creation.sql_create_model(model, style, known_models)
final_output.extend(output)
for refto, refs in references.items():
pending_references.setdefault(refto, []).extend(refs)
if refto in known_models:
final_output.extend(connection.creation.sql_for_pending_references(refto, style, pending_references))
final_output.extend(connection.creation.sql_for_pending_references(model, style, pending_references))
# Keep track of the fact that we've created the table for this model.
known_models.add(model)
# Handle references to tables that are from other apps
# but don't exist physically.
not_installed_models = set(pending_references.keys())
if not_installed_models:
alter_sql = []
for model in not_installed_models:
alter_sql.extend(['-- ' + sql for sql in
connection.creation.sql_for_pending_references(model, style, pending_references)])
if alter_sql:
final_output.append('-- The following references should be added but depend on non-existent tables:')
final_output.extend(alter_sql)
return final_output
def sql_delete(app, style, connection):
"Returns a list of the DROP TABLE SQL statements for the given app."
# This should work even if a connection isn't available
try:
cursor = connection.cursor()
except:
cursor = None
# Figure out which tables already exist
if cursor:
table_names = connection.introspection.table_names(cursor)
else:
table_names = []
output = []
# Output DROP TABLE statements for standard application tables.
to_delete = set()
references_to_delete = {}
app_models = models.get_models(app, include_auto_created=True)
for model in app_models:
if cursor and connection.introspection.table_name_converter(model._meta.db_table) in table_names:
# The table exists, so it needs to be dropped
opts = model._meta
for f in opts.local_fields:
if f.rel and f.rel.to not in to_delete:
references_to_delete.setdefault(f.rel.to, []).append((model, f))
to_delete.add(model)
for model in app_models:
if connection.introspection.table_name_converter(model._meta.db_table) in table_names:
output.extend(connection.creation.sql_destroy_model(model, references_to_delete, style))
# Close database connection explicitly, in case this output is being piped
# directly into a database client, to avoid locking issues.
if cursor:
cursor.close()
connection.close()
return output[::-1] # Reverse it, to deal with table dependencies.
def sql_flush(style, connection, only_django=False, reset_sequences=True):
"""
Returns a list of the SQL statements used to flush the database.
If only_django is True, then only table names that have associated Django
models and are in INSTALLED_APPS will be included.
"""
if only_django:
tables = connection.introspection.django_table_names(only_existing=True)
else:
tables = connection.introspection.table_names()
seqs = connection.introspection.sequence_list() if reset_sequences else ()
statements = connection.ops.sql_flush(style, tables, seqs)
return statements
def sql_custom(app, style, connection):
"Returns a list of the custom table modifying SQL statements for the given app."
output = []
app_models = get_models(app)
for model in app_models:
output.extend(custom_sql_for_model(model, style, connection))
return output
def sql_indexes(app, style, connection):
"Returns a list of the CREATE INDEX SQL statements for all models in the given app."
output = []
for model in models.get_models(app):
output.extend(connection.creation.sql_indexes_for_model(model, style))
return output
def sql_all(app, style, connection):
"Returns a list of CREATE TABLE SQL, initial-data inserts, and CREATE INDEX SQL for the given module."
return sql_create(app, style, connection) + sql_custom(app, style, connection) + sql_indexes(app, style, connection)
def _split_statements(content):
comment_re = re.compile(r"^((?:'[^']*'|[^'])*?)--.*$")
statements = []
statement = []
for line in content.split("\n"):
cleaned_line = comment_re.sub(r"\1", line).strip()
if not cleaned_line:
continue
statement.append(cleaned_line)
if cleaned_line.endswith(";"):
statements.append(" ".join(statement))
statement = []
return statements
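# Illustrative behaviour of _split_statements (hypothetical input, not from
# Django's test suite): the content
#     "INSERT INTO a VALUES (1); -- seed row\nINSERT INTO a VALUES (2);"
# yields ["INSERT INTO a VALUES (1);", "INSERT INTO a VALUES (2);"], because the
# trailing "--" comment is stripped before complete ";"-terminated statements
# are collected.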
def custom_sql_for_model(model, style, connection):
opts = model._meta
app_dir = os.path.normpath(os.path.join(os.path.dirname(upath(models.get_app(model._meta.app_label).__file__)), 'sql'))
output = []
# Post-creation SQL should come before any initial SQL data is loaded.
# However, this should not be done for models that are unmanaged or
# for fields that are part of a parent model (via model inheritance).
if opts.managed:
post_sql_fields = [f for f in opts.local_fields if hasattr(f, 'post_create_sql')]
for f in post_sql_fields:
output.extend(f.post_create_sql(style, model._meta.db_table))
# Find custom SQL, if it's available.
backend_name = connection.settings_dict['ENGINE'].split('.')[-1]
sql_files = [os.path.join(app_dir, "%s.%s.sql" % (opts.object_name.lower(), backend_name)),
os.path.join(app_dir, "%s.sql" % opts.object_name.lower())]
for sql_file in sql_files:
if os.path.exists(sql_file):
with codecs.open(sql_file, 'U', encoding=settings.FILE_CHARSET) as fp:
# Some backends can't execute more than one SQL statement at a time,
# so split into separate statements.
output.extend(_split_statements(fp.read()))
return output
def emit_post_sync_signal(created_models, verbosity, interactive, db):
# Emit the post_sync signal for every application.
for app in models.get_apps():
app_name = app.__name__.split('.')[-2]
if verbosity >= 2:
print("Running post-sync handlers for application %s" % app_name)
models.signals.post_syncdb.send(sender=app, app=app,
created_models=created_models, verbosity=verbosity,
interactive=interactive, db=db)
| apache-2.0 | -5,693,812,492,929,659,000 | 39.728205 | 123 | 0.667086 | false |
datamade/pyhacrf | pyhacrf/pyhacrf.py | 1 | 13946 | # Authors: Dirko Coetsee
# License: 3-clause BSD
""" Implements a Hidden Alignment Conditional Random Field (HACRF). """
from __future__ import absolute_import
import numpy as np
import lbfgs
from .algorithms import forward, backward
from .algorithms import forward_predict, forward_max_predict
from .algorithms import gradient, gradient_sparse, populate_sparse_features, sparse_multiply
from . import adjacent
from .state_machine import DefaultStateMachine
class Hacrf(object):
""" Hidden Alignment Conditional Random Field with L2 regularizer.
Parameters
----------
l2_regularization : float, optional (default=0.0)
The regularization parameter.
optimizer : function, optional (default=None)
        The optimizing function that should be used to minimize the negative log posterior.
The function should have the signature:
min_objective, argmin_objective, ... = fmin(obj, x0, **optimizer_kwargs),
where obj is a function that returns
the objective function and its gradient given a parameter vector; and x0 is the initial parameter vector.
optimizer_kwargs : dictionary, optional (default=None)
The keyword arguments to pass to the optimizing function. Only used when `optimizer` is also specified.
state_machine : Instance of `GeneralStateMachine` or `DefaultStateMachine`, optional (default=`DefaultStateMachine`)
The state machine to use to generate the lattice.
viterbi : Boolean, optional (default=False).
Whether to use Viterbi (max-sum) decoding for predictions (not training)
instead of the default sum-product algorithm.
References
----------
See *A Conditional Random Field for Discriminatively-trained Finite-state String Edit Distance*
by McCallum, Bellare, and Pereira, and the report *Conditional Random Fields for Noisy text normalisation*
by Dirko Coetsee.
"""
def __init__(self,
l2_regularization=0.0,
optimizer=None,
optimizer_kwargs=None,
state_machine=None):
self.parameters = None
self.classes = None
self.l2_regularization = l2_regularization
self._optimizer = optimizer
self._optimizer_kwargs = optimizer_kwargs
self._optimizer_result = None
self._state_machine = state_machine
self._states_to_classes = None
self._evaluation_count = None
if (state_machine is None or
isinstance(state_machine, DefaultStateMachine)):
self._Model = _AdjacentModel
else:
self._Model = _GeneralModel
def fit(self, X, y, verbosity=0):
"""Fit the model according to the given training data.
Parameters
----------
X : List of ndarrays, one for each training example.
Each training example's shape is (string1_len, string2_len, n_features), where
string1_len and string2_len are the length of the two training strings and n_features the
number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
Returns
-------
self : object
Returns self.
"""
self.classes = list(set(y))
n_points = len(y)
if len(X) != n_points:
raise Exception('Number of training points should be the same as training labels.')
if not self._state_machine:
self._state_machine = DefaultStateMachine(self.classes)
# Initialize the parameters given the state machine, features, and target classes.
self.parameters = self._initialize_parameters(self._state_machine, X[0].shape[2])
# Create a new model object for each training example
models = [self._Model(self._state_machine, x, ty) for x, ty in zip(X, y)]
self._evaluation_count = 0
def _objective(parameters):
gradient = np.zeros(self.parameters.shape)
ll = 0.0 # Log likelihood
# TODO: Embarrassingly parallel
for model in models:
dll, dgradient = model.forward_backward(parameters.reshape(self.parameters.shape))
ll += dll
gradient += dgradient
parameters_without_bias = np.array(parameters, dtype='float64') # exclude the bias parameters from being regularized
parameters_without_bias[0] = 0
ll -= self.l2_regularization * np.dot(parameters_without_bias.T, parameters_without_bias)
gradient = gradient.flatten() - 2.0 * self.l2_regularization * parameters_without_bias
if verbosity > 0:
if self._evaluation_count == 0:
print('{:10} {:10} {:10}'.format('Iteration', 'Log-likelihood', '|gradient|'))
if self._evaluation_count % verbosity == 0:
print('{:10} {:10.4} {:10.4}'.format(self._evaluation_count, ll, (abs(gradient).sum())))
self._evaluation_count += 1
# TODO: Allow some of the parameters to be frozen. ie. not trained. Can later also completely remove
# TODO: the computation associated with these parameters.
return -ll, -gradient
def _objective_copy_gradient(paramers, g):
nll, ngradient = _objective(paramers)
g[:] = ngradient
return nll
if self._optimizer:
self.optimizer_result = self._optimizer(_objective, self.parameters.flatten(), **self._optimizer_kwargs)
self.parameters = self.optimizer_result[0].reshape(self.parameters.shape)
else:
optimizer = lbfgs.LBFGS()
final_betas = optimizer.minimize(_objective_copy_gradient,
x0=self.parameters.flatten(),
progress=None)
self.optimizer_result = final_betas
self.parameters = final_betas.reshape(self.parameters.shape)
self.parameters = np.asfortranarray(self.parameters)
return self
def predict_proba(self, X):
"""Probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
Parameters
----------
X : List of ndarrays, one for each training example.
            Each training example's shape is (string1_len, string2_len, n_features), where
string1_len and string2_len are the length of the two training strings and n_features the
number of features.
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in ``self.classes_``.
"""
predictions = [self._Model(self._state_machine, x).predict(self.parameters.T)
for x in X]
predictions = np.array(predictions)
return predictions
def fast_pair(self, x):
predictions = self._Model(self._state_machine, x).predict(self.parameters.T)
return predictions
def predict(self, X):
"""Predict the class for X.
The predicted class for each sample in X is returned.
Parameters
----------
X : List of ndarrays, one for each training example.
Each training example's shape is (string1_len,
string2_len, n_features), where string1_len and
string2_len are the length of the two training strings and
n_features the number of features.
Returns
-------
y : iterable of shape = [n_samples]
The predicted classes.
"""
return [self.classes[prediction.argmax()] for prediction in self.predict_proba(X)]
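    # Minimal usage sketch (illustrative; the feature tensors are assumed to come
    # from a separate extraction step such as pyhacrf's string-pair feature
    # extractor):
    #     model = Hacrf(l2_regularization=1.0)
    #     model.fit(X_train, y_train)   # each X is an (I, J, K) ndarray
    #     probabilities = model.predict_proba(X_test)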
@staticmethod
def _initialize_parameters(state_machine, n_features):
""" Helper to create initial parameter vector with the correct shape. """
return np.zeros((state_machine.n_states
+ state_machine.n_transitions,
n_features))
def get_params(self, deep=True):
"""Get parameters for this estimator.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
return {'l2_regularization': self.l2_regularization,
'optimizer': self._optimizer,
'optimizer_kwargs': self._optimizer_kwargs}
def set_params(self, l2_regularization=0.0, optimizer=None, optimizer_kwargs=None):
"""Set the parameters of this estimator.
Returns
-------
self
"""
self.l2_regularization = l2_regularization
self._optimizer = optimizer
self._optimizer_kwargs = optimizer_kwargs
return self
class _Model(object):
def __init__(self, state_machine, x, y=None):
self.state_machine = state_machine
self.states_to_classes = state_machine.states_to_classes
self.x = x
self.y = y
self.forward_backward = self.dense_forward_backward
def predict(self, parameters):
""" Run forward algorithm to find the predicted distribution over classes. """
x_dot_parameters = np.matmul(self.x, parameters)
probs = self._forward_predict(x_dot_parameters)
return probs
def dense_forward_backward(self, parameters):
""" Run the forward backward algorithm with the given parameters. """
I, J, K = self.x.shape
x_dot_parameters = np.dot(self.x,
parameters.T)
alpha = self._forward(x_dot_parameters)
beta = self._backward(x_dot_parameters)
classes_to_ints = {k: i
for i, k
in enumerate(set(self.states_to_classes.values()))}
states_to_classes = np.array([classes_to_ints[self.states_to_classes[state]]
for state
in range(max(self.states_to_classes.keys()) + 1)],
dtype='int64')
ll, deriv = gradient(alpha, beta, parameters, states_to_classes,
self.x, classes_to_ints[self.y], I, J, K)
return ll, deriv
def sparse_forward_backward(self, parameters):
""" Run the forward backward algorithm with the given parameters. """
I, J, K = self.x.shape
C = self.sparse_x[0].shape[2]
S, _ = parameters.shape
x_dot_parameters = np.zeros((I, J, S))
sparse_multiply(x_dot_parameters,
self.sparse_x[0],
self.sparse_x[1],
parameters.T,
I, J, K, C, S)
alpha = self._forward(x_dot_parameters)
beta = self._backward(x_dot_parameters)
classes_to_ints = {k: i
for i, k
in enumerate(set(self.states_to_classes.values()))}
states_to_classes = np.array([classes_to_ints[self.states_to_classes[state]]
for state
in range(max(self.states_to_classes.keys()) + 1)],
dtype='int64')
ll, deriv = gradient_sparse(alpha, beta,
parameters,
states_to_classes,
self.sparse_x[0],
self.sparse_x[1],
classes_to_ints[self.y],
I, J,
self.sparse_x[0].shape[2])
return ll, deriv
def _construct_sparse_features(self, x):
""" Helper to construct a sparse representation of the features. """
I, J, K = x.shape
new_array_height = (x != 0).sum(axis=2).max()
index_array = -np.ones((I, J, new_array_height), dtype='int64')
value_array = -np.ones((I, J, new_array_height), dtype='float64')
populate_sparse_features(x, index_array, value_array, I, J, K)
return index_array, value_array
class _GeneralModel(_Model):
def __init__(self, state_machine, x, y=None):
super(_GeneralModel, self).__init__(state_machine, x, y)
self._lattice = self.state_machine.build_lattice(self.x)
def _forward(self, x_dot_parameters):
""" Helper to calculate the forward weights. """
return forward(self._lattice, x_dot_parameters,
self.state_machine.n_states)
def _backward(self, x_dot_parameters):
""" Helper to calculate the backward weights. """
I, J, _ = self.x.shape
return backward(self._lattice, x_dot_parameters, I, J,
self.state_machine.n_states)
def _forward_predict(self, x_dot_parameters):
return forward_predict(self._lattice, x_dot_parameters,
self.state_machine.n_states)
class _AdjacentModel(_Model):
def _forward(self, x_dot_parameters) :
return adjacent.forward(x_dot_parameters,
self.state_machine.n_states)
def _backward(self, x_dot_parameters) :
return adjacent.backward(x_dot_parameters,
self.state_machine.n_states)
def _forward_predict(self, x_dot_parameters):
return adjacent.forward_predict(x_dot_parameters,
self.state_machine.n_states)
| bsd-3-clause | 7,427,845,139,190,377,000 | 38.507082 | 129 | 0.577728 | false |
BWeatherMaine/WXGIF | libs/images2gif.py | 2 | 28499 | # -*- coding: utf-8 -*-
# Copyright (c) 2010, Almar Klein, Ant1, Marius van Voorden
#
# This code is subject to the (new) BSD license:
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" Module images2gif
Provides functionality for reading and writing animated GIF images.
Use writeGif to write a series of numpy arrays or PIL images as an
animated GIF. Use readGif to read an animated gif as a series of numpy
arrays.
Acknowledgements
----------------
Many thanks to Ant1 for:
* noting the use of "palette=PIL.Image.ADAPTIVE", which significantly
improves the results.
* the modifications to save each image with its own palette, or optionally
the global palette (if its the same).
Many thanks to Marius van Voorden for porting the NeuQuant quantization
algorithm of Anthony Dekker to Python (See the NeuQuant class for its
license).
This code is based on gifmaker (in the scripts folder of the source
distribution of PIL)
Some implementation details are based on gif file structure as provided
by wikipedia.
"""
import os
try:
import PIL
from PIL import Image, ImageChops
from PIL.GifImagePlugin import getheader, getdata
except ImportError:
PIL = None
try:
import numpy as np
except ImportError:
np = None
try:
from scipy.spatial import cKDTree
except ImportError:
cKDTree = None
# getheader gives a 87a header and a color palette (two elements in a list).
# getdata()[0] gives the Image Descriptor up to (including) "LZW min code size".
# getdata()[1:] is the image data itself in chunks of 256 bytes (well
# technically the first byte says how many bytes follow, after which that
# amount (max 255) follows).
def checkImages(images):
""" checkImages(images)
Check numpy images and correct intensity range etc.
The same for all movie formats.
"""
# Init results
images2 = []
for im in images:
if PIL and isinstance(im, PIL.Image.Image):
            # We assume PIL images are all right
images2.append(im)
elif np and isinstance(im, np.ndarray):
# Check and convert dtype
if im.dtype == np.uint8:
images2.append(im) # Ok
elif im.dtype in [np.float32, np.float64]:
im = im.copy()
im[im<0] = 0
im[im>1] = 1
im *= 255
images2.append( im.astype(np.uint8) )
else:
im = im.astype(np.uint8)
images2.append(im)
# Check size
if im.ndim == 2:
pass # ok
elif im.ndim == 3:
if im.shape[2] not in [3,4]:
raise ValueError('This array can not represent an image.')
else:
raise ValueError('This array can not represent an image.')
else:
raise ValueError('Invalid image type: ' + str(type(im)))
# Done
return images2
def intToBin(i):
""" Integer to two bytes """
    # divide into two parts (bytes)
i1 = i % 256
i2 = int( i/256)
# make string (little endian)
return chr(i1) + chr(i2)
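# e.g. intToBin(2817) == '\x01\x0b', since 2817 = 1 + 11 * 256 (little endian).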
def getheaderAnim(im):
""" Animation header. To replace the getheader()[0] """
bb = "GIF89a"
bb += intToBin(im.size[0])
bb += intToBin(im.size[1])
bb += "\x87\x00\x00"
return bb
def getImageDescriptor(im):
""" Used for the local color table properties per image.
Otherwise global color table applies to all frames irrespective of
        whether additional colours come into play that require a redefined palette.
        Still a maximum of 256 colours per frame, obviously.
Written by Ant1 on 2010-08-22
"""
bb = '\x2C' # Image separator,
bb += intToBin( 0 ) # Left position
bb += intToBin( 0 ) # Top position
bb += intToBin( im.size[0] ) # image width
bb += intToBin( im.size[1] ) # image height
bb += '\x87' # packed field : local color table flag1, interlace0, sorted table0, reserved00, lct size111=7=2^(7+1)=256.
    # LZW minimum size code now comes later, beginning of [image data] blocks
return bb
# Original signature was getAppExt(loops=float('inf')); it was commented out
# by zcwang because float('inf') as the default caused a compile error.
def getAppExt(loops=float(0)):
    """ Application extension. Part that specifies the number of loops.
If loops is inf, it goes on infinitely.
"""
if loops == 0:
loops = 2**16-1
#bb = "" # application extension should not be used
# (the extension interprets zero loops
# to mean an infinite number of loops)
# Mmm, does not seem to work
if True:
bb = "\x21\xFF\x0B" # application extension
bb += "NETSCAPE2.0"
bb += "\x03\x01"
# if loops == float('inf'):
if loops == float(0):
loops = 2**16-1
bb += intToBin(loops)
bb += '\x00' # end
return bb
def getGraphicsControlExt(duration=0.1):
""" Graphics Control Extension. A sort of header at the start of
    each image. Specifies transparency and duration. """
bb = '\x21\xF9\x04'
    bb += '\x08' # no transparency
bb += intToBin( int(duration*100) ) # in 100th of seconds
    bb += '\x00' # no transparent color
bb += '\x00' # end
return bb
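# e.g. getGraphicsControlExt(0.5) encodes the frame delay as intToBin(50),
# i.e. 50 hundredths of a second.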
def _writeGifToFile(fp, images, durations, loops):
""" Given a set of images writes the bytes to the specified stream.
"""
    # Obtain palette for all images and count each occurrence
palettes, occur = [], []
for im in images:
palettes.append(im.palette.getdata()[1])
for palette in palettes:
occur.append( palettes.count( palette ) )
# Select most-used palette as the global one (or first in case no max)
globalPalette = palettes[ occur.index(max(occur)) ]
# Init
frames = 0
firstFrame = True
for im, palette in zip(images, palettes):
if firstFrame:
# Write header
# Gather info
header = getheaderAnim(im)
appext = getAppExt(loops)
# Write
fp.write(header)
fp.write(globalPalette)
fp.write(appext)
# Next frame is not the first
firstFrame = False
if True:
# Write palette and image data
# Gather info
data = getdata(im)
imdes, data = data[0], data[1:]
graphext = getGraphicsControlExt(durations[frames])
# Make image descriptor suitable for using 256 local color palette
lid = getImageDescriptor(im)
# Write local header
if palette != globalPalette:
# Use local color palette
fp.write(graphext)
fp.write(lid) # write suitable image descriptor
fp.write(palette) # write local color table
fp.write('\x08') # LZW minimum size code
else:
# Use global color palette
fp.write(graphext)
fp.write(imdes) # write suitable image descriptor
# Write image data
for d in data:
fp.write(d)
# Prepare for next round
frames = frames + 1
fp.write(";") # end gif
return frames
## Exposed functions
def writeGif(filename, images, duration=0.1, repeat=True, dither=False, nq=0):
    """ writeGif(filename, images, duration=0.1, repeat=True, dither=False, nq=0)
Write an animated gif from the specified images.
Parameters
----------
filename : string
The name of the file to write the image to.
images : list
Should be a list consisting of PIL images or numpy arrays.
The latter should be between 0 and 255 for integer types, and
between 0 and 1 for float types.
duration : scalar or list of scalars
The duration for all frames, or (if a list) for each frame.
repeat : bool or integer
        The number of loops. If True, loops infinitely.
dither : bool
Whether to apply dithering
nq : integer
If nonzero, applies the NeuQuant quantization algorithm to create
the color palette. This algorithm is superior, but slower than
the standard PIL algorithm. The value of nq is the quality
parameter. 1 represents the best quality. 10 is in general a
good tradeoff between quality and speed.
"""
# Check PIL
if PIL is None:
raise RuntimeError("Need PIL to write animated gif files.")
# Check images
images = checkImages(images)
# Check loops
if repeat is False:
loops = 1
elif repeat is True:
loops = 0 # zero means infinite
else:
loops = int(repeat)
# Convert to PIL images
images2 = []
for im in images:
if isinstance(im, Image.Image):
images2.append(im)
elif np and isinstance(im, np.ndarray):
if im.ndim==3 and im.shape[2]==3:
im = Image.fromarray(im,'RGB')
elif im.ndim==2:
im = Image.fromarray(im,'L')
images2.append(im)
# Convert to paletted PIL images
images, images2 = images2, []
if nq >= 1:
# NeuQuant algorithm
for im in images:
im = im.convert("RGBA") # NQ assumes RGBA
nq = NeuQuant(im, int(nq)) # Learn colors from image
if dither:
im = im.convert("RGB").quantize(palette=nq.paletteImage())
else:
im = nq.quantize(im) # Use to quantize the image itself
images2.append(im)
else:
# Adaptive PIL algorithm
AD = Image.ADAPTIVE
for im in images:
im = im.convert('P', palette=AD, dither=dither)
images2.append(im)
# Check duration
if hasattr(duration, '__len__'):
if len(duration) == len(images2):
durations = [d for d in duration]
else:
raise ValueError("len(duration) doesn't match amount of images.")
else:
        durations = [duration for im in images2]
# Open file
fp = open(filename, 'wb')
# Write
try:
        n = _writeGifToFile(fp, images2, durations, loops)
finally:
fp.close()
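# Minimal usage sketch for writeGif (file name and frame data are illustrative,
# not part of the original module):
#
#   import numpy as np
#   frames = [np.zeros((64, 64), dtype=np.uint8) + v for v in (0, 128, 255)]
#   writeGif('example.gif', frames, duration=0.2, repeat=True)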
def readGif(filename, asNumpy=True):
""" readGif(filename, asNumpy=True)
Read images from an animated GIF file. Returns a list of numpy
    arrays, or, if asNumpy is false, a list of PIL images.
"""
# Check PIL
if PIL is None:
raise RuntimeError("Need PIL to read animated gif files.")
# Check Numpy
if np is None:
raise RuntimeError("Need Numpy to read animated gif files.")
# Check whether it exists
if not os.path.isfile(filename):
raise IOError('File not found: '+str(filename))
# Load file using PIL
pilIm = PIL.Image.open(filename)
pilIm.seek(0)
# Read all images inside
images = []
try:
while True:
# Get image as numpy array
tmp = pilIm.convert() # Make without palette
a = np.asarray(tmp)
if len(a.shape)==0:
raise MemoryError("Too little memory to convert PIL image to array")
# Store, and next
images.append(a)
pilIm.seek(pilIm.tell()+1)
except EOFError:
pass
# Convert to normal PIL images if needed
if not asNumpy:
images2 = images
images = []
for im in images2:
images.append( PIL.Image.fromarray(im) )
# Done
return images
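# Usage sketch for readGif (illustrative file name, not part of the original
# module):
#
#   frames = readGif('example.gif')                      # list of numpy arrays
#   pil_frames = readGif('example.gif', asNumpy=False)   # list of PIL images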
class NeuQuant:
""" NeuQuant(image, samplefac=10, colors=256)
samplefac should be an integer number of 1 or higher, 1
being the highest quality, but the slowest performance.
    With a value of 10, one tenth of all pixels are used during
    training. This value seems a nice tradeoff between speed
    and quality.
    colors is the number of colors to reduce the image to. This
should best be a power of two.
See also:
http://members.ozemail.com.au/~dekker/NEUQUANT.HTML
License of the NeuQuant Neural-Net Quantization Algorithm
---------------------------------------------------------
Copyright (c) 1994 Anthony Dekker
Ported to python by Marius van Voorden in 2010
NEUQUANT Neural-Net quantization algorithm by Anthony Dekker, 1994.
See "Kohonen neural networks for optimal colour quantization"
in "network: Computation in Neural Systems" Vol. 5 (1994) pp 351-367.
for a discussion of the algorithm.
See also http://members.ozemail.com.au/~dekker/NEUQUANT.HTML
Any party obtaining a copy of these files from the author, directly or
indirectly, is granted, free of charge, a full and unrestricted irrevocable,
world-wide, paid up, royalty-free, nonexclusive right and license to deal
in this software and documentation files (the "Software"), including without
limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons who receive
copies from any such party to do so, with the only requirement being
that this copyright notice remain intact.
"""
NCYCLES = None # Number of learning cycles
NETSIZE = None # Number of colours used
SPECIALS = None # Number of reserved colours used
BGCOLOR = None # Reserved background colour
CUTNETSIZE = None
MAXNETPOS = None
INITRAD = None # For 256 colours, radius starts at 32
RADIUSBIASSHIFT = None
RADIUSBIAS = None
INITBIASRADIUS = None
RADIUSDEC = None # Factor of 1/30 each cycle
ALPHABIASSHIFT = None
INITALPHA = None # biased by 10 bits
GAMMA = None
BETA = None
BETAGAMMA = None
network = None # The network itself
colormap = None # The network itself
netindex = None # For network lookup - really 256
bias = None # Bias and freq arrays for learning
freq = None
pimage = None
# Four primes near 500 - assume no image has a length so large
# that it is divisible by all four primes
PRIME1 = 499
PRIME2 = 491
PRIME3 = 487
PRIME4 = 503
MAXPRIME = PRIME4
pixels = None
samplefac = None
a_s = None
def setconstants(self, samplefac, colors):
self.NCYCLES = 100 # Number of learning cycles
self.NETSIZE = colors # Number of colours used
self.SPECIALS = 3 # Number of reserved colours used
self.BGCOLOR = self.SPECIALS-1 # Reserved background colour
self.CUTNETSIZE = self.NETSIZE - self.SPECIALS
self.MAXNETPOS = self.NETSIZE - 1
self.INITRAD = self.NETSIZE/8 # For 256 colours, radius starts at 32
self.RADIUSBIASSHIFT = 6
self.RADIUSBIAS = 1 << self.RADIUSBIASSHIFT
self.INITBIASRADIUS = self.INITRAD * self.RADIUSBIAS
self.RADIUSDEC = 30 # Factor of 1/30 each cycle
self.ALPHABIASSHIFT = 10 # Alpha starts at 1
self.INITALPHA = 1 << self.ALPHABIASSHIFT # biased by 10 bits
self.GAMMA = 1024.0
self.BETA = 1.0/1024.0
self.BETAGAMMA = self.BETA * self.GAMMA
self.network = np.empty((self.NETSIZE, 3), dtype='float64') # The network itself
self.colormap = np.empty((self.NETSIZE, 4), dtype='int32') # The network itself
self.netindex = np.empty(256, dtype='int32') # For network lookup - really 256
self.bias = np.empty(self.NETSIZE, dtype='float64') # Bias and freq arrays for learning
self.freq = np.empty(self.NETSIZE, dtype='float64')
self.pixels = None
self.samplefac = samplefac
self.a_s = {}
def __init__(self, image, samplefac=10, colors=256):
# Check Numpy
if np is None:
raise RuntimeError("Need Numpy for the NeuQuant algorithm.")
# Check image
if image.size[0] * image.size[1] < NeuQuant.MAXPRIME:
raise IOError("Image is too small")
assert image.mode == "RGBA"
# Initialize
self.setconstants(samplefac, colors)
self.pixels = np.fromstring(image.tostring(), np.uint32)
self.setUpArrays()
self.learn()
self.fix()
self.inxbuild()
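    # Usage sketch (not part of the original class; assumes a PIL image `im`):
    #   nq = NeuQuant(im.convert("RGBA"), samplefac=10, colors=256)
    #   im2 = nq.quantize(im)   # palettised copy, as used by writeGif() above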
def writeColourMap(self, rgb, outstream):
for i in range(self.NETSIZE):
            bb = self.colormap[i,0]
            gg = self.colormap[i,1]
            rr = self.colormap[i,2]
            outstream.write(rr if rgb else bb)
            outstream.write(gg)
            outstream.write(bb if rgb else rr)
return self.NETSIZE
def setUpArrays(self):
self.network[0,0] = 0.0 # Black
self.network[0,1] = 0.0
self.network[0,2] = 0.0
self.network[1,0] = 255.0 # White
self.network[1,1] = 255.0
self.network[1,2] = 255.0
# RESERVED self.BGCOLOR # Background
for i in range(self.SPECIALS):
self.freq[i] = 1.0 / self.NETSIZE
self.bias[i] = 0.0
for i in range(self.SPECIALS, self.NETSIZE):
p = self.network[i]
p[:] = (255.0 * (i-self.SPECIALS)) / self.CUTNETSIZE
self.freq[i] = 1.0 / self.NETSIZE
self.bias[i] = 0.0
# Omitted: setPixels
def altersingle(self, alpha, i, b, g, r):
"""Move neuron i towards biased (b,g,r) by factor alpha"""
n = self.network[i] # Alter hit neuron
n[0] -= (alpha*(n[0] - b))
n[1] -= (alpha*(n[1] - g))
n[2] -= (alpha*(n[2] - r))
def geta(self, alpha, rad):
try:
return self.a_s[(alpha, rad)]
except KeyError:
length = rad*2-1
mid = length/2
q = np.array(range(mid-1,-1,-1)+range(-1,mid))
a = alpha*(rad*rad - q*q)/(rad*rad)
a[mid] = 0
self.a_s[(alpha, rad)] = a
return a
def alterneigh(self, alpha, rad, i, b, g, r):
if i-rad >= self.SPECIALS-1:
lo = i-rad
start = 0
else:
lo = self.SPECIALS-1
start = (self.SPECIALS-1 - (i-rad))
if i+rad <= self.NETSIZE:
hi = i+rad
end = rad*2-1
else:
hi = self.NETSIZE
end = (self.NETSIZE - (i+rad))
a = self.geta(alpha, rad)[start:end]
p = self.network[lo+1:hi]
p -= np.transpose(np.transpose(p - np.array([b, g, r])) * a)
#def contest(self, b, g, r):
# """ Search for biased BGR values
# Finds closest neuron (min dist) and updates self.freq
# finds best neuron (min dist-self.bias) and returns position
# for frequently chosen neurons, self.freq[i] is high and self.bias[i] is negative
# self.bias[i] = self.GAMMA*((1/self.NETSIZE)-self.freq[i])"""
#
# i, j = self.SPECIALS, self.NETSIZE
# dists = abs(self.network[i:j] - np.array([b,g,r])).sum(1)
# bestpos = i + np.argmin(dists)
# biasdists = dists - self.bias[i:j]
# bestbiaspos = i + np.argmin(biasdists)
# self.freq[i:j] -= self.BETA * self.freq[i:j]
# self.bias[i:j] += self.BETAGAMMA * self.freq[i:j]
# self.freq[bestpos] += self.BETA
# self.bias[bestpos] -= self.BETAGAMMA
# return bestbiaspos
def contest(self, b, g, r):
""" Search for biased BGR values
Finds closest neuron (min dist) and updates self.freq
finds best neuron (min dist-self.bias) and returns position
for frequently chosen neurons, self.freq[i] is high and self.bias[i] is negative
self.bias[i] = self.GAMMA*((1/self.NETSIZE)-self.freq[i])"""
i, j = self.SPECIALS, self.NETSIZE
dists = abs(self.network[i:j] - np.array([b,g,r])).sum(1)
bestpos = i + np.argmin(dists)
biasdists = dists - self.bias[i:j]
bestbiaspos = i + np.argmin(biasdists)
self.freq[i:j] *= (1-self.BETA)
self.bias[i:j] += self.BETAGAMMA * self.freq[i:j]
self.freq[bestpos] += self.BETA
self.bias[bestpos] -= self.BETAGAMMA
return bestbiaspos
def specialFind(self, b, g, r):
for i in range(self.SPECIALS):
n = self.network[i]
if n[0] == b and n[1] == g and n[2] == r:
return i
return -1
def learn(self):
biasRadius = self.INITBIASRADIUS
alphadec = 30 + ((self.samplefac-1)/3)
lengthcount = self.pixels.size
samplepixels = lengthcount / self.samplefac
delta = samplepixels / self.NCYCLES
alpha = self.INITALPHA
i = 0;
rad = biasRadius >> self.RADIUSBIASSHIFT
if rad <= 1:
rad = 0
print "Beginning 1D learning: samplepixels =",samplepixels," rad =", rad
step = 0
pos = 0
if lengthcount%NeuQuant.PRIME1 != 0:
step = NeuQuant.PRIME1
elif lengthcount%NeuQuant.PRIME2 != 0:
step = NeuQuant.PRIME2
elif lengthcount%NeuQuant.PRIME3 != 0:
step = NeuQuant.PRIME3
else:
step = NeuQuant.PRIME4
i = 0
printed_string = ''
while i < samplepixels:
if i%100 == 99:
tmp = '\b'*len(printed_string)
printed_string = str((i+1)*100/samplepixels)+"%\n"
print tmp + printed_string,
p = self.pixels[pos]
r = (p >> 16) & 0xff
g = (p >> 8) & 0xff
b = (p ) & 0xff
if i == 0: # Remember background colour
self.network[self.BGCOLOR] = [b, g, r]
j = self.specialFind(b, g, r)
if j < 0:
j = self.contest(b, g, r)
if j >= self.SPECIALS: # Don't learn for specials
a = (1.0 * alpha) / self.INITALPHA
self.altersingle(a, j, b, g, r)
if rad > 0:
self.alterneigh(a, rad, j, b, g, r)
pos = (pos+step)%lengthcount
i += 1
if i%delta == 0:
alpha -= alpha / alphadec
biasRadius -= biasRadius / self.RADIUSDEC
rad = biasRadius >> self.RADIUSBIASSHIFT
if rad <= 1:
rad = 0
print "Finished 1D learning: final alpha =",(1.0*alpha)/self.INITALPHA,"!"
def fix(self):
for i in range(self.NETSIZE):
for j in range(3):
x = int(0.5 + self.network[i,j])
x = max(0, x)
x = min(255, x)
self.colormap[i,j] = x
self.colormap[i,3] = i
def inxbuild(self):
previouscol = 0
startpos = 0
for i in range(self.NETSIZE):
p = self.colormap[i]
q = None
smallpos = i
smallval = p[1] # Index on g
# Find smallest in i..self.NETSIZE-1
for j in range(i+1, self.NETSIZE):
q = self.colormap[j]
if q[1] < smallval: # Index on g
smallpos = j
smallval = q[1] # Index on g
q = self.colormap[smallpos]
# Swap p (i) and q (smallpos) entries
if i != smallpos:
p[:],q[:] = q, p.copy()
# smallval entry is now in position i
if smallval != previouscol:
self.netindex[previouscol] = (startpos+i) >> 1
for j in range(previouscol+1, smallval):
self.netindex[j] = i
previouscol = smallval
startpos = i
self.netindex[previouscol] = (startpos+self.MAXNETPOS) >> 1
for j in range(previouscol+1, 256): # Really 256
self.netindex[j] = self.MAXNETPOS
def paletteImage(self):
""" PIL weird interface for making a paletted image: create an image which
already has the palette, and use that in Image.quantize. This function
returns this palette image. """
if self.pimage is None:
palette = []
for i in range(self.NETSIZE):
palette.extend(self.colormap[i][:3])
palette.extend([0]*(256-self.NETSIZE)*3)
# a palette image to use for quant
self.pimage = Image.new("P", (1, 1), 0)
self.pimage.putpalette(palette)
return self.pimage
def quantize(self, image):
""" Use a kdtree to quickly find the closest palette colors for the pixels """
if cKDTree:
return self.quantize_with_scipy(image)
else:
print 'Scipy not available, falling back to slower version.'
return self.quantize_without_scipy(image)
def quantize_with_scipy(self, image):
w,h = image.size
px = np.asarray(image).copy()
px2 = px[:,:,:3].reshape((w*h,3))
kdtree = cKDTree(self.colormap[:,:3],leafsize=10)
result = kdtree.query(px2)
colorindex = result[1]
print "Distance:", (result[0].sum()/(w*h))
px2[:] = self.colormap[colorindex,:3]
return Image.fromarray(px).convert("RGB").quantize(palette=self.paletteImage())
    def quantize_without_scipy(self, image):
        """ This function can be used if no scipy is available.
It's 7 times slower though.
"""
w,h = image.size
px = np.asarray(image).copy()
memo = {}
for j in range(w):
for i in range(h):
key = (px[i,j,0],px[i,j,1],px[i,j,2])
try:
val = memo[key]
except KeyError:
val = self.convert(key)
memo[key] = val
px[i,j,0],px[i,j,1],px[i,j,2] = val
return Image.fromarray(px).convert("RGB").quantize(palette=self.paletteImage())
def convert(self, (r, g, b)):
i = self.inxsearch(r, g, b)
return self.colormap[i,:3]
def inxsearch(self, r, g, b):
"""Search for BGR values 0..255 and return colour index"""
dists = (self.colormap[:,:3] - np.array([r,g,b]))
a= np.argmin((dists*dists).sum(1))
return a
if __name__ == '__main__':
im = np.zeros((200,200), dtype=np.uint8)
im[10:30,:] = 100
im[:,80:120] = 255
im[-50:-40,:] = 50
images = [im*1.0, im*0.8, im*0.6, im*0.4, im*0]
writeGif('lala3.gif',images, duration=0.5, dither=0)
| apache-2.0 | 2,702,143,883,687,072,000 | 32.927381 | 124 | 0.567143 | false |
Pegase745/gitlab-freak | gitlab_freak/helpers.py | 2 | 2832 | from __future__ import absolute_import, unicode_literals
from distutils.version import LooseVersion
from sqlalchemy.sql.expression import ClauseElement
from flask import Flask
import json
import requests
from gitlab_freak.models import db, ProjectDependency
import gitlab
app = Flask(__name__)
app.config.from_envvar('GITLAB_FREAK_SETTINGS')
git = gitlab.Gitlab(app.config['GITLAB_ENDPOINT'], app.config['GITLAB_TOKEN'])
def get_or_create(session, model, defaults=None, **kwargs):
instance = session.query(model).filter_by(**kwargs).first()
if instance:
return instance, False
else:
params = dict(
(k, v) for k, v in kwargs.iteritems() if not isinstance(v, ClauseElement))
params.update(defaults or {})
instance = model(**params)
session.add(instance)
return instance, True
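# Usage sketch for get_or_create (the field values are illustrative, not taken
# from this project's data):
#
#   dep, created = get_or_create(db.session, ProjectDependency,
#                                project_id=42, name='express',
#                                actual_version='4.0.0')
#   if created:
#       db.session.commit()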
def nodeLatestVersion(dependency, project_id):
r = requests.get('%s%s/latest' % (app.config['NPM_REGISTRY'], dependency))
latestVersion = r.json().get('version')
try:
dep = ProjectDependency.by_project(project_id, dependency)
dep.latest_version = latestVersion
if LooseVersion(dep.actual_version) < LooseVersion(latestVersion):
dep.status = 'ko'
else:
dep.status = 'ok'
db.session.commit()
except Exception, e:
app.logger.error(e)
db.session.rollback()
def nodeDepsFetcher(project_id):
# Get dependencies from package.json
project = git.getproject(project_id)
depFileEncoded = git.getfile(project_id, 'package.json',
project['default_branch'])
# Decode from base64
deps = json.loads(depFileEncoded.get('content').decode('base64'))
mainDeps = deps.get('dependencies')
devDeps = deps.get('devDependencies')
# Insert in project_dependency
# TODO create single function for that
for mDep, mVersion in list(mainDeps.items()):
mdep, created = get_or_create(db.session, ProjectDependency,
project_id=project_id, name=mDep,
actual_version=mVersion)
if not created:
app.logger.info('[%s] Dep %s already exist' % (project_id, mDep))
db.session.commit()
nodeLatestVersion(mDep, project_id)
for devDep, devVersion in list(devDeps.items()):
ddep, created = get_or_create(db.session, ProjectDependency,
project_id=project_id, name=devDep,
actual_version=devVersion, dev=True)
if not created:
app.logger.info('[%s] Dev dep %s already exist' %
(project_id, devDep))
db.session.commit()
nodeLatestVersion(devDep, project_id)
return True
| mit | -1,155,476,737,873,296,600 | 31.930233 | 86 | 0.62041 | false |
fabiand/anaconda | pyanaconda/constants_text.py | 2 | 1811 | #
# constants_text.py: text mode constants
#
# Copyright (C) 2000, 2001, 2002 Red Hat, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# pylint: disable=W0401
from pyanaconda.constants import *
from pyanaconda.i18n import _, N_
class Translator:
"""A simple class to facilitate on-the-fly translation for newt buttons"""
def __init__(self, button, check):
self.button = button
self.check = check
def __getitem__(self, which):
if which == 0:
return _(self.button)
elif which == 1:
return self.check
raise IndexError
def __len__(self):
return 2
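# Behaviour sketch (not part of the original file): a Translator acts as a
# two-item (label, check) pair, so e.g. TEXT_OK_BUTTON[0] (defined below)
# re-evaluates _("OK") on every access, picking up the current locale, while
# TEXT_OK_BUTTON[1] stays "ok" for use as the button's check value.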
TEXT_OK_STR = N_("OK")
TEXT_OK_CHECK = "ok"
TEXT_OK_BUTTON = Translator(TEXT_OK_STR, TEXT_OK_CHECK)
TEXT_CANCEL_STR = N_("Cancel")
TEXT_CANCEL_CHECK = "cancel"
TEXT_CANCEL_BUTTON = Translator(TEXT_CANCEL_STR, TEXT_CANCEL_CHECK)
TEXT_YES_STR = N_("Yes")
TEXT_YES_CHECK = "yes"
TEXT_YES_BUTTON = Translator(TEXT_YES_STR, TEXT_YES_CHECK)
TEXT_NO_STR = N_("No")
TEXT_NO_CHECK = "no"
TEXT_NO_BUTTON = Translator(TEXT_NO_STR, TEXT_NO_CHECK)
# Make the return calls from the UIScreen input() function more clear
INPUT_PROCESSED = None
INPUT_DISCARDED = False
| gpl-2.0 | 2,279,819,971,241,057,800 | 30.224138 | 78 | 0.694092 | false |
sonata-nfv/son-cli | setup.py | 5 | 3428 | # Copyright (c) 2015 SONATA-NFV, UBIWHERE
# ALL RIGHTS RESERVED.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Neither the name of the SONATA-NFV, UBIWHERE
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# This work has been performed in the framework of the SONATA project,
# funded by the European Commission under Grant number 671517 through
# the Horizon 2020 and 5G-PPP programmes. The authors would like to
# acknowledge the contributions of their colleagues of the SONATA
# partner consortium (www.sonata-nfv.eu).
from setuptools import setup, find_packages
import codecs
import os.path as path
# buildout build system
# http://www.buildout.org/en/latest/docs/tutorial.html
# setup() documentation:
# http://python-packaging-user-guide.readthedocs.org/en/
# latest/distributing/#setup-py
cwd = path.dirname(__file__)
longdesc = codecs.open(path.join(cwd, 'README.md'), 'r', 'utf-8').read()
name = 'sonata-cli'
setup(
name=name,
license='Apache License, Version 2.0',
version='3.0',
url='https://github.com/sonata-nfv/son-cli',
author_email='[email protected]',
long_description=longdesc,
package_dir={'': 'src'},
packages=find_packages('src'), # dependency resolution
namespace_packages=['son', ],
include_package_data=True,
package_data= {
'son': ['schema/tests/son-schema/*', 'workspace/samples/*',
'monitor/docker_compose_files/*', 'monitor/grafana/*',
'monitor/prometheus/*', 'monitor/*.exp',
'validate/eventcfg.yml']
},
# in jenkins, the last package in the list is installed first
install_requires=['setuptools', 'pyaml', 'jsonschema', 'validators',
'requests>2.4.2', 'coloredlogs<=5.1.1', 'paramiko',
'termcolor', 'tabulate', 'networkx<=1.12', 'Flask',
'PyJWT>=1.4.2', 'docker==2.0.2', 'scipy', 'numpy',
'watchdog', 'Flask-Cors', 'flask_cache', 'redis',
'pycrypto', 'matplotlib', 'prometheus_client',
'requests-toolbelt==0.8.0'],
zip_safe=False,
entry_points={
'console_scripts': [
'son-workspace=son.workspace.workspace:main',
'son-package=son.package.package:main',
'son-monitor=son.monitor.monitor:main',
'son-profile=son.profile.profile:main',
'son-validate=son.validate.validate:main',
'son-validate-api=son.validate.api.api:main',
'son-access=son.access.access:main'
],
},
test_suite='son',
setup_requires=['pytest-runner'],
tests_require=['pytest']
)
| apache-2.0 | -726,700,184,759,282,700 | 40.301205 | 77 | 0.622229 | false |
fnaum/rez | src/rez/vendor/lockfile/sqlitelockfile.py | 487 | 5540 | from __future__ import absolute_import, division
import time
import os
try:
unicode
except NameError:
unicode = str
from . import LockBase, NotLocked, NotMyLock, LockTimeout, AlreadyLocked
class SQLiteLockFile(LockBase):
"Demonstrate SQL-based locking."
testdb = None
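    # Usage sketch (file name is illustrative): lock = SQLiteLockFile('somefile'),
    # then lock.acquire(timeout=10), do the critical work, and lock.release();
    # break_lock() can be used to clear a stale lock left by another process.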
def __init__(self, path, threaded=True, timeout=None):
"""
>>> lock = SQLiteLockFile('somefile')
>>> lock = SQLiteLockFile('somefile', threaded=False)
"""
LockBase.__init__(self, path, threaded, timeout)
self.lock_file = unicode(self.lock_file)
self.unique_name = unicode(self.unique_name)
if SQLiteLockFile.testdb is None:
import tempfile
_fd, testdb = tempfile.mkstemp()
os.close(_fd)
os.unlink(testdb)
del _fd, tempfile
SQLiteLockFile.testdb = testdb
import sqlite3
self.connection = sqlite3.connect(SQLiteLockFile.testdb)
c = self.connection.cursor()
try:
c.execute("create table locks"
"("
" lock_file varchar(32),"
" unique_name varchar(32)"
")")
except sqlite3.OperationalError:
pass
else:
self.connection.commit()
import atexit
atexit.register(os.unlink, SQLiteLockFile.testdb)
def acquire(self, timeout=None):
timeout = timeout is not None and timeout or self.timeout
end_time = time.time()
if timeout is not None and timeout > 0:
end_time += timeout
if timeout is None:
wait = 0.1
elif timeout <= 0:
wait = 0
else:
wait = timeout / 10
cursor = self.connection.cursor()
while True:
if not self.is_locked():
# Not locked. Try to lock it.
cursor.execute("insert into locks"
" (lock_file, unique_name)"
" values"
" (?, ?)",
(self.lock_file, self.unique_name))
self.connection.commit()
# Check to see if we are the only lock holder.
cursor.execute("select * from locks"
" where unique_name = ?",
(self.unique_name,))
rows = cursor.fetchall()
if len(rows) > 1:
# Nope. Someone else got there. Remove our lock.
cursor.execute("delete from locks"
" where unique_name = ?",
(self.unique_name,))
self.connection.commit()
else:
# Yup. We're done, so go home.
return
else:
# Check to see if we are the only lock holder.
cursor.execute("select * from locks"
" where unique_name = ?",
(self.unique_name,))
rows = cursor.fetchall()
if len(rows) == 1:
# We're the locker, so go home.
return
# Maybe we should wait a bit longer.
if timeout is not None and time.time() > end_time:
if timeout > 0:
# No more waiting.
raise LockTimeout("Timeout waiting to acquire"
" lock for %s" %
self.path)
else:
# Someone else has the lock and we are impatient..
raise AlreadyLocked("%s is already locked" % self.path)
# Well, okay. We'll give it a bit longer.
time.sleep(wait)
def release(self):
if not self.is_locked():
raise NotLocked("%s is not locked" % self.path)
if not self.i_am_locking():
raise NotMyLock("%s is locked, but not by me (by %s)" %
(self.unique_name, self._who_is_locking()))
cursor = self.connection.cursor()
cursor.execute("delete from locks"
" where unique_name = ?",
(self.unique_name,))
self.connection.commit()
def _who_is_locking(self):
cursor = self.connection.cursor()
cursor.execute("select unique_name from locks"
" where lock_file = ?",
(self.lock_file,))
return cursor.fetchone()[0]
def is_locked(self):
cursor = self.connection.cursor()
cursor.execute("select * from locks"
" where lock_file = ?",
(self.lock_file,))
rows = cursor.fetchall()
return not not rows
def i_am_locking(self):
cursor = self.connection.cursor()
cursor.execute("select * from locks"
" where lock_file = ?"
" and unique_name = ?",
(self.lock_file, self.unique_name))
return not not cursor.fetchall()
def break_lock(self):
cursor = self.connection.cursor()
cursor.execute("delete from locks"
" where lock_file = ?",
(self.lock_file,))
self.connection.commit()
| lgpl-3.0 | 6,717,877,301,793,169,000 | 34.741935 | 75 | 0.470397 | false |
Vagab0nd/SiCKRAGE | lib3/dogpile/cache/proxy.py | 2 | 2601 | """
Proxy Backends
------------------
Provides a utility and a decorator class that allow for modifying the behavior
of different backends without altering the class itself or having to extend the
base backend.
.. versionadded:: 0.5.0 Added support for the :class:`.ProxyBackend` class.
"""
from .api import CacheBackend
class ProxyBackend(CacheBackend):
"""A decorator class for altering the functionality of backends.
Basic usage::
from dogpile.cache import make_region
from dogpile.cache.proxy import ProxyBackend
class MyFirstProxy(ProxyBackend):
def get(self, key):
# ... custom code goes here ...
return self.proxied.get(key)
def set(self, key, value):
# ... custom code goes here ...
self.proxied.set(key)
class MySecondProxy(ProxyBackend):
def get(self, key):
# ... custom code goes here ...
return self.proxied.get(key)
region = make_region().configure(
'dogpile.cache.dbm',
expiration_time = 3600,
arguments = {
"filename":"/path/to/cachefile.dbm"
},
wrap = [ MyFirstProxy, MySecondProxy ]
)
Classes that extend :class:`.ProxyBackend` can be stacked
together. The ``.proxied`` property will always
point to either the concrete backend instance or
the next proxy in the chain that a method can be
delegated towards.
.. versionadded:: 0.5.0
"""
def __init__(self, *args, **kwargs):
self.proxied = None
    def wrap(self, backend):
        """ Take a backend as an argument and set up the self.proxied property.
Return an object that be used as a backend by a :class:`.CacheRegion`
object.
"""
assert isinstance(backend, CacheBackend) or isinstance(
backend, ProxyBackend
)
self.proxied = backend
return self
#
# Delegate any functions that are not already overridden to
# the proxies backend
#
def get(self, key):
return self.proxied.get(key)
def set(self, key, value):
self.proxied.set(key, value)
def delete(self, key):
self.proxied.delete(key)
def get_multi(self, keys):
return self.proxied.get_multi(keys)
def set_multi(self, mapping):
self.proxied.set_multi(mapping)
def delete_multi(self, keys):
self.proxied.delete_multi(keys)
def get_mutex(self, key):
return self.proxied.get_mutex(key)
| gpl-3.0 | -5,758,399,686,115,150,000 | 26.378947 | 79 | 0.600538 | false |
benekastah/rock-paper-scissors | rps.py | 1 | 11894 | # pylint: disable=missing-docstring
from collections import OrderedDict, defaultdict
import select
import socket
import sys
class Style(object):
RESET = 0
BOLD = 1
UNDERSCORE = 4
BLINK = 5
INVERT = 7
CONCEAL = 8
FG_BLACK = 30
FG_RED = 31
FG_GREEN = 32
FG_YELLOW = 33
FG_BLUE = 34
FG_MAGENTA = 35
FG_CYAN = 36
FG_WHITE = 37
BG_BLACK = 40
BG_RED = 41
BG_GREEN = 42
BG_YELLOW = 44
BG_BLUE = 44
BG_MAGENTA = 45
BG_CYAN = 46
BG_WHITE = 47
@staticmethod
def encode(*attrs):
return ''.join(['\033[', ';'.join(str(a) for a in attrs), 'm'])
@staticmethod
def wrap(text, attrs=None):
if not attrs:
attrs = [Style.RESET]
start = Style.encode(*attrs)
end = Style.encode(Style.RESET)
return ''.join([start, str(text), end])
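    # Example (not part of the original code): Style.wrap('hi', [Style.BOLD,
    # Style.FG_RED]) yields '\033[1;31mhi\033[0m', i.e. the text wrapped in an
    # ANSI colour sequence followed by a reset.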
class Move(object):
superior = None
inferior = None
def __repr__(self):
return Style.wrap(self.__class__.__name__,
[Style.BG_WHITE, Style.FG_BLACK, Style.BOLD])
def __cmp__(self, other):
if isinstance(other, self.superior):
return -1
elif isinstance(other, self.inferior):
return 1
elif isinstance(other, self.__class__):
return 0
else:
raise TypeError('Can\'t compare {0} with {1}'.format(self, other))
class ROCK(Move):
def __init__(self):
self.superior = PAPER
self.inferior = SCISSORS
class PAPER(Move):
def __init__(self):
self.superior = SCISSORS
self.inferior = ROCK
class SCISSORS(Move):
def __init__(self):
self.superior = ROCK
self.inferior = PAPER
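# Comparison sketch (not in the original file): each Move orders itself against
# its superior/inferior class, so ROCK() < PAPER(), PAPER() < SCISSORS() and
# SCISSORS() < ROCK(); comparing a Move with anything else raises TypeError.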
class Game(object):
winning_score = 3
def __init__(self, name, lobby):
self.name = name
self.lobby = lobby
self.winner = None
self.players = set()
self.moves = {}
self.reset_score()
def reset_score(self):
self.score = defaultdict(lambda: 0)
def add_player(self, player):
if self.full:
player.prompt('Game is already full')
return None
self.players.add(player)
return True
def other_player(self, player):
diff = self.players - {player}
(result,) = diff
return result
def try_run(self):
if self.full:
self.prompt_move()
return True
else:
return False
def prompt_move(self, player=None):
if player is not None:
            ps = [player]
else:
ps = self.players
for p in ps:
p.prompt('\nMake your move!')
@property
def full(self):
return len(self.players) >= 2
@property
def gameover(self):
return self.winner is not None
def end_game(self):
for p in self.players:
p.game = None
p.prompt()
del self.lobby.games[self.name]
def sendall(self, msg):
for p in self.players:
p.send(msg)
def show_score(self):
return '\n'.join('{0}: {1}'.format(p, self.score[p])
for p in self.players)
def play(self, player, move):
other = self.other_player(player)
if player not in self.players:
player.send('You aren\'t a player in this game')
return
if self.gameover:
player.send('Player {} already won'.format(self.winner))
if not self.full:
player.send('Wait until the game is full before playing...')
move_upper = move.upper()
if move_upper in ('R', 'ROCK'):
self.moves[player] = ROCK()
elif move_upper in ('P', 'PAPER'):
self.moves[player] = PAPER()
elif move_upper in ('S', 'SCISSORS'):
self.moves[player] = SCISSORS()
else:
player.prompt(''.join([
'Invalid move: "{}"'.format(move),
' Choose one of: (R)OCK, (P)APER or (S)CISSORS'
]))
return
if len(self.moves) == 2:
self.sendall('\n')
_players = list(self.players)
for p1, p2 in zip(_players, reversed(_players)):
p1.send('{0} threw {1}'.format(p2, self.moves[p2]))
winner = None
if self.moves[player] > self.moves[other]:
winner = player
elif self.moves[other] > self.moves[player]:
winner = other
if winner is not None:
self.score[winner] += 1
if self.score[winner] >= self.winning_score:
self.winner = winner
self.sendall('\n'.join([
'Player {} wins the game!'.format(winner),
self.show_score(),
]))
else:
self.sendall('\n'.join([
'Player {} wins the round!'.format(winner),
self.show_score(),
]))
else:
self.sendall('Tie')
self.moves = {}
if self.gameover:
self.end_game()
else:
self.prompt_move()
else:
player.send('Waiting for other player to play...')
def __repr__(self):
s = [Style.wrap(self.name, [Style.FG_GREEN])]
if self.full:
s.append(Style.wrap('(FULL)', [Style.FG_RED, Style.BOLD]))
if len(self.players):
s.append('with')
if len(self.players) == 2:
s.append('{0}, {1}'.format(*list(self.players)))
else:
(p,) = self.players
s.append(str(p))
return ' '.join(s)
class Lobby(object):
def __init__(self):
self.games = OrderedDict()
def new_game(self, name):
if name in self.games:
return 'Name "{}" taken'.format(name)
game = Game(name, lobby=self)
self.games[name] = game
return game
def list_games(self):
ls = '\n'.join(' {0}'.format(g) for _, g in self.games.iteritems())
if not ls:
ls = 'No games'
return ls
def get_game(self, name):
return self.games.get(name)
def help(self):
return '\n'.join([
'Commands:',
' ?: show this text',
' c <name>: create new game with <name>',
' j <name>: join existing game with <name>',
' l: list games',
' who: list players',
])
class Player(object):
def __init__(self, sock, lobby):
self.socket = sock
self.lobby = lobby
self.name = None
self.game = None
def prompt(self, txt=''):
if txt and not txt.endswith('\n'):
txt += '\n'
game_prompt = ''
if self.game:
if self.game.full:
game_prompt = 'playing {0} against {1} '.format(
Style.wrap(self.game.name, [Style.FG_GREEN]),
self.game.other_player(self))
else:
return
txt += '{}> '.format(game_prompt)
self.socket.send(txt)
def prompt_name(self):
self.socket.send('Please enter your name: ')
def send(self, txt):
if txt and not txt.endswith('\n'):
txt += '\n'
self.socket.send(txt)
def create_game(self, name):
game = self.lobby.new_game(name)
if isinstance(game, basestring):
msg = game
self.prompt(msg)
return
self.join_game(name)
return game
def join_game(self, name):
game = self.lobby.get_game(name)
if not game:
self.prompt('No game "{}"'.format(name))
elif game.full:
self.prompt('Game is full')
else:
game.add_player(self)
self.game = game
if not self.game.try_run():
self.send('Waiting for other player...')
def play(self, move):
self.game.play(self, move)
def fileno(self):
return self.socket.fileno()
def __repr__(self):
return Style.wrap(self.name, [Style.FG_BLUE])
def main(host, port):
"""Start a rock-paper-scissors server"""
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
print 'Binding to {0}:{1}'.format(host, port)
server.bind((host, int(port)))
server.listen(1)
lobby = Lobby()
read_list = [server]
write_list = []
notifications = []
def disconnect(sock):
read_list.remove(sock)
write_list.remove(sock)
while True:
readable, writable, _ = select.select(read_list, write_list, [])
notify = '\n'.join(notifications)
notifications = []
if notify:
for sock in writable:
if isinstance(sock, Player):
sock.send(notify)
sock.prompt()
for sock in readable:
if sock is server:
new_client, _ = server.accept()
player = Player(new_client, lobby)
read_list.append(player)
write_list.append(player)
player.prompt_name()
elif isinstance(sock, Player):
player = sock
if notify:
player.send(notify)
data = player.socket.recv(1024)
if not data:
disconnect(player)
continue
else:
data = data.strip()
if player.game:
player.play(data)
else:
if not player.name:
if data:
player.name = data
player.prompt(Style.wrap(
'Welcome to Rock Paper Scissors! Type "?" '
'for help',
[Style.FG_MAGENTA]))
else:
player.prompt_name()
continue
if data == '?':
player.prompt(lobby.help())
elif data == 'l':
player.prompt(lobby.list_games())
elif data == 'who':
players = []
for p in read_list:
if isinstance(p, Player):
player_text = [' ', str(p)]
if p.game:
player_text.append(' in ')
player_text.append(Style.wrap(
p.game.name, [Style.FG_GREEN]))
players.append(''.join(player_text))
player.prompt('\n'.join(players))
elif data.startswith('c '):
name = data[2:]
game = player.create_game(name)
notifications.append('{0} created game {1}'.format(
player, Style.wrap(game.name, [Style.FG_GREEN])))
elif data.startswith('j '):
name = data[2:]
player.join_game(name)
notifications.append('{0} joined game {1}'.format(
player, Style.wrap(name, [Style.FG_GREEN])))
else:
player.prompt('Unrecognized command: {}'.format(data))
else:
disconnect(sock)
if __name__ == '__main__':
main(*sys.argv[1:])
| bsd-3-clause | -2,717,137,522,057,526,300 | 28.440594 | 78 | 0.471498 | false |
sol/aeson | tests/JSONTestSuite/parsers/test_json-jq.py | 5 | 1169 | #!/usr/bin/env python
import os
import subprocess
import sys
jq_paths = ["/usr/local/bin/jq", "/Users/nst/bin/jq"]
dir_path = "/Users/nst/Projects/dropbox/JSON/test_cases/"
existing_jq_paths = [p for p in jq_paths if os.path.exists(p)]
if len(existing_jq_paths) == 0:
print "-- cannot find jq"
sys.exit(1)
jq = existing_jq_paths[0]
for root, dirs, files in os.walk(dir_path):
json_files = (f for f in files if f.endswith(".json"))
for filename in json_files:
path = os.path.join(root, filename)
print "*"*80
print path
parsing_success = subprocess.call([jq, ".", path]) == 0
if filename.startswith("y_") and parsing_success == False:
print "jq\tSHOULD_HAVE_PASSED\t%s" % (filename)
elif filename.startswith("n_") and parsing_success == True:
print "jq\tSHOULD_HAVE_FAILED\t%s" % (filename)
elif filename.startswith("i_") and parsing_success == True:
print "jq\tIMPLEMENTATION_PASS\t%s" % (filename)
elif filename.startswith("i_") and parsing_success == False:
print "jq\tIMPLEMENTATION_FAIL\t%s" % (filename)
| bsd-3-clause | 3,933,764,163,440,656,000 | 33.382353 | 68 | 0.610778 | false |
cxxgtxy/tensorflow | tensorflow/contrib/data/python/kernel_tests/sequence_dataset_op_test.py | 6 | 8032 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.data.python.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class SequenceDatasetTest(test.TestCase):
def testRepeatTensorDataset(self):
"""Test a dataset that repeats its input multiple times."""
components = [np.array(1), np.array([1, 2, 3]), np.array(37.0)]
# This placeholder can be fed when dataset-definition subgraph
# runs (i.e. `init_op` below) to configure the number of
# repetitions used in a particular iterator.
count_placeholder = array_ops.placeholder(dtypes.int64, shape=[])
iterator = (dataset_ops.Dataset.from_tensors(components)
.repeat(count_placeholder).make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
self.assertEqual([c.shape for c in components],
[t.shape for t in get_next])
with self.test_session() as sess:
# Test a finite repetition.
sess.run(init_op, feed_dict={count_placeholder: 3})
for _ in range(3):
results = sess.run(get_next)
for component, result_component in zip(components, results):
self.assertAllEqual(component, result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test a different finite repetition.
sess.run(init_op, feed_dict={count_placeholder: 7})
for _ in range(7):
results = sess.run(get_next)
for component, result_component in zip(components, results):
self.assertAllEqual(component, result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test an empty repetition.
sess.run(init_op, feed_dict={count_placeholder: 0})
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test an infinite repetition.
# NOTE(mrry): There's not a good way to test that the sequence
# actually is infinite.
sess.run(init_op, feed_dict={count_placeholder: -1})
for _ in range(17):
results = sess.run(get_next)
for component, result_component in zip(components, results):
self.assertAllEqual(component, result_component)
def testTakeTensorDataset(self):
components = [np.arange(10)]
count_placeholder = array_ops.placeholder(dtypes.int64, shape=[])
iterator = (dataset_ops.Dataset.from_tensor_slices(components)
.take(count_placeholder).make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
self.assertEqual([c.shape[1:] for c in components],
[t.shape for t in get_next])
with self.test_session() as sess:
# Take fewer than input size
sess.run(init_op, feed_dict={count_placeholder: 4})
for i in range(4):
results = sess.run(get_next)
self.assertAllEqual(results, components[0][i:i+1])
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Take more than input size
sess.run(init_op, feed_dict={count_placeholder: 25})
for i in range(10):
results = sess.run(get_next)
self.assertAllEqual(results, components[0][i:i+1])
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Take all of input
sess.run(init_op, feed_dict={count_placeholder: -1})
for i in range(10):
results = sess.run(get_next)
self.assertAllEqual(results, components[0][i:i+1])
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Take nothing
sess.run(init_op, feed_dict={count_placeholder: 0})
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testSkipTensorDataset(self):
components = [np.arange(10)]
count_placeholder = array_ops.placeholder(dtypes.int64, shape=[])
iterator = (dataset_ops.Dataset.from_tensor_slices(components)
.skip(count_placeholder).make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
self.assertEqual([c.shape[1:] for c in components],
[t.shape for t in get_next])
with self.test_session() as sess:
# Skip fewer than input size, we should skip
# the first 4 elements and then read the rest.
sess.run(init_op, feed_dict={count_placeholder: 4})
for i in range(4, 10):
results = sess.run(get_next)
self.assertAllEqual(results, components[0][i:i+1])
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Skip more than input size: get nothing.
sess.run(init_op, feed_dict={count_placeholder: 25})
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Skip exactly input size.
sess.run(init_op, feed_dict={count_placeholder: 10})
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Set -1 for 'count': skip the entire dataset.
sess.run(init_op, feed_dict={count_placeholder: -1})
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Skip nothing
sess.run(init_op, feed_dict={count_placeholder: 0})
for i in range(0, 10):
results = sess.run(get_next)
self.assertAllEqual(results, components[0][i:i+1])
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testRepeatRepeatTensorDataset(self):
"""Test the composition of repeat datasets."""
components = [np.array(1), np.array([1, 2, 3]), np.array(37.0)]
inner_count = array_ops.placeholder(dtypes.int64, shape=[])
outer_count = array_ops.placeholder(dtypes.int64, shape=[])
iterator = (dataset_ops.Dataset.from_tensors(components).repeat(inner_count)
.repeat(outer_count).make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
self.assertEqual([c.shape for c in components],
[t.shape for t in get_next])
with self.test_session() as sess:
sess.run(init_op, feed_dict={inner_count: 7, outer_count: 14})
for _ in range(7 * 14):
results = sess.run(get_next)
for component, result_component in zip(components, results):
self.assertAllEqual(component, result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testRepeatEmptyDataset(self):
"""Test that repeating an empty dataset does not hang."""
iterator = (dataset_ops.Dataset.from_tensors(0).repeat(10).skip(10)
.repeat(-1).make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
with self.assertRaisesRegexp(
errors.OutOfRangeError,
"Attempted to repeat an empty dataset infinitely."):
sess.run(get_next)
if __name__ == "__main__":
test.main()
| apache-2.0 | -1,134,669,635,925,275,000 | 37.066351 | 80 | 0.659985 | false |
wavelets/zipline | zipline/utils/test_utils.py | 5 | 3103 | from contextlib import contextmanager
from logbook import FileHandler
from zipline.finance.blotter import ORDER_STATUS
from six import itervalues
import pandas as pd
def to_utc(time_str):
return pd.Timestamp(time_str, tz='US/Eastern').tz_convert('UTC')
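# Example (illustrative timestamp, not from the original file):
# to_utc('2014-01-02 9:31') parses the string as US/Eastern and converts it to
# the equivalent UTC timestamp (14:31 UTC, since Eastern is UTC-5 in January).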
def setup_logger(test, path='test.log'):
test.log_handler = FileHandler(path)
test.log_handler.push_application()
def teardown_logger(test):
test.log_handler.pop_application()
test.log_handler.close()
def drain_zipline(test, zipline):
output = []
transaction_count = 0
msg_counter = 0
# start the simulation
for update in zipline:
msg_counter += 1
output.append(update)
if 'daily_perf' in update:
transaction_count += \
len(update['daily_perf']['transactions'])
return output, transaction_count
def assert_single_position(test, zipline):
output, transaction_count = drain_zipline(test, zipline)
if 'expected_transactions' in test.zipline_test_config:
test.assertEqual(
test.zipline_test_config['expected_transactions'],
transaction_count
)
else:
test.assertEqual(
test.zipline_test_config['order_count'],
transaction_count
)
# the final message is the risk report, the second to
# last is the final day's results. Positions is a list of
# dicts.
closing_positions = output[-2]['daily_perf']['positions']
# confirm that all orders were filled.
# iterate over the output updates, overwriting
# orders when they are updated. Then check the status on all.
orders_by_id = {}
for update in output:
if 'daily_perf' in update:
if 'orders' in update['daily_perf']:
for order in update['daily_perf']['orders']:
orders_by_id[order['id']] = order
for order in itervalues(orders_by_id):
test.assertEqual(
order['status'],
ORDER_STATUS.FILLED,
"")
test.assertEqual(
len(closing_positions),
1,
"Portfolio should have one position."
)
sid = test.zipline_test_config['sid']
test.assertEqual(
closing_positions[0]['sid'],
sid,
"Portfolio should have one position in " + str(sid)
)
return output, transaction_count
class ExceptionSource(object):
def __init__(self):
pass
def get_hash(self):
return "ExceptionSource"
def __iter__(self):
return self
def next(self):
5 / 0
def __next__(self):
5 / 0
class ExceptionTransform(object):
def __init__(self):
self.window_length = 1
pass
def get_hash(self):
return "ExceptionTransform"
def update(self, event):
assert False, "An assertion message"
@contextmanager
def nullctx():
"""
Null context manager. Useful for conditionally adding a contextmanager in
a single line, e.g.:
with SomeContextManager() if some_expr else nullcontext:
do_stuff()
"""
yield
| apache-2.0 | 4,010,933,101,628,797,000 | 22.687023 | 78 | 0.614244 | false |
steebchen/youtube-dl | youtube_dl/extractor/sohu.py | 50 | 6911 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_str,
compat_urllib_parse_urlencode,
)
from ..utils import (
ExtractorError,
int_or_none,
try_get,
)
class SohuIE(InfoExtractor):
_VALID_URL = r'https?://(?P<mytv>my\.)?tv\.sohu\.com/.+?/(?(mytv)|n)(?P<id>\d+)\.shtml.*?'
# Sohu videos give different MD5 sums on Travis CI and my machine
_TESTS = [{
'note': 'This video is available only in Mainland China',
'url': 'http://tv.sohu.com/20130724/n382479172.shtml#super',
'info_dict': {
'id': '382479172',
'ext': 'mp4',
'title': 'MV:Far East Movement《The Illest》',
},
'skip': 'On available in China',
}, {
'url': 'http://tv.sohu.com/20150305/n409385080.shtml',
'info_dict': {
'id': '409385080',
'ext': 'mp4',
'title': '《2015湖南卫视羊年元宵晚会》唐嫣《花好月圆》',
}
}, {
'url': 'http://my.tv.sohu.com/us/232799889/78693464.shtml',
'info_dict': {
'id': '78693464',
'ext': 'mp4',
'title': '【爱范品】第31期:MWC见不到的奇葩手机',
}
}, {
'note': 'Multipart video',
'url': 'http://my.tv.sohu.com/pl/8384802/78910339.shtml',
'info_dict': {
'id': '78910339',
'title': '【神探苍实战秘籍】第13期 战争之影 赫卡里姆',
},
'playlist': [{
'info_dict': {
'id': '78910339_part1',
'ext': 'mp4',
'duration': 294,
'title': '【神探苍实战秘籍】第13期 战争之影 赫卡里姆',
}
}, {
'info_dict': {
'id': '78910339_part2',
'ext': 'mp4',
'duration': 300,
'title': '【神探苍实战秘籍】第13期 战争之影 赫卡里姆',
}
}, {
'info_dict': {
'id': '78910339_part3',
'ext': 'mp4',
'duration': 150,
'title': '【神探苍实战秘籍】第13期 战争之影 赫卡里姆',
}
}]
}, {
'note': 'Video with title containing dash',
'url': 'http://my.tv.sohu.com/us/249884221/78932792.shtml',
'info_dict': {
'id': '78932792',
'ext': 'mp4',
'title': 'youtube-dl testing video',
},
'params': {
'skip_download': True
}
}]
def _real_extract(self, url):
def _fetch_data(vid_id, mytv=False):
if mytv:
base_data_url = 'http://my.tv.sohu.com/play/videonew.do?vid='
else:
base_data_url = 'http://hot.vrs.sohu.com/vrs_flash.action?vid='
return self._download_json(
base_data_url + vid_id, video_id,
'Downloading JSON data for %s' % vid_id,
headers=self.geo_verification_headers())
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
mytv = mobj.group('mytv') is not None
webpage = self._download_webpage(url, video_id)
title = re.sub(r' - 搜狐视频$', '', self._og_search_title(webpage))
vid = self._html_search_regex(
r'var vid ?= ?["\'](\d+)["\']',
webpage, 'video path')
vid_data = _fetch_data(vid, mytv)
if vid_data['play'] != 1:
if vid_data.get('status') == 12:
raise ExtractorError(
'%s said: There\'s something wrong in the video.' % self.IE_NAME,
expected=True)
else:
self.raise_geo_restricted(
'%s said: The video is only licensed to users in Mainland China.' % self.IE_NAME)
formats_json = {}
for format_id in ('nor', 'high', 'super', 'ori', 'h2644k', 'h2654k'):
vid_id = vid_data['data'].get('%sVid' % format_id)
if not vid_id:
continue
vid_id = compat_str(vid_id)
formats_json[format_id] = vid_data if vid == vid_id else _fetch_data(vid_id, mytv)
part_count = vid_data['data']['totalBlocks']
playlist = []
for i in range(part_count):
formats = []
for format_id, format_data in formats_json.items():
allot = format_data['allot']
data = format_data['data']
clips_url = data['clipsURL']
su = data['su']
video_url = 'newflv.sohu.ccgslb.net'
cdnId = None
retries = 0
while 'newflv.sohu.ccgslb.net' in video_url:
params = {
'prot': 9,
'file': clips_url[i],
'new': su[i],
'prod': 'flash',
'rb': 1,
}
if cdnId is not None:
params['idc'] = cdnId
download_note = 'Downloading %s video URL part %d of %d' % (
format_id, i + 1, part_count)
if retries > 0:
download_note += ' (retry #%d)' % retries
part_info = self._parse_json(self._download_webpage(
'http://%s/?%s' % (allot, compat_urllib_parse_urlencode(params)),
video_id, download_note), video_id)
video_url = part_info['url']
cdnId = part_info.get('nid')
retries += 1
if retries > 5:
raise ExtractorError('Failed to get video URL')
formats.append({
'url': video_url,
'format_id': format_id,
'filesize': int_or_none(
try_get(data, lambda x: x['clipsBytes'][i])),
'width': int_or_none(data.get('width')),
'height': int_or_none(data.get('height')),
'fps': int_or_none(data.get('fps')),
})
self._sort_formats(formats)
playlist.append({
'id': '%s_part%d' % (video_id, i + 1),
'title': title,
'duration': vid_data['data']['clipsDuration'][i],
'formats': formats,
})
if len(playlist) == 1:
info = playlist[0]
info['id'] = video_id
else:
info = {
'_type': 'multi_video',
'entries': playlist,
'id': video_id,
'title': title,
}
return info
| unlicense | 547,829,533,164,062,500 | 32.034653 | 101 | 0.437584 | false |
brettgoldstein3/brettgoldstein-site | lib/flask/app.py | 345 | 76786 | # -*- coding: utf-8 -*-
"""
flask.app
~~~~~~~~~
This module implements the central WSGI application object.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
from threading import Lock
from datetime import timedelta
from itertools import chain
from functools import update_wrapper
from werkzeug.datastructures import ImmutableDict
from werkzeug.routing import Map, Rule, RequestRedirect, BuildError
from werkzeug.exceptions import HTTPException, InternalServerError, \
MethodNotAllowed, BadRequest
from .helpers import _PackageBoundObject, url_for, get_flashed_messages, \
locked_cached_property, _endpoint_from_view_func, find_package
from . import json
from .wrappers import Request, Response
from .config import ConfigAttribute, Config
from .ctx import RequestContext, AppContext, _AppCtxGlobals
from .globals import _request_ctx_stack, request, session, g
from .sessions import SecureCookieSessionInterface
from .module import blueprint_is_module
from .templating import DispatchingJinjaLoader, Environment, \
_default_template_ctx_processor
from .signals import request_started, request_finished, got_request_exception, \
request_tearing_down, appcontext_tearing_down
from ._compat import reraise, string_types, text_type, integer_types
# a lock used for logger initialization
_logger_lock = Lock()
def _make_timedelta(value):
if not isinstance(value, timedelta):
return timedelta(seconds=value)
return value
def setupmethod(f):
"""Wraps a method so that it performs a check in debug mode if the
first request was already handled.
"""
def wrapper_func(self, *args, **kwargs):
if self.debug and self._got_first_request:
raise AssertionError('A setup function was called after the '
'first request was handled. This usually indicates a bug '
'in the application where a module was not imported '
'and decorators or other functionality was called too late.\n'
'To fix this make sure to import all your view modules, '
'database models and everything related at a central place '
'before the application starts serving requests.')
return f(self, *args, **kwargs)
return update_wrapper(wrapper_func, f)
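# Illustrative use of @setupmethod (a sketch, not taken verbatim from this file):
# setup-time registration helpers are declared as
#
#     @setupmethod
#     def add_url_rule(self, rule, endpoint=None, view_func=None, **options):
#         ...
#
# so that, with debug enabled, calling them after the first request raises the
# AssertionError constructed above.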
class Flask(_PackageBoundObject):
"""The flask object implements a WSGI application and acts as the central
object. It is passed the name of the module or package of the
application. Once it is created it will act as a central registry for
the view functions, the URL rules, template configuration and much more.
The name of the package is used to resolve resources from inside the
package or the folder the module is contained in depending on if the
package parameter resolves to an actual python package (a folder with
an `__init__.py` file inside) or a standard module (just a `.py` file).
For more information about resource loading, see :func:`open_resource`.
Usually you create a :class:`Flask` instance in your main module or
in the `__init__.py` file of your package like this::
from flask import Flask
app = Flask(__name__)
.. admonition:: About the First Parameter
The idea of the first parameter is to give Flask an idea what
belongs to your application. This name is used to find resources
on the file system, can be used by extensions to improve debugging
information and a lot more.
So it's important what you provide there. If you are using a single
module, `__name__` is always the correct value. If you however are
using a package, it's usually recommended to hardcode the name of
your package there.
For example if your application is defined in `yourapplication/app.py`
you should create it with one of the two versions below::
app = Flask('yourapplication')
app = Flask(__name__.split('.')[0])
Why is that? The application will work even with `__name__`, thanks
to how resources are looked up. However it will make debugging more
painful. Certain extensions can make assumptions based on the
import name of your application. For example the Flask-SQLAlchemy
extension will look for the code in your application that triggered
an SQL query in debug mode. If the import name is not properly set
up, that debugging information is lost. (For example it would only
pick up SQL queries in `yourapplication.app` and not
`yourapplication.views.frontend`)
.. versionadded:: 0.7
The `static_url_path`, `static_folder`, and `template_folder`
parameters were added.
.. versionadded:: 0.8
The `instance_path` and `instance_relative_config` parameters were
added.
:param import_name: the name of the application package
:param static_url_path: can be used to specify a different path for the
static files on the web. Defaults to the name
of the `static_folder` folder.
:param static_folder: the folder with static files that should be served
at `static_url_path`. Defaults to the ``'static'``
folder in the root path of the application.
:param template_folder: the folder that contains the templates that should
be used by the application. Defaults to
``'templates'`` folder in the root path of the
application.
:param instance_path: An alternative instance path for the application.
By default the folder ``'instance'`` next to the
package or module is assumed to be the instance
path.
:param instance_relative_config: if set to `True` relative filenames
for loading the config are assumed to
be relative to the instance path instead
of the application root.
"""
#: The class that is used for request objects. See :class:`~flask.Request`
#: for more information.
request_class = Request
#: The class that is used for response objects. See
#: :class:`~flask.Response` for more information.
response_class = Response
#: The class that is used for the :data:`~flask.g` instance.
#:
#: Example use cases for a custom class:
#:
#: 1. Store arbitrary attributes on flask.g.
#: 2. Add a property for lazy per-request database connectors.
#: 3. Return None instead of AttributeError on expected attributes.
#: 4. Raise exception if an unexpected attr is set, a "controlled" flask.g.
#:
#: In Flask 0.9 this property was called `request_globals_class` but it
#: was changed in 0.10 to :attr:`app_ctx_globals_class` because the
#: flask.g object is not application context scoped.
#:
#: .. versionadded:: 0.10
app_ctx_globals_class = _AppCtxGlobals
# Backwards compatibility support
def _get_request_globals_class(self):
return self.app_ctx_globals_class
def _set_request_globals_class(self, value):
from warnings import warn
warn(DeprecationWarning('request_globals_class attribute is now '
'called app_ctx_globals_class'))
self.app_ctx_globals_class = value
request_globals_class = property(_get_request_globals_class,
_set_request_globals_class)
del _get_request_globals_class, _set_request_globals_class
#: The debug flag. Set this to `True` to enable debugging of the
#: application. In debug mode the debugger will kick in when an unhandled
#: exception occurs and the integrated server will automatically reload
#: the application if changes in the code are detected.
#:
#: This attribute can also be configured from the config with the `DEBUG`
#: configuration key. Defaults to `False`.
debug = ConfigAttribute('DEBUG')
#: The testing flag. Set this to `True` to enable the test mode of
#: Flask extensions (and in the future probably also Flask itself).
#: For example this might activate unittest helpers that have an
#: additional runtime cost which should not be enabled by default.
#:
#: If this is enabled and PROPAGATE_EXCEPTIONS is not changed from the
#: default it's implicitly enabled.
#:
#: This attribute can also be configured from the config with the
#: `TESTING` configuration key. Defaults to `False`.
testing = ConfigAttribute('TESTING')
#: If a secret key is set, cryptographic components can use this to
#: sign cookies and other things. Set this to a complex random value
#: when you want to use the secure cookie for instance.
#:
#: This attribute can also be configured from the config with the
#: `SECRET_KEY` configuration key. Defaults to `None`.
secret_key = ConfigAttribute('SECRET_KEY')
#: The secure cookie uses this for the name of the session cookie.
#:
#: This attribute can also be configured from the config with the
#: `SESSION_COOKIE_NAME` configuration key. Defaults to ``'session'``
session_cookie_name = ConfigAttribute('SESSION_COOKIE_NAME')
#: A :class:`~datetime.timedelta` which is used to set the expiration
#: date of a permanent session. The default is 31 days which makes a
#: permanent session survive for roughly one month.
#:
#: This attribute can also be configured from the config with the
#: `PERMANENT_SESSION_LIFETIME` configuration key. Defaults to
#: ``timedelta(days=31)``
permanent_session_lifetime = ConfigAttribute('PERMANENT_SESSION_LIFETIME',
get_converter=_make_timedelta)
#: Enable this if you want to use the X-Sendfile feature. Keep in
#: mind that the server has to support this. This only affects files
#: sent with the :func:`send_file` method.
#:
#: .. versionadded:: 0.2
#:
#: This attribute can also be configured from the config with the
#: `USE_X_SENDFILE` configuration key. Defaults to `False`.
use_x_sendfile = ConfigAttribute('USE_X_SENDFILE')
#: The name of the logger to use. By default the logger name is the
#: package name passed to the constructor.
#:
#: .. versionadded:: 0.4
logger_name = ConfigAttribute('LOGGER_NAME')
#: Enable the deprecated module support? This is active by default
#: in 0.7 but will be changed to False in 0.8. With Flask 1.0 modules
#: will be removed in favor of Blueprints
enable_modules = True
#: The logging format used for the debug logger. This is only used when
#: the application is in debug mode, otherwise the attached logging
#: handler does the formatting.
#:
#: .. versionadded:: 0.3
debug_log_format = (
'-' * 80 + '\n' +
'%(levelname)s in %(module)s [%(pathname)s:%(lineno)d]:\n' +
'%(message)s\n' +
'-' * 80
)
#: The JSON encoder class to use. Defaults to :class:`~flask.json.JSONEncoder`.
#:
#: .. versionadded:: 0.10
json_encoder = json.JSONEncoder
#: The JSON decoder class to use. Defaults to :class:`~flask.json.JSONDecoder`.
#:
#: .. versionadded:: 0.10
json_decoder = json.JSONDecoder
#: Options that are passed directly to the Jinja2 environment.
jinja_options = ImmutableDict(
extensions=['jinja2.ext.autoescape', 'jinja2.ext.with_']
)
#: Default configuration parameters.
default_config = ImmutableDict({
'DEBUG': False,
'TESTING': False,
'PROPAGATE_EXCEPTIONS': None,
'PRESERVE_CONTEXT_ON_EXCEPTION': None,
'SECRET_KEY': None,
'PERMANENT_SESSION_LIFETIME': timedelta(days=31),
'USE_X_SENDFILE': False,
'LOGGER_NAME': None,
'SERVER_NAME': None,
'APPLICATION_ROOT': None,
'SESSION_COOKIE_NAME': 'session',
'SESSION_COOKIE_DOMAIN': None,
'SESSION_COOKIE_PATH': None,
'SESSION_COOKIE_HTTPONLY': True,
'SESSION_COOKIE_SECURE': False,
'MAX_CONTENT_LENGTH': None,
'SEND_FILE_MAX_AGE_DEFAULT': 12 * 60 * 60, # 12 hours
'TRAP_BAD_REQUEST_ERRORS': False,
'TRAP_HTTP_EXCEPTIONS': False,
'PREFERRED_URL_SCHEME': 'http',
'JSON_AS_ASCII': True,
'JSON_SORT_KEYS': True,
'JSONIFY_PRETTYPRINT_REGULAR': True,
})
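    # A minimal sketch of how these defaults are usually overridden through
    # ``app.config`` after the application object is created (the values below
    # are made-up examples, not recommendations):
    #
    #   app = Flask(__name__)
    #   app.config['SECRET_KEY'] = 'change-me'
    #   app.config.update(MAX_CONTENT_LENGTH=16 * 1024 * 1024,
    #                     SESSION_COOKIE_SECURE=True)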
#: The rule object to use for URL rules created. This is used by
#: :meth:`add_url_rule`. Defaults to :class:`werkzeug.routing.Rule`.
#:
#: .. versionadded:: 0.7
url_rule_class = Rule
    #: the test client that is used when `test_client` is used.
#:
#: .. versionadded:: 0.7
test_client_class = None
#: the session interface to use. By default an instance of
#: :class:`~flask.sessions.SecureCookieSessionInterface` is used here.
#:
#: .. versionadded:: 0.8
session_interface = SecureCookieSessionInterface()
def __init__(self, import_name, static_path=None, static_url_path=None,
static_folder='static', template_folder='templates',
instance_path=None, instance_relative_config=False):
_PackageBoundObject.__init__(self, import_name,
template_folder=template_folder)
if static_path is not None:
from warnings import warn
warn(DeprecationWarning('static_path is now called '
'static_url_path'), stacklevel=2)
static_url_path = static_path
if static_url_path is not None:
self.static_url_path = static_url_path
if static_folder is not None:
self.static_folder = static_folder
if instance_path is None:
instance_path = self.auto_find_instance_path()
elif not os.path.isabs(instance_path):
raise ValueError('If an instance path is provided it must be '
'absolute. A relative path was given instead.')
#: Holds the path to the instance folder.
#:
#: .. versionadded:: 0.8
self.instance_path = instance_path
#: The configuration dictionary as :class:`Config`. This behaves
#: exactly like a regular dictionary but supports additional methods
#: to load a config from files.
self.config = self.make_config(instance_relative_config)
# Prepare the deferred setup of the logger.
self._logger = None
self.logger_name = self.import_name
#: A dictionary of all view functions registered. The keys will
#: be function names which are also used to generate URLs and
#: the values are the function objects themselves.
#: To register a view function, use the :meth:`route` decorator.
self.view_functions = {}
# support for the now deprecated `error_handlers` attribute. The
# :attr:`error_handler_spec` shall be used now.
self._error_handlers = {}
#: A dictionary of all registered error handlers. The key is `None`
#: for error handlers active on the application, otherwise the key is
#: the name of the blueprint. Each key points to another dictionary
        #: where the key is the status code of the HTTP exception. The
#: special key `None` points to a list of tuples where the first item
#: is the class for the instance check and the second the error handler
#: function.
#:
        #: To register an error handler, use the :meth:`errorhandler`
#: decorator.
self.error_handler_spec = {None: self._error_handlers}
#: A list of functions that are called when :meth:`url_for` raises a
#: :exc:`~werkzeug.routing.BuildError`. Each function registered here
#: is called with `error`, `endpoint` and `values`. If a function
#: returns `None` or raises a `BuildError` the next function is
#: tried.
#:
#: .. versionadded:: 0.9
self.url_build_error_handlers = []
#: A dictionary with lists of functions that should be called at the
#: beginning of the request. The key of the dictionary is the name of
#: the blueprint this function is active for, `None` for all requests.
#: This can for example be used to open database connections or
        #: to get hold of the currently logged in user. To register a
#: function here, use the :meth:`before_request` decorator.
self.before_request_funcs = {}
        #: A list of functions that should be called at the beginning of the
#: first request to this instance. To register a function here, use
#: the :meth:`before_first_request` decorator.
#:
#: .. versionadded:: 0.8
self.before_first_request_funcs = []
#: A dictionary with lists of functions that should be called after
#: each request. The key of the dictionary is the name of the blueprint
#: this function is active for, `None` for all requests. This can for
        #: example be used to open database connections or to get hold of the
#: currently logged in user. To register a function here, use the
#: :meth:`after_request` decorator.
self.after_request_funcs = {}
#: A dictionary with lists of functions that are called after
#: each request, even if an exception has occurred. The key of the
#: dictionary is the name of the blueprint this function is active for,
#: `None` for all requests. These functions are not allowed to modify
#: the request, and their return values are ignored. If an exception
#: occurred while processing the request, it gets passed to each
#: teardown_request function. To register a function here, use the
#: :meth:`teardown_request` decorator.
#:
#: .. versionadded:: 0.7
self.teardown_request_funcs = {}
#: A list of functions that are called when the application context
#: is destroyed. Since the application context is also torn down
#: if the request ends this is the place to store code that disconnects
#: from databases.
#:
#: .. versionadded:: 0.9
self.teardown_appcontext_funcs = []
#: A dictionary with lists of functions that can be used as URL
#: value processor functions. Whenever a URL is built these functions
#: are called to modify the dictionary of values in place. The key
#: `None` here is used for application wide
#: callbacks, otherwise the key is the name of the blueprint.
#: Each of these functions has the chance to modify the dictionary
#:
#: .. versionadded:: 0.7
self.url_value_preprocessors = {}
#: A dictionary with lists of functions that can be used as URL value
#: preprocessors. The key `None` here is used for application wide
#: callbacks, otherwise the key is the name of the blueprint.
#: Each of these functions has the chance to modify the dictionary
#: of URL values before they are used as the keyword arguments of the
        #: view function. For each function registered here one should also
        #: provide a :meth:`url_defaults` function that automatically adds
        #: back the parameters that were removed that way.
#:
#: .. versionadded:: 0.7
self.url_default_functions = {}
        #: A dictionary with lists of functions that are called without arguments
#: to populate the template context. The key of the dictionary is the
#: name of the blueprint this function is active for, `None` for all
#: requests. Each returns a dictionary that the template context is
#: updated with. To register a function here, use the
#: :meth:`context_processor` decorator.
self.template_context_processors = {
None: [_default_template_ctx_processor]
}
#: all the attached blueprints in a dictionary by name. Blueprints
#: can be attached multiple times so this dictionary does not tell
#: you how often they got attached.
#:
#: .. versionadded:: 0.7
self.blueprints = {}
#: a place where extensions can store application specific state. For
#: example this is where an extension could store database engines and
#: similar things. For backwards compatibility extensions should register
#: themselves like this::
#:
#: if not hasattr(app, 'extensions'):
#: app.extensions = {}
#: app.extensions['extensionname'] = SomeObject()
#:
#: The key must match the name of the `flaskext` module. For example in
#: case of a "Flask-Foo" extension in `flaskext.foo`, the key would be
#: ``'foo'``.
#:
#: .. versionadded:: 0.7
self.extensions = {}
#: The :class:`~werkzeug.routing.Map` for this instance. You can use
#: this to change the routing converters after the class was created
#: but before any routes are connected. Example::
#:
#: from werkzeug.routing import BaseConverter
#:
#: class ListConverter(BaseConverter):
#: def to_python(self, value):
#: return value.split(',')
#: def to_url(self, values):
#: return ','.join(BaseConverter.to_url(value)
#: for value in values)
#:
#: app = Flask(__name__)
#: app.url_map.converters['list'] = ListConverter
self.url_map = Map()
# tracks internally if the application already handled at least one
# request.
self._got_first_request = False
self._before_request_lock = Lock()
# register the static folder for the application. Do that even
# if the folder does not exist. First of all it might be created
# while the server is running (usually happens during development)
# but also because google appengine stores static files somewhere
# else when mapped with the .yml file.
if self.has_static_folder:
self.add_url_rule(self.static_url_path + '/<path:filename>',
endpoint='static',
view_func=self.send_static_file)
def _get_error_handlers(self):
from warnings import warn
warn(DeprecationWarning('error_handlers is deprecated, use the '
'new error_handler_spec attribute instead.'), stacklevel=1)
return self._error_handlers
def _set_error_handlers(self, value):
self._error_handlers = value
self.error_handler_spec[None] = value
error_handlers = property(_get_error_handlers, _set_error_handlers)
del _get_error_handlers, _set_error_handlers
@locked_cached_property
def name(self):
"""The name of the application. This is usually the import name
with the difference that it's guessed from the run file if the
        import name is ``__main__``. This name is used as a display name when
Flask needs the name of the application. It can be set and overridden
to change the value.
.. versionadded:: 0.8
"""
if self.import_name == '__main__':
fn = getattr(sys.modules['__main__'], '__file__', None)
if fn is None:
return '__main__'
return os.path.splitext(os.path.basename(fn))[0]
return self.import_name
@property
def propagate_exceptions(self):
"""Returns the value of the `PROPAGATE_EXCEPTIONS` configuration
value in case it's set, otherwise a sensible default is returned.
.. versionadded:: 0.7
"""
rv = self.config['PROPAGATE_EXCEPTIONS']
if rv is not None:
return rv
return self.testing or self.debug
@property
def preserve_context_on_exception(self):
"""Returns the value of the `PRESERVE_CONTEXT_ON_EXCEPTION`
configuration value in case it's set, otherwise a sensible default
is returned.
.. versionadded:: 0.7
"""
rv = self.config['PRESERVE_CONTEXT_ON_EXCEPTION']
if rv is not None:
return rv
return self.debug
@property
def logger(self):
"""A :class:`logging.Logger` object for this application. The
default configuration is to log to stderr if the application is
in debug mode. This logger can be used to (surprise) log messages.
        Here are some examples::
app.logger.debug('A value for debugging')
app.logger.warning('A warning occurred (%d apples)', 42)
app.logger.error('An error occurred')
.. versionadded:: 0.3
"""
if self._logger and self._logger.name == self.logger_name:
return self._logger
with _logger_lock:
if self._logger and self._logger.name == self.logger_name:
return self._logger
from flask.logging import create_logger
self._logger = rv = create_logger(self)
return rv
@locked_cached_property
def jinja_env(self):
"""The Jinja2 environment used to load templates."""
return self.create_jinja_environment()
@property
def got_first_request(self):
"""This attribute is set to `True` if the application started
handling the first request.
.. versionadded:: 0.8
"""
return self._got_first_request
def make_config(self, instance_relative=False):
"""Used to create the config attribute by the Flask constructor.
The `instance_relative` parameter is passed in from the constructor
of Flask (there named `instance_relative_config`) and indicates if
the config should be relative to the instance path or the root path
of the application.
.. versionadded:: 0.8
"""
root_path = self.root_path
if instance_relative:
root_path = self.instance_path
return Config(root_path, self.default_config)
def auto_find_instance_path(self):
"""Tries to locate the instance path if it was not provided to the
constructor of the application class. It will basically calculate
the path to a folder named ``instance`` next to your main file or
the package.
.. versionadded:: 0.8
"""
prefix, package_path = find_package(self.import_name)
if prefix is None:
return os.path.join(package_path, 'instance')
return os.path.join(prefix, 'var', self.name + '-instance')
def open_instance_resource(self, resource, mode='rb'):
"""Opens a resource from the application's instance folder
(:attr:`instance_path`). Otherwise works like
:meth:`open_resource`. Instance resources can also be opened for
writing.
:param resource: the name of the resource. To access resources within
subfolders use forward slashes as separator.
:param mode: resource file opening mode, default is 'rb'.
"""
return open(os.path.join(self.instance_path, resource), mode)
def create_jinja_environment(self):
"""Creates the Jinja2 environment based on :attr:`jinja_options`
and :meth:`select_jinja_autoescape`. Since 0.7 this also adds
the Jinja2 globals and filters after initialization. Override
this function to customize the behavior.
.. versionadded:: 0.5
"""
options = dict(self.jinja_options)
if 'autoescape' not in options:
options['autoescape'] = self.select_jinja_autoescape
rv = Environment(self, **options)
rv.globals.update(
url_for=url_for,
get_flashed_messages=get_flashed_messages,
config=self.config,
# request, session and g are normally added with the
# context processor for efficiency reasons but for imported
# templates we also want the proxies in there.
request=request,
session=session,
g=g
)
rv.filters['tojson'] = json.tojson_filter
return rv
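    # A rough sketch of the kind of override the docstring above refers to;
    # ``MyFlask`` and the extra filter are hypothetical names used only for
    # illustration:
    #
    #   class MyFlask(Flask):
    #       def create_jinja_environment(self):
    #           rv = Flask.create_jinja_environment(self)
    #           rv.filters['shout'] = lambda s: s.upper()
    #           return rv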
def create_global_jinja_loader(self):
"""Creates the loader for the Jinja2 environment. Can be used to
        override just the loader while keeping the rest unchanged. It's
        discouraged to override this function. Instead one should override
        the :meth:`jinja_loader` function.
The global loader dispatches between the loaders of the application
and the individual blueprints.
.. versionadded:: 0.7
"""
return DispatchingJinjaLoader(self)
def init_jinja_globals(self):
"""Deprecated. Used to initialize the Jinja2 globals.
.. versionadded:: 0.5
.. versionchanged:: 0.7
This method is deprecated with 0.7. Override
:meth:`create_jinja_environment` instead.
"""
def select_jinja_autoescape(self, filename):
"""Returns `True` if autoescaping should be active for the given
template name.
.. versionadded:: 0.5
"""
if filename is None:
return False
return filename.endswith(('.html', '.htm', '.xml', '.xhtml'))
def update_template_context(self, context):
"""Update the template context with some commonly used variables.
This injects request, session, config and g into the template
context as well as everything template context processors want
        to inject. Note that as of Flask 0.6, the original values
in the context will not be overridden if a context processor
decides to return a value with the same key.
:param context: the context as a dictionary that is updated in place
to add extra variables.
"""
funcs = self.template_context_processors[None]
reqctx = _request_ctx_stack.top
if reqctx is not None:
bp = reqctx.request.blueprint
if bp is not None and bp in self.template_context_processors:
funcs = chain(funcs, self.template_context_processors[bp])
orig_ctx = context.copy()
for func in funcs:
context.update(func())
        # make sure the original values win. This makes it possible to add
        # new variables in context processors more easily without breaking
# existing views.
context.update(orig_ctx)
def run(self, host=None, port=None, debug=None, **options):
"""Runs the application on a local development server. If the
:attr:`debug` flag is set the server will automatically reload
for code changes and show a debugger in case an exception happened.
If you want to run the application in debug mode, but disable the
code execution on the interactive debugger, you can pass
``use_evalex=False`` as parameter. This will keep the debugger's
traceback screen active, but disable code execution.
.. admonition:: Keep in Mind
Flask will suppress any server error with a generic error page
unless it is in debug mode. As such to enable just the
interactive debugger without the code reloading, you have to
invoke :meth:`run` with ``debug=True`` and ``use_reloader=False``.
Setting ``use_debugger`` to `True` without being in debug mode
won't catch any exceptions because there won't be any to
catch.
.. versionchanged:: 0.10
The default port is now picked from the ``SERVER_NAME`` variable.
:param host: the hostname to listen on. Set this to ``'0.0.0.0'`` to
have the server available externally as well. Defaults to
``'127.0.0.1'``.
:param port: the port of the webserver. Defaults to ``5000`` or the
port defined in the ``SERVER_NAME`` config variable if
present.
:param debug: if given, enable or disable debug mode.
See :attr:`debug`.
:param options: the options to be forwarded to the underlying
Werkzeug server. See
:func:`werkzeug.serving.run_simple` for more
information.
"""
from werkzeug.serving import run_simple
if host is None:
host = '127.0.0.1'
if port is None:
server_name = self.config['SERVER_NAME']
if server_name and ':' in server_name:
port = int(server_name.rsplit(':', 1)[1])
else:
port = 5000
if debug is not None:
self.debug = bool(debug)
options.setdefault('use_reloader', self.debug)
options.setdefault('use_debugger', self.debug)
try:
run_simple(host, port, self, **options)
finally:
# reset the first request information if the development server
            # shut down normally. This makes it possible to restart the server
            # without the reloader and the like from an interactive shell.
self._got_first_request = False
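    # Typical development usage of :meth:`run`; host, port and debug values are
    # examples only:
    #
    #   if __name__ == '__main__':
    #       app.run(host='0.0.0.0', port=5000, debug=True)
    #
    # Extra keyword arguments such as ``use_reloader=False`` are forwarded to
    # :func:`werkzeug.serving.run_simple` unchanged.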
def test_client(self, use_cookies=True):
"""Creates a test client for this application. For information
about unit testing head over to :ref:`testing`.
Note that if you are testing for assertions or exceptions in your
application code, you must set ``app.testing = True`` in order for the
exceptions to propagate to the test client. Otherwise, the exception
will be handled by the application (not visible to the test client) and
the only indication of an AssertionError or other exception will be a
500 status code response to the test client. See the :attr:`testing`
attribute. For example::
app.testing = True
client = app.test_client()
The test client can be used in a `with` block to defer the closing down
of the context until the end of the `with` block. This is useful if
you want to access the context locals for testing::
with app.test_client() as c:
rv = c.get('/?vodka=42')
assert request.args['vodka'] == '42'
See :class:`~flask.testing.FlaskClient` for more information.
.. versionchanged:: 0.4
added support for `with` block usage for the client.
.. versionadded:: 0.7
The `use_cookies` parameter was added as well as the ability
to override the client to be used by setting the
:attr:`test_client_class` attribute.
"""
cls = self.test_client_class
if cls is None:
from flask.testing import FlaskClient as cls
return cls(self, self.response_class, use_cookies=use_cookies)
def open_session(self, request):
"""Creates or opens a new session. Default implementation stores all
session data in a signed cookie. This requires that the
:attr:`secret_key` is set. Instead of overriding this method
we recommend replacing the :class:`session_interface`.
:param request: an instance of :attr:`request_class`.
"""
return self.session_interface.open_session(self, request)
def save_session(self, session, response):
"""Saves the session if it needs updates. For the default
implementation, check :meth:`open_session`. Instead of overriding this
method we recommend replacing the :class:`session_interface`.
:param session: the session to be saved (a
:class:`~werkzeug.contrib.securecookie.SecureCookie`
object)
:param response: an instance of :attr:`response_class`
"""
return self.session_interface.save_session(self, session, response)
def make_null_session(self):
"""Creates a new instance of a missing session. Instead of overriding
this method we recommend replacing the :class:`session_interface`.
.. versionadded:: 0.7
"""
return self.session_interface.make_null_session(self)
def register_module(self, module, **options):
"""Registers a module with this application. The keyword argument
of this function are the same as the ones for the constructor of the
:class:`Module` class and will override the values of the module if
provided.
.. versionchanged:: 0.7
The module system was deprecated in favor for the blueprint
system.
"""
assert blueprint_is_module(module), 'register_module requires ' \
'actual module objects. Please upgrade to blueprints though.'
if not self.enable_modules:
raise RuntimeError('Module support was disabled but code '
'attempted to register a module named %r' % module)
else:
from warnings import warn
warn(DeprecationWarning('Modules are deprecated. Upgrade to '
'using blueprints. Have a look into the documentation for '
'more information. If this module was registered by a '
'Flask-Extension upgrade the extension or contact the author '
'of that extension instead. (Registered %r)' % module),
stacklevel=2)
self.register_blueprint(module, **options)
@setupmethod
def register_blueprint(self, blueprint, **options):
"""Registers a blueprint on the application.
.. versionadded:: 0.7
"""
first_registration = False
if blueprint.name in self.blueprints:
assert self.blueprints[blueprint.name] is blueprint, \
'A blueprint\'s name collision occurred between %r and ' \
'%r. Both share the same name "%s". Blueprints that ' \
'are created on the fly need unique names.' % \
(blueprint, self.blueprints[blueprint.name], blueprint.name)
else:
self.blueprints[blueprint.name] = blueprint
first_registration = True
blueprint.register(self, options, first_registration)
@setupmethod
def add_url_rule(self, rule, endpoint=None, view_func=None, **options):
"""Connects a URL rule. Works exactly like the :meth:`route`
decorator. If a view_func is provided it will be registered with the
endpoint.
Basically this example::
@app.route('/')
def index():
pass
Is equivalent to the following::
def index():
pass
app.add_url_rule('/', 'index', index)
If the view_func is not provided you will need to connect the endpoint
to a view function like so::
app.view_functions['index'] = index
Internally :meth:`route` invokes :meth:`add_url_rule` so if you want
to customize the behavior via subclassing you only need to change
this method.
For more information refer to :ref:`url-route-registrations`.
.. versionchanged:: 0.2
`view_func` parameter added.
.. versionchanged:: 0.6
`OPTIONS` is added automatically as method.
:param rule: the URL rule as string
:param endpoint: the endpoint for the registered URL rule. Flask
itself assumes the name of the view function as
endpoint
:param view_func: the function to call when serving a request to the
provided endpoint
:param options: the options to be forwarded to the underlying
:class:`~werkzeug.routing.Rule` object. A change
                        to Werkzeug is the handling of method options. ``methods``
is a list of methods this rule should be limited
to (`GET`, `POST` etc.). By default a rule
just listens for `GET` (and implicitly `HEAD`).
Starting with Flask 0.6, `OPTIONS` is implicitly
added and handled by the standard request handling.
"""
if endpoint is None:
endpoint = _endpoint_from_view_func(view_func)
options['endpoint'] = endpoint
methods = options.pop('methods', None)
# if the methods are not given and the view_func object knows its
# methods we can use that instead. If neither exists, we go with
# a tuple of only `GET` as default.
if methods is None:
methods = getattr(view_func, 'methods', None) or ('GET',)
methods = set(methods)
# Methods that should always be added
required_methods = set(getattr(view_func, 'required_methods', ()))
# starting with Flask 0.8 the view_func object can disable and
# force-enable the automatic options handling.
provide_automatic_options = getattr(view_func,
'provide_automatic_options', None)
if provide_automatic_options is None:
if 'OPTIONS' not in methods:
provide_automatic_options = True
required_methods.add('OPTIONS')
else:
provide_automatic_options = False
# Add the required methods now.
methods |= required_methods
# due to a werkzeug bug we need to make sure that the defaults are
# None if they are an empty dictionary. This should not be necessary
# with Werkzeug 0.7
options['defaults'] = options.get('defaults') or None
rule = self.url_rule_class(rule, methods=methods, **options)
rule.provide_automatic_options = provide_automatic_options
self.url_map.add(rule)
if view_func is not None:
old_func = self.view_functions.get(endpoint)
if old_func is not None and old_func is not view_func:
raise AssertionError('View function mapping is overwriting an '
'existing endpoint function: %s' % endpoint)
self.view_functions[endpoint] = view_func
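    # A short sketch of :meth:`add_url_rule` used directly, equivalent to the
    # decorator form; ``show_user`` is a hypothetical view function:
    #
    #   def show_user(username):
    #       return 'User %s' % username
    #
    #   app.add_url_rule('/user/<username>', 'show_user', show_user,
    #                    methods=['GET', 'POST'])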
def route(self, rule, **options):
"""A decorator that is used to register a view function for a
given URL rule. This does the same thing as :meth:`add_url_rule`
but is intended for decorator usage::
@app.route('/')
def index():
return 'Hello World'
For more information refer to :ref:`url-route-registrations`.
:param rule: the URL rule as string
:param endpoint: the endpoint for the registered URL rule. Flask
itself assumes the name of the view function as
endpoint
:param options: the options to be forwarded to the underlying
:class:`~werkzeug.routing.Rule` object. A change
                        to Werkzeug is the handling of method options. ``methods``
is a list of methods this rule should be limited
to (`GET`, `POST` etc.). By default a rule
just listens for `GET` (and implicitly `HEAD`).
Starting with Flask 0.6, `OPTIONS` is implicitly
added and handled by the standard request handling.
"""
def decorator(f):
endpoint = options.pop('endpoint', None)
self.add_url_rule(rule, endpoint, f, **options)
return f
return decorator
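    # The ``options`` accepted here are the same as for :meth:`add_url_rule`;
    # a small sketch limiting a rule to certain methods (the view is made up):
    #
    #   @app.route('/login', methods=['GET', 'POST'])
    #   def login():
    #       return 'login page'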
@setupmethod
def endpoint(self, endpoint):
"""A decorator to register a function as an endpoint.
Example::
@app.endpoint('example.endpoint')
def example():
return "example"
:param endpoint: the name of the endpoint
"""
def decorator(f):
self.view_functions[endpoint] = f
return f
return decorator
@setupmethod
def errorhandler(self, code_or_exception):
"""A decorator that is used to register a function give a given
error code. Example::
@app.errorhandler(404)
def page_not_found(error):
return 'This page does not exist', 404
You can also register handlers for arbitrary exceptions::
@app.errorhandler(DatabaseError)
def special_exception_handler(error):
return 'Database connection failed', 500
You can also register a function as error handler without using
the :meth:`errorhandler` decorator. The following example is
equivalent to the one above::
def page_not_found(error):
return 'This page does not exist', 404
app.error_handler_spec[None][404] = page_not_found
Setting error handlers via assignments to :attr:`error_handler_spec`
however is discouraged as it requires fiddling with nested dictionaries
and the special case for arbitrary exception types.
The first `None` refers to the active blueprint. If the error
handler should be application wide `None` shall be used.
.. versionadded:: 0.7
One can now additionally also register custom exception types
that do not necessarily have to be a subclass of the
:class:`~werkzeug.exceptions.HTTPException` class.
        :param code_or_exception: the code as integer for the handler, or an
                                  arbitrary exception class
"""
def decorator(f):
self._register_error_handler(None, code_or_exception, f)
return f
return decorator
def register_error_handler(self, code_or_exception, f):
"""Alternative error attach function to the :meth:`errorhandler`
decorator that is more straightforward to use for non decorator
usage.
.. versionadded:: 0.7
"""
self._register_error_handler(None, code_or_exception, f)
@setupmethod
def _register_error_handler(self, key, code_or_exception, f):
if isinstance(code_or_exception, HTTPException):
code_or_exception = code_or_exception.code
if isinstance(code_or_exception, integer_types):
assert code_or_exception != 500 or key is None, \
'It is currently not possible to register a 500 internal ' \
'server error on a per-blueprint level.'
self.error_handler_spec.setdefault(key, {})[code_or_exception] = f
else:
self.error_handler_spec.setdefault(key, {}).setdefault(None, []) \
.append((code_or_exception, f))
@setupmethod
def template_filter(self, name=None):
"""A decorator that is used to register custom template filter.
You can specify a name for the filter, otherwise the function
name will be used. Example::
@app.template_filter()
def reverse(s):
return s[::-1]
:param name: the optional name of the filter, otherwise the
function name will be used.
"""
def decorator(f):
self.add_template_filter(f, name=name)
return f
return decorator
@setupmethod
def add_template_filter(self, f, name=None):
"""Register a custom template filter. Works exactly like the
:meth:`template_filter` decorator.
:param name: the optional name of the filter, otherwise the
function name will be used.
"""
self.jinja_env.filters[name or f.__name__] = f
@setupmethod
def template_test(self, name=None):
"""A decorator that is used to register custom template test.
You can specify a name for the test, otherwise the function
name will be used. Example::
@app.template_test()
def is_prime(n):
if n == 2:
return True
for i in range(2, int(math.ceil(math.sqrt(n))) + 1):
if n % i == 0:
return False
return True
.. versionadded:: 0.10
:param name: the optional name of the test, otherwise the
function name will be used.
"""
def decorator(f):
self.add_template_test(f, name=name)
return f
return decorator
@setupmethod
def add_template_test(self, f, name=None):
"""Register a custom template test. Works exactly like the
:meth:`template_test` decorator.
.. versionadded:: 0.10
:param name: the optional name of the test, otherwise the
function name will be used.
"""
self.jinja_env.tests[name or f.__name__] = f
@setupmethod
def template_global(self, name=None):
"""A decorator that is used to register a custom template global function.
You can specify a name for the global function, otherwise the function
name will be used. Example::
@app.template_global()
def double(n):
return 2 * n
.. versionadded:: 0.10
:param name: the optional name of the global function, otherwise the
function name will be used.
"""
def decorator(f):
self.add_template_global(f, name=name)
return f
return decorator
@setupmethod
def add_template_global(self, f, name=None):
"""Register a custom template global function. Works exactly like the
:meth:`template_global` decorator.
.. versionadded:: 0.10
:param name: the optional name of the global function, otherwise the
function name will be used.
"""
self.jinja_env.globals[name or f.__name__] = f
@setupmethod
def before_request(self, f):
"""Registers a function to run before each request."""
self.before_request_funcs.setdefault(None, []).append(f)
return f
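    # Sketch of a :meth:`before_request` callback; returning a response from it
    # short-circuits normal dispatching (``g.user`` is a made-up example):
    #
    #   @app.before_request
    #   def load_user():
    #       g.user = session.get('user_id')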
@setupmethod
def before_first_request(self, f):
"""Registers a function to be run before the first request to this
instance of the application.
.. versionadded:: 0.8
"""
self.before_first_request_funcs.append(f)
@setupmethod
def after_request(self, f):
"""Register a function to be run after each request. Your function
        must take one parameter, an instance of :attr:`response_class`, and return
a new response object or the same (see :meth:`process_response`).
As of Flask 0.7 this function might not be executed at the end of the
request in case an unhandled exception occurred.
"""
self.after_request_funcs.setdefault(None, []).append(f)
return f
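    # Sketch of an :meth:`after_request` callback; it must accept and return a
    # response object (the header added below is only an example):
    #
    #   @app.after_request
    #   def add_header(response):
    #       response.headers['X-Example'] = 'value'
    #       return response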
@setupmethod
def teardown_request(self, f):
"""Register a function to be run at the end of each request,
regardless of whether there was an exception or not. These functions
        are executed when the request context is popped, even if an
        actual request was not performed.
Example::
ctx = app.test_request_context()
ctx.push()
...
ctx.pop()
When ``ctx.pop()`` is executed in the above example, the teardown
functions are called just before the request context moves from the
stack of active contexts. This becomes relevant if you are using
such constructs in tests.
        Generally teardown functions must take every necessary step to avoid
        failing. If they do execute code that might fail they
        will have to surround the execution of that code with try/except
        statements and log any errors that occur.
        When a teardown function was called because of an exception it will
be passed an error object.
.. admonition:: Debug Note
In debug mode Flask will not tear down a request on an exception
            immediately. Instead it will keep it alive so that the interactive
debugger can still access it. This behavior can be controlled
by the ``PRESERVE_CONTEXT_ON_EXCEPTION`` configuration variable.
"""
self.teardown_request_funcs.setdefault(None, []).append(f)
return f
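    # Sketch of a :meth:`teardown_request` callback; it receives the exception
    # (or None) and its return value is ignored (``g.db`` is hypothetical):
    #
    #   @app.teardown_request
    #   def close_db(exc):
    #       db = getattr(g, 'db', None)
    #       if db is not None:
    #           db.close()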
@setupmethod
def teardown_appcontext(self, f):
"""Registers a function to be called when the application context
ends. These functions are typically also called when the request
context is popped.
Example::
ctx = app.app_context()
ctx.push()
...
ctx.pop()
When ``ctx.pop()`` is executed in the above example, the teardown
functions are called just before the app context moves from the
stack of active contexts. This becomes relevant if you are using
such constructs in tests.
Since a request context typically also manages an application
context it would also be called when you pop a request context.
When a teardown function was called because of an exception it will
be passed an error object.
.. versionadded:: 0.9
"""
self.teardown_appcontext_funcs.append(f)
return f
@setupmethod
def context_processor(self, f):
"""Registers a template context processor function."""
self.template_context_processors[None].append(f)
return f
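    # Sketch of a :meth:`context_processor`; it must return a dict that is
    # merged into the template context (the variable name is made up):
    #
    #   @app.context_processor
    #   def inject_site_name():
    #       return {'site_name': 'Example Site'}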
@setupmethod
def url_value_preprocessor(self, f):
"""Registers a function as URL value preprocessor for all view
functions of the application. It's called before the view functions
are called and can modify the url values provided.
"""
self.url_value_preprocessors.setdefault(None, []).append(f)
return f
@setupmethod
def url_defaults(self, f):
"""Callback function for URL defaults for all view functions of the
application. It's called with the endpoint and values and should
update the values passed in place.
"""
self.url_default_functions.setdefault(None, []).append(f)
return f
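    # Sketch of how :meth:`url_value_preprocessor` and :meth:`url_defaults` are
    # commonly paired; the ``lang_code`` URL value is an illustrative example:
    #
    #   @app.url_value_preprocessor
    #   def pull_lang_code(endpoint, values):
    #       g.lang_code = values.pop('lang_code', None)
    #
    #   @app.url_defaults
    #   def add_lang_code(endpoint, values):
    #       values.setdefault('lang_code', g.lang_code)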
def handle_http_exception(self, e):
"""Handles an HTTP exception. By default this will invoke the
registered error handlers and fall back to returning the
exception as response.
.. versionadded:: 0.3
"""
handlers = self.error_handler_spec.get(request.blueprint)
# Proxy exceptions don't have error codes. We want to always return
# those unchanged as errors
if e.code is None:
return e
if handlers and e.code in handlers:
handler = handlers[e.code]
else:
handler = self.error_handler_spec[None].get(e.code)
if handler is None:
return e
return handler(e)
def trap_http_exception(self, e):
"""Checks if an HTTP exception should be trapped or not. By default
this will return `False` for all exceptions except for a bad request
key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to `True`. It
also returns `True` if ``TRAP_HTTP_EXCEPTIONS`` is set to `True`.
This is called for all HTTP exceptions raised by a view function.
If it returns `True` for any exception the error handler for this
exception is not called and it shows up as regular exception in the
traceback. This is helpful for debugging implicitly raised HTTP
exceptions.
.. versionadded:: 0.8
"""
if self.config['TRAP_HTTP_EXCEPTIONS']:
return True
if self.config['TRAP_BAD_REQUEST_ERRORS']:
return isinstance(e, BadRequest)
return False
def handle_user_exception(self, e):
"""This method is called whenever an exception occurs that should be
handled. A special case are
        :class:`~werkzeug.exceptions.HTTPException`\s which are forwarded by
this function to the :meth:`handle_http_exception` method. This
function will either return a response value or reraise the
exception with the same traceback.
.. versionadded:: 0.7
"""
exc_type, exc_value, tb = sys.exc_info()
assert exc_value is e
# ensure not to trash sys.exc_info() at that point in case someone
# wants the traceback preserved in handle_http_exception. Of course
# we cannot prevent users from trashing it themselves in a custom
# trap_http_exception method so that's their fault then.
if isinstance(e, HTTPException) and not self.trap_http_exception(e):
return self.handle_http_exception(e)
blueprint_handlers = ()
handlers = self.error_handler_spec.get(request.blueprint)
if handlers is not None:
blueprint_handlers = handlers.get(None, ())
app_handlers = self.error_handler_spec[None].get(None, ())
for typecheck, handler in chain(blueprint_handlers, app_handlers):
if isinstance(e, typecheck):
return handler(e)
reraise(exc_type, exc_value, tb)
def handle_exception(self, e):
"""Default exception handling that kicks in when an exception
occurs that is not caught. In debug mode the exception will
be re-raised immediately, otherwise it is logged and the handler
for a 500 internal server error is used. If no such handler
exists, a default 500 internal server error message is displayed.
.. versionadded:: 0.3
"""
exc_type, exc_value, tb = sys.exc_info()
got_request_exception.send(self, exception=e)
handler = self.error_handler_spec[None].get(500)
if self.propagate_exceptions:
# if we want to repropagate the exception, we can attempt to
# raise it with the whole traceback in case we can do that
# (the function was actually called from the except part)
# otherwise, we just raise the error again
if exc_value is e:
reraise(exc_type, exc_value, tb)
else:
raise e
self.log_exception((exc_type, exc_value, tb))
if handler is None:
return InternalServerError()
return handler(e)
def log_exception(self, exc_info):
"""Logs an exception. This is called by :meth:`handle_exception`
if debugging is disabled and right before the handler is called.
The default implementation logs the exception as error on the
:attr:`logger`.
.. versionadded:: 0.8
"""
self.logger.error('Exception on %s [%s]' % (
request.path,
request.method
), exc_info=exc_info)
def raise_routing_exception(self, request):
"""Exceptions that are recording during routing are reraised with
this method. During debug we are not reraising redirect requests
for non ``GET``, ``HEAD``, or ``OPTIONS`` requests and we're raising
a different error instead to help debug situations.
:internal:
"""
if not self.debug \
or not isinstance(request.routing_exception, RequestRedirect) \
or request.method in ('GET', 'HEAD', 'OPTIONS'):
raise request.routing_exception
from .debughelpers import FormDataRoutingRedirect
raise FormDataRoutingRedirect(request)
def dispatch_request(self):
"""Does the request dispatching. Matches the URL and returns the
return value of the view or error handler. This does not have to
be a response object. In order to convert the return value to a
proper response object, call :func:`make_response`.
.. versionchanged:: 0.7
This no longer does the exception handling, this code was
moved to the new :meth:`full_dispatch_request`.
"""
req = _request_ctx_stack.top.request
if req.routing_exception is not None:
self.raise_routing_exception(req)
rule = req.url_rule
# if we provide automatic options for this URL and the
# request came with the OPTIONS method, reply automatically
if getattr(rule, 'provide_automatic_options', False) \
and req.method == 'OPTIONS':
return self.make_default_options_response()
# otherwise dispatch to the handler for that endpoint
return self.view_functions[rule.endpoint](**req.view_args)
def full_dispatch_request(self):
"""Dispatches the request and on top of that performs request
pre and postprocessing as well as HTTP exception catching and
error handling.
.. versionadded:: 0.7
"""
self.try_trigger_before_first_request_functions()
try:
request_started.send(self)
rv = self.preprocess_request()
if rv is None:
rv = self.dispatch_request()
except Exception as e:
rv = self.handle_user_exception(e)
response = self.make_response(rv)
response = self.process_response(response)
request_finished.send(self, response=response)
return response
def try_trigger_before_first_request_functions(self):
"""Called before each request and will ensure that it triggers
the :attr:`before_first_request_funcs` and only exactly once per
application instance (which means process usually).
:internal:
"""
if self._got_first_request:
return
with self._before_request_lock:
if self._got_first_request:
return
self._got_first_request = True
for func in self.before_first_request_funcs:
func()
def make_default_options_response(self):
"""This method is called to create the default `OPTIONS` response.
This can be changed through subclassing to change the default
behavior of `OPTIONS` responses.
.. versionadded:: 0.7
"""
adapter = _request_ctx_stack.top.url_adapter
if hasattr(adapter, 'allowed_methods'):
methods = adapter.allowed_methods()
else:
# fallback for Werkzeug < 0.7
methods = []
try:
adapter.match(method='--')
except MethodNotAllowed as e:
methods = e.valid_methods
except HTTPException as e:
pass
rv = self.response_class()
rv.allow.update(methods)
return rv
def should_ignore_error(self, error):
"""This is called to figure out if an error should be ignored
or not as far as the teardown system is concerned. If this
function returns `True` then the teardown handlers will not be
passed the error.
.. versionadded:: 0.10
"""
return False
def make_response(self, rv):
"""Converts the return value from a view function to a real
response object that is an instance of :attr:`response_class`.
The following types are allowed for `rv`:
.. tabularcolumns:: |p{3.5cm}|p{9.5cm}|
======================= ===========================================
:attr:`response_class` the object is returned unchanged
:class:`str` a response object is created with the
string as body
:class:`unicode` a response object is created with the
string encoded to utf-8 as body
a WSGI function the function is called as WSGI application
and buffered as response object
:class:`tuple` A tuple in the form ``(response, status,
headers)`` where `response` is any of the
types defined here, `status` is a string
                                or an integer and `headers` is a list or
a dictionary with header values.
======================= ===========================================
:param rv: the return value from the view function
.. versionchanged:: 0.9
Previously a tuple was interpreted as the arguments for the
response object.
"""
status = headers = None
if isinstance(rv, tuple):
rv, status, headers = rv + (None,) * (3 - len(rv))
if rv is None:
raise ValueError('View function did not return a response')
if not isinstance(rv, self.response_class):
# When we create a response object directly, we let the constructor
# set the headers and status. We do this because there can be
# some extra logic involved when creating these objects with
# specific values (like default content type selection).
if isinstance(rv, (text_type, bytes, bytearray)):
rv = self.response_class(rv, headers=headers, status=status)
headers = status = None
else:
rv = self.response_class.force_type(rv, request.environ)
if status is not None:
if isinstance(status, string_types):
rv.status = status
else:
rv.status_code = status
if headers:
rv.headers.extend(headers)
return rv
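    # The table above in practice: a view may return any of these forms and
    # :meth:`make_response` converts them (bodies and headers are example values):
    #
    #   return 'hello'                                  # str body
    #   return 'created', 201                           # (response, status)
    #   return 'hi', 200, {'X-Example': 'value'}        # (response, status, headers)
    #   return Response('raw', mimetype='text/plain')   # returned unchanged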
def create_url_adapter(self, request):
"""Creates a URL adapter for the given request. The URL adapter
is created at a point where the request context is not yet set up
so the request is passed explicitly.
.. versionadded:: 0.6
.. versionchanged:: 0.9
This can now also be called without a request object when the
URL adapter is created for the application context.
"""
if request is not None:
return self.url_map.bind_to_environ(request.environ,
server_name=self.config['SERVER_NAME'])
# We need at the very least the server name to be set for this
# to work.
if self.config['SERVER_NAME'] is not None:
return self.url_map.bind(
self.config['SERVER_NAME'],
script_name=self.config['APPLICATION_ROOT'] or '/',
url_scheme=self.config['PREFERRED_URL_SCHEME'])
def inject_url_defaults(self, endpoint, values):
"""Injects the URL defaults for the given endpoint directly into
the values dictionary passed. This is used internally and
automatically called on URL building.
.. versionadded:: 0.7
"""
funcs = self.url_default_functions.get(None, ())
if '.' in endpoint:
bp = endpoint.rsplit('.', 1)[0]
funcs = chain(funcs, self.url_default_functions.get(bp, ()))
for func in funcs:
func(endpoint, values)
def handle_url_build_error(self, error, endpoint, values):
"""Handle :class:`~werkzeug.routing.BuildError` on :meth:`url_for`.
"""
exc_type, exc_value, tb = sys.exc_info()
for handler in self.url_build_error_handlers:
try:
rv = handler(error, endpoint, values)
if rv is not None:
return rv
except BuildError as error:
pass
# At this point we want to reraise the exception. If the error is
# still the same one we can reraise it with the original traceback,
# otherwise we raise it from here.
if error is exc_value:
reraise(exc_type, exc_value, tb)
raise error
def preprocess_request(self):
"""Called before the actual request dispatching and will
        call every function decorated with :meth:`before_request`.
        If any of these functions returns a value it's handled as
if it was the return value from the view and further
request handling is stopped.
        This also triggers the :meth:`url_value_preprocessor` functions before
the actual :meth:`before_request` functions are called.
"""
bp = _request_ctx_stack.top.request.blueprint
funcs = self.url_value_preprocessors.get(None, ())
if bp is not None and bp in self.url_value_preprocessors:
funcs = chain(funcs, self.url_value_preprocessors[bp])
for func in funcs:
func(request.endpoint, request.view_args)
funcs = self.before_request_funcs.get(None, ())
if bp is not None and bp in self.before_request_funcs:
funcs = chain(funcs, self.before_request_funcs[bp])
for func in funcs:
rv = func()
if rv is not None:
return rv
def process_response(self, response):
"""Can be overridden in order to modify the response object
before it's sent to the WSGI server. By default this will
call all the :meth:`after_request` decorated functions.
.. versionchanged:: 0.5
As of Flask 0.5 the functions registered for after request
execution are called in reverse order of registration.
:param response: a :attr:`response_class` object.
:return: a new response object or the same, has to be an
instance of :attr:`response_class`.
"""
ctx = _request_ctx_stack.top
bp = ctx.request.blueprint
funcs = ctx._after_request_functions
if bp is not None and bp in self.after_request_funcs:
funcs = chain(funcs, reversed(self.after_request_funcs[bp]))
if None in self.after_request_funcs:
funcs = chain(funcs, reversed(self.after_request_funcs[None]))
for handler in funcs:
response = handler(response)
if not self.session_interface.is_null_session(ctx.session):
self.save_session(ctx.session, response)
return response
def do_teardown_request(self, exc=None):
"""Called after the actual request dispatching and will
        call every function decorated with :meth:`teardown_request`. This is
not actually called by the :class:`Flask` object itself but is always
        triggered when the request context is popped. That way we have
        tighter control over certain resources under testing environments.
.. versionchanged:: 0.9
Added the `exc` argument. Previously this was always using the
current exception information.
"""
if exc is None:
exc = sys.exc_info()[1]
funcs = reversed(self.teardown_request_funcs.get(None, ()))
bp = _request_ctx_stack.top.request.blueprint
if bp is not None and bp in self.teardown_request_funcs:
funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))
for func in funcs:
rv = func(exc)
request_tearing_down.send(self, exc=exc)
def do_teardown_appcontext(self, exc=None):
"""Called when an application context is popped. This works pretty
much the same as :meth:`do_teardown_request` but for the application
context.
.. versionadded:: 0.9
"""
if exc is None:
exc = sys.exc_info()[1]
for func in reversed(self.teardown_appcontext_funcs):
func(exc)
appcontext_tearing_down.send(self, exc=exc)
def app_context(self):
"""Binds the application only. For as long as the application is bound
to the current context the :data:`flask.current_app` points to that
application. An application context is automatically created when a
request context is pushed if necessary.
Example usage::
with app.app_context():
...
.. versionadded:: 0.9
"""
return AppContext(self)
def request_context(self, environ):
"""Creates a :class:`~flask.ctx.RequestContext` from the given
environment and binds it to the current context. This must be used in
combination with the `with` statement because the request is only bound
to the current context for the duration of the `with` block.
Example usage::
with app.request_context(environ):
do_something_with(request)
The object returned can also be used without the `with` statement
which is useful for working in the shell. The example above is
doing exactly the same as this code::
ctx = app.request_context(environ)
ctx.push()
try:
do_something_with(request)
finally:
ctx.pop()
.. versionchanged:: 0.3
Added support for non-with statement usage and `with` statement
is now passed the ctx object.
:param environ: a WSGI environment
"""
return RequestContext(self, environ)
def test_request_context(self, *args, **kwargs):
"""Creates a WSGI environment from the given values (see
:func:`werkzeug.test.EnvironBuilder` for more information, this
function accepts the same arguments).
"""
from flask.testing import make_test_environ_builder
builder = make_test_environ_builder(self, *args, **kwargs)
try:
return self.request_context(builder.get_environ())
finally:
builder.close()
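    # Sketch of :meth:`test_request_context` in a test or shell session (the
    # path and query string are arbitrary examples):
    #
    #   with app.test_request_context('/hello?name=x'):
    #       assert request.path == '/hello'
    #       assert request.args['name'] == 'x'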
def wsgi_app(self, environ, start_response):
"""The actual WSGI application. This is not implemented in
`__call__` so that middlewares can be applied without losing a
reference to the class. So instead of doing this::
app = MyMiddleware(app)
It's a better idea to do this instead::
app.wsgi_app = MyMiddleware(app.wsgi_app)
Then you still have the original application object around and
can continue to call methods on it.
.. versionchanged:: 0.7
The behavior of the before and after request callbacks was changed
under error conditions and a new callback was added that will
            always execute at the end of the request, independent of whether an
error occurred or not. See :ref:`callbacks-and-errors`.
:param environ: a WSGI environment
:param start_response: a callable accepting a status code,
a list of headers and an optional
exception context to start the response
"""
ctx = self.request_context(environ)
ctx.push()
error = None
try:
try:
response = self.full_dispatch_request()
except Exception as e:
error = e
response = self.make_response(self.handle_exception(e))
return response(environ, start_response)
finally:
if self.should_ignore_error(error):
error = None
ctx.auto_pop(error)
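    # A minimal sketch (illustration only, not part of the original source)
    # of the middleware pattern described in the docstring above.
    # ``MyMiddleware`` is a hypothetical WSGI middleware:
    #
    #   class MyMiddleware(object):
    #       def __init__(self, wsgi_app):
    #           self.wsgi_app = wsgi_app
    #
    #       def __call__(self, environ, start_response):
    #           # pre- or post-processing could happen here
    #           return self.wsgi_app(environ, start_response)
    #
    #   app.wsgi_app = MyMiddleware(app.wsgi_app)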
@property
def modules(self):
from warnings import warn
warn(DeprecationWarning('Flask.modules is deprecated, use '
'Flask.blueprints instead'), stacklevel=2)
return self.blueprints
def __call__(self, environ, start_response):
"""Shortcut for :attr:`wsgi_app`."""
return self.wsgi_app(environ, start_response)
def __repr__(self):
return '<%s %r>' % (
self.__class__.__name__,
self.name,
)
| apache-2.0 | -9,099,094,800,037,395,000 | 40.686211 | 84 | 0.610997 | false |
Rubisk/mcedit2 | src/mcedit2/plugins.py | 1 | 10201 | """
plugins
"""
from __future__ import absolute_import, division, print_function
from collections import defaultdict
import logging
import itertools
import os
import imp
import traceback
from mcedit2 import editortools
from mcedit2.editortools import generate
from mcedit2.util import load_ui
from mcedit2.util.settings import Settings
from mcedit2.widgets import inspector
from mceditlib.anvil import entities
log = logging.getLogger(__name__)
import sys
sys.dont_write_bytecode = True
settings = Settings().getNamespace("plugins")
enabledPluginsSetting = settings.getOption("enabled_plugins", "json", {})
autoReloadSetting = settings.getOption("auto_reload", bool, True)
# *** plugins dialog will need to:
# v get a list of (plugin display name, plugin reference, isEnabled) tuples for loaded and
# unloaded plugins.
# v enable or disable a plugin using its reference
# - reload a plugin
# - find out if a plugin was removed from the folder or failed to compile or run
# - install a new plugin using a file chooser
# - open the plugins folder(s) in Finder/Explorer
# *** on startup:
# v scan all plugins dirs for plugins
# - check if a plugin is enabled (without executing it?)
# - load plugins set to "enabled" in settings
# *** on app foreground:
# - rescan all plugins dirs
# - show new plugins to user, ask whether to load them
# - when in dev mode (??)
# - check mod times of all plugin files under each PluginRef
# - if auto-reload is on, reload plugins
# - if auto-reload is off, ??? prompt to enable autoreload?
# --- Plugin refs ---
class PluginRef(object):
_displayName = None
def __init__(self, filename, pluginsDir):
self.filename = filename
self.pluginsDir = pluginsDir
self.pluginModule = None # None indicates the plugin is not loaded
self.loadError = None
self.unloadError = None
self.timestamps = {}
def checkTimestamps(self):
"""
Record the modification time for this plugin's file and return True if it differs
from the previously recorded time.
If self.filename specifies a directory, walks the directory tree and records the mod
times of all files and directories found.
:return:
"""
timestamps = {}
filename = os.path.join(self.pluginsDir, self.filename)
if os.path.isdir(filename):
for dirname, subdirs, files in os.walk(filename):
for child in itertools.chain(subdirs, files):
pathname = os.path.join(dirname, child)
modtime = os.stat(pathname).st_mtime
timestamps[pathname] = modtime
else:
modtime = os.stat(filename).st_mtime
timestamps[filename] = modtime
changed = timestamps != self.timestamps
self.timestamps = timestamps
return changed
def findModule(self):
"""
Returns (file, pathname, description).
May raise ImportError, EnvironmentError, maybe others?
        If it is not None, the caller is responsible for closing the file (see `imp.find_module`).
"""
basename, ext = os.path.splitext(self.filename)
return imp.find_module(basename, [self.pluginsDir])
def load(self):
if self.pluginModule:
return
basename, ext = os.path.splitext(self.filename)
io = None
try:
io, pathname, description = self.findModule()
log.info("Trying to load plugin from %s", self.filename)
global _currentPluginPathname
_currentPluginPathname = pathname
self.pluginModule = imp.load_module(basename, io, pathname, description)
registerModule(self.fullpath, self.pluginModule)
_currentPluginPathname = None
if hasattr(self.pluginModule, 'displayName'):
self._displayName = self.pluginModule.displayName
log.info("Loaded %s (%s)", self.filename, self.displayName)
except Exception as e:
self.loadError = traceback.format_exc()
log.exception("Error while loading plugin from %s: %r", self.filename, e)
else:
self.loadError = None
finally:
if io:
io.close()
def unload(self):
if self.pluginModule is None:
return
try:
unregisterModule(self.fullpath, self.pluginModule)
for k, v in sys.modules.iteritems():
if v == self.pluginModule:
sys.modules.pop(k)
break
except Exception as e:
self.unloadError = traceback.format_exc()
log.exception("Error while unloading plugin from %s: %r", self.filename, e)
else:
self.unloadError = None
self.pluginModule = None
@property
def isLoaded(self):
return self.pluginModule is not None
@property
def displayName(self):
if self._displayName:
return self._displayName
return os.path.splitext(os.path.basename(self.filename))[0]
def exists(self):
return os.path.exists(self.fullpath)
@property
def fullpath(self):
return os.path.join(self.pluginsDir, self.filename)
@property
def enabled(self):
enabledPlugins = enabledPluginsSetting.value()
return enabledPlugins.get(self.filename, True)
@enabled.setter
def enabled(self, value):
value = bool(value)
enabledPlugins = enabledPluginsSetting.value()
enabledPlugins[self.filename] = value
enabledPluginsSetting.setValue(enabledPlugins)
# --- Plugin finding ---
_pluginRefs = {}
def getAllPlugins():
"""
Return all known plugins as a list of `PluginRef`s
:return: list[PluginRef]
:rtype:
"""
return list(_pluginRefs.values())
def findNewPluginsInDir(pluginsDir):
if not os.path.isdir(pluginsDir):
log.warn("Plugins dir %s not found", pluginsDir)
return
log.info("Loading plugins from %s", pluginsDir)
for filename in os.listdir(pluginsDir):
if filename not in _pluginRefs:
ref = detectPlugin(filename, pluginsDir)
if ref:
ref.checkTimestamps()
_pluginRefs[filename] = ref
def detectPlugin(filename, pluginsDir):
io = None
basename, ext = os.path.splitext(filename)
if ext in (".pyc", ".pyo"):
return None
ref = PluginRef(filename, pluginsDir)
try:
io, pathname, description = ref.findModule()
except Exception as e:
log.exception("Could not detect %s as a plugin or module: %s", filename, e)
return None
else:
return ref
finally:
if io:
io.close()
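# A minimal sketch (illustration only) of the scan-and-load flow built from
# the helpers above; ``pluginsDir`` is a hypothetical path:
#
#   findNewPluginsInDir(pluginsDir)
#   for ref in getAllPlugins():
#       if ref.enabled and not ref.isLoaded:
#           ref.load()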
# --- Plugin registration ---
_loadedModules = {}
_pluginClassesByPathname = defaultdict(list)
_currentPluginPathname = None
def registerModule(filename, pluginModule):
if hasattr(pluginModule, "register"):
pluginModule.register()
_loadedModules[filename] = pluginModule
pluginModule.__FOUND_FILENAME__ = filename
def unregisterModule(filename, pluginModule):
if hasattr(pluginModule, "unregister"):
pluginModule.unregister()
classes = _pluginClassesByPathname.pop(filename)
if classes:
for cls in classes:
_unregisterClass(cls)
_loadedModules.pop(pluginModule.__FOUND_FILENAME__)
def _registerClass(cls):
_pluginClassesByPathname[_currentPluginPathname].append(cls)
def _unregisterClass(cls):
load_ui.unregisterCustomWidget(cls)
editortools.unregisterToolClass(cls)
generate.unregisterGeneratePlugin(cls)
inspector.unregisterBlockInspectorWidget(cls)
entities.unregisterTileEntityRefClass(cls)
# --- Registration functions ---
def registerCustomWidget(cls):
"""
Register a custom QWidget class with the .ui file loader. This allows custom QWidget
classes to be used in .ui files.
>>> from PySide import QtGui
>>> @registerCustomWidget
>>> class MyWidget(QtGui.QWidget):
>>> pass
:param cls:
:type cls: class
:return:
:rtype: class
"""
_registerClass(cls)
return load_ui.registerCustomWidget(cls)
def registerToolClass(cls):
"""
Register a tool class. Class must inherit from EditorTool.
>>> from mcedit2.editortools import EditorTool
>>> @registerToolClass
>>> class MyTool(EditorTool):
>>> pass
:param cls:
:type cls: class
:return:
:rtype: class
"""
_registerClass(cls)
return editortools.registerToolClass(cls)
def registerGeneratePlugin(cls):
"""
Register a plugin for the Generate Tool. Class must inherit from GeneratePlugin.
>>> from mcedit2.editortools.generate import GeneratePlugin
>>> @registerGeneratePlugin
>>> class MyGeneratePlugin(GeneratePlugin):
>>> pass
:param cls:
:type cls:
:return:
:rtype:
"""
_registerClass(cls)
return generate.registerGeneratePlugin(cls)
def registerBlockInspectorWidget(ID, cls):
"""
Register a widget with the Block Inspector to use when inspecting TileEntities
that have the given ID.
xxx make ID an attribute of cls?
>>> from PySide import QtGui
>>> class MyBarrelInspector(QtGui.QWidget):
>>> pass
>>> registerBlockInspectorWidget("MyBarrel", MyBarrelInspector)
:param cls:
:type cls:
:return:
:rtype:
"""
_registerClass(cls)
return inspector.registerBlockInspectorWidget(ID, cls)
def registerTileEntityRefClass(ID, cls):
"""
Register a TileEntityRef class with the world loader to create when loading a TileEntity
with the given ID.
xxx specify world format here, too.
>>> from mceditlib.anvil.entities import PCTileEntityRefBase
>>> class MyBarrelRef(PCTileEntityRefBase):
>>> pass
>>> registerTileEntityRefClass("MyBarrel", MyBarrelRef)
:param cls:
:type cls:
:return:
:rtype:
"""
# xxx this is anvil.entities - delegate to correct world format
_registerClass(cls)
return entities.registerTileEntityRefClass(ID, cls)
| bsd-3-clause | -7,692,061,054,843,510,000 | 27.57423 | 92 | 0.650328 | false |
leopittelli/Django-on-App-Engine-Example | django/utils/http.py | 29 | 9645 | from __future__ import unicode_literals
import base64
import calendar
import datetime
import re
import sys
from binascii import Error as BinasciiError
from email.utils import formatdate
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_str, force_text
from django.utils.functional import allow_lazy
from django.utils import six
from django.utils.six.moves.urllib.parse import (
quote, quote_plus, unquote, unquote_plus, urlparse,
urlencode as original_urlencode)
ETAG_MATCH = re.compile(r'(?:W/)?"((?:\\.|[^"])*)"')
MONTHS = 'jan feb mar apr may jun jul aug sep oct nov dec'.split()
__D = r'(?P<day>\d{2})'
__D2 = r'(?P<day>[ \d]\d)'
__M = r'(?P<mon>\w{3})'
__Y = r'(?P<year>\d{4})'
__Y2 = r'(?P<year>\d{2})'
__T = r'(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})'
RFC1123_DATE = re.compile(r'^\w{3}, %s %s %s %s GMT$' % (__D, __M, __Y, __T))
RFC850_DATE = re.compile(r'^\w{6,9}, %s-%s-%s %s GMT$' % (__D, __M, __Y2, __T))
ASCTIME_DATE = re.compile(r'^\w{3} %s %s %s %s$' % (__M, __D2, __T, __Y))
def urlquote(url, safe='/'):
"""
A version of Python's urllib.quote() function that can operate on unicode
strings. The url is first UTF-8 encoded before quoting. The returned string
can safely be used as part of an argument to a subsequent iri_to_uri() call
without double-quoting occurring.
"""
return force_text(quote(force_str(url), force_str(safe)))
urlquote = allow_lazy(urlquote, six.text_type)
def urlquote_plus(url, safe=''):
"""
A version of Python's urllib.quote_plus() function that can operate on
unicode strings. The url is first UTF-8 encoded before quoting. The
returned string can safely be used as part of an argument to a subsequent
iri_to_uri() call without double-quoting occurring.
"""
return force_text(quote_plus(force_str(url), force_str(safe)))
urlquote_plus = allow_lazy(urlquote_plus, six.text_type)
def urlunquote(quoted_url):
"""
A wrapper for Python's urllib.unquote() function that can operate on
the result of django.utils.http.urlquote().
"""
return force_text(unquote(force_str(quoted_url)))
urlunquote = allow_lazy(urlunquote, six.text_type)
def urlunquote_plus(quoted_url):
"""
A wrapper for Python's urllib.unquote_plus() function that can operate on
the result of django.utils.http.urlquote_plus().
"""
return force_text(unquote_plus(force_str(quoted_url)))
urlunquote_plus = allow_lazy(urlunquote_plus, six.text_type)
def urlencode(query, doseq=0):
"""
A version of Python's urllib.urlencode() function that can operate on
unicode strings. The parameters are first cast to UTF-8 encoded strings and
then encoded as per normal.
"""
if isinstance(query, MultiValueDict):
query = query.lists()
elif hasattr(query, 'items'):
query = query.items()
return original_urlencode(
[(force_str(k),
[force_str(i) for i in v] if isinstance(v, (list,tuple)) else force_str(v))
for k, v in query],
doseq)
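# Minimal examples (illustration only, values assumed); multi-valued keys
# need doseq=1 to be expanded into repeated parameters:
#
#   urlencode({'next': '/a b/'}) == 'next=%2Fa+b%2F'
#   urlencode(MultiValueDict({'a': ['1', '2']}), doseq=1) == 'a=1&a=2'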
def cookie_date(epoch_seconds=None):
"""
Formats the time to ensure compatibility with Netscape's cookie standard.
Accepts a floating point number expressed in seconds since the epoch, in
UTC - such as that outputted by time.time(). If set to None, defaults to
the current time.
Outputs a string in the format 'Wdy, DD-Mon-YYYY HH:MM:SS GMT'.
"""
rfcdate = formatdate(epoch_seconds)
return '%s-%s-%s GMT' % (rfcdate[:7], rfcdate[8:11], rfcdate[12:25])
def http_date(epoch_seconds=None):
"""
Formats the time to match the RFC1123 date format as specified by HTTP
RFC2616 section 3.3.1.
Accepts a floating point number expressed in seconds since the epoch, in
UTC - such as that outputted by time.time(). If set to None, defaults to
the current time.
Outputs a string in the format 'Wdy, DD Mon YYYY HH:MM:SS GMT'.
"""
rfcdate = formatdate(epoch_seconds)
return '%s GMT' % rfcdate[:25]
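# Minimal examples (illustration only), using the epoch value 784111777,
# i.e. Sun, 06 Nov 1994 08:49:37 UTC:
#
#   http_date(784111777)   == 'Sun, 06 Nov 1994 08:49:37 GMT'
#   cookie_date(784111777) == 'Sun, 06-Nov-1994 08:49:37 GMT'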
def parse_http_date(date):
"""
Parses a date format as specified by HTTP RFC2616 section 3.3.1.
The three formats allowed by the RFC are accepted, even if only the first
one is still in widespread use.
Returns an integer expressed in seconds since the epoch, in UTC.
"""
    # email.utils.parsedate does the job for RFC1123 dates; unfortunately
# RFC2616 makes it mandatory to support RFC850 dates too. So we roll
# our own RFC-compliant parsing.
for regex in RFC1123_DATE, RFC850_DATE, ASCTIME_DATE:
m = regex.match(date)
if m is not None:
break
else:
raise ValueError("%r is not in a valid HTTP date format" % date)
try:
year = int(m.group('year'))
if year < 100:
if year < 70:
year += 2000
else:
year += 1900
month = MONTHS.index(m.group('mon').lower()) + 1
day = int(m.group('day'))
hour = int(m.group('hour'))
min = int(m.group('min'))
sec = int(m.group('sec'))
result = datetime.datetime(year, month, day, hour, min, sec)
return calendar.timegm(result.utctimetuple())
except Exception:
six.reraise(ValueError, ValueError("%r is not a valid date" % date), sys.exc_info()[2])
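# Minimal examples (illustration only): all three accepted formats map to
# the same UTC epoch value:
#
#   parse_http_date('Sun, 06 Nov 1994 08:49:37 GMT')  == 784111777
#   parse_http_date('Sunday, 06-Nov-94 08:49:37 GMT') == 784111777
#   parse_http_date('Sun Nov  6 08:49:37 1994')       == 784111777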
def parse_http_date_safe(date):
"""
Same as parse_http_date, but returns None if the input is invalid.
"""
try:
return parse_http_date(date)
except Exception:
pass
# Base 36 functions: useful for generating compact URLs
def base36_to_int(s):
"""
    Converts a base 36 string to an ``int``. Raises ``ValueError`` if the
input won't fit into an int.
"""
# To prevent overconsumption of server resources, reject any
    # base36 string that is longer than 13 base36 digits (13 digits
# is sufficient to base36-encode any 64-bit integer)
if len(s) > 13:
raise ValueError("Base36 input too large")
value = int(s, 36)
# ... then do a final check that the value will fit into an int to avoid
# returning a long (#15067). The long type was removed in Python 3.
if six.PY2 and value > sys.maxint:
raise ValueError("Base36 input too large")
return value
def int_to_base36(i):
"""
Converts an integer to a base36 string
"""
digits = "0123456789abcdefghijklmnopqrstuvwxyz"
factor = 0
if i < 0:
raise ValueError("Negative base36 conversion input.")
if six.PY2:
if not isinstance(i, six.integer_types):
raise TypeError("Non-integer base36 conversion input.")
if i > sys.maxint:
raise ValueError("Base36 conversion input too large.")
# Find starting factor
while True:
factor += 1
if i < 36 ** factor:
factor -= 1
break
base36 = []
# Construct base36 representation
while factor >= 0:
j = 36 ** factor
base36.append(digits[i // j])
i = i % j
factor -= 1
return ''.join(base36)
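# Minimal examples (illustration only):
#
#   int_to_base36(36)   == '10'
#   base36_to_int('10') == 36
#   base36_to_int(int_to_base36(1234567890)) == 1234567890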
def urlsafe_base64_encode(s):
"""
Encodes a bytestring in base64 for use in URLs, stripping any trailing
equal signs.
"""
return base64.urlsafe_b64encode(s).rstrip(b'\n=')
def urlsafe_base64_decode(s):
"""
Decodes a base64 encoded string, adding back any trailing equal signs that
might have been stripped.
"""
s = s.encode('utf-8') # base64encode should only return ASCII.
try:
return base64.urlsafe_b64decode(s.ljust(len(s) + len(s) % 4, b'='))
except (LookupError, BinasciiError) as e:
raise ValueError(e)
def parse_etags(etag_str):
"""
Parses a string with one or several etags passed in If-None-Match and
If-Match headers by the rules in RFC 2616. Returns a list of etags
without surrounding double quotes (") and unescaped from \<CHAR>.
"""
etags = ETAG_MATCH.findall(etag_str)
if not etags:
# etag_str has wrong format, treat it as an opaque string then
return [etag_str]
etags = [e.encode('ascii').decode('unicode_escape') for e in etags]
return etags
def quote_etag(etag):
"""
Wraps a string in double quotes escaping contents as necessary.
"""
return '"%s"' % etag.replace('\\', '\\\\').replace('"', '\\"')
def same_origin(url1, url2):
"""
Checks if two URLs are 'same-origin'
"""
p1, p2 = urlparse(url1), urlparse(url2)
try:
return (p1.scheme, p1.hostname, p1.port) == (p2.scheme, p2.hostname, p2.port)
except ValueError:
return False
def is_safe_url(url, host=None):
"""
Return ``True`` if the url is a safe redirection (i.e. it doesn't point to
a different host and uses a safe scheme).
Always returns ``False`` on an empty url.
"""
if not url:
return False
# Chrome treats \ completely as /
url = url.replace('\\', '/')
# Chrome considers any URL with more than two slashes to be absolute, but
    # urlparse is not so flexible. Treat any url with three slashes as unsafe.
if url.startswith('///'):
return False
url_info = urlparse(url)
# Forbid URLs like http:///example.com - with a scheme, but without a hostname.
    # In that URL, example.com is not the hostname but a path component. However,
# Chrome will still consider example.com to be the hostname, so we must not
# allow this syntax.
if not url_info.netloc and url_info.scheme:
return False
return (not url_info.netloc or url_info.netloc == host) and \
(not url_info.scheme or url_info.scheme in ['http', 'https'])
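# Minimal examples (illustration only); 'testserver' is a hypothetical host:
#
#   is_safe_url('/next/page', host='testserver')                == True
#   is_safe_url('https://testserver/path', host='testserver')   == True
#   is_safe_url('http://attacker.example/', host='testserver')  == False
#   is_safe_url('///attacker.example', host='testserver')       == False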
| mit | 877,517,149,493,357,400 | 34.32967 | 95 | 0.639088 | false |
sheepray/volatility | volatility/win32/modules.py | 58 | 1117 | # Volatility
# Copyright (C) 2007-2013 Volatility Foundation
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: AAron Walters and Nick Petroni
@license: GNU General Public License 2.0
@contact: [email protected], [email protected]
@organization: Volatility Foundation
"""
#pylint: disable-msg=C0111
import volatility.win32.tasks as tasks
def lsmod(addr_space):
""" A Generator for modules """
for m in tasks.get_kdbg(addr_space).modules():
yield m
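# A minimal usage sketch (illustration only): lsmod() is a generator, so
# callers typically just iterate over it for a given address space, e.g.
# ``modules = list(lsmod(addr_space))``.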
| gpl-2.0 | -4,109,279,391,068,862,000 | 31.852941 | 70 | 0.736795 | false |
mozilla-metrics/fhr-toolbox | mrjob/churn-analysis.py | 2 | 5297 | """
Analyze a historical week to understand Firefox churn.
"""
import healthreportutils
from datetime import date, datetime, timedelta
import os, shutil, csv
import sys, codecs
import traceback
import mrjob
from mrjob.job import MRJob
import tempfile
try:
import simplejson as json
except ImportError:
import json
# How many days must a user be gone to be considered "lost"?
LAG_DAYS = 49
CRITICAL_WEEKS = 9
TOTAL_DAYS = 180
main_channels = (
'nightly',
'aurora',
'beta',
'release'
)
def last_saturday(d):
"""Return the Saturday on or before the date."""
# .weekday in python starts on 0=Monday
return d - timedelta(days=(d.weekday() + 2) % 7)
def start_date(dstr):
"""
Measure Sunday-Saturday, for no particularly good reason.
"""
snapshot = datetime.strptime(dstr, "%Y-%m-%d").date()
startdate = last_saturday(snapshot)
return startdate
def date_back(start, days):
"""iter backwards from start for N days"""
date = start
for n in xrange(0, days):
yield date - timedelta(days=n)
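# Minimal examples (illustration only): 2014-01-08 was a Wednesday, so the
# preceding Saturday is 2014-01-04.
#
#   last_saturday(date(2014, 1, 8)) == date(2014, 1, 4)
#   start_date("2014-01-08")        == date(2014, 1, 4)
#   list(date_back(date(2014, 1, 4), 3)) == [date(2014, 1, 4),
#                                            date(2014, 1, 3),
#                                            date(2014, 1, 2)]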
def active_day(day):
if day is None:
return False
return any(k != "org.mozilla.crashes.crashes" for k in day)
def logexceptions(func):
def wrapper(job, k, v):
try:
for k1, v1 in func(job, k, v):
yield (k1, v1)
except:
exc = traceback.format_exc()
print >>sys.stderr, "Script exception: ", exc
raise
return wrapper
@logexceptions
@healthreportutils.FHRMapper()
def map(job, key, payload):
pingDate = payload.get("thisPingDate", "unknown")
channel = payload.channel.split("-")[0]
if channel not in main_channels:
return
days = payload.get('data', {}).get('days', {})
def get_day(d):
dstr = d.strftime("%Y-%m-%d")
return days.get(dstr, None)
version = payload.get("geckoAppInfo", {}).get("version", "?")
sd = start_date(job.options.start_date)
# Was the user active at all in the 49 days prior to the snapshot
recent_usage = 0
for d in date_back(sd, LAG_DAYS):
day = get_day(d)
if active_day(day):
recent_usage = 1
break
# For each of the "critical" 9 weeks, record both usage days and default
# status.
week_actives = []
for weekno in xrange(0, CRITICAL_WEEKS):
week_end = sd - timedelta(days=LAG_DAYS + 7 * weekno)
active_days = 0
default_browser = None
for d in date_back(week_end, 7):
day = get_day(d)
if active_day(day):
active_days += 1
if default_browser is None:
default_browser = day.get("org.mozilla.appInfo.appinfo", {}).get("isDefaultBrowser", None)
if default_browser is None:
default_browser = "?"
week_actives.append(active_days)
week_actives.append(default_browser)
prior_usage = 0
for d in date_back(sd - timedelta(days=LAG_DAYS + 7 * CRITICAL_WEEKS),
180 - (LAG_DAYS + 7 * CRITICAL_WEEKS)):
day = get_day(d)
if active_day(day):
prior_usage = True
break
osname = payload.last.get("org.mozilla.sysinfo.sysinfo", {}).get("name", "?")
locale = payload.last.get("org.mozilla.appInfo.appinfo", {}).get("locale", "?")
geo = payload.get("geoCountry", "?")
yield ("result", [channel, osname, locale, geo, pingDate, recent_usage] + week_actives + [prior_usage])
class AggJob(MRJob):
HADOOP_INPUT_FORMAT="org.apache.hadoop.mapred.SequenceFileAsTextInputFormat"
INPUT_PROTOCOL = mrjob.protocol.RawProtocol
def run_job(self):
self.stdout = tempfile.TemporaryFile()
if self.options.start_date is None:
raise Exception("--start-date is required")
# validate the start date here
start_date(self.options.start_date)
# Do the big work
super(AggJob, self).run_job()
# Produce the separated output files
outpath = self.options.output_path
if outpath is None:
outpath = os.path.expanduser("~/fhr-churnanalysis-" + self.options.start_date + ".csv")
output(self.stdout, outpath)
def configure_options(self):
super(AggJob, self).configure_options()
self.add_passthrough_option('--output-path', help="Specify output path",
default=None)
self.add_passthrough_option('--start-date', help="Specify start date",
default=None)
def mapper(self, key, value):
return map(self, key, value)
def getresults(fd):
fd.seek(0)
for line in fd:
k, v = line.split("\t")
yield json.loads(k), json.loads(v)
def unwrap(l, v):
"""
Unwrap a value into a list. Dicts are added in their repr form.
"""
if isinstance(v, (tuple, list)):
for e in v:
unwrap(l, e)
elif isinstance(v, dict):
l.append(repr(v))
elif isinstance(v, unicode):
l.append(v.encode("utf-8"))
else:
l.append(v)
def output(fd, path):
outfd = open(path, "w")
csvw = csv.writer(outfd)
for k, v in getresults(fd):
csvw.writerow(v)
if __name__ == '__main__':
AggJob.run()
| apache-2.0 | -2,219,056,109,511,884,000 | 27.478495 | 110 | 0.593544 | false |
mark-ignacio/phantomjs | src/breakpad/src/tools/gyp/tools/pretty_sln.py | 137 | 4977 | #!/usr/bin/python2.5
# Copyright 2009 Google Inc.
# All Rights Reserved.
"""Prints the information in a sln file in a diffable way.
It first outputs each project in alphabetical order with its
dependencies.
Then it outputs a possible build order.
"""
__author__ = 'nsylvain (Nicolas Sylvain)'
import os
import re
import sys
import pretty_vcproj
def BuildProject(project, built, projects, deps):
# if all dependencies are done, we can build it, otherwise we try to build the
# dependency.
# This is not infinite-recursion proof.
for dep in deps[project]:
if dep not in built:
BuildProject(dep, built, projects, deps)
print project
built.append(project)
def ParseSolution(solution_file):
# All projects, their clsid and paths.
projects = dict()
# A list of dependencies associated with a project.
dependencies = dict()
# Regular expressions that matches the SLN format.
# The first line of a project definition.
begin_project = re.compile(('^Project\("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942'
'}"\) = "(.*)", "(.*)", "(.*)"$'))
# The last line of a project definition.
end_project = re.compile('^EndProject$')
# The first line of a dependency list.
begin_dep = re.compile('ProjectSection\(ProjectDependencies\) = postProject$')
# The last line of a dependency list.
end_dep = re.compile('EndProjectSection$')
# A line describing a dependency.
dep_line = re.compile(' *({.*}) = ({.*})$')
in_deps = False
solution = open(solution_file)
for line in solution:
results = begin_project.search(line)
if results:
# Hack to remove icu because the diff is too different.
if results.group(1).find('icu') != -1:
continue
# We remove "_gyp" from the names because it helps to diff them.
current_project = results.group(1).replace('_gyp', '')
projects[current_project] = [results.group(2).replace('_gyp', ''),
results.group(3),
results.group(2)]
dependencies[current_project] = []
continue
results = end_project.search(line)
if results:
current_project = None
continue
results = begin_dep.search(line)
if results:
in_deps = True
continue
results = end_dep.search(line)
if results:
in_deps = False
continue
results = dep_line.search(line)
if results and in_deps and current_project:
dependencies[current_project].append(results.group(1))
continue
  # Change all dependency clsids to names instead.
for project in dependencies:
    # For each dependency in this project
new_dep_array = []
for dep in dependencies[project]:
      # Look for the project name matching this clsid
for project_info in projects:
if projects[project_info][1] == dep:
new_dep_array.append(project_info)
dependencies[project] = sorted(new_dep_array)
return (projects, dependencies)
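# A minimal usage sketch (illustration only); the .sln path is hypothetical:
#
#   projects, deps = ParseSolution(r'c:\src\foo\foo.sln')
#   PrintDependencies(projects, deps)
#   PrintBuildOrder(projects, deps)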
def PrintDependencies(projects, deps):
print "---------------------------------------"
print "Dependencies for all projects"
print "---------------------------------------"
print "-- --"
for (project, dep_list) in sorted(deps.items()):
print "Project : %s" % project
print "Path : %s" % projects[project][0]
if dep_list:
for dep in dep_list:
print " - %s" % dep
print ""
print "-- --"
def PrintBuildOrder(projects, deps):
print "---------------------------------------"
print "Build order "
print "---------------------------------------"
print "-- --"
built = []
for (project, dep_list) in sorted(deps.items()):
if project not in built:
BuildProject(project, built, projects, deps)
print "-- --"
def PrintVCProj(projects):
for project in projects:
print "-------------------------------------"
print "-------------------------------------"
print project
print project
print project
print "-------------------------------------"
print "-------------------------------------"
project_path = os.path.abspath(os.path.join(os.path.dirname(sys.argv[1]),
projects[project][2]))
pretty = pretty_vcproj
argv = [ '',
project_path,
'$(SolutionDir)=%s\\' % os.path.dirname(sys.argv[1]),
]
argv.extend(sys.argv[3:])
pretty.main(argv)
def main():
# check if we have exactly 1 parameter.
if len(sys.argv) < 2:
print 'Usage: %s "c:\\path\\to\\project.sln"' % sys.argv[0]
return
(projects, deps) = ParseSolution(sys.argv[1])
PrintDependencies(projects, deps)
PrintBuildOrder(projects, deps)
if '--recursive' in sys.argv:
PrintVCProj(projects)
if __name__ == '__main__':
main()
| bsd-3-clause | 6,764,016,432,575,439,000 | 29.169697 | 80 | 0.565401 | false |
maxrothman/aws-alfred-workflow | venv/lib/python2.7/site-packages/jmespath/functions.py | 21 | 13008 | import math
import json
import weakref
from jmespath import exceptions
from jmespath.compat import string_type as STRING_TYPE
from jmespath.compat import get_methods
# python types -> jmespath types
TYPES_MAP = {
'bool': 'boolean',
'list': 'array',
'dict': 'object',
'NoneType': 'null',
'unicode': 'string',
'str': 'string',
'float': 'number',
'int': 'number',
'OrderedDict': 'object',
'_Projection': 'array',
'_Expression': 'expref',
}
# jmespath types -> python types
REVERSE_TYPES_MAP = {
'boolean': ('bool',),
'array': ('list', '_Projection'),
'object': ('dict', 'OrderedDict',),
'null': ('None',),
'string': ('unicode', 'str'),
'number': ('float', 'int'),
'expref': ('_Expression',),
}
def populate_function_table(cls):
func_table = cls.FUNCTION_TABLE
for name, method in get_methods(cls):
signature = getattr(method, 'signature', None)
if signature is not None:
func_table[name[6:]] = {"function": method,
"signature": signature}
return cls
def builtin_function(*arguments):
def _record_arity(func):
func.signature = arguments
return func
return _record_arity
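# A minimal sketch (illustration only): inside a class decorated with
# @populate_function_table, a new built-in is declared by prefixing the
# method name with ``_func_`` (the prefix is stripped to form the jmespath
# function name).  ``double`` is a hypothetical example:
#
#   @builtin_function({'types': ['number']})
#   def _func_double(self, arg):
#       return 2 * arg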
@populate_function_table
class RuntimeFunctions(object):
# The built in functions are automatically populated in the FUNCTION_TABLE
# using the @builtin_function decorator on methods defined in this class.
FUNCTION_TABLE = {
}
def __init__(self):
self._interpreter = None
@property
def interpreter(self):
if self._interpreter is None:
return None
else:
return self._interpreter()
@interpreter.setter
def interpreter(self, value):
# A weakref is used because we have
# a cyclic reference and we want to allow
# for the memory to be properly freed when
# the objects are no longer needed.
self._interpreter = weakref.ref(value)
def call_function(self, function_name, resolved_args):
try:
spec = self.FUNCTION_TABLE[function_name]
except KeyError:
raise exceptions.UnknownFunctionError(
"Unknown function: %s()" % function_name)
function = spec['function']
signature = spec['signature']
self._validate_arguments(resolved_args, signature, function_name)
return function(self, *resolved_args)
def _validate_arguments(self, args, signature, function_name):
if signature and signature[-1].get('variadic'):
if len(args) < len(signature):
raise exceptions.VariadictArityError(
len(signature), len(args), function_name)
elif len(args) != len(signature):
raise exceptions.ArityError(
len(signature), len(args), function_name)
return self._type_check(args, signature, function_name)
def _type_check(self, actual, signature, function_name):
for i in range(len(signature)):
allowed_types = signature[i]['types']
if allowed_types:
self._type_check_single(actual[i], allowed_types,
function_name)
def _type_check_single(self, current, types, function_name):
# Type checking involves checking the top level type,
# and in the case of arrays, potentially checking the types
# of each element.
allowed_types, allowed_subtypes = self._get_allowed_pytypes(types)
# We're not using isinstance() on purpose.
# The type model for jmespath does not map
# 1-1 with python types (booleans are considered
# integers in python for example).
actual_typename = type(current).__name__
if actual_typename not in allowed_types:
raise exceptions.JMESPathTypeError(
function_name, current,
self._convert_to_jmespath_type(actual_typename), types)
# If we're dealing with a list type, we can have
# additional restrictions on the type of the list
# elements (for example a function can require a
# list of numbers or a list of strings).
# Arrays are the only types that can have subtypes.
if allowed_subtypes:
self._subtype_check(current, allowed_subtypes,
types, function_name)
def _get_allowed_pytypes(self, types):
allowed_types = []
allowed_subtypes = []
for t in types:
type_ = t.split('-', 1)
if len(type_) == 2:
type_, subtype = type_
allowed_subtypes.append(REVERSE_TYPES_MAP[subtype])
else:
type_ = type_[0]
allowed_types.extend(REVERSE_TYPES_MAP[type_])
return allowed_types, allowed_subtypes
def _subtype_check(self, current, allowed_subtypes, types, function_name):
if len(allowed_subtypes) == 1:
# The easy case, we know up front what type
# we need to validate.
allowed_subtypes = allowed_subtypes[0]
for element in current:
actual_typename = type(element).__name__
if actual_typename not in allowed_subtypes:
raise exceptions.JMESPathTypeError(
function_name, element, actual_typename, types)
elif len(allowed_subtypes) > 1 and current:
# Dynamic type validation. Based on the first
# type we see, we validate that the remaining types
# match.
first = type(current[0]).__name__
for subtypes in allowed_subtypes:
if first in subtypes:
allowed = subtypes
break
else:
raise exceptions.JMESPathTypeError(
function_name, current[0], first, types)
for element in current:
actual_typename = type(element).__name__
if actual_typename not in allowed:
raise exceptions.JMESPathTypeError(
function_name, element, actual_typename, types)
@builtin_function({'types': ['number']})
def _func_abs(self, arg):
return abs(arg)
@builtin_function({'types': ['array-number']})
def _func_avg(self, arg):
return sum(arg) / float(len(arg))
@builtin_function({'types': [], 'variadic': True})
def _func_not_null(self, *arguments):
for argument in arguments:
if argument is not None:
return argument
@builtin_function({'types': []})
def _func_to_array(self, arg):
if isinstance(arg, list):
return arg
else:
return [arg]
@builtin_function({'types': []})
def _func_to_string(self, arg):
if isinstance(arg, STRING_TYPE):
return arg
else:
return json.dumps(arg, separators=(',', ':'),
default=str)
@builtin_function({'types': []})
def _func_to_number(self, arg):
if isinstance(arg, (list, dict, bool)):
return None
elif arg is None:
return None
elif isinstance(arg, (int, float)):
return arg
else:
try:
if '.' in arg:
return float(arg)
else:
return int(arg)
except ValueError:
return None
@builtin_function({'types': ['array', 'string']}, {'types': []})
def _func_contains(self, subject, search):
return search in subject
@builtin_function({'types': ['string', 'array', 'object']})
def _func_length(self, arg):
return len(arg)
@builtin_function({'types': ['string']}, {'types': ['string']})
def _func_ends_with(self, search, suffix):
return search.endswith(suffix)
@builtin_function({'types': ['string']}, {'types': ['string']})
def _func_starts_with(self, search, suffix):
return search.startswith(suffix)
@builtin_function({'types': ['array', 'string']})
def _func_reverse(self, arg):
if isinstance(arg, STRING_TYPE):
return arg[::-1]
else:
return list(reversed(arg))
@builtin_function({"types": ['number']})
def _func_ceil(self, arg):
return math.ceil(arg)
@builtin_function({"types": ['number']})
def _func_floor(self, arg):
return math.floor(arg)
@builtin_function({"types": ['string']}, {"types": ['array-string']})
def _func_join(self, separator, array):
return separator.join(array)
@builtin_function({'types': ['expref']}, {'types': ['array']})
def _func_map(self, expref, arg):
result = []
for element in arg:
result.append(self.interpreter.visit(expref.expression, element))
return result
@builtin_function({"types": ['array-number', 'array-string']})
def _func_max(self, arg):
if arg:
return max(arg)
else:
return None
@builtin_function({"types": ["object"], "variadic": True})
def _func_merge(self, *arguments):
merged = {}
for arg in arguments:
merged.update(arg)
return merged
@builtin_function({"types": ['array-number', 'array-string']})
def _func_min(self, arg):
if arg:
return min(arg)
else:
return None
@builtin_function({"types": ['array-string', 'array-number']})
def _func_sort(self, arg):
return list(sorted(arg))
@builtin_function({"types": ['array-number']})
def _func_sum(self, arg):
return sum(arg)
@builtin_function({"types": ['object']})
def _func_keys(self, arg):
# To be consistent with .values()
# should we also return the indices of a list?
return list(arg.keys())
@builtin_function({"types": ['object']})
def _func_values(self, arg):
return list(arg.values())
@builtin_function({'types': []})
def _func_type(self, arg):
if isinstance(arg, STRING_TYPE):
return "string"
elif isinstance(arg, bool):
return "boolean"
elif isinstance(arg, list):
return "array"
elif isinstance(arg, dict):
return "object"
elif isinstance(arg, (float, int)):
return "number"
elif arg is None:
return "null"
@builtin_function({'types': ['array']}, {'types': ['expref']})
def _func_sort_by(self, array, expref):
if not array:
return array
        # sort_by allows for the expref to be either a number or
        # a string, so we have some special logic to handle this.
        # We evaluate the first array element and verify that it's
        # either a string or a number. We then create a key function
# that validates that type, which requires that remaining array
# elements resolve to the same type as the first element.
required_type = self._convert_to_jmespath_type(
type(self.interpreter.visit(expref.expression, array[0])).__name__)
if required_type not in ['number', 'string']:
raise exceptions.JMESPathTypeError(
'sort_by', array[0], required_type, ['string', 'number'])
keyfunc = self._create_key_func(expref.expression,
[required_type],
'sort_by')
return list(sorted(array, key=keyfunc))
@builtin_function({'types': ['array']}, {'types': ['expref']})
def _func_min_by(self, array, expref):
keyfunc = self._create_key_func(expref.expression,
['number', 'string'],
'min_by')
return min(array, key=keyfunc)
@builtin_function({'types': ['array']}, {'types': ['expref']})
def _func_max_by(self, array, expref):
keyfunc = self._create_key_func(expref.expression,
['number', 'string'],
                                        'max_by')
return max(array, key=keyfunc)
def _create_key_func(self, expr_node, allowed_types, function_name):
interpreter = self.interpreter
def keyfunc(x):
result = interpreter.visit(expr_node, x)
actual_typename = type(result).__name__
jmespath_type = self._convert_to_jmespath_type(actual_typename)
# allowed_types is in term of jmespath types, not python types.
if jmespath_type not in allowed_types:
raise exceptions.JMESPathTypeError(
function_name, result, jmespath_type, allowed_types)
return result
return keyfunc
def _convert_to_jmespath_type(self, pyobject):
return TYPES_MAP.get(pyobject, 'unknown')
| mit | -4,993,242,979,314,297,000 | 34.736264 | 79 | 0.564114 | false |
davidwaroquiers/pymatgen | pymatgen/analysis/tests/test_structure_matcher.py | 5 | 47994 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import itertools
import json
import os
import unittest
import numpy as np
from monty.json import MontyDecoder
from pymatgen.core.periodic_table import Element
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
from pymatgen.analysis.defects.core import Interstitial, Substitution, Vacancy
from pymatgen.analysis.structure_matcher import (
ElementComparator,
FrameworkComparator,
OccupancyComparator,
OrderDisorderElementComparator,
PointDefectComparator,
StructureMatcher,
)
from pymatgen.core import PeriodicSite
from pymatgen.core.operations import SymmOp
from pymatgen.util.coord import find_in_coord_list_pbc
from pymatgen.util.testing import PymatgenTest
class StructureMatcherTest(PymatgenTest):
_multiprocess_shared_ = True
def setUp(self):
with open(os.path.join(PymatgenTest.TEST_FILES_DIR, "TiO2_entries.json"), "r") as fp:
entries = json.load(fp, cls=MontyDecoder)
self.struct_list = [e.structure for e in entries]
self.oxi_structs = [
self.get_structure("Li2O"),
Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "POSCAR.Li2O")),
]
def test_ignore_species(self):
s1 = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "LiFePO4.cif"))
s2 = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "POSCAR"))
m = StructureMatcher(ignored_species=["Li"], primitive_cell=False, attempt_supercell=True)
self.assertTrue(m.fit(s1, s2))
self.assertTrue(m.fit_anonymous(s1, s2))
groups = m.group_structures([s1, s2])
self.assertEqual(len(groups), 1)
s2.make_supercell((2, 1, 1))
ss1 = m.get_s2_like_s1(s2, s1, include_ignored_species=True)
self.assertAlmostEqual(ss1.lattice.a, 20.820740000000001)
self.assertEqual(ss1.composition.reduced_formula, "LiFePO4")
self.assertEqual(
{k.symbol: v.symbol for k, v in m.get_best_electronegativity_anonymous_mapping(s1, s2).items()},
{"Fe": "Fe", "P": "P", "O": "O"},
)
def test_get_supercell_size(self):
l = Lattice.cubic(1)
l2 = Lattice.cubic(0.9)
s1 = Structure(l, ["Mg", "Cu", "Ag", "Cu", "Ag"], [[0] * 3] * 5)
s2 = Structure(l2, ["Cu", "Cu", "Ag"], [[0] * 3] * 3)
sm = StructureMatcher(supercell_size="volume")
self.assertEqual(sm._get_supercell_size(s1, s2), (1, True))
self.assertEqual(sm._get_supercell_size(s2, s1), (1, True))
sm = StructureMatcher(supercell_size="num_sites")
self.assertEqual(sm._get_supercell_size(s1, s2), (2, False))
self.assertEqual(sm._get_supercell_size(s2, s1), (2, True))
sm = StructureMatcher(supercell_size="Ag")
self.assertEqual(sm._get_supercell_size(s1, s2), (2, False))
self.assertEqual(sm._get_supercell_size(s2, s1), (2, True))
sm = StructureMatcher(supercell_size=["Ag", "Cu"])
self.assertEqual(sm._get_supercell_size(s1, s2), (1, True))
self.assertEqual(sm._get_supercell_size(s2, s1), (1, True))
sm = StructureMatcher(supercell_size="wfieoh")
self.assertRaises(ValueError, sm._get_supercell_size, s1, s2)
def test_cmp_fstruct(self):
sm = StructureMatcher()
s1 = np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]])
s2 = np.array([[0.11, 0.22, 0.33]])
frac_tol = np.array([0.02, 0.03, 0.04])
mask = np.array([[False, False]])
mask2 = np.array([[True, False]])
self.assertRaises(ValueError, sm._cmp_fstruct, s2, s1, frac_tol, mask.T)
self.assertRaises(ValueError, sm._cmp_fstruct, s1, s2, frac_tol, mask.T)
self.assertTrue(sm._cmp_fstruct(s1, s2, frac_tol, mask))
self.assertFalse(sm._cmp_fstruct(s1, s2, frac_tol / 2, mask))
self.assertFalse(sm._cmp_fstruct(s1, s2, frac_tol, mask2))
def test_cart_dists(self):
sm = StructureMatcher()
l = Lattice.orthorhombic(1, 2, 3)
s1 = np.array([[0.13, 0.25, 0.37], [0.1, 0.2, 0.3]])
s2 = np.array([[0.11, 0.22, 0.33]])
s3 = np.array([[0.1, 0.2, 0.3], [0.11, 0.2, 0.3]])
s4 = np.array([[0.1, 0.2, 0.3], [0.1, 0.6, 0.7]])
mask = np.array([[False, False]])
mask2 = np.array([[False, True]])
mask3 = np.array([[False, False], [False, False]])
mask4 = np.array([[False, True], [False, True]])
n1 = (len(s1) / l.volume) ** (1 / 3)
n2 = (len(s2) / l.volume) ** (1 / 3)
self.assertRaises(ValueError, sm._cart_dists, s2, s1, l, mask.T, n2)
self.assertRaises(ValueError, sm._cart_dists, s1, s2, l, mask.T, n1)
d, ft, s = sm._cart_dists(s1, s2, l, mask, n1)
self.assertTrue(np.allclose(d, [0]))
self.assertTrue(np.allclose(ft, [-0.01, -0.02, -0.03]))
self.assertTrue(np.allclose(s, [1]))
# check that masking best value works
d, ft, s = sm._cart_dists(s1, s2, l, mask2, n1)
self.assertTrue(np.allclose(d, [0]))
self.assertTrue(np.allclose(ft, [0.02, 0.03, 0.04]))
self.assertTrue(np.allclose(s, [0]))
# check that averaging of translation is done properly
d, ft, s = sm._cart_dists(s1, s3, l, mask3, n1)
self.assertTrue(np.allclose(d, [0.08093341] * 2))
self.assertTrue(np.allclose(ft, [0.01, 0.025, 0.035]))
self.assertTrue(np.allclose(s, [1, 0]))
# check distances are large when mask allows no 'real' mapping
d, ft, s = sm._cart_dists(s1, s4, l, mask4, n1)
self.assertTrue(np.min(d) > 1e8)
self.assertTrue(np.min(ft) > 1e8)
def test_get_mask(self):
sm = StructureMatcher(comparator=ElementComparator())
l = Lattice.cubic(1)
s1 = Structure(l, ["Mg", "Cu", "Ag", "Cu"], [[0] * 3] * 4)
s2 = Structure(l, ["Cu", "Cu", "Ag"], [[0] * 3] * 3)
result = [
[True, False, True, False],
[True, False, True, False],
[True, True, False, True],
]
m, inds, i = sm._get_mask(s1, s2, 1, True)
self.assertTrue(np.all(m == result))
self.assertTrue(i == 2)
self.assertEqual(inds, [2])
# test supercell with match
result = [
[1, 1, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 1, 1, 0, 0],
[1, 1, 1, 1, 0, 0, 1, 1],
]
m, inds, i = sm._get_mask(s1, s2, 2, True)
self.assertTrue(np.all(m == result))
self.assertTrue(i == 2)
self.assertTrue(np.allclose(inds, np.array([4])))
# test supercell without match
result = [
[1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 1, 1],
[1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 1, 1],
]
m, inds, i = sm._get_mask(s2, s1, 2, True)
self.assertTrue(np.all(m == result))
self.assertTrue(i == 0)
self.assertTrue(np.allclose(inds, np.array([])))
# test s2_supercell
result = [
[1, 1, 1],
[1, 1, 1],
[0, 0, 1],
[0, 0, 1],
[1, 1, 0],
[1, 1, 0],
[0, 0, 1],
[0, 0, 1],
]
m, inds, i = sm._get_mask(s2, s1, 2, False)
self.assertTrue(np.all(m == result))
self.assertTrue(i == 0)
self.assertTrue(np.allclose(inds, np.array([])))
# test for multiple translation indices
s1 = Structure(l, ["Cu", "Ag", "Cu", "Ag", "Ag"], [[0] * 3] * 5)
s2 = Structure(l, ["Ag", "Cu", "Ag"], [[0] * 3] * 3)
result = [[1, 0, 1, 0, 0], [0, 1, 0, 1, 1], [1, 0, 1, 0, 0]]
m, inds, i = sm._get_mask(s1, s2, 1, True)
self.assertTrue(np.all(m == result))
self.assertTrue(i == 1)
self.assertTrue(np.allclose(inds, [0, 2]))
def test_get_supercells(self):
sm = StructureMatcher(comparator=ElementComparator())
l = Lattice.cubic(1)
l2 = Lattice.cubic(0.5)
s1 = Structure(l, ["Mg", "Cu", "Ag", "Cu"], [[0] * 3] * 4)
s2 = Structure(l2, ["Cu", "Cu", "Ag"], [[0] * 3] * 3)
scs = list(sm._get_supercells(s1, s2, 8, False))
for x in scs:
self.assertAlmostEqual(abs(np.linalg.det(x[3])), 8)
self.assertEqual(len(x[0]), 4)
self.assertEqual(len(x[1]), 24)
self.assertEqual(len(scs), 48)
scs = list(sm._get_supercells(s2, s1, 8, True))
for x in scs:
self.assertAlmostEqual(abs(np.linalg.det(x[3])), 8)
self.assertEqual(len(x[0]), 24)
self.assertEqual(len(x[1]), 4)
self.assertEqual(len(scs), 48)
def test_fit(self):
"""
Take two known matched structures
1) Ensure match
2) Ensure match after translation and rotations
3) Ensure no-match after large site translation
4) Ensure match after site shuffling
"""
sm = StructureMatcher()
self.assertTrue(sm.fit(self.struct_list[0], self.struct_list[1]))
# Test rotational/translational invariance
op = SymmOp.from_axis_angle_and_translation([0, 0, 1], 30, False, np.array([0.4, 0.7, 0.9]))
self.struct_list[1].apply_operation(op)
self.assertTrue(sm.fit(self.struct_list[0], self.struct_list[1]))
# Test failure under large atomic translation
self.struct_list[1].translate_sites([0], [0.4, 0.4, 0.2], frac_coords=True)
self.assertFalse(sm.fit(self.struct_list[0], self.struct_list[1]))
self.struct_list[1].translate_sites([0], [-0.4, -0.4, -0.2], frac_coords=True)
# random.shuffle(editor._sites)
self.assertTrue(sm.fit(self.struct_list[0], self.struct_list[1]))
        # Test FrameworkComparator
sm2 = StructureMatcher(comparator=FrameworkComparator())
lfp = self.get_structure("LiFePO4")
nfp = self.get_structure("NaFePO4")
self.assertTrue(sm2.fit(lfp, nfp))
self.assertFalse(sm.fit(lfp, nfp))
# Test anonymous fit.
self.assertEqual(sm.fit_anonymous(lfp, nfp), True)
self.assertAlmostEqual(sm.get_rms_anonymous(lfp, nfp)[0], 0.060895871160262717)
# Test partial occupancies.
s1 = Structure(
Lattice.cubic(3),
[{"Fe": 0.5}, {"Fe": 0.5}, {"Fe": 0.5}, {"Fe": 0.5}],
[[0, 0, 0], [0.25, 0.25, 0.25], [0.5, 0.5, 0.5], [0.75, 0.75, 0.75]],
)
s2 = Structure(
Lattice.cubic(3),
[{"Fe": 0.25}, {"Fe": 0.5}, {"Fe": 0.5}, {"Fe": 0.75}],
[[0, 0, 0], [0.25, 0.25, 0.25], [0.5, 0.5, 0.5], [0.75, 0.75, 0.75]],
)
self.assertFalse(sm.fit(s1, s2))
self.assertFalse(sm.fit(s2, s1))
s2 = Structure(
Lattice.cubic(3),
[{"Mn": 0.5}, {"Mn": 0.5}, {"Mn": 0.5}, {"Mn": 0.5}],
[[0, 0, 0], [0.25, 0.25, 0.25], [0.5, 0.5, 0.5], [0.75, 0.75, 0.75]],
)
self.assertEqual(sm.fit_anonymous(s1, s2), True)
self.assertAlmostEqual(sm.get_rms_anonymous(s1, s2)[0], 0)
# test symmetric
sm_coarse = sm = StructureMatcher(
comparator=ElementComparator(),
ltol=0.6,
stol=0.6,
angle_tol=6,
)
s1 = Structure.from_file(PymatgenTest.TEST_FILES_DIR / "fit_symm_s1.vasp")
s2 = Structure.from_file(PymatgenTest.TEST_FILES_DIR / "fit_symm_s2.vasp")
self.assertEqual(sm_coarse.fit(s1, s2), True)
self.assertEqual(sm_coarse.fit(s2, s1), False)
self.assertEqual(sm_coarse.fit(s1, s2, symmetric=True), False)
self.assertEqual(sm_coarse.fit(s2, s1, symmetric=True), False)
def test_oxi(self):
"""Test oxidation state removal matching"""
sm = StructureMatcher()
self.assertFalse(sm.fit(self.oxi_structs[0], self.oxi_structs[1]))
sm = StructureMatcher(comparator=ElementComparator())
self.assertTrue(sm.fit(self.oxi_structs[0], self.oxi_structs[1]))
def test_primitive(self):
"""Test primitive cell reduction"""
sm = StructureMatcher(primitive_cell=True)
self.struct_list[1].make_supercell([[2, 0, 0], [0, 3, 0], [0, 0, 1]])
self.assertTrue(sm.fit(self.struct_list[0], self.struct_list[1]))
def test_class(self):
# Tests entire class as single working unit
sm = StructureMatcher()
# Test group_structures and find_indices
out = sm.group_structures(self.struct_list)
self.assertEqual(list(map(len, out)), [4, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1])
self.assertEqual(sum(map(len, out)), len(self.struct_list))
for s in self.struct_list[::2]:
s.replace_species({"Ti": "Zr", "O": "Ti"})
out = sm.group_structures(self.struct_list, anonymous=True)
self.assertEqual(list(map(len, out)), [4, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1])
def test_mix(self):
structures = [
self.get_structure("Li2O"),
self.get_structure("Li2O2"),
self.get_structure("LiFePO4"),
]
for fname in ["POSCAR.Li2O", "POSCAR.LiFePO4"]:
structures.append(Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, fname)))
sm = StructureMatcher(comparator=ElementComparator())
groups = sm.group_structures(structures)
for g in groups:
formula = g[0].composition.reduced_formula
if formula in ["Li2O", "LiFePO4"]:
self.assertEqual(len(g), 2)
else:
self.assertEqual(len(g), 1)
def test_left_handed_lattice(self):
"""Ensure Left handed lattices are accepted"""
sm = StructureMatcher()
s = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "Li3GaPCO7.json"))
self.assertTrue(sm.fit(s, s))
def test_as_dict_and_from_dict(self):
sm = StructureMatcher(
ltol=0.1,
stol=0.2,
angle_tol=2,
primitive_cell=False,
scale=False,
comparator=FrameworkComparator(),
)
d = sm.as_dict()
sm2 = StructureMatcher.from_dict(d)
self.assertEqual(sm2.as_dict(), d)
def test_no_scaling(self):
sm = StructureMatcher(ltol=0.1, stol=0.1, angle_tol=2, scale=False, comparator=ElementComparator())
self.assertTrue(sm.fit(self.struct_list[0], self.struct_list[1]))
self.assertTrue(sm.get_rms_dist(self.struct_list[0], self.struct_list[1])[0] < 0.0008)
def test_supercell_fit(self):
sm = StructureMatcher(attempt_supercell=False)
s1 = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "Al3F9.json"))
s2 = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "Al3F9_distorted.json"))
self.assertFalse(sm.fit(s1, s2))
sm = StructureMatcher(attempt_supercell=True)
self.assertTrue(sm.fit(s1, s2))
self.assertTrue(sm.fit(s2, s1))
def test_get_lattices(self):
sm = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=True,
scale=True,
attempt_supercell=False,
)
l1 = Lattice.from_parameters(1, 2.1, 1.9, 90, 89, 91)
l2 = Lattice.from_parameters(1.1, 2, 2, 89, 91, 90)
s1 = Structure(l1, [], [])
s2 = Structure(l2, [], [])
lattices = list(sm._get_lattices(s=s1, target_lattice=s2.lattice))
self.assertEqual(len(lattices), 16)
l3 = Lattice.from_parameters(1.1, 2, 20, 89, 91, 90)
s3 = Structure(l3, [], [])
lattices = list(sm._get_lattices(s=s1, target_lattice=s3.lattice))
self.assertEqual(len(lattices), 0)
def test_find_match1(self):
sm = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=True,
scale=True,
attempt_supercell=False,
)
l = Lattice.orthorhombic(1, 2, 3)
s1 = Structure(l, ["Si", "Si", "Ag"], [[0, 0, 0.1], [0, 0, 0.2], [0.7, 0.4, 0.5]])
s2 = Structure(l, ["Si", "Si", "Ag"], [[0, 0.1, 0], [0, 0.1, -0.95], [0.7, 0.5, 0.375]])
s1, s2, fu, s1_supercell = sm._preprocess(s1, s2, False)
match = sm._strict_match(s1, s2, fu, s1_supercell=True, use_rms=True, break_on_match=False)
scale_matrix = match[2]
s2.make_supercell(scale_matrix)
fc = s2.frac_coords + match[3]
fc -= np.round(fc)
self.assertAlmostEqual(np.sum(fc), 0.9)
self.assertAlmostEqual(np.sum(fc[:, :2]), 0.1)
cart_dist = np.sum(match[1] * (l.volume / 3) ** (1 / 3))
self.assertAlmostEqual(cart_dist, 0.15)
def test_find_match2(self):
sm = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=True,
scale=True,
attempt_supercell=False,
)
l = Lattice.orthorhombic(1, 2, 3)
s1 = Structure(l, ["Si", "Si"], [[0, 0, 0.1], [0, 0, 0.2]])
s2 = Structure(l, ["Si", "Si"], [[0, 0.1, 0], [0, 0.1, -0.95]])
s1, s2, fu, s1_supercell = sm._preprocess(s1, s2, False)
match = sm._strict_match(s1, s2, fu, s1_supercell=False, use_rms=True, break_on_match=False)
scale_matrix = match[2]
s2.make_supercell(scale_matrix)
s2.translate_sites(range(len(s2)), match[3])
self.assertAlmostEqual(np.sum(s2.frac_coords) % 1, 0.3)
self.assertAlmostEqual(np.sum(s2.frac_coords[:, :2]) % 1, 0)
def test_supercell_subsets(self):
sm = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=False,
scale=True,
attempt_supercell=True,
allow_subset=True,
supercell_size="volume",
)
sm_no_s = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=False,
scale=True,
attempt_supercell=True,
allow_subset=False,
supercell_size="volume",
)
l = Lattice.orthorhombic(1, 2, 3)
s1 = Structure(l, ["Ag", "Si", "Si"], [[0.7, 0.4, 0.5], [0, 0, 0.1], [0, 0, 0.2]])
s1.make_supercell([2, 1, 1])
s2 = Structure(l, ["Si", "Si", "Ag"], [[0, 0.1, -0.95], [0, 0.1, 0], [-0.7, 0.5, 0.375]])
shuffle = [0, 2, 1, 3, 4, 5]
s1 = Structure.from_sites([s1[i] for i in shuffle])
# test when s1 is exact supercell of s2
result = sm.get_s2_like_s1(s1, s2)
for a, b in zip(s1, result):
self.assertTrue(a.distance(b) < 0.08)
self.assertEqual(a.species, b.species)
self.assertTrue(sm.fit(s1, s2))
self.assertTrue(sm.fit(s2, s1))
self.assertTrue(sm_no_s.fit(s1, s2))
self.assertTrue(sm_no_s.fit(s2, s1))
rms = (0.048604032430991401, 0.059527539448807391)
self.assertTrue(np.allclose(sm.get_rms_dist(s1, s2), rms))
self.assertTrue(np.allclose(sm.get_rms_dist(s2, s1), rms))
# test when the supercell is a subset of s2
subset_supercell = s1.copy()
del subset_supercell[0]
result = sm.get_s2_like_s1(subset_supercell, s2)
self.assertEqual(len(result), 6)
for a, b in zip(subset_supercell, result):
self.assertTrue(a.distance(b) < 0.08)
self.assertEqual(a.species, b.species)
self.assertTrue(sm.fit(subset_supercell, s2))
self.assertTrue(sm.fit(s2, subset_supercell))
self.assertFalse(sm_no_s.fit(subset_supercell, s2))
self.assertFalse(sm_no_s.fit(s2, subset_supercell))
rms = (0.053243049896333279, 0.059527539448807336)
self.assertTrue(np.allclose(sm.get_rms_dist(subset_supercell, s2), rms))
self.assertTrue(np.allclose(sm.get_rms_dist(s2, subset_supercell), rms))
# test when s2 (once made a supercell) is a subset of s1
s2_missing_site = s2.copy()
del s2_missing_site[1]
result = sm.get_s2_like_s1(s1, s2_missing_site)
for a, b in zip((s1[i] for i in (0, 2, 4, 5)), result):
self.assertTrue(a.distance(b) < 0.08)
self.assertEqual(a.species, b.species)
self.assertTrue(sm.fit(s1, s2_missing_site))
self.assertTrue(sm.fit(s2_missing_site, s1))
self.assertFalse(sm_no_s.fit(s1, s2_missing_site))
self.assertFalse(sm_no_s.fit(s2_missing_site, s1))
rms = (0.029763769724403633, 0.029763769724403987)
self.assertTrue(np.allclose(sm.get_rms_dist(s1, s2_missing_site), rms))
self.assertTrue(np.allclose(sm.get_rms_dist(s2_missing_site, s1), rms))
def test_get_s2_large_s2(self):
sm = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=False,
scale=False,
attempt_supercell=True,
allow_subset=False,
supercell_size="volume",
)
l = Lattice.orthorhombic(1, 2, 3)
s1 = Structure(l, ["Ag", "Si", "Si"], [[0.7, 0.4, 0.5], [0, 0, 0.1], [0, 0, 0.2]])
l2 = Lattice.orthorhombic(1.01, 2.01, 3.01)
s2 = Structure(l2, ["Si", "Si", "Ag"], [[0, 0.1, -0.95], [0, 0.1, 0], [-0.7, 0.5, 0.375]])
s2.make_supercell([[0, -1, 0], [1, 0, 0], [0, 0, 1]])
result = sm.get_s2_like_s1(s1, s2)
for x, y in zip(s1, result):
self.assertLess(x.distance(y), 0.08)
def test_get_mapping(self):
sm = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=False,
scale=True,
attempt_supercell=False,
allow_subset=True,
)
l = Lattice.orthorhombic(1, 2, 3)
s1 = Structure(l, ["Ag", "Si", "Si"], [[0.7, 0.4, 0.5], [0, 0, 0.1], [0, 0, 0.2]])
s1.make_supercell([2, 1, 1])
s2 = Structure(l, ["Si", "Si", "Ag"], [[0, 0.1, -0.95], [0, 0.1, 0], [-0.7, 0.5, 0.375]])
shuffle = [2, 0, 1, 3, 5, 4]
s1 = Structure.from_sites([s1[i] for i in shuffle])
# test the mapping
s2.make_supercell([2, 1, 1])
# equal sizes
for i, x in enumerate(sm.get_mapping(s1, s2)):
self.assertEqual(s1[x].species, s2[i].species)
del s1[0]
# s1 is subset of s2
for i, x in enumerate(sm.get_mapping(s2, s1)):
self.assertEqual(s1[i].species, s2[x].species)
# s2 is smaller than s1
del s2[0]
del s2[1]
self.assertRaises(ValueError, sm.get_mapping, s2, s1)
def test_get_supercell_matrix(self):
sm = StructureMatcher(
ltol=0.1,
stol=0.3,
angle_tol=2,
primitive_cell=False,
scale=True,
attempt_supercell=True,
)
l = Lattice.orthorhombic(1, 2, 3)
s1 = Structure(l, ["Si", "Si", "Ag"], [[0, 0, 0.1], [0, 0, 0.2], [0.7, 0.4, 0.5]])
s1.make_supercell([2, 1, 1])
s2 = Structure(l, ["Si", "Si", "Ag"], [[0, 0.1, 0], [0, 0.1, -0.95], [-0.7, 0.5, 0.375]])
result = sm.get_supercell_matrix(s1, s2)
self.assertTrue((result == [[-2, 0, 0], [0, 1, 0], [0, 0, 1]]).all())
s1 = Structure(l, ["Si", "Si", "Ag"], [[0, 0, 0.1], [0, 0, 0.2], [0.7, 0.4, 0.5]])
s1.make_supercell([[1, -1, 0], [0, 0, -1], [0, 1, 0]])
s2 = Structure(l, ["Si", "Si", "Ag"], [[0, 0.1, 0], [0, 0.1, -0.95], [-0.7, 0.5, 0.375]])
result = sm.get_supercell_matrix(s1, s2)
self.assertTrue((result == [[-1, -1, 0], [0, 0, -1], [0, 1, 0]]).all())
# test when the supercell is a subset
sm = StructureMatcher(
ltol=0.1,
stol=0.3,
angle_tol=2,
primitive_cell=False,
scale=True,
attempt_supercell=True,
allow_subset=True,
)
del s1[0]
result = sm.get_supercell_matrix(s1, s2)
self.assertTrue((result == [[-1, -1, 0], [0, 0, -1], [0, 1, 0]]).all())
def test_subset(self):
sm = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=False,
scale=True,
attempt_supercell=False,
allow_subset=True,
)
l = Lattice.orthorhombic(10, 20, 30)
s1 = Structure(l, ["Si", "Si", "Ag"], [[0, 0, 0.1], [0, 0, 0.2], [0.7, 0.4, 0.5]])
s2 = Structure(l, ["Si", "Ag"], [[0, 0.1, 0], [-0.7, 0.5, 0.4]])
result = sm.get_s2_like_s1(s1, s2)
self.assertEqual(len(find_in_coord_list_pbc(result.frac_coords, [0, 0, 0.1])), 1)
self.assertEqual(len(find_in_coord_list_pbc(result.frac_coords, [0.7, 0.4, 0.5])), 1)
# test with fewer species in s2
s1 = Structure(l, ["Si", "Ag", "Si"], [[0, 0, 0.1], [0, 0, 0.2], [0.7, 0.4, 0.5]])
s2 = Structure(l, ["Si", "Si"], [[0, 0.1, 0], [-0.7, 0.5, 0.4]])
result = sm.get_s2_like_s1(s1, s2)
mindists = np.min(s1.lattice.get_all_distances(s1.frac_coords, result.frac_coords), axis=0)
self.assertLess(np.max(mindists), 1e-6)
self.assertEqual(len(find_in_coord_list_pbc(result.frac_coords, [0, 0, 0.1])), 1)
self.assertEqual(len(find_in_coord_list_pbc(result.frac_coords, [0.7, 0.4, 0.5])), 1)
# test with not enough sites in s1
# test with fewer species in s2
s1 = Structure(l, ["Si", "Ag", "Cl"], [[0, 0, 0.1], [0, 0, 0.2], [0.7, 0.4, 0.5]])
s2 = Structure(l, ["Si", "Si"], [[0, 0.1, 0], [-0.7, 0.5, 0.4]])
self.assertEqual(sm.get_s2_like_s1(s1, s2), None)
def test_out_of_cell_s2_like_s1(self):
l = Lattice.cubic(5)
s1 = Structure(l, ["Si", "Ag", "Si"], [[0, 0, -0.02], [0, 0, 0.001], [0.7, 0.4, 0.5]])
s2 = Structure(l, ["Si", "Ag", "Si"], [[0, 0, 0.98], [0, 0, 0.99], [0.7, 0.4, 0.5]])
new_s2 = StructureMatcher(primitive_cell=False).get_s2_like_s1(s1, s2)
dists = np.sum((s1.cart_coords - new_s2.cart_coords) ** 2, axis=-1) ** 0.5
self.assertLess(np.max(dists), 0.1)
def test_disordered_primitive_to_ordered_supercell(self):
sm_atoms = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=False,
scale=True,
attempt_supercell=True,
allow_subset=True,
supercell_size="num_atoms",
comparator=OrderDisorderElementComparator(),
)
sm_sites = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=False,
scale=True,
attempt_supercell=True,
allow_subset=True,
supercell_size="num_sites",
comparator=OrderDisorderElementComparator(),
)
lp = Lattice.orthorhombic(10, 20, 30)
pcoords = [[0, 0, 0], [0.5, 0.5, 0.5]]
ls = Lattice.orthorhombic(20, 20, 30)
scoords = [[0, 0, 0], [0.75, 0.5, 0.5]]
prim = Structure(lp, [{"Na": 0.5}, {"Cl": 0.5}], pcoords)
supercell = Structure(ls, ["Na", "Cl"], scoords)
supercell.make_supercell([[-1, 1, 0], [0, 1, 1], [1, 0, 0]])
self.assertFalse(sm_sites.fit(prim, supercell))
self.assertTrue(sm_atoms.fit(prim, supercell))
self.assertRaises(ValueError, sm_atoms.get_s2_like_s1, prim, supercell)
self.assertEqual(len(sm_atoms.get_s2_like_s1(supercell, prim)), 4)
def test_ordered_primitive_to_disordered_supercell(self):
sm_atoms = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=False,
scale=True,
attempt_supercell=True,
allow_subset=True,
supercell_size="num_atoms",
comparator=OrderDisorderElementComparator(),
)
sm_sites = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=False,
scale=True,
attempt_supercell=True,
allow_subset=True,
supercell_size="num_sites",
comparator=OrderDisorderElementComparator(),
)
lp = Lattice.orthorhombic(10, 20, 30)
pcoords = [[0, 0, 0], [0.5, 0.5, 0.5]]
ls = Lattice.orthorhombic(20, 20, 30)
scoords = [[0, 0, 0], [0.5, 0, 0], [0.25, 0.5, 0.5], [0.75, 0.5, 0.5]]
s1 = Structure(lp, ["Na", "Cl"], pcoords)
s2 = Structure(ls, [{"Na": 0.5}, {"Na": 0.5}, {"Cl": 0.5}, {"Cl": 0.5}], scoords)
self.assertTrue(sm_sites.fit(s1, s2))
self.assertFalse(sm_atoms.fit(s1, s2))
def test_disordered_to_disordered(self):
sm_atoms = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=False,
scale=True,
attempt_supercell=True,
allow_subset=False,
comparator=OrderDisorderElementComparator(),
)
lp = Lattice.orthorhombic(10, 20, 30)
coords = [[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]]
s1 = Structure(lp, [{"Na": 0.5, "Cl": 0.5}, {"Na": 0.5, "Cl": 0.5}], coords)
s2 = Structure(lp, [{"Na": 0.5, "Cl": 0.5}, {"Na": 0.5, "Br": 0.5}], coords)
self.assertFalse(sm_atoms.fit(s1, s2))
def test_occupancy_comparator(self):
lp = Lattice.orthorhombic(10, 20, 30)
pcoords = [[0, 0, 0], [0.5, 0.5, 0.5]]
s1 = Structure(lp, [{"Na": 0.6, "K": 0.4}, "Cl"], pcoords)
s2 = Structure(lp, [{"Xa": 0.4, "Xb": 0.6}, "Cl"], pcoords)
s3 = Structure(lp, [{"Xa": 0.5, "Xb": 0.5}, "Cl"], pcoords)
sm_sites = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=False,
scale=True,
attempt_supercell=True,
allow_subset=True,
supercell_size="num_sites",
comparator=OccupancyComparator(),
)
self.assertTrue(sm_sites.fit(s1, s2))
self.assertFalse(sm_sites.fit(s1, s3))
def test_electronegativity(self):
sm = StructureMatcher(ltol=0.2, stol=0.3, angle_tol=5)
s1 = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "Na2Fe2PAsO4S4.json"))
s2 = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "Na2Fe2PNO4Se4.json"))
self.assertEqual(
sm.get_best_electronegativity_anonymous_mapping(s1, s2),
{
Element("S"): Element("Se"),
Element("As"): Element("N"),
Element("Fe"): Element("Fe"),
Element("Na"): Element("Na"),
Element("P"): Element("P"),
Element("O"): Element("O"),
},
)
self.assertEqual(len(sm.get_all_anonymous_mappings(s1, s2)), 2)
# test include_dist
dists = {Element("N"): 0, Element("P"): 0.0010725064}
for mapping, d in sm.get_all_anonymous_mappings(s1, s2, include_dist=True):
self.assertAlmostEqual(dists[mapping[Element("As")]], d)
def test_rms_vs_minimax(self):
# This tests that structures with adjusted RMS less than stol, but minimax
# greater than stol are treated properly
# stol=0.3 gives exactly an ftol of 0.1 on the c axis
sm = StructureMatcher(ltol=0.2, stol=0.301, angle_tol=1, primitive_cell=False)
l = Lattice.orthorhombic(1, 2, 12)
sp = ["Si", "Si", "Al"]
s1 = Structure(l, sp, [[0.5, 0, 0], [0, 0, 0], [0, 0, 0.5]])
s2 = Structure(l, sp, [[0.5, 0, 0], [0, 0, 0], [0, 0, 0.6]])
self.assertArrayAlmostEqual(sm.get_rms_dist(s1, s2), (0.32 ** 0.5 / 2, 0.4))
self.assertEqual(sm.fit(s1, s2), False)
self.assertEqual(sm.fit_anonymous(s1, s2), False)
self.assertEqual(sm.get_mapping(s1, s2), None)
class PointDefectComparatorTest(PymatgenTest):
def test_defect_matching(self):
# SETUP DEFECTS FOR TESTING
# symmorphic defect test set
s_struc = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "CsSnI3.cif")) # tetragonal CsSnI3
identical_Cs_vacs = [Vacancy(s_struc, s_struc[0]), Vacancy(s_struc, s_struc[1])]
identical_I_vacs_sublattice1 = [
Vacancy(s_struc, s_struc[4]),
Vacancy(s_struc, s_struc[5]),
Vacancy(s_struc, s_struc[8]),
Vacancy(s_struc, s_struc[9]),
] # in plane halides
identical_I_vacs_sublattice2 = [
Vacancy(s_struc, s_struc[6]),
Vacancy(s_struc, s_struc[7]),
] # out of plane halides
pdc = PointDefectComparator()
# NOW TEST DEFECTS
# test vacancy matching
self.assertTrue(pdc.are_equal(identical_Cs_vacs[0], identical_Cs_vacs[0])) # trivial vacancy test
self.assertTrue(pdc.are_equal(identical_Cs_vacs[0], identical_Cs_vacs[1])) # vacancies on same sublattice
for i, j in itertools.combinations(range(4), 2):
self.assertTrue(pdc.are_equal(identical_I_vacs_sublattice1[i], identical_I_vacs_sublattice1[j]))
self.assertTrue(pdc.are_equal(identical_I_vacs_sublattice2[0], identical_I_vacs_sublattice2[1]))
self.assertFalse(
pdc.are_equal(
identical_Cs_vacs[0],
# both vacancies, but different specie types
identical_I_vacs_sublattice1[0],
)
)
self.assertFalse(
pdc.are_equal(
identical_I_vacs_sublattice1[0],
# same specie type, different sublattice
identical_I_vacs_sublattice2[0],
)
)
# test substitutional matching
sub_Cs_on_I_sublattice1_set1 = PeriodicSite(
"Cs", identical_I_vacs_sublattice1[0].site.frac_coords, s_struc.lattice
)
sub_Cs_on_I_sublattice1_set2 = PeriodicSite(
"Cs", identical_I_vacs_sublattice1[1].site.frac_coords, s_struc.lattice
)
sub_Cs_on_I_sublattice2 = PeriodicSite("Cs", identical_I_vacs_sublattice2[0].site.frac_coords, s_struc.lattice)
sub_Rb_on_I_sublattice2 = PeriodicSite("Rb", identical_I_vacs_sublattice2[0].site.frac_coords, s_struc.lattice)
self.assertTrue(
pdc.are_equal( # trivial substitution test
Substitution(s_struc, sub_Cs_on_I_sublattice1_set1),
Substitution(s_struc, sub_Cs_on_I_sublattice1_set1),
)
)
self.assertTrue(
pdc.are_equal( # same sublattice, different coords
Substitution(s_struc, sub_Cs_on_I_sublattice1_set1),
Substitution(s_struc, sub_Cs_on_I_sublattice1_set2),
)
)
self.assertFalse(
pdc.are_equal( # different subs (wrong specie)
Substitution(s_struc, sub_Cs_on_I_sublattice2),
Substitution(s_struc, sub_Rb_on_I_sublattice2),
)
)
self.assertFalse(
pdc.are_equal( # different subs (wrong sublattice)
Substitution(s_struc, sub_Cs_on_I_sublattice1_set1),
Substitution(s_struc, sub_Cs_on_I_sublattice2),
)
)
# test symmorphic interstitial matching
        # (using a set generated from the Voronoi generator, with the same sublattice given
        # by the saturated_interstitial_structure function)
inter_H_sublattice1_set1 = PeriodicSite("H", [0.0, 0.75, 0.25], s_struc.lattice)
inter_H_sublattice1_set2 = PeriodicSite("H", [0.0, 0.75, 0.75], s_struc.lattice)
inter_H_sublattice2 = PeriodicSite("H", [0.57796112, 0.06923687, 0.56923687], s_struc.lattice)
inter_H_sublattice3 = PeriodicSite("H", [0.25, 0.25, 0.54018268], s_struc.lattice)
inter_He_sublattice3 = PeriodicSite("He", [0.25, 0.25, 0.54018268], s_struc.lattice)
self.assertTrue(
pdc.are_equal( # trivial interstitial test
Interstitial(s_struc, inter_H_sublattice1_set1),
Interstitial(s_struc, inter_H_sublattice1_set1),
)
)
self.assertTrue(
pdc.are_equal( # same sublattice, different coords
Interstitial(s_struc, inter_H_sublattice1_set1),
Interstitial(s_struc, inter_H_sublattice1_set2),
)
)
self.assertFalse(
pdc.are_equal( # different interstitials (wrong sublattice)
Interstitial(s_struc, inter_H_sublattice1_set1),
Interstitial(s_struc, inter_H_sublattice2),
)
)
self.assertFalse(
pdc.are_equal( # different interstitials (wrong sublattice)
Interstitial(s_struc, inter_H_sublattice1_set1),
Interstitial(s_struc, inter_H_sublattice3),
)
)
self.assertFalse(
pdc.are_equal( # different interstitials (wrong specie)
Interstitial(s_struc, inter_H_sublattice3),
Interstitial(s_struc, inter_He_sublattice3),
)
)
# test non-symmorphic interstitial matching
        # (using a set generated from the Voronoi generator, with the same sublattice given
        # by the saturated_interstitial_structure function)
ns_struc = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "CuCl.cif"))
ns_inter_H_sublattice1_set1 = PeriodicSite("H", [0.06924513, 0.06308959, 0.86766528], ns_struc.lattice)
ns_inter_H_sublattice1_set2 = PeriodicSite("H", [0.43691041, 0.36766528, 0.06924513], ns_struc.lattice)
ns_inter_H_sublattice2 = PeriodicSite("H", [0.06022109, 0.60196031, 0.1621814], ns_struc.lattice)
ns_inter_He_sublattice2 = PeriodicSite("He", [0.06022109, 0.60196031, 0.1621814], ns_struc.lattice)
self.assertTrue(
pdc.are_equal( # trivial interstitial test
Interstitial(ns_struc, ns_inter_H_sublattice1_set1),
Interstitial(ns_struc, ns_inter_H_sublattice1_set1),
)
)
self.assertTrue(
pdc.are_equal( # same sublattice, different coords
Interstitial(ns_struc, ns_inter_H_sublattice1_set1),
Interstitial(ns_struc, ns_inter_H_sublattice1_set2),
)
)
self.assertFalse(
pdc.are_equal(
Interstitial(ns_struc, ns_inter_H_sublattice1_set1),
# different interstitials (wrong sublattice)
Interstitial(ns_struc, ns_inter_H_sublattice2),
)
)
self.assertFalse(
pdc.are_equal( # different interstitials (wrong specie)
Interstitial(ns_struc, ns_inter_H_sublattice2),
Interstitial(ns_struc, ns_inter_He_sublattice2),
)
)
# test influence of charge on defect matching (default is to be charge agnostic)
vac_diff_chg = identical_Cs_vacs[0].copy()
vac_diff_chg.set_charge(3.0)
self.assertTrue(pdc.are_equal(identical_Cs_vacs[0], vac_diff_chg))
chargecheck_pdc = PointDefectComparator(check_charge=True) # switch to PDC which cares about charge state
self.assertFalse(chargecheck_pdc.are_equal(identical_Cs_vacs[0], vac_diff_chg))
# test different supercell size
# (comparing same defect but different supercells - default is to not check for this)
sc_agnostic_pdc = PointDefectComparator(check_primitive_cell=True)
sc_scaled_s_struc = s_struc.copy()
sc_scaled_s_struc.make_supercell([2, 2, 3])
sc_scaled_I_vac_sublatt1_ps1 = PeriodicSite(
"I",
identical_I_vacs_sublattice1[0].site.coords,
sc_scaled_s_struc.lattice,
coords_are_cartesian=True,
)
sc_scaled_I_vac_sublatt1_ps2 = PeriodicSite(
"I",
identical_I_vacs_sublattice1[1].site.coords,
sc_scaled_s_struc.lattice,
coords_are_cartesian=True,
)
sc_scaled_I_vac_sublatt2_ps = PeriodicSite(
"I",
identical_I_vacs_sublattice2[1].site.coords,
sc_scaled_s_struc.lattice,
coords_are_cartesian=True,
)
sc_scaled_I_vac_sublatt1_defect1 = Vacancy(sc_scaled_s_struc, sc_scaled_I_vac_sublatt1_ps1)
sc_scaled_I_vac_sublatt1_defect2 = Vacancy(sc_scaled_s_struc, sc_scaled_I_vac_sublatt1_ps2)
sc_scaled_I_vac_sublatt2_defect = Vacancy(sc_scaled_s_struc, sc_scaled_I_vac_sublatt2_ps)
self.assertFalse(
pdc.are_equal(
identical_I_vacs_sublattice1[0],
# trivially same defect site but between different supercells
sc_scaled_I_vac_sublatt1_defect1,
)
)
self.assertTrue(sc_agnostic_pdc.are_equal(identical_I_vacs_sublattice1[0], sc_scaled_I_vac_sublatt1_defect1))
self.assertFalse(
pdc.are_equal(
identical_I_vacs_sublattice1[1],
# same coords, different lattice structure
sc_scaled_I_vac_sublatt1_defect1,
)
)
self.assertTrue(sc_agnostic_pdc.are_equal(identical_I_vacs_sublattice1[1], sc_scaled_I_vac_sublatt1_defect1))
self.assertFalse(
pdc.are_equal(
identical_I_vacs_sublattice1[0],
# same sublattice, different coords
sc_scaled_I_vac_sublatt1_defect2,
)
)
self.assertTrue(sc_agnostic_pdc.are_equal(identical_I_vacs_sublattice1[0], sc_scaled_I_vac_sublatt1_defect2))
self.assertFalse(
sc_agnostic_pdc.are_equal(
identical_I_vacs_sublattice1[0],
# different defects (wrong sublattice)
sc_scaled_I_vac_sublatt2_defect,
)
)
# test same structure size, but scaled lattice volume
# (default is to not allow these to be equal, but check_lattice_scale=True allows for this)
vol_agnostic_pdc = PointDefectComparator(check_lattice_scale=True)
vol_scaled_s_struc = s_struc.copy()
vol_scaled_s_struc.scale_lattice(s_struc.volume * 0.95)
vol_scaled_I_vac_sublatt1_defect1 = Vacancy(vol_scaled_s_struc, vol_scaled_s_struc[4])
vol_scaled_I_vac_sublatt1_defect2 = Vacancy(vol_scaled_s_struc, vol_scaled_s_struc[5])
vol_scaled_I_vac_sublatt2_defect = Vacancy(vol_scaled_s_struc, vol_scaled_s_struc[6])
self.assertFalse(
pdc.are_equal(
identical_I_vacs_sublattice1[0],
# trivially same defect (but vol change)
vol_scaled_I_vac_sublatt1_defect1,
)
)
self.assertTrue(vol_agnostic_pdc.are_equal(identical_I_vacs_sublattice1[0], vol_scaled_I_vac_sublatt1_defect1))
self.assertFalse(
pdc.are_equal(
identical_I_vacs_sublattice1[0],
# same defect, different sublattice point (and vol change)
vol_scaled_I_vac_sublatt1_defect2,
)
)
self.assertTrue(vol_agnostic_pdc.are_equal(identical_I_vacs_sublattice1[0], vol_scaled_I_vac_sublatt1_defect2))
self.assertFalse(
vol_agnostic_pdc.are_equal(
identical_I_vacs_sublattice1[0],
# different defect (wrong sublattice)
vol_scaled_I_vac_sublatt2_defect,
)
)
# test identical defect which has had entire lattice shifted
shift_s_struc = s_struc.copy()
shift_s_struc.translate_sites(range(len(s_struc)), [0.2, 0.3, 0.4], frac_coords=True, to_unit_cell=True)
shifted_identical_Cs_vacs = [
Vacancy(shift_s_struc, shift_s_struc[0]),
Vacancy(shift_s_struc, shift_s_struc[1]),
]
self.assertTrue(
pdc.are_equal(
identical_Cs_vacs[0],
# trivially same defect (but shifted)
shifted_identical_Cs_vacs[0],
)
)
self.assertTrue(
pdc.are_equal(
identical_Cs_vacs[0],
# same defect on different sublattice point (and shifted)
shifted_identical_Cs_vacs[1],
)
)
# test uniform lattice shift within non-symmorphic structure
shift_ns_struc = ns_struc.copy()
shift_ns_struc.translate_sites(range(len(ns_struc)), [0.0, 0.6, 0.3], frac_coords=True, to_unit_cell=True)
shift_ns_inter_H_sublattice1_set1 = PeriodicSite(
"H",
ns_inter_H_sublattice1_set1.frac_coords + [0.0, 0.6, 0.3],
shift_ns_struc.lattice,
)
shift_ns_inter_H_sublattice1_set2 = PeriodicSite(
"H",
ns_inter_H_sublattice1_set2.frac_coords + [0.0, 0.6, 0.3],
shift_ns_struc.lattice,
)
self.assertTrue(
pdc.are_equal(
Interstitial(ns_struc, ns_inter_H_sublattice1_set1),
# trivially same defect (but shifted)
Interstitial(shift_ns_struc, shift_ns_inter_H_sublattice1_set1),
)
)
self.assertTrue(
pdc.are_equal(
Interstitial(ns_struc, ns_inter_H_sublattice1_set1),
# same defect on different sublattice point (and shifted)
Interstitial(shift_ns_struc, shift_ns_inter_H_sublattice1_set2),
)
)
# test a rotational + supercell type structure transformation (requires check_primitive_cell=True)
rotated_s_struc = s_struc.copy()
rotated_s_struc.make_supercell([[2, 1, 0], [-1, 3, 0], [0, 0, 2]])
rotated_identical_Cs_vacs = [
Vacancy(rotated_s_struc, rotated_s_struc[0]),
Vacancy(rotated_s_struc, rotated_s_struc[1]),
]
self.assertFalse(
pdc.are_equal(
identical_Cs_vacs[0],
# trivially same defect (but rotated)
rotated_identical_Cs_vacs[0],
)
)
self.assertTrue(sc_agnostic_pdc.are_equal(identical_Cs_vacs[0], rotated_identical_Cs_vacs[0]))
self.assertFalse(
pdc.are_equal(
identical_Cs_vacs[0],
# same defect on different sublattice (and rotated)
rotated_identical_Cs_vacs[1],
)
)
self.assertTrue(
sc_agnostic_pdc.are_equal(
identical_Cs_vacs[0],
# same defect on different sublattice point (and rotated)
rotated_identical_Cs_vacs[1],
)
)
# test a rotational + supercell + shift type structure transformation for non-symmorphic structure
rotANDshift_ns_struc = ns_struc.copy()
rotANDshift_ns_struc.translate_sites(range(len(ns_struc)), [0.0, 0.6, 0.3], frac_coords=True, to_unit_cell=True)
rotANDshift_ns_struc.make_supercell([[2, 1, 0], [-1, 3, 0], [0, 0, 2]])
ns_vac_Cs_set1 = Vacancy(ns_struc, ns_struc[0])
rotANDshift_ns_vac_Cs_set1 = Vacancy(rotANDshift_ns_struc, rotANDshift_ns_struc[0])
rotANDshift_ns_vac_Cs_set2 = Vacancy(rotANDshift_ns_struc, rotANDshift_ns_struc[1])
self.assertTrue(
sc_agnostic_pdc.are_equal(
ns_vac_Cs_set1,
# trivially same defect (but rotated and sublattice shifted)
rotANDshift_ns_vac_Cs_set1,
)
)
self.assertTrue(
sc_agnostic_pdc.are_equal(
ns_vac_Cs_set1,
# same defect on different sublattice point (shifted and rotated)
rotANDshift_ns_vac_Cs_set2,
)
)
if __name__ == "__main__":
unittest.main()
| mit | -1,015,100,965,522,234,800 | 39.88075 | 120 | 0.553965 | false |
undoware/neutron-drive | google_appengine/google/appengine/api/files/gs.py | 3 | 11379 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Google Storage specific Files API calls."""
from __future__ import with_statement
__all__ = ['create']
import os
import re
from urllib import urlencode
from xml.dom import minidom
from google.appengine.api import app_identity
from google.appengine.api import urlfetch
from google.appengine.api.files import file as files
from google.appengine.api.files import file_service_pb
_GS_FILESYSTEM = files.GS_FILESYSTEM
_GS_PREFIX = '/' + _GS_FILESYSTEM + '/'
_MIME_TYPE_PARAMETER = 'content_type'
_CANNED_ACL_PARAMETER = 'acl'
_CONTENT_ENCODING_PARAMETER = 'content_encoding'
_CONTENT_DISPOSITION_PARAMETER = 'content_disposition'
_CACHE_CONTROL_PARAMETER = 'cache_control'
_USER_METADATA_PREFIX = 'x-goog-meta-'
_GS_RESTFUL_URL = 'commondatastorage.googleapis.com'
_GS_RESTFUL_SCOPE_READ_ONLY = (
'https://www.googleapis.com/auth/devstorage.read_only')
_GS_RESTFUL_API_VERSION = '2'
_GS_BUCKETPATH_REGEX = re.compile(r'/gs/[a-z0-9\.\-_]{3,}$')
_GS_FILEPATH_REGEX = re.compile(r'/gs/[a-z0-9\.\-_]{3,}')
def parseGlob(filename):
"""Parse a Gs filename or a filename pattern. Handle escape of '*' and '/'.
Args:
filename: a filename or filename pattern.
filename must be a valid gs filepath in the format of
'/gs/bucket/filename'. filename pattern has format '/gs/bucket/prefix*'.
filename pattern represents filenames with the given prefix in the bucket.
Please escape '*' and '\' with '\' if your filename contains them. We
recommend using Python raw string to simplify escape expressions.
Returns:
A (string, string) tuple if filename is a pattern. The first string is
the bucket name, second is the prefix or '' if prefix doesn't exist.
Properly escaped filename if filename is not a pattern.
example
'/gs/bucket1/file1' => '/gs/bucket1/file1'
'/gs/bucket2/*' => ('gs/bucket2', '') all files under bucket2
'/gs/bucket3/p*' => ('gs/bucket2', 'p') files under bucket3 with
a prefix 'p' in its name
r'/gs/bucket/file\*' => '/gs/bucket/file*'
r'/gs/bucket/file\\*' => ('/gs/bucket', r'file\') all files under bucket
with prefix r'file\'
r'/gs/bucket/file\\\*' => '/gs/bucket/file\*'
r'/gs/bucket/file\**' => ('/gs/bucket', 'file*') all files under bucket
with prefix 'file*'
Raises:
google.appengine.api.files.InvalidFileNameError if filename is illegal.
"""
if not filename:
raise files.InvalidFileNameError('filename is None.')
if not isinstance(filename, basestring):
raise files.InvalidFileNameError('filename %s should be of type string' %
filename)
match = _GS_FILEPATH_REGEX.match(filename)
if not match:
raise files.InvalidFileNameError(
        'filename %s should start with /gs/bucketname' % filename)
bucketname = match.group(0)
rest = filename[len(bucketname):]
if not rest or (len(rest) == 1 and rest[0] == '/'):
return bucketname, ''
if not rest.startswith('/'):
raise files.InvalidFileNameError(
'Expect / to separate bucketname and filename in %s' % filename)
i = 1
prefix = False
processed = ''
while i < len(rest):
char = rest[i]
if char == '\\':
if i + 1 == len(rest):
processed += char
else:
processed += rest[i + 1]
i += 1
elif char == '*':
if i + 1 != len(rest):
raise files.InvalidFileNameError('* as a wildcard is not the last.')
prefix = True
else:
processed += char
i += 1
if prefix:
return bucketname, processed
else:
return bucketname + '/' + processed
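def _example_parse_glob_usage():
  # Editor's illustrative sketch, not part of the original module: shows how a
  # caller can tell the two possible return shapes of parseGlob() apart. The
  # bucket and object names used here are hypothetical.
  exact = parseGlob(r'/gs/my_bucket/data\*.csv')   # escaped '*': plain filename '/gs/my_bucket/data*.csv'
  pattern = parseGlob('/gs/my_bucket/report*')     # trailing '*': ('/gs/my_bucket', 'report')
  if isinstance(pattern, tuple):
    bucketname, prefix = pattern
    return exact, bucketname, prefix
  return exact, pattern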
def listdir(path, kwargs=None):
"""Return a sorted list of filenames (matching a pattern) in the given path.
  Sorting (decreasing string order) is done automatically by Google Cloud Storage.
Args:
path: a Google Cloud Storage path of "/gs/bucketname" form.
kwargs: other keyword arguments to be relayed to Google Cloud Storage.
This can be used to select certain files with names matching a pattern.
Supported keywords:
marker: a string after which (exclusive) to start listing.
max_keys: the maximum number of filenames to return.
prefix: limits the returned filenames to those with this prefix. no regex.
See Google Cloud Storage documentation for more details and examples.
https://developers.google.com/storage/docs/reference-methods#getbucket
Returns:
a sorted list containing filenames (matching a pattern) from
the given path. The last filename can be used as a marker for another
request for more files.
"""
if not path:
raise files.InvalidFileNameError('Empty path')
elif not isinstance(path, basestring):
raise files.InvalidFileNameError('Expected string for path %s' % path)
elif not _GS_BUCKETPATH_REGEX.match(path):
raise files.InvalidFileNameError(
'Google storage path must have the form /gs/bucketname')
if kwargs and kwargs.has_key('max_keys'):
kwargs['max-keys'] = kwargs['max_keys']
kwargs.pop('max_keys')
if not os.environ.get('DATACENTER'):
return _listdir_local(path, kwargs)
bucketname = path[len(_GS_PREFIX):]
request_headers = {
'Authorization': 'OAuth %s' % app_identity.get_access_token(
_GS_RESTFUL_SCOPE_READ_ONLY)[0],
'x-goog-api-version': _GS_RESTFUL_API_VERSION
}
url = 'https://%s/%s' % (_GS_RESTFUL_URL, bucketname)
if kwargs:
url += '/?' + urlencode(kwargs)
response = urlfetch.fetch(url=url,
headers=request_headers,
deadline=60)
if response.status_code == 404:
raise files.InvalidFileNameError('Bucket %s does not exist.' % bucketname)
elif response.status_code == 401:
raise files.PermissionDeniedError('Permission denied to read bucket %s.' %
bucketname)
dom = minidom.parseString(response.content)
def __textValue(node):
return node.firstChild.nodeValue
error = dom.getElementsByTagName('Error')
if len(error) == 1:
details = error[0].getElementsByTagName('Details')
if len(details) == 1:
raise files.InvalidParameterError(__textValue(details[0]))
else:
code = __textValue(error[0].getElementsByTagName('Code')[0])
msg = __textValue(error[0].getElementsByTagName('Message')[0])
raise files.InvalidParameterError('%s: %s' % (code, msg))
return ['/'.join([path, __textValue(key)]) for key in
dom.getElementsByTagName('Key')]
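def _example_listdir_usage():
  # Editor's illustrative sketch, not part of the original module: listing the
  # objects of a hypothetical bucket, restricted to a prefix and capped at 100
  # names per call. Every returned entry has the '/gs/bucketname/key' form.
  names = listdir('/gs/my_bucket', {'prefix': 'logs/2012-', 'max_keys': 100})
  if names:
    # To continue the listing, pass the key of the last returned entry back as
    # the (exclusive) 'marker' of the next call.
    last_key = names[-1][len('/gs/my_bucket/'):]
    names += listdir('/gs/my_bucket', {'marker': last_key, 'max_keys': 100})
  return names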
def _listdir_local(path, kwargs):
"""Dev app server version of listdir.
See listdir for doc.
"""
request = file_service_pb.ListDirRequest()
response = file_service_pb.ListDirResponse()
request.set_path(path)
if kwargs and kwargs.has_key('marker'):
request.set_marker(kwargs['marker'])
if kwargs and kwargs.has_key('max-keys'):
request.set_max_keys(kwargs['max-keys'])
if kwargs and kwargs.has_key('prefix'):
request.set_prefix(kwargs['prefix'])
files._make_call('ListDir', request, response)
return response.filenames_list()
def create(filename,
mime_type='application/octet-stream',
acl=None,
cache_control=None,
content_encoding=None,
content_disposition=None,
user_metadata=None):
"""Create a writable googlestore file.
Args:
filename: Google Storage object name (/gs/bucket/object)
mime_type: Blob content MIME type as string.
acl: Canned acl to apply to the object as per:
http://code.google.com/apis/storage/docs/reference-headers.html#xgoogacl
If not specified (or set to None), default object acl is used.
cache_control: Cache control header to set when serving through Google
storage. If not specified, default of 3600 seconds is used.
content_encoding: If object is compressed, specify the compression method
here to set the header correctly when served through Google Storage.
content_disposition: Header to use when serving through Google Storage.
user_metadata: Dictionary specifying key value pairs to apply to the
object. Each key is prefixed with x-goog-meta- when served through
Google Storage.
Returns:
A writable file name for a Google Storage file. This file can be opened for
write by File API open function. To read the file call file::open with the
plain Google Storage filename (/gs/bucket/object).
"""
if not filename:
raise files.InvalidArgumentError('Empty filename')
elif not isinstance(filename, basestring):
raise files.InvalidArgumentError('Expected string for filename', filename)
elif not filename.startswith(_GS_PREFIX) or filename == _GS_PREFIX:
raise files.InvalidArgumentError(
'Google storage files must be of the form /gs/bucket/object', filename)
elif not mime_type:
raise files.InvalidArgumentError('Empty mime_type')
elif not isinstance(mime_type, basestring):
raise files.InvalidArgumentError('Expected string for mime_type', mime_type)
params = {_MIME_TYPE_PARAMETER: mime_type}
if acl:
if not isinstance(acl, basestring):
raise files.InvalidArgumentError('Expected string for acl', acl)
params[_CANNED_ACL_PARAMETER] = acl
if content_encoding:
if not isinstance(content_encoding, basestring):
raise files.InvalidArgumentError('Expected string for content_encoding')
else:
params[_CONTENT_ENCODING_PARAMETER] = content_encoding
if content_disposition:
if not isinstance(content_disposition, basestring):
raise files.InvalidArgumentError(
'Expected string for content_disposition')
else:
params[_CONTENT_DISPOSITION_PARAMETER] = content_disposition
if cache_control:
if not isinstance(cache_control, basestring):
raise files.InvalidArgumentError('Expected string for cache_control')
else:
params[_CACHE_CONTROL_PARAMETER] = cache_control
if user_metadata:
if not isinstance(user_metadata, dict):
raise files.InvalidArgumentError('Expected dict for user_metadata')
for key, value in user_metadata.items():
if not isinstance(key, basestring):
raise files.InvalidArgumentError(
'Expected string for key in user_metadata')
if not isinstance(value, basestring):
raise files.InvalidArgumentError(
'Expected string for value in user_metadata for key: ', key)
params[_USER_METADATA_PREFIX + key] = value
return files._create(_GS_FILESYSTEM, filename=filename, params=params)
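def _example_create_usage():
  # Editor's illustrative sketch, not part of the original module: the usual
  # write-then-finalize flow for a Google Storage object created through this
  # wrapper; 'files' is the Files API module imported above. The bucket and
  # object names are hypothetical.
  writable = create('/gs/my_bucket/report.txt',
                    mime_type='text/plain',
                    acl='public-read',
                    user_metadata={'source': 'example'})
  with files.open(writable, 'a') as f:
    f.write('hello from the files API\n')
  # Finalizing makes the object readable under its plain /gs/ name.
  files.finalize(writable)
  with files.open('/gs/my_bucket/report.txt', 'r') as f:
    return f.read(1024)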
def default_bucket_name():
"""Obtain the default Google Storage bucket name for this application.
Returns:
A string that is the name of the default bucket.
"""
return files._default_gs_bucket_name()
| bsd-3-clause | -7,534,924,071,659,558,000 | 33.068862 | 80 | 0.682222 | false |
sukiand/idapython | examples/ex_cli.py | 16 | 3448 | # -----------------------------------------------------------------------
# This is an example illustrating how to implement a CLI
# (c) Hex-Rays
#
# idaapi is referenced below via idaapi.notify_when(), so import the module itself as well
import idaapi
from idaapi import NW_OPENIDB, NW_CLOSEIDB, NW_TERMIDA, NW_REMOVE, COLSTR, cli_t
#<pycode(ex_cli_ex1)>
class mycli_t(cli_t):
flags = 0
sname = "pycli"
lname = "Python CLI"
hint = "pycli hint"
def OnExecuteLine(self, line):
"""
The user pressed Enter. The CLI is free to execute the line immediately or ask for more lines.
This callback is mandatory.
@param line: typed line(s)
@return Boolean: True-executed line, False-ask for more lines
"""
print "OnExecute:", line
return True
def OnKeydown(self, line, x, sellen, vkey, shift):
"""
A keyboard key has been pressed
This is a generic callback and the CLI is free to do whatever it wants.
This callback is optional.
@param line: current input line
@param x: current x coordinate of the cursor
@param sellen: current selection length (usually 0)
@param vkey: virtual key code. if the key has been handled, it should be returned as zero
@param shift: shift state
@return:
None - Nothing was changed
        tuple(line, x, sellen, vkey): if any of the input line, the x coordinate or the selection length has been modified.
It is possible to return a tuple with None elements to preserve old values. Example: tuple(new_line, None, None, None) or tuple(new_line)
"""
print "Onkeydown: line=%s x=%d sellen=%d vkey=%d shift=%d" % (line, x, sellen, vkey, shift)
return None
def OnCompleteLine(self, prefix, n, line, prefix_start):
"""
The user pressed Tab. Find a completion number N for prefix PREFIX
This callback is optional.
@param prefix: Line prefix at prefix_start (string)
@param n: completion number (int)
@param line: the current line (string)
@param prefix_start: the index where PREFIX starts in LINE (int)
@return: None if no completion could be generated otherwise a String with the completion suggestion
"""
print "OnCompleteLine: prefix=%s n=%d line=%s prefix_start=%d" % (prefix, n, line, prefix_start)
return None
#</pycode(ex_cli_ex1)>
# -----------------------------------------------------------------------
def nw_handler(code, old=0):
if code == NW_OPENIDB:
print "nw_handler(): installing CLI"
mycli.register()
elif code == NW_CLOSEIDB:
print "nw_handler(): removing CLI"
mycli.unregister()
elif code == NW_TERMIDA:
print "nw_handler(): uninstalled nw handler"
idaapi.notify_when(NW_TERMIDA | NW_OPENIDB | NW_CLOSEIDB | NW_REMOVE, nw_handler)
# -----------------------------------------------------------------------
# Already installed?
try:
mycli
# remove previous CLI
mycli.unregister()
del mycli
# remove previous handler
nw_handler(NW_TERMIDA)
except:
pass
finally:
mycli = mycli_t()
# register CLI
if mycli.register():
print "CLI installed"
# install new handler
idaapi.notify_when(NW_TERMIDA | NW_OPENIDB | NW_CLOSEIDB, nw_handler)
else:
del mycli
print "Failed to install CLI"
| bsd-3-clause | 1,608,731,421,461,309,200 | 32.48 | 149 | 0.577436 | false |
disruptek/boto | boto/sdb/db/__init__.py | 189 | 1108 | # Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
| mit | 2,286,955,849,276,692,200 | 54.4 | 74 | 0.771661 | false |
papouso/odoo | addons/hr_payroll_account/hr_payroll_account.py | 240 | 10840 | #-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# d$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import date, datetime, timedelta
from openerp.osv import fields, osv
from openerp.tools import float_compare, float_is_zero
from openerp.tools.translate import _
class hr_payslip(osv.osv):
'''
Pay Slip
'''
_inherit = 'hr.payslip'
_description = 'Pay Slip'
_columns = {
'period_id': fields.many2one('account.period', 'Force Period',states={'draft': [('readonly', False)]}, readonly=True, domain=[('state','<>','done')], help="Keep empty to use the period of the validation(Payslip) date."),
'journal_id': fields.many2one('account.journal', 'Salary Journal',states={'draft': [('readonly', False)]}, readonly=True, required=True),
'move_id': fields.many2one('account.move', 'Accounting Entry', readonly=True, copy=False),
}
def _get_default_journal(self, cr, uid, context=None):
model_data = self.pool.get('ir.model.data')
res = model_data.search(cr, uid, [('name', '=', 'expenses_journal')])
if res:
return model_data.browse(cr, uid, res[0]).res_id
return False
_defaults = {
'journal_id': _get_default_journal,
}
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
if 'journal_id' in context:
vals.update({'journal_id': context.get('journal_id')})
return super(hr_payslip, self).create(cr, uid, vals, context=context)
def onchange_contract_id(self, cr, uid, ids, date_from, date_to, employee_id=False, contract_id=False, context=None):
contract_obj = self.pool.get('hr.contract')
res = super(hr_payslip, self).onchange_contract_id(cr, uid, ids, date_from=date_from, date_to=date_to, employee_id=employee_id, contract_id=contract_id, context=context)
journal_id = contract_id and contract_obj.browse(cr, uid, contract_id, context=context).journal_id.id or False
res['value'].update({'journal_id': journal_id})
return res
def cancel_sheet(self, cr, uid, ids, context=None):
move_pool = self.pool.get('account.move')
move_ids = []
move_to_cancel = []
for slip in self.browse(cr, uid, ids, context=context):
if slip.move_id:
move_ids.append(slip.move_id.id)
if slip.move_id.state == 'posted':
move_to_cancel.append(slip.move_id.id)
move_pool.button_cancel(cr, uid, move_to_cancel, context=context)
move_pool.unlink(cr, uid, move_ids, context=context)
return super(hr_payslip, self).cancel_sheet(cr, uid, ids, context=context)
def process_sheet(self, cr, uid, ids, context=None):
move_pool = self.pool.get('account.move')
period_pool = self.pool.get('account.period')
precision = self.pool.get('decimal.precision').precision_get(cr, uid, 'Payroll')
timenow = time.strftime('%Y-%m-%d')
for slip in self.browse(cr, uid, ids, context=context):
line_ids = []
debit_sum = 0.0
credit_sum = 0.0
if not slip.period_id:
search_periods = period_pool.find(cr, uid, slip.date_to, context=context)
period_id = search_periods[0]
else:
period_id = slip.period_id.id
default_partner_id = slip.employee_id.address_home_id.id
name = _('Payslip of %s') % (slip.employee_id.name)
move = {
'narration': name,
'date': timenow,
'ref': slip.number,
'journal_id': slip.journal_id.id,
'period_id': period_id,
}
for line in slip.details_by_salary_rule_category:
amt = slip.credit_note and -line.total or line.total
if float_is_zero(amt, precision_digits=precision):
continue
partner_id = line.salary_rule_id.register_id.partner_id and line.salary_rule_id.register_id.partner_id.id or default_partner_id
debit_account_id = line.salary_rule_id.account_debit.id
credit_account_id = line.salary_rule_id.account_credit.id
if debit_account_id:
debit_line = (0, 0, {
'name': line.name,
'date': timenow,
'partner_id': (line.salary_rule_id.register_id.partner_id or line.salary_rule_id.account_debit.type in ('receivable', 'payable')) and partner_id or False,
'account_id': debit_account_id,
'journal_id': slip.journal_id.id,
'period_id': period_id,
'debit': amt > 0.0 and amt or 0.0,
'credit': amt < 0.0 and -amt or 0.0,
'analytic_account_id': line.salary_rule_id.analytic_account_id and line.salary_rule_id.analytic_account_id.id or False,
'tax_code_id': line.salary_rule_id.account_tax_id and line.salary_rule_id.account_tax_id.id or False,
'tax_amount': line.salary_rule_id.account_tax_id and amt or 0.0,
})
line_ids.append(debit_line)
debit_sum += debit_line[2]['debit'] - debit_line[2]['credit']
if credit_account_id:
credit_line = (0, 0, {
'name': line.name,
'date': timenow,
'partner_id': (line.salary_rule_id.register_id.partner_id or line.salary_rule_id.account_credit.type in ('receivable', 'payable')) and partner_id or False,
'account_id': credit_account_id,
'journal_id': slip.journal_id.id,
'period_id': period_id,
'debit': amt < 0.0 and -amt or 0.0,
'credit': amt > 0.0 and amt or 0.0,
'analytic_account_id': line.salary_rule_id.analytic_account_id and line.salary_rule_id.analytic_account_id.id or False,
'tax_code_id': line.salary_rule_id.account_tax_id and line.salary_rule_id.account_tax_id.id or False,
'tax_amount': line.salary_rule_id.account_tax_id and amt or 0.0,
})
line_ids.append(credit_line)
credit_sum += credit_line[2]['credit'] - credit_line[2]['debit']
if float_compare(credit_sum, debit_sum, precision_digits=precision) == -1:
acc_id = slip.journal_id.default_credit_account_id.id
if not acc_id:
raise osv.except_osv(_('Configuration Error!'),_('The Expense Journal "%s" has not properly configured the Credit Account!')%(slip.journal_id.name))
adjust_credit = (0, 0, {
'name': _('Adjustment Entry'),
'date': timenow,
'partner_id': False,
'account_id': acc_id,
'journal_id': slip.journal_id.id,
'period_id': period_id,
'debit': 0.0,
'credit': debit_sum - credit_sum,
})
line_ids.append(adjust_credit)
elif float_compare(debit_sum, credit_sum, precision_digits=precision) == -1:
acc_id = slip.journal_id.default_debit_account_id.id
if not acc_id:
raise osv.except_osv(_('Configuration Error!'),_('The Expense Journal "%s" has not properly configured the Debit Account!')%(slip.journal_id.name))
adjust_debit = (0, 0, {
'name': _('Adjustment Entry'),
'date': timenow,
'partner_id': False,
'account_id': acc_id,
'journal_id': slip.journal_id.id,
'period_id': period_id,
'debit': credit_sum - debit_sum,
'credit': 0.0,
})
line_ids.append(adjust_debit)
move.update({'line_id': line_ids})
move_id = move_pool.create(cr, uid, move, context=context)
self.write(cr, uid, [slip.id], {'move_id': move_id, 'period_id' : period_id}, context=context)
if slip.journal_id.entry_posted:
move_pool.post(cr, uid, [move_id], context=context)
return super(hr_payslip, self).process_sheet(cr, uid, [slip.id], context=context)
class hr_salary_rule(osv.osv):
_inherit = 'hr.salary.rule'
_columns = {
'analytic_account_id':fields.many2one('account.analytic.account', 'Analytic Account'),
'account_tax_id':fields.many2one('account.tax.code', 'Tax Code'),
'account_debit': fields.many2one('account.account', 'Debit Account'),
'account_credit': fields.many2one('account.account', 'Credit Account'),
}
class hr_contract(osv.osv):
_inherit = 'hr.contract'
_description = 'Employee Contract'
_columns = {
'analytic_account_id':fields.many2one('account.analytic.account', 'Analytic Account'),
'journal_id': fields.many2one('account.journal', 'Salary Journal'),
}
class hr_payslip_run(osv.osv):
_inherit = 'hr.payslip.run'
_description = 'Payslip Run'
_columns = {
'journal_id': fields.many2one('account.journal', 'Salary Journal', states={'draft': [('readonly', False)]}, readonly=True, required=True),
}
def _get_default_journal(self, cr, uid, context=None):
model_data = self.pool.get('ir.model.data')
res = model_data.search(cr, uid, [('name', '=', 'expenses_journal')])
if res:
return model_data.browse(cr, uid, res[0]).res_id
return False
_defaults = {
'journal_id': _get_default_journal,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 4,134,513,567,426,007,600 | 46.753304 | 228 | 0.563192 | false |
cristianquaglio/odoo | addons/hr_payroll_account/hr_payroll_account.py | 52 | 10905 | #-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# d$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import date, datetime, timedelta
from openerp.osv import fields, osv
from openerp.tools import float_compare, float_is_zero
from openerp.tools.translate import _
class hr_payslip(osv.osv):
'''
Pay Slip
'''
_inherit = 'hr.payslip'
_description = 'Pay Slip'
_columns = {
'period_id': fields.many2one('account.period', 'Force Period',states={'draft': [('readonly', False)]}, readonly=True, domain=[('state','<>','done')], help="Keep empty to use the period of the validation(Payslip) date."),
'journal_id': fields.many2one('account.journal', 'Salary Journal',states={'draft': [('readonly', False)]}, readonly=True, required=True),
'move_id': fields.many2one('account.move', 'Accounting Entry', readonly=True, copy=False),
}
def _get_default_journal(self, cr, uid, context=None):
model_data = self.pool.get('ir.model.data')
res = model_data.search(cr, uid, [('name', '=', 'expenses_journal')])
if res:
return model_data.browse(cr, uid, res[0]).res_id
return False
_defaults = {
'journal_id': _get_default_journal,
}
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
if 'journal_id' in context:
vals.update({'journal_id': context.get('journal_id')})
return super(hr_payslip, self).create(cr, uid, vals, context=context)
def onchange_contract_id(self, cr, uid, ids, date_from, date_to, employee_id=False, contract_id=False, context=None):
contract_obj = self.pool.get('hr.contract')
res = super(hr_payslip, self).onchange_contract_id(cr, uid, ids, date_from=date_from, date_to=date_to, employee_id=employee_id, contract_id=contract_id, context=context)
journal_id = contract_id and contract_obj.browse(cr, uid, contract_id, context=context).journal_id.id or (not contract_id and self._get_default_journal(cr, uid, context=None))
res['value'].update({'journal_id': journal_id})
return res
def cancel_sheet(self, cr, uid, ids, context=None):
move_pool = self.pool.get('account.move')
move_ids = []
move_to_cancel = []
for slip in self.browse(cr, uid, ids, context=context):
if slip.move_id:
move_ids.append(slip.move_id.id)
if slip.move_id.state == 'posted':
move_to_cancel.append(slip.move_id.id)
move_pool.button_cancel(cr, uid, move_to_cancel, context=context)
move_pool.unlink(cr, uid, move_ids, context=context)
return super(hr_payslip, self).cancel_sheet(cr, uid, ids, context=context)
def process_sheet(self, cr, uid, ids, context=None):
move_pool = self.pool.get('account.move')
period_pool = self.pool.get('account.period')
precision = self.pool.get('decimal.precision').precision_get(cr, uid, 'Payroll')
timenow = time.strftime('%Y-%m-%d')
for slip in self.browse(cr, uid, ids, context=context):
line_ids = []
debit_sum = 0.0
credit_sum = 0.0
if not slip.period_id:
search_periods = period_pool.find(cr, uid, slip.date_to, context=context)
period_id = search_periods[0]
else:
period_id = slip.period_id.id
default_partner_id = slip.employee_id.address_home_id.id
name = _('Payslip of %s') % (slip.employee_id.name)
move = {
'narration': name,
'date': timenow,
'ref': slip.number,
'journal_id': slip.journal_id.id,
'period_id': period_id,
}
for line in slip.details_by_salary_rule_category:
amt = slip.credit_note and -line.total or line.total
if float_is_zero(amt, precision_digits=precision):
continue
partner_id = line.salary_rule_id.register_id.partner_id and line.salary_rule_id.register_id.partner_id.id or default_partner_id
debit_account_id = line.salary_rule_id.account_debit.id
credit_account_id = line.salary_rule_id.account_credit.id
if debit_account_id:
debit_line = (0, 0, {
'name': line.name,
'date': timenow,
'partner_id': (line.salary_rule_id.register_id.partner_id or line.salary_rule_id.account_debit.type in ('receivable', 'payable')) and partner_id or False,
'account_id': debit_account_id,
'journal_id': slip.journal_id.id,
'period_id': period_id,
'debit': amt > 0.0 and amt or 0.0,
'credit': amt < 0.0 and -amt or 0.0,
'analytic_account_id': line.salary_rule_id.analytic_account_id and line.salary_rule_id.analytic_account_id.id or False,
'tax_code_id': line.salary_rule_id.account_tax_id and line.salary_rule_id.account_tax_id.id or False,
'tax_amount': line.salary_rule_id.account_tax_id and amt or 0.0,
})
line_ids.append(debit_line)
debit_sum += debit_line[2]['debit'] - debit_line[2]['credit']
if credit_account_id:
credit_line = (0, 0, {
'name': line.name,
'date': timenow,
'partner_id': (line.salary_rule_id.register_id.partner_id or line.salary_rule_id.account_credit.type in ('receivable', 'payable')) and partner_id or False,
'account_id': credit_account_id,
'journal_id': slip.journal_id.id,
'period_id': period_id,
'debit': amt < 0.0 and -amt or 0.0,
'credit': amt > 0.0 and amt or 0.0,
'analytic_account_id': line.salary_rule_id.analytic_account_id and line.salary_rule_id.analytic_account_id.id or False,
'tax_code_id': line.salary_rule_id.account_tax_id and line.salary_rule_id.account_tax_id.id or False,
'tax_amount': line.salary_rule_id.account_tax_id and amt or 0.0,
})
line_ids.append(credit_line)
credit_sum += credit_line[2]['credit'] - credit_line[2]['debit']
if float_compare(credit_sum, debit_sum, precision_digits=precision) == -1:
acc_id = slip.journal_id.default_credit_account_id.id
if not acc_id:
raise osv.except_osv(_('Configuration Error!'),_('The Expense Journal "%s" has not properly configured the Credit Account!')%(slip.journal_id.name))
adjust_credit = (0, 0, {
'name': _('Adjustment Entry'),
'date': timenow,
'partner_id': False,
'account_id': acc_id,
'journal_id': slip.journal_id.id,
'period_id': period_id,
'debit': 0.0,
'credit': debit_sum - credit_sum,
})
line_ids.append(adjust_credit)
elif float_compare(debit_sum, credit_sum, precision_digits=precision) == -1:
acc_id = slip.journal_id.default_debit_account_id.id
if not acc_id:
raise osv.except_osv(_('Configuration Error!'),_('The Expense Journal "%s" has not properly configured the Debit Account!')%(slip.journal_id.name))
adjust_debit = (0, 0, {
'name': _('Adjustment Entry'),
'date': timenow,
'partner_id': False,
'account_id': acc_id,
'journal_id': slip.journal_id.id,
'period_id': period_id,
'debit': credit_sum - debit_sum,
'credit': 0.0,
})
line_ids.append(adjust_debit)
move.update({'line_id': line_ids})
move_id = move_pool.create(cr, uid, move, context=context)
self.write(cr, uid, [slip.id], {'move_id': move_id, 'period_id' : period_id}, context=context)
if slip.journal_id.entry_posted:
move_pool.post(cr, uid, [move_id], context=context)
return super(hr_payslip, self).process_sheet(cr, uid, [slip.id], context=context)
class hr_salary_rule(osv.osv):
_inherit = 'hr.salary.rule'
_columns = {
'analytic_account_id':fields.many2one('account.analytic.account', 'Analytic Account'),
'account_tax_id':fields.many2one('account.tax.code', 'Tax Code'),
'account_debit': fields.many2one('account.account', 'Debit Account'),
'account_credit': fields.many2one('account.account', 'Credit Account'),
}
class hr_contract(osv.osv):
_inherit = 'hr.contract'
_description = 'Employee Contract'
_columns = {
'analytic_account_id':fields.many2one('account.analytic.account', 'Analytic Account'),
'journal_id': fields.many2one('account.journal', 'Salary Journal'),
}
class hr_payslip_run(osv.osv):
_inherit = 'hr.payslip.run'
_description = 'Payslip Run'
_columns = {
'journal_id': fields.many2one('account.journal', 'Salary Journal', states={'draft': [('readonly', False)]}, readonly=True, required=True),
}
def _get_default_journal(self, cr, uid, context=None):
model_data = self.pool.get('ir.model.data')
res = model_data.search(cr, uid, [('name', '=', 'expenses_journal')])
if res:
return model_data.browse(cr, uid, res[0]).res_id
return False
_defaults = {
'journal_id': _get_default_journal,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| apache-2.0 | 909,915,434,385,326,000 | 47.039648 | 228 | 0.564237 | false |