repo_name (string, 5–100 chars) | ref (string, 12–67 chars) | path (string, 4–244 chars) | copies (string, 1–8 chars) | content (string, 0–1.05M chars, ⌀ = null)
---|---|---|---|---|
joaduo/mepinta | refs/heads/master | core/python_core/mepinta/pipeline/lo/pipeline_evaluator/__init__.py | 12133432 | |
mc-coal/mc-coal | refs/heads/master | lib/cloudstorage/rest_api.py | 45 | # Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Base and helper classes for Google RESTful APIs."""
__all__ = ['add_sync_methods']
import random
import time
from . import api_utils
try:
from google.appengine.api import app_identity
from google.appengine.ext import ndb
except ImportError:
from google.appengine.api import app_identity
from google.appengine.ext import ndb
def _make_sync_method(name):
"""Helper to synthesize a synchronous method from an async method name.
Used by the @add_sync_methods class decorator below.
Args:
name: The name of the asynchronous method to wrap (ends in '_async').
Returns:
A method (with first argument 'self') that retrieves and calls
self.<name>, passing its own arguments, expects it to return a
Future, and then waits for and returns that Future's result.
"""
def sync_wrapper(self, *args, **kwds):
method = getattr(self, name)
future = method(*args, **kwds)
return future.get_result()
return sync_wrapper
def add_sync_methods(cls):
"""Class decorator to add synchronous methods corresponding to async methods.
This modifies the class in place, adding additional methods to it.
If a synchronous method of a given name already exists it is not
replaced.
Args:
cls: A class.
Returns:
The same class, modified in place.
"""
for name in cls.__dict__.keys():
if name.endswith('_async'):
sync_name = name[:-6]
if not hasattr(cls, sync_name):
setattr(cls, sync_name, _make_sync_method(name))
return cls
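def _demo_add_sync_methods():  # hypothetical helper, added for exposition only
    # Sketch of the decorator above: it only relies on *_async methods
    # returning an object with a get_result() method. The _Fake* names
    # below are hypothetical.
    class _FakeFuture(object):
        def __init__(self, value):
            self._value = value

        def get_result(self):
            return self._value

    @add_sync_methods
    class _FakeApi(object):
        def fetch_async(self):
            return _FakeFuture('payload')

    # fetch() is synthesized from fetch_async and blocks on the Future.
    assert _FakeApi().fetch() == 'payload'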
class _AE_TokenStorage_(ndb.Model):
"""Entity to store app_identity tokens in memcache."""
token = ndb.StringProperty()
expires = ndb.FloatProperty()
@ndb.tasklet
def _make_token_async(scopes, service_account_id):
"""Get a fresh authentication token.
Args:
scopes: A list of scopes.
service_account_id: Internal-use only.
Raises:
An ndb.Return with a tuple (token, expiration_time) where expiration_time is
seconds since the epoch.
"""
rpc = app_identity.create_rpc()
app_identity.make_get_access_token_call(rpc, scopes, service_account_id)
token, expires_at = yield rpc
raise ndb.Return((token, expires_at))
class _RestApi(object):
"""Base class for REST-based API wrapper classes.
This class manages authentication tokens and request retries. All
APIs are available as synchronous and async methods; synchronous
methods are synthesized from async ones by the add_sync_methods()
function in this module.
WARNING: Do NOT use this API directly. It is an implementation detail
and is subject to change in any release.
"""
def __init__(self, scopes, service_account_id=None, token_maker=None,
retry_params=None):
"""Constructor.
Args:
scopes: A scope or a list of scopes.
service_account_id: Internal use only.
token_maker: An asynchronous function of the form
(scopes, service_account_id) -> (token, expires).
retry_params: An instance of api_utils.RetryParams. If None, the
default for current thread will be used.
"""
if isinstance(scopes, basestring):
scopes = [scopes]
self.scopes = scopes
self.service_account_id = service_account_id
self.make_token_async = token_maker or _make_token_async
if not retry_params:
retry_params = api_utils._get_default_retry_params()
self.retry_params = retry_params
self.user_agent = {'User-Agent': retry_params._user_agent}
self.expiration_headroom = random.randint(60, 240)
def __getstate__(self):
"""Store state as part of serialization/pickling."""
return {'scopes': self.scopes,
'id': self.service_account_id,
'a_maker': (None if self.make_token_async == _make_token_async
else self.make_token_async),
'retry_params': self.retry_params,
'expiration_headroom': self.expiration_headroom}
def __setstate__(self, state):
"""Restore state as part of deserialization/unpickling."""
self.__init__(state['scopes'],
service_account_id=state['id'],
token_maker=state['a_maker'],
retry_params=state['retry_params'])
self.expiration_headroom = state['expiration_headroom']
@ndb.tasklet
def do_request_async(self, url, method='GET', headers=None, payload=None,
deadline=None, callback=None):
"""Issue one HTTP request.
It performs async retries using tasklets.
Args:
url: the url to fetch.
method: the method in which to fetch.
headers: the http headers.
payload: the data to submit in the fetch.
deadline: the deadline in which to make the call.
callback: the call to make once completed.
Yields:
The async fetch of the url.
"""
retry_wrapper = api_utils._RetryWrapper(
self.retry_params,
retriable_exceptions=api_utils._RETRIABLE_EXCEPTIONS,
should_retry=api_utils._should_retry)
resp = yield retry_wrapper.run(
self.urlfetch_async,
url=url,
method=method,
headers=headers,
payload=payload,
deadline=deadline,
callback=callback,
follow_redirects=False)
raise ndb.Return((resp.status_code, resp.headers, resp.content))
@ndb.tasklet
def get_token_async(self, refresh=False):
"""Get an authentication token.
The token is cached in memcache, keyed by the service account id and scopes.
Uses a random token expiration headroom value generated in the constructor
to eliminate a burst of GET_ACCESS_TOKEN API requests.
Args:
refresh: If True, ignore a cached token; default False.
Yields:
An authentication token. This token is guaranteed to be non-expired.
"""
key = '%s,%s' % (self.service_account_id, ','.join(self.scopes))
ts = yield _AE_TokenStorage_.get_by_id_async(
key, use_cache=True, use_memcache=True,
use_datastore=self.retry_params.save_access_token)
if refresh or ts is None or ts.expires < (
time.time() + self.expiration_headroom):
token, expires_at = yield self.make_token_async(
self.scopes, self.service_account_id)
timeout = int(expires_at - time.time())
ts = _AE_TokenStorage_(id=key, token=token, expires=expires_at)
if timeout > 0:
yield ts.put_async(memcache_timeout=timeout,
use_datastore=self.retry_params.save_access_token,
use_cache=True, use_memcache=True)
raise ndb.Return(ts.token)
@ndb.tasklet
def urlfetch_async(self, url, method='GET', headers=None,
payload=None, deadline=None, callback=None,
follow_redirects=False):
"""Make an async urlfetch() call.
This is an async wrapper around urlfetch(). It adds an authentication
header.
Args:
url: the url to fetch.
method: the method in which to fetch.
headers: the http headers.
payload: the data to submit in the fetch.
deadline: the deadline in which to make the call.
callback: the call to make once completed.
follow_redirects: whether or not to follow redirects.
Yields:
The urlfetch response object, delivered through the Future that this
ndb tasklet returns.
"""
headers = {} if headers is None else dict(headers)
headers.update(self.user_agent)
self.token = yield self.get_token_async()
if self.token:
headers['authorization'] = 'OAuth ' + self.token
deadline = deadline or self.retry_params.urlfetch_timeout
ctx = ndb.get_context()
resp = yield ctx.urlfetch(
url, payload=payload, method=method,
headers=headers, follow_redirects=follow_redirects,
deadline=deadline, callback=callback)
raise ndb.Return(resp)
_RestApi = add_sync_methods(_RestApi)
|
mwcz/pelican-plugins | refs/heads/master | thumbnailer/__init__.py | 81 | from .thumbnailer import * |
gauravbose/digital-menu | refs/heads/master | digimenu2/django/http/multipartparser.py | 105 | """
Multi-part parsing for file uploads.
Exposes one class, ``MultiPartParser``, which feeds chunks of uploaded data to
file upload handlers for processing.
"""
from __future__ import unicode_literals
import base64
import binascii
import cgi
import sys
from django.conf import settings
from django.core.exceptions import SuspiciousMultipartForm
from django.core.files.uploadhandler import (
SkipFile, StopFutureHandlers, StopUpload,
)
from django.utils import six
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_text
from django.utils.six.moves.urllib.parse import unquote
from django.utils.text import unescape_entities
__all__ = ('MultiPartParser', 'MultiPartParserError', 'InputStreamExhausted')
class MultiPartParserError(Exception):
pass
class InputStreamExhausted(Exception):
"""
No more reads are allowed from this device.
"""
pass
RAW = "raw"
FILE = "file"
FIELD = "field"
_BASE64_DECODE_ERROR = TypeError if six.PY2 else binascii.Error
class MultiPartParser(object):
"""
An RFC 2388 multipart/form-data parser.
``MultiPartParser.parse()`` reads the input stream in ``chunk_size`` chunks
and returns a tuple of ``(MultiValueDict(POST), MultiValueDict(FILES))``.
"""
def __init__(self, META, input_data, upload_handlers, encoding=None):
"""
Initialize the MultiPartParser object.
:META:
The standard ``META`` dictionary in Django request objects.
:input_data:
The raw post data, as a file-like object.
:upload_handlers:
A list of UploadHandler instances that perform operations on the uploaded
data.
:encoding:
The encoding with which to treat the incoming data.
"""
#
# Content-Type should contain multipart and the boundary information.
#
content_type = META.get('HTTP_CONTENT_TYPE', META.get('CONTENT_TYPE', ''))
if not content_type.startswith('multipart/'):
raise MultiPartParserError('Invalid Content-Type: %s' % content_type)
# Parse the header to get the boundary to split the parts.
ctypes, opts = parse_header(content_type.encode('ascii'))
boundary = opts.get('boundary')
if not boundary or not cgi.valid_boundary(boundary):
raise MultiPartParserError('Invalid boundary in multipart: %s' % boundary)
# Content-Length should contain the length of the body we are about
# to receive.
try:
content_length = int(META.get('HTTP_CONTENT_LENGTH', META.get('CONTENT_LENGTH', 0)))
except (ValueError, TypeError):
content_length = 0
if content_length < 0:
# This means we shouldn't continue...raise an error.
raise MultiPartParserError("Invalid content length: %r" % content_length)
if isinstance(boundary, six.text_type):
boundary = boundary.encode('ascii')
self._boundary = boundary
self._input_data = input_data
# For compatibility with low-level network APIs (with 32-bit integers),
# the chunk size should be < 2^31, but still divisible by 4.
possible_sizes = [x.chunk_size for x in upload_handlers if x.chunk_size]
self._chunk_size = min([2 ** 31 - 4] + possible_sizes)
self._meta = META
self._encoding = encoding or settings.DEFAULT_CHARSET
self._content_length = content_length
self._upload_handlers = upload_handlers
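# Worked example (added for exposition): with a single handler whose
# chunk_size is 64 KiB, self._chunk_size = min([2**31 - 4, 64 * 1024])
# == 65536 -- below 2**31 and divisible by 4, so base64-encoded parts
# can be decoded chunk by chunk without padding errors.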
def parse(self):
"""
Parse the POST data and break it into a FILES MultiValueDict and a POST
MultiValueDict.
Returns a tuple containing the POST and FILES dictionary, respectively.
"""
# We have to import QueryDict down here to avoid a circular import.
from django.http import QueryDict
encoding = self._encoding
handlers = self._upload_handlers
# HTTP spec says that Content-Length >= 0 is valid
# handling content-length == 0 before continuing
if self._content_length == 0:
return QueryDict('', encoding=self._encoding), MultiValueDict()
# See if any of the handlers take care of the parsing.
# This allows overriding everything if need be.
for handler in handlers:
result = handler.handle_raw_input(self._input_data,
self._meta,
self._content_length,
self._boundary,
encoding)
# Check to see if it was handled
if result is not None:
return result[0], result[1]
# Create the data structures to be used later.
self._post = QueryDict('', mutable=True)
self._files = MultiValueDict()
# Instantiate the parser and stream:
stream = LazyStream(ChunkIter(self._input_data, self._chunk_size))
# Whether or not to signal a file-completion at the beginning of the loop.
old_field_name = None
counters = [0] * len(handlers)
try:
for item_type, meta_data, field_stream in Parser(stream, self._boundary):
if old_field_name:
# We run this at the beginning of the next loop
# since we cannot be sure a file is complete until
# we hit the next boundary/part of the multipart content.
self.handle_file_complete(old_field_name, counters)
old_field_name = None
try:
disposition = meta_data['content-disposition'][1]
field_name = disposition['name'].strip()
except (KeyError, IndexError, AttributeError):
continue
transfer_encoding = meta_data.get('content-transfer-encoding')
if transfer_encoding is not None:
transfer_encoding = transfer_encoding[0].strip()
field_name = force_text(field_name, encoding, errors='replace')
if item_type == FIELD:
# This is a post field, we can just set it in the post
if transfer_encoding == 'base64':
raw_data = field_stream.read()
try:
data = base64.b64decode(raw_data)
except _BASE64_DECODE_ERROR:
data = raw_data
else:
data = field_stream.read()
self._post.appendlist(field_name,
force_text(data, encoding, errors='replace'))
elif item_type == FILE:
# This is a file, use the handler...
file_name = disposition.get('filename')
if not file_name:
continue
file_name = force_text(file_name, encoding, errors='replace')
file_name = self.IE_sanitize(unescape_entities(file_name))
content_type, content_type_extra = meta_data.get('content-type', ('', {}))
content_type = content_type.strip()
charset = content_type_extra.get('charset')
try:
content_length = int(meta_data.get('content-length')[0])
except (IndexError, TypeError, ValueError):
content_length = None
counters = [0] * len(handlers)
try:
for handler in handlers:
try:
handler.new_file(field_name, file_name,
content_type, content_length,
charset, content_type_extra)
except StopFutureHandlers:
break
for chunk in field_stream:
if transfer_encoding == 'base64':
# We only special-case base64 transfer encoding.
# Base64 chunks must be decoded in multiples of 4 bytes,
# ignoring whitespace.
stripped_chunk = b"".join(chunk.split())
remaining = len(stripped_chunk) % 4
while remaining != 0:
over_chunk = field_stream.read(4 - remaining)
stripped_chunk += b"".join(over_chunk.split())
remaining = len(stripped_chunk) % 4
try:
chunk = base64.b64decode(stripped_chunk)
except Exception as e:
# Since this is only a chunk, any error is an unfixable error.
msg = "Could not decode base64 data: %r" % e
six.reraise(MultiPartParserError, MultiPartParserError(msg), sys.exc_info()[2])
for i, handler in enumerate(handlers):
chunk_length = len(chunk)
chunk = handler.receive_data_chunk(chunk,
counters[i])
counters[i] += chunk_length
if chunk is None:
# If the chunk received by the handler is None, then don't continue.
break
except SkipFile:
self._close_files()
# Just use up the rest of this file...
exhaust(field_stream)
else:
# Handle file upload completions on next iteration.
old_field_name = field_name
else:
# If this is neither a FIELD or a FILE, just exhaust the stream.
exhaust(stream)
except StopUpload as e:
self._close_files()
if not e.connection_reset:
exhaust(self._input_data)
else:
# Make sure that the request data is all fed
exhaust(self._input_data)
# Signal that the upload has completed.
for handler in handlers:
retval = handler.upload_complete()
if retval:
break
return self._post, self._files
def handle_file_complete(self, old_field_name, counters):
"""
Handle all the signaling that takes place when a file is complete.
"""
for i, handler in enumerate(self._upload_handlers):
file_obj = handler.file_complete(counters[i])
if file_obj:
# If it returns a file object, then set the files dict.
self._files.appendlist(
force_text(old_field_name, self._encoding, errors='replace'),
file_obj)
break
def IE_sanitize(self, filename):
"""Cleanup filename from Internet Explorer full paths."""
return filename and filename[filename.rfind("\\") + 1:].strip()
def _close_files(self):
# Free up all file handles.
# FIXME: this currently assumes that upload handlers store the file as 'file'
# We should document that... (Maybe add handler.free_file to complement new_file)
for handler in self._upload_handlers:
if hasattr(handler, 'file'):
handler.file.close()
class LazyStream(six.Iterator):
"""
The LazyStream wrapper allows one to get and "unget" bytes from a stream.
Given a producer object (an iterator that yields bytestrings), the
LazyStream object will support iteration, reading, and keeping a "look-back"
variable in case you need to "unget" some bytes.
"""
def __init__(self, producer, length=None):
"""
Every LazyStream must have a producer when instantiated.
A producer is an iterable that returns a string each time it
is called.
"""
self._producer = producer
self._empty = False
self._leftover = b''
self.length = length
self.position = 0
self._remaining = length
self._unget_history = []
def tell(self):
return self.position
def read(self, size=None):
def parts():
remaining = self._remaining if size is None else size
# do the whole thing in one shot if no limit was provided.
if remaining is None:
yield b''.join(self)
return
# otherwise do some bookkeeping to return exactly enough
# of the stream and stashing any extra content we get from
# the producer
while remaining != 0:
assert remaining > 0, 'remaining bytes to read should never go negative'
chunk = next(self)
emitting = chunk[:remaining]
self.unget(chunk[remaining:])
remaining -= len(emitting)
yield emitting
out = b''.join(parts())
return out
def __next__(self):
"""
Used when the exact number of bytes to read is unimportant.
This procedure just returns whatever chunk is conveniently returned
from the iterator instead. Useful to avoid unnecessary bookkeeping if
performance is an issue.
"""
if self._leftover:
output = self._leftover
self._leftover = b''
else:
output = next(self._producer)
self._unget_history = []
self.position += len(output)
return output
def close(self):
"""
Used to invalidate/disable this lazy stream.
Replaces the producer with an empty list. Any leftover bytes that have
already been read will still be reported upon read() and/or next().
"""
self._producer = []
def __iter__(self):
return self
def unget(self, bytes):
"""
Places bytes back onto the front of the lazy stream.
Future calls to read() will return those bytes first. The
stream position and thus tell() will be rewound.
"""
if not bytes:
return
self._update_unget_history(len(bytes))
self.position -= len(bytes)
self._leftover = b''.join([bytes, self._leftover])
def _update_unget_history(self, num_bytes):
"""
Updates the unget history as a sanity check to see if we've pushed
back the same number of bytes in one chunk. If we keep ungetting the
same number of bytes many times (here, 50), we're most likely in an
infinite loop of some sort. This is usually caused by a
maliciously-malformed MIME request.
"""
self._unget_history = [num_bytes] + self._unget_history[:49]
number_equal = len([current_number for current_number in self._unget_history
if current_number == num_bytes])
if number_equal > 40:
raise SuspiciousMultipartForm(
"The multipart parser got stuck, which shouldn't happen with"
" normal uploaded files. Check for malicious upload activity;"
" if there is none, report this to the Django developers."
)
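def _demo_lazystream():  # hypothetical helper, added for exposition only
    # unget() pushes bytes back onto the front of the stream and rewinds
    # tell(); the next read() returns the pushed-back bytes first.
    ls = LazyStream(iter([b'abcdef']))
    assert ls.read(4) == b'abcd' and ls.tell() == 4
    ls.unget(b'cd')
    assert ls.tell() == 2 and ls.read() == b'cdef'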
class ChunkIter(six.Iterator):
"""
An iterable that will yield chunks of data. Given a file-like object as the
constructor, this object will yield chunks of read operations from that
object.
"""
def __init__(self, flo, chunk_size=64 * 1024):
self.flo = flo
self.chunk_size = chunk_size
def __next__(self):
try:
data = self.flo.read(self.chunk_size)
except InputStreamExhausted:
raise StopIteration()
if data:
return data
else:
raise StopIteration()
def __iter__(self):
return self
class InterBoundaryIter(six.Iterator):
"""
A Producer that will iterate over boundaries.
"""
def __init__(self, stream, boundary):
self._stream = stream
self._boundary = boundary
def __iter__(self):
return self
def __next__(self):
try:
return LazyStream(BoundaryIter(self._stream, self._boundary))
except InputStreamExhausted:
raise StopIteration()
class BoundaryIter(six.Iterator):
"""
A Producer that is sensitive to boundaries.
Will happily yield bytes until a boundary is found. Will yield the bytes
before the boundary, throw away the boundary bytes themselves, and push the
post-boundary bytes back on the stream.
Calls to next() after the boundary has been located will raise a
StopIteration exception.
"""
def __init__(self, stream, boundary):
self._stream = stream
self._boundary = boundary
self._done = False
# rollback an additional six bytes because the format is like
# this: CRLF<boundary>[--CRLF]
self._rollback = len(boundary) + 6
# Try to use mx fast string search if available. Otherwise
# use Python find. Wrap the latter for consistency.
unused_char = self._stream.read(1)
if not unused_char:
raise InputStreamExhausted()
self._stream.unget(unused_char)
def __iter__(self):
return self
def __next__(self):
if self._done:
raise StopIteration()
stream = self._stream
rollback = self._rollback
bytes_read = 0
chunks = []
for bytes in stream:
bytes_read += len(bytes)
chunks.append(bytes)
if bytes_read > rollback:
break
if not bytes:
break
else:
self._done = True
if not chunks:
raise StopIteration()
chunk = b''.join(chunks)
boundary = self._find_boundary(chunk, len(chunk) < self._rollback)
if boundary:
end, next = boundary
stream.unget(chunk[next:])
self._done = True
return chunk[:end]
else:
# make sure we don't treat a partial boundary (and
# its separators) as data
if not chunk[:-rollback]: # and len(chunk) >= (len(self._boundary) + 6):
# There's nothing left, we should just return and mark as done.
self._done = True
return chunk
else:
stream.unget(chunk[-rollback:])
return chunk[:-rollback]
def _find_boundary(self, data, eof=False):
"""
Finds a multipart boundary in data.
If no boundary exists in the data, None is returned. Otherwise, a tuple
containing the indices of the following is returned:
* the end of current encapsulation
* the start of the next encapsulation
"""
index = data.find(self._boundary)
if index < 0:
return None
else:
end = index
next = index + len(self._boundary)
# backup over CRLF
last = max(0, end - 1)
if data[last:last + 1] == b'\n':
end -= 1
last = max(0, end - 1)
if data[last:last + 1] == b'\r':
end -= 1
return end, next
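def _demo_find_boundary():  # hypothetical helper, added for exposition only
    # The CRLF backup above excludes the b'\r\n' that precedes the boundary,
    # so 'end' marks the end of the encapsulated payload while 'next' points
    # just past the boundary bytes.
    it = BoundaryIter(LazyStream(iter([b'payload\r\n--sep--'])), b'--sep')
    assert it._find_boundary(b'payload\r\n--sep--') == (7, 14)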
def exhaust(stream_or_iterable):
"""
Completely exhausts an iterator or stream.
Raise a MultiPartParserError if the argument is not a stream or an iterable.
"""
iterator = None
try:
iterator = iter(stream_or_iterable)
except TypeError:
iterator = ChunkIter(stream_or_iterable, 16384)
if iterator is None:
raise MultiPartParserError('multipartparser.exhaust() was passed a non-iterable or stream parameter')
for __ in iterator:
pass
def parse_boundary_stream(stream, max_header_size):
"""
Parses one and exactly one stream that encapsulates a boundary.
"""
# Stream at beginning of header, look for end of header
# and parse it if found. The header must fit within one
# chunk.
chunk = stream.read(max_header_size)
# 'find' returns the top of these four bytes, so we'll
# need to munch them later to prevent them from polluting
# the payload.
header_end = chunk.find(b'\r\n\r\n')
def _parse_header(line):
main_value_pair, params = parse_header(line)
try:
name, value = main_value_pair.split(':', 1)
except ValueError:
raise ValueError("Invalid header: %r" % line)
return name, (value, params)
if header_end == -1:
# we find no header, so we just mark this fact and pass on
# the stream verbatim
stream.unget(chunk)
return (RAW, {}, stream)
header = chunk[:header_end]
# here we place any excess chunk back onto the stream, as
# well as throwing away the CRLFCRLF bytes from above.
stream.unget(chunk[header_end + 4:])
TYPE = RAW
outdict = {}
# Eliminate blank lines
for line in header.split(b'\r\n'):
# This terminology ("main value" and "dictionary of
# parameters") is from the Python docs.
try:
name, (value, params) = _parse_header(line)
except ValueError:
continue
if name == 'content-disposition':
TYPE = FIELD
if params.get('filename'):
TYPE = FILE
outdict[name] = value, params
if TYPE == RAW:
stream.unget(chunk)
return (TYPE, outdict, stream)
class Parser(object):
def __init__(self, stream, boundary):
self._stream = stream
self._separator = b'--' + boundary
def __iter__(self):
boundarystream = InterBoundaryIter(self._stream, self._separator)
for sub_stream in boundarystream:
# Iterate over each part
yield parse_boundary_stream(sub_stream, 1024)
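def _demo_parser():  # hypothetical helper, added for exposition only
    # End-to-end sketch of the pipeline above, valid for the Python versions
    # this module targets: ChunkIter feeds a LazyStream, Parser splits on
    # b'--' + boundary, and parse_boundary_stream classifies each part.
    from io import BytesIO
    body = (b'--b\r\ncontent-disposition: form-data; name="k"\r\n\r\n'
            b'v\r\n--b--\r\n')
    stream = LazyStream(ChunkIter(BytesIO(body), 16))
    for item_type, meta, part in Parser(stream, b'b'):
        if item_type == FIELD:
            assert meta['content-disposition'][1]['name'] == b'k'
            assert part.read() == b'v'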
def parse_header(line):
""" Parse the header into a key-value.
Input (line): bytes, output: unicode for key/name, bytes for value which
will be decoded later
"""
plist = _parse_header_params(b';' + line)
key = plist.pop(0).lower().decode('ascii')
pdict = {}
for p in plist:
i = p.find(b'=')
if i >= 0:
has_encoding = False
name = p[:i].strip().lower().decode('ascii')
if name.endswith('*'):
# Lang/encoding embedded in the value (like "filename*=UTF-8''file.ext")
# http://tools.ietf.org/html/rfc2231#section-4
name = name[:-1]
if p.count(b"'") == 2:
has_encoding = True
value = p[i + 1:].strip()
if has_encoding:
encoding, lang, value = value.split(b"'")
if six.PY3:
value = unquote(value.decode(), encoding=encoding.decode())
else:
value = unquote(value).decode(encoding)
if len(value) >= 2 and value[:1] == value[-1:] == b'"':
value = value[1:-1]
value = value.replace(b'\\\\', b'\\').replace(b'\\"', b'"')
pdict[name] = value
return key, pdict
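def _demo_parse_header():  # hypothetical helper, added for exposition only
    # The key comes back as lowercase unicode; parameter values stay bytes,
    # with surrounding quotes stripped and backslash escapes undone.
    key, pdict = parse_header(b'form-data; name="f"; filename="a.txt"')
    assert key == 'form-data'
    assert pdict == {'name': b'f', 'filename': b'a.txt'}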
def _parse_header_params(s):
plist = []
while s[:1] == b';':
s = s[1:]
end = s.find(b';')
while end > 0 and s.count(b'"', 0, end) % 2:
end = s.find(b';', end + 1)
if end < 0:
end = len(s)
f = s[:end]
plist.append(f.strip())
s = s[end:]
return plist
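def _demo_parse_header_params():  # hypothetical helper, exposition only
    # Semicolons inside double quotes do not split parameters: the quote
    # count check above skips separators that fall inside a quoted value.
    assert _parse_header_params(b';x=1; y="a;b"') == [b'x=1', b'y="a;b"']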
|
LecomteEmerick/Essentia-build | refs/heads/master | test/src/unittest/temporal/test_loudness.py | 10 | #!/usr/bin/env python
# Copyright (C) 2006-2013 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
from essentia_test import *
class TestLoudness(TestCase):
def testEmpty(self):
input = []
self.assertComputeFails(Loudness(), input)
def testZero(self):
input = [0]*100
self.assertAlmostEqual(Loudness()(input), 0)
def testOne(self):
input = [0]
self.assertAlmostEqual(Loudness()(input), 0)
input = [100]
self.assertAlmostEqual(Loudness()(input), 478.63009232263852, 1e-6)
def testRegression(self):
input = [45, 78, 1, -5, -.1125, 1.236, 10.25, 100, 9, -78]
self.assertAlmostEqual(Loudness()(input), 870.22171051882947, 1e-6)
suite = allTests(TestLoudness)
if __name__ == '__main__':
TextTestRunner(verbosity=2).run(suite)
|
SebasSBM/django | refs/heads/master | django/conf/project_template/project_name/urls.py | 244 | """{{ project_name }} URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/{{ docs_version }}/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
]
|
Carmezim/tensorflow | refs/heads/master | tensorflow/tensorboard/backend/event_processing/event_file_loader.py | 68 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functionality for loading events from a record file."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.util import event_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.framework import errors
from tensorflow.python.platform import app
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
class EventFileLoader(object):
"""An EventLoader is an iterator that yields Event protos."""
def __init__(self, file_path):
if file_path is None:
raise ValueError('A file path is required')
file_path = resource_loader.readahead_file_path(file_path)
logging.debug('Opening a record reader pointing at %s', file_path)
with errors.raise_exception_on_not_ok_status() as status:
self._reader = pywrap_tensorflow.PyRecordReader_New(
compat.as_bytes(file_path), 0, compat.as_bytes(''), status)
# Store it for logging purposes.
self._file_path = file_path
if not self._reader:
raise IOError('Failed to open a record reader pointing to %s' % file_path)
def Load(self):
"""Loads all new values from disk.
Calling Load multiple times in a row will not 'drop' events as long as the
return value is not iterated over.
Yields:
All values that were written to disk that have not been yielded yet.
"""
while True:
try:
with errors.raise_exception_on_not_ok_status() as status:
self._reader.GetNext(status)
except (errors.DataLossError, errors.OutOfRangeError):
# We ignore partial read exceptions, because a record may be truncated.
# PyRecordReader holds the offset prior to the failed read, so retrying
# will succeed.
break
event = event_pb2.Event()
event.ParseFromString(self._reader.record())
yield event
logging.debug('No more events in %s', self._file_path)
def main(argv):
if len(argv) != 2:
print('Usage: event_file_loader <path-to-the-recordio-file>')
return 1
loader = EventFileLoader(argv[1])
for event in loader.Load():
print(event)
if __name__ == '__main__':
app.run()
|
tedder/ansible | refs/heads/devel | lib/ansible/module_utils/aws/urls.py | 33 | # Copyright: (c) 2018, Aaron Haaf <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
import datetime
import hashlib
import hmac
import operator
from ansible.module_utils.urls import open_url
from ansible.module_utils.ec2 import boto3_conn, get_aws_connection_info, HAS_BOTO3
from ansible.module_utils.six.moves.urllib.parse import urlencode
try:
from boto3 import session
except ImportError:
pass
def hexdigest(s):
"""
Returns the sha256 hexdigest of a string after encoding.
"""
return hashlib.sha256(s.encode("utf-8")).hexdigest()
def format_querystring(params=None):
"""
Returns properly url-encoded query string from the provided params dict.
The parameters are sorted by name, as canonical requests require.
"""
if not params:
return ""
# Query string values must be URL-encoded (space=%20). The parameters must be sorted by name.
return urlencode(sorted(params.items(), key=operator.itemgetter(0)))
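def _demo_format_querystring():  # hypothetical helper, exposition only
    # Parameters come back sorted by name and url-encoded, as the canonical
    # request construction below requires.
    assert format_querystring({'b': '2', 'a': '1'}) == 'a=1&b=2'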
# Key derivation functions. See:
# http://docs.aws.amazon.com/general/latest/gr/signature-v4-examples.html#signature-v4-examples-python
def sign(key, msg):
'''
Return digest for key applied to msg
'''
return hmac.new(key, msg.encode("utf-8"), hashlib.sha256).digest()
def get_signature_key(key, dateStamp, regionName, serviceName):
'''
Returns signature key for AWS resource
'''
kDate = sign(("AWS4" + key).encode("utf-8"), dateStamp)
kRegion = sign(kDate, regionName)
kService = sign(kRegion, serviceName)
kSigning = sign(kService, "aws4_request")
return kSigning
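def _demo_signature_key():  # hypothetical helper, exposition only
    # The signing key chains HMAC-SHA256 over date, region, service and the
    # literal 'aws4_request', per the AWS docs linked above. The secret key
    # here is the documented AWS example value, not a real credential.
    key = get_signature_key('wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY',
                            '20150830', 'us-east-1', 'iam')
    return hmac.new(key, 'demo'.encode('utf-8'), hashlib.sha256).hexdigest()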
def get_aws_credentials_object(module):
'''
Returns aws_access_key_id, aws_secret_access_key, session_token for a module.
'''
if not HAS_BOTO3:
module.fail_json("get_aws_credentials_object requires boto3")
dummy, dummy, boto_params = get_aws_connection_info(module, boto3=True)
s = session.Session(**boto_params)
return s.get_credentials()
# Reference: https://docs.aws.amazon.com/general/latest/gr/sigv4-signed-request-examples.html
def signed_request(
module=None,
method="GET", service=None, host=None, uri=None,
query=None, body="", headers=None,
session_in_header=True, session_in_query=False
):
"""Generate a SigV4 request to an AWS resource for a module
This is used if you wish to authenticate with AWS credentials to a secure endpoint like an Elasticsearch domain.
Returns :class:`HTTPResponse` object.
Example:
result = signed_request(
module=this,
service="es",
host="search-recipes1-xxxxxxxxx.us-west-2.es.amazonaws.com",
)
:kwarg host: endpoint to talk to
:kwarg service: AWS id of service (like `ec2` or `es`)
:kwarg module: An AnsibleAWSModule to gather connection info from
:kwarg body: (optional) Payload to send
:kwarg method: (optional) HTTP verb to use
:kwarg query: (optional) dict of query params to handle
:kwarg uri: (optional) Resource path without query parameters
:kwarg session_in_header: (optional) Add the session token to the headers
:kwarg session_in_query: (optional) Add the session token to the query parameters
:returns: HTTPResponse
"""
if not HAS_BOTO3:
module.fail_json("A sigv4 signed_request requires boto3")
# "Constants"
t = datetime.datetime.utcnow()
amz_date = t.strftime("%Y%m%dT%H%M%SZ")
datestamp = t.strftime("%Y%m%d") # Date w/o time, used in credential scope
algorithm = "AWS4-HMAC-SHA256"
# AWS stuff
region, dummy, dummy = get_aws_connection_info(module, boto3=True)
credentials = get_aws_credentials_object(module)
access_key = credentials.access_key
secret_key = credentials.secret_key
session_token = credentials.token
if not access_key:
module.fail_json(msg="aws_access_key_id is missing")
if not secret_key:
module.fail_json(msg="aws_secret_access_key is missing")
credential_scope = "/".join([datestamp, region, service, "aws4_request"])
# Argument Defaults
uri = uri or "/"
query_string = format_querystring(query) if query else ""
headers = headers or dict()
query = query or dict()
headers.update({
"host": host,
"x-amz-date": amz_date,
})
# Handle adding of session_token if present
if session_token:
if session_in_header:
headers["X-Amz-Security-Token"] = session_token
if session_in_query:
query["X-Amz-Security-Token"] = session_token
if method == "GET":
body = ""
# Derived data
body_hash = hexdigest(body)
signed_headers = ";".join(sorted(headers.keys()))
# Set up the canonical request used to generate the auth token
cannonical_headers = "\n".join([
key.lower().strip() + ":" + value for key, value in headers.items()
]) + "\n" # Note additional trailing newline
cannonical_request = "\n".join([
method,
uri,
query_string,
cannonical_headers,
signed_headers,
body_hash,
])
string_to_sign = "\n".join([algorithm, amz_date, credential_scope, hexdigest(cannonical_request)])
# Sign the canonical request
signing_key = get_signature_key(secret_key, datestamp, region, service)
signature = hmac.new(signing_key, string_to_sign.encode("utf-8"), hashlib.sha256).hexdigest()
# Make auth header with that info
authorization_header = "{0} Credential={1}/{2}, SignedHeaders={3}, Signature={4}".format(
algorithm, access_key, credential_scope, signed_headers, signature
)
# PERFORM THE REQUEST!
url = "https://" + host + uri
if query_string != "":
url = url + "?" + query_string
final_headers = {
"x-amz-date": amz_date,
"Authorization": authorization_header,
}
final_headers.update(headers)
return open_url(url, method=method, data=body, headers=final_headers)
|
R4stl1n/allianceauth | refs/heads/master | allianceauth/services/modules/teamspeak3/signals.py | 5 | import logging
from django.db import transaction
from django.db.models.signals import m2m_changed
from django.db.models.signals import post_delete
from django.db.models.signals import post_save
from django.dispatch import receiver
from allianceauth.authentication.signals import state_changed
from .tasks import Teamspeak3Tasks
from .models import AuthTS, StateGroup
logger = logging.getLogger(__name__)
def trigger_all_ts_update():
logger.debug("Triggering update_all_groups")
Teamspeak3Tasks.update_all_groups()
@receiver(m2m_changed, sender=AuthTS.ts_group.through)
def m2m_changed_authts_group(sender, instance, action, *args, **kwargs):
logger.debug("Received m2m_changed from %s ts_group with action %s" % (instance, action))
if action == "post_add" or action == "post_remove":
transaction.on_commit(trigger_all_ts_update)
@receiver(post_save, sender=AuthTS)
def post_save_authts(sender, instance, *args, **kwargs):
logger.debug("Received post_save from %s" % instance)
transaction.on_commit(trigger_all_ts_update)
@receiver(post_delete, sender=AuthTS)
def post_delete_authts(sender, instance, *args, **kwargs):
logger.debug("Received post_delete signal from %s" % instance)
transaction.on_commit(trigger_all_ts_update)
# it's literally the same logic so just recycle the receiver
post_save.connect(post_save_authts, sender=StateGroup)
post_delete.connect(post_delete_authts, sender=StateGroup)
@receiver(state_changed)
def check_groups_on_state_change(sender, user, state, **kwargs):
def trigger_update():
Teamspeak3Tasks.update_groups.delay(user.pk)
logger.debug("Received state_changed signal from {}".format(user))
transaction.on_commit(trigger_update)
|
ItsAdventureTime/fail2ban | refs/heads/master | config/action.d/badips.py | 5 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
# vi: set ft=python sts=4 ts=4 sw=4 noet :
# This file is part of Fail2Ban.
#
# Fail2Ban is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Fail2Ban is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Fail2Ban; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import sys
if sys.version_info < (2, 7):
raise ImportError("badips.py action requires Python >= 2.7")
import json
from functools import partial
import threading
import logging
if sys.version_info >= (3, ):
from urllib.request import Request, urlopen
from urllib.parse import urlencode
from urllib.error import HTTPError
else:
from urllib2 import Request, urlopen, HTTPError
from urllib import urlencode
from fail2ban.server.actions import ActionBase
from fail2ban.version import version as f2bVersion
class BadIPsAction(ActionBase):
"""Fail2Ban action which reports bans to badips.com, and also
blacklist bad IPs listed on badips.com by using another action's
ban method.
Parameters
----------
jail : Jail
The jail which the action belongs to.
name : str
Name assigned to the action.
category : str
Valid badips.com category for reporting failures.
score : int, optional
Minimum score for bad IPs. Default 3.
age : str, optional
Age of last report for bad IPs, per badips.com syntax.
Default "24h" (24 hours)
key : str, optional
Key issued by badips.com to report bans, for later retrieval
of personalised content.
banaction : str, optional
Name of banaction to use for blacklisting bad IPs. If `None`,
no blacklist of IPs will take place.
Default `None`.
bancategory : str, optional
Name of category to use for blacklisting, which can differ
from category used for reporting. e.g. may want to report
"postfix", but want to use whole "mail" category for blacklist.
Default `category`.
bankey : str, optional
Key issued by badips.com to blacklist IPs reported with the
associated key.
updateperiod : int, optional
Time in seconds between updating bad IPs blacklist.
Default 900 (15 minutes)
Raises
------
ValueError
If invalid `category`, `score`, `banaction` or `updateperiod`.
"""
_badips = "http://www.badips.com"
_Request = partial(
Request, headers={'User-Agent': "Fail2Ban %s" % f2bVersion})
def __init__(self, jail, name, category, score=3, age="24h", key=None,
banaction=None, bancategory=None, bankey=None, updateperiod=900):
super(BadIPsAction, self).__init__(jail, name)
self.category = category
self.score = score
self.age = age
self.key = key
self.banaction = banaction
self.bancategory = bancategory or category
self.bankey = bankey
self.updateperiod = updateperiod
self._bannedips = set()
# Used later for threading.Timer for updating badips
self._timer = None
def getCategories(self, incParents=False):
"""Get badips.com categories.
Returns
-------
set
Set of categories.
Raises
------
HTTPError
Any issues with badips.com request.
"""
try:
response = urlopen(
self._Request("/".join([self._badips, "get", "categories"])))
except HTTPError as response:
messages = json.loads(response.read().decode('utf-8'))
self._logSys.error(
"Failed to fetch categories. badips.com response: '%s'",
messages['err'])
raise
else:
categories = json.loads(response.read().decode('utf-8'))['categories']
categories_names = set(
value['Name'] for value in categories)
if incParents:
categories_names.update(set(
value['Parent'] for value in categories
if "Parent" in value))
return categories_names
def getList(self, category, score, age, key=None):
"""Get badips.com list of bad IPs.
Parameters
----------
category : str
Valid badips.com category.
score : int
Minimum score for bad IPs.
age : str
Age of last report for bad IPs, per badips.com syntax.
key : str, optional
Key issued by badips.com to fetch IPs reported with the
associated key.
Returns
-------
set
Set of bad IPs.
Raises
------
HTTPError
Any issues with badips.com request.
"""
try:
url = "?".join([
"/".join([self._badips, "get", "list", category, str(score)]),
urlencode({'age': age})])
if key:
url = "&".join([url, urlencode({'key': key})])
response = urlopen(self._Request(url))
except HTTPError as response:
messages = json.loads(response.read().decode('utf-8'))
self._logSys.error(
"Failed to fetch bad IP list. badips.com response: '%s'",
messages['err'])
raise
else:
return set(response.read().decode('utf-8').split())
@property
def category(self):
"""badips.com category for reporting IPs.
"""
return self._category
@category.setter
def category(self, category):
if category not in self.getCategories():
self._logSys.error("Category name '%s' not valid. "
"see badips.com for list of valid categories",
category)
raise ValueError("Invalid category: %s" % category)
self._category = category
@property
def bancategory(self):
"""badips.com bancategory for fetching IPs.
"""
return self._bancategory
@bancategory.setter
def bancategory(self, bancategory):
if bancategory not in self.getCategories(incParents=True):
self._logSys.error("Category name '%s' not valid. "
"see badips.com for list of valid categories",
bancategory)
raise ValueError("Invalid bancategory: %s" % bancategory)
self._bancategory = bancategory
@property
def score(self):
"""badips.com minimum score for fetching IPs.
"""
return self._score
@score.setter
def score(self, score):
score = int(score)
if 0 <= score <= 5:
self._score = score
else:
raise ValueError("Score must be 0-5")
@property
def banaction(self):
"""Jail action to use for banning/unbanning.
"""
return self._banaction
@banaction.setter
def banaction(self, banaction):
if banaction is not None and banaction not in self._jail.actions:
self._logSys.error("Action name '%s' not in jail '%s'",
banaction, self._jail.name)
raise ValueError("Invalid banaction")
self._banaction = banaction
@property
def updateperiod(self):
"""Period in seconds between banned bad IPs will be updated.
"""
return self._updateperiod
@updateperiod.setter
def updateperiod(self, updateperiod):
updateperiod = int(updateperiod)
if updateperiod > 0:
self._updateperiod = updateperiod
else:
raise ValueError("Update period must be integer greater than 0")
def _banIPs(self, ips):
for ip in ips:
try:
self._jail.actions[self.banaction].ban({
'ip': ip,
'failures': 0,
'matches': "",
'ipmatches': "",
'ipjailmatches': "",
})
except Exception as e:
self._logSys.error(
"Error banning IP %s for jail '%s' with action '%s': %s",
ip, self._jail.name, self.banaction, e,
exc_info=self._logSys.getEffectiveLevel()<=logging.DEBUG)
else:
self._bannedips.add(ip)
self._logSys.info(
"Banned IP %s for jail '%s' with action '%s'",
ip, self._jail.name, self.banaction)
def _unbanIPs(self, ips):
for ip in ips:
try:
self._jail.actions[self.banaction].unban({
'ip': ip,
'failures': 0,
'matches': "",
'ipmatches': "",
'ipjailmatches': "",
})
except Exception as e:
self._logSys.info(
"Error unbanning IP %s for jail '%s' with action '%s': %s",
ip, self._jail.name, self.banaction, e,
exc_info=self._logSys.getEffectiveLevel()<=logging.DEBUG)
else:
self._logSys.info(
"Unbanned IP %s for jail '%s' with action '%s'",
ip, self._jail.name, self.banaction)
finally:
self._bannedips.remove(ip)
def start(self):
"""If `banaction` set, blacklists bad IPs.
"""
if self.banaction is not None:
self.update()
def update(self):
"""If `banaction` set, updates blacklisted IPs.
Queries badips.com for list of bad IPs, removing IPs from the
blacklist if no longer present, and adds new bad IPs to the
blacklist.
"""
if self.banaction is not None:
if self._timer:
self._timer.cancel()
self._timer = None
try:
ips = self.getList(
self.bancategory, self.score, self.age, self.bankey)
# Remove old IPs no longer listed
self._unbanIPs(self._bannedips - ips)
# Add new IPs which are now listed
self._banIPs(ips - self._bannedips)
self._logSys.info(
"Updated IPs for jail '%s'. Update again in %i seconds",
self._jail.name, self.updateperiod)
finally:
self._timer = threading.Timer(self.updateperiod, self.update)
self._timer.start()
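# Illustrative sketch of the set arithmetic above (added for exposition,
# hypothetical addresses): with previously banned {'1.2.3.4', '5.6.7.8'}
# and a fetched list {'5.6.7.8', '9.9.9.9'}, the difference
# self._bannedips - ips == {'1.2.3.4'} is unbanned, and
# ips - self._bannedips == {'9.9.9.9'} is banned.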
def stop(self):
"""If `banaction` set, clears blacklisted IPs.
"""
if self.banaction is not None:
if self._timer:
self._timer.cancel()
self._timer = None
self._unbanIPs(self._bannedips.copy())
def ban(self, aInfo):
"""Reports banned IP to badips.com.
Parameters
----------
aInfo : dict
Dictionary which includes information in relation to
the ban.
Raises
------
HTTPError
Any issues with badips.com request.
"""
try:
url = "/".join([self._badips, "add", self.category, aInfo['ip']])
if self.key:
url = "?".join([url, urlencode({'key': self.key})])
response = urlopen(self._Request(url))
except HTTPError as response:
messages = json.loads(response.read().decode('utf-8'))
self._logSys.error(
"Response from badips.com report: '%s'",
messages['err'])
raise
else:
messages = json.loads(response.read().decode('utf-8'))
self._logSys.info(
"Response from badips.com report: '%s'",
messages['suc'])
Action = BadIPsAction
|
radicalbit/ambari | refs/heads/trunk | ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/OOZIE/package/scripts/oozie_service.py | 4 | #!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from resource_management import *
def oozie_service(action = 'start'): # 'start' or 'stop'
import params
kinit_if_needed = format("{kinit_path_local} -kt {oozie_keytab} {oozie_principal};") if params.security_enabled else ""
no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1")
if action == 'start':
start_cmd = "service oozie start"
if params.jdbc_driver_name == "com.mysql.jdbc.Driver" or \
params.jdbc_driver_name == "org.postgresql.Driver" or \
params.jdbc_driver_name == "oracle.jdbc.driver.OracleDriver":
db_connection_check_command = format("{java_home}/bin/java -cp {check_db_connection_jar}:{jdbc_driver_jar} org.apache.ambari.server.DBConnectionVerification '{oozie_jdbc_connection_url}' {oozie_metastore_user_name} {oozie_metastore_user_passwd!p} {jdbc_driver_name}")
else:
db_connection_check_command = None
cmd1 = "service oozie init"
cmd2 = format("{kinit_if_needed} {put_shared_lib_to_hdfs_cmd} ; hadoop --config {hadoop_conf_dir} dfs -chmod -R 755 {oozie_hdfs_user_dir}/share")
if not os.path.isfile(params.jdbc_driver_jar) and params.jdbc_driver_name == "org.postgresql.Driver":
print format("ERROR: jdbc file {jdbc_driver_jar} is unavailable. Please, follow next steps:\n" \
"1) Download postgresql-9.0-801.jdbc4.jar.\n2) Create needed directory: mkdir -p {oozie_home}/libserver/\n" \
"3) Copy postgresql-9.0-801.jdbc4.jar to newly created dir: cp /path/to/jdbc/postgresql-9.0-801.jdbc4.jar " \
"{oozie_home}/libserver/\n4) Copy postgresql-9.0-801.jdbc4.jar to libext: cp " \
"/path/to/jdbc/postgresql-9.0-801.jdbc4.jar {oozie_home}/libext/\n")
exit(1)
if db_connection_check_command:
Execute( db_connection_check_command, tries=5, try_sleep=10)
Execute( cmd1,
not_if = no_op_test,
ignore_failures = True
)
Execute( cmd2,
user = params.oozie_user,
not_if = format("{kinit_if_needed} hadoop --config {hadoop_conf_dir} dfs -ls /user/oozie/share | awk 'BEGIN {{count=0;}} /share/ {{count++}} END {{if (count > 0) {{exit 0}} else {{exit 1}}}}'"),
path = params.execute_path
)
Execute( start_cmd,
not_if = no_op_test,
)
elif action == 'stop':
stop_cmd = "service oozie stop"
Execute( stop_cmd,
only_if = no_op_test
)
|
jamesblunt/gunicorn | refs/heads/master | examples/frameworks/flaskapp.py | 41 | # Run with:
#
# $ gunicorn flaskapp:app
#
from flask import Flask
app = Flask(__name__)
@app.route("/")
def hello():
return "Hello World!"
|
kmolab/kmolab.github.io | refs/heads/master | data/Brython-3.3.4/Lib/test/test_strftime.py | 85 | """
Unittest for time.strftime
"""
import calendar
import sys
import re
from test import support
import time
import unittest
# helper functions
def fixasctime(s):
if s[8] == ' ':
s = s[:8] + '0' + s[9:]
return s
def escapestr(text, ampm):
"""
Escape text to deal with possible locale values that have regex
syntax while allowing regex syntax used for comparison.
"""
new_text = re.escape(text)
new_text = new_text.replace(re.escape(ampm), ampm)
new_text = new_text.replace('\\%', '%')
new_text = new_text.replace('\\:', ':')
new_text = new_text.replace('\\?', '?')
return new_text
class StrftimeTest(unittest.TestCase):
def _update_variables(self, now):
# we must update the local variables on every cycle
self.gmt = time.gmtime(now)
now = time.localtime(now)
if now[3] < 12: self.ampm='(AM|am)'
else: self.ampm='(PM|pm)'
self.jan1 = time.localtime(time.mktime((now[0], 1, 1, 0, 0, 0, 0, 1, 0)))
try:
if now[8]: self.tz = time.tzname[1]
else: self.tz = time.tzname[0]
except AttributeError:
self.tz = ''
if now[3] > 12: self.clock12 = now[3] - 12
elif now[3] > 0: self.clock12 = now[3]
else: self.clock12 = 12
self.now = now
def setUp(self):
try:
import java
java.util.Locale.setDefault(java.util.Locale.US)
except ImportError:
import locale
locale.setlocale(locale.LC_TIME, 'C')
def test_strftime(self):
now = time.time()
self._update_variables(now)
self.strftest1(now)
self.strftest2(now)
if support.verbose:
print("Strftime test, platform: %s, Python version: %s" % \
(sys.platform, sys.version.split()[0]))
for j in range(-5, 5):
for i in range(25):
arg = now + (i+j*100)*23*3603
self._update_variables(arg)
self.strftest1(arg)
self.strftest2(arg)
def strftest1(self, now):
if support.verbose:
print("strftime test for", time.ctime(now))
now = self.now
# Make sure any characters that could be taken as regex syntax are
# escaped in escapestr()
expectations = (
('%a', calendar.day_abbr[now[6]], 'abbreviated weekday name'),
('%A', calendar.day_name[now[6]], 'full weekday name'),
('%b', calendar.month_abbr[now[1]], 'abbreviated month name'),
('%B', calendar.month_name[now[1]], 'full month name'),
# %c see below
('%d', '%02d' % now[2], 'day of month as number (00-31)'),
('%H', '%02d' % now[3], 'hour (00-23)'),
('%I', '%02d' % self.clock12, 'hour (01-12)'),
('%j', '%03d' % now[7], 'julian day (001-366)'),
('%m', '%02d' % now[1], 'month as number (01-12)'),
('%M', '%02d' % now[4], 'minute, (00-59)'),
('%p', self.ampm, 'AM or PM as appropriate'),
('%S', '%02d' % now[5], 'seconds of current time (00-60)'),
('%U', '%02d' % ((now[7] + self.jan1[6])//7),
'week number of the year (Sun 1st)'),
('%w', '0?%d' % ((1+now[6]) % 7), 'weekday as a number (Sun 1st)'),
('%W', '%02d' % ((now[7] + (self.jan1[6] - 1)%7)//7),
'week number of the year (Mon 1st)'),
# %x see below
('%X', '%02d:%02d:%02d' % (now[3], now[4], now[5]), '%H:%M:%S'),
('%y', '%02d' % (now[0]%100), 'year without century'),
('%Y', '%d' % now[0], 'year with century'),
# %Z see below
('%%', '%', 'single percent sign'),
)
for e in expectations:
# mustn't raise a ValueError
try:
result = time.strftime(e[0], now)
except ValueError as error:
self.fail("strftime '%s' format gave error: %s" % (e[0], error))
if re.match(escapestr(e[1], self.ampm), result):
continue
if not result or result[0] == '%':
self.fail("strftime does not support standard '%s' format (%s)"
% (e[0], e[2]))
else:
self.fail("Conflict for %s (%s): expected %s, but got %s"
% (e[0], e[2], e[1], result))
def strftest2(self, now):
nowsecs = str(int(now))[:-1]
now = self.now
nonstandard_expectations = (
# These are standard but don't have predictable output
('%c', fixasctime(time.asctime(now)), 'near-asctime() format'),
('%x', '%02d/%02d/%02d' % (now[1], now[2], (now[0]%100)),
'%m/%d/%y %H:%M:%S'),
('%Z', '%s' % self.tz, 'time zone name'),
# These are some platform specific extensions
('%D', '%02d/%02d/%02d' % (now[1], now[2], (now[0]%100)), 'mm/dd/yy'),
('%e', '%2d' % now[2], 'day of month as number, blank padded ( 0-31)'),
('%h', calendar.month_abbr[now[1]], 'abbreviated month name'),
('%k', '%2d' % now[3], 'hour, blank padded ( 0-23)'),
('%n', '\n', 'newline character'),
('%r', '%02d:%02d:%02d %s' % (self.clock12, now[4], now[5], self.ampm),
'%I:%M:%S %p'),
('%R', '%02d:%02d' % (now[3], now[4]), '%H:%M'),
('%s', nowsecs, 'seconds since the Epoch in UTC'),
('%t', '\t', 'tab character'),
('%T', '%02d:%02d:%02d' % (now[3], now[4], now[5]), '%H:%M:%S'),
('%3y', '%03d' % (now[0]%100),
'year without century rendered using fieldwidth'),
)
for e in nonstandard_expectations:
try:
result = time.strftime(e[0], now)
except ValueError as result:
msg = "Error for nonstandard '%s' format (%s): %s" % \
(e[0], e[2], str(result))
if support.verbose:
print(msg)
continue
if re.match(escapestr(e[1], self.ampm), result):
if support.verbose:
print("Supports nonstandard '%s' format (%s)" % (e[0], e[2]))
elif not result or result[0] == '%':
if support.verbose:
print("Does not appear to support '%s' format (%s)" % \
(e[0], e[2]))
else:
if support.verbose:
print("Conflict for nonstandard '%s' format (%s):" % \
(e[0], e[2]))
print(" Expected %s, but got %s" % (e[1], result))
def test_main():
support.run_unittest(StrftimeTest)
if __name__ == '__main__':
test_main()
|
felipebetancur/scipy | refs/heads/master | scipy/interpolate/interpolate_wrapper.py | 78 | """ helper_funcs.py.
scavenged from enthought.interpolate
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from . import _interpolate # C extension. Does all the real work.
def atleast_1d_and_contiguous(ary, dtype=np.float64):
return np.atleast_1d(np.ascontiguousarray(ary, dtype))
def nearest(x, y, new_x):
"""
Rounds each new x to nearest input x and returns corresponding input y.
Parameters
----------
x : array_like
Independent values.
y : array_like
Dependent values.
new_x : array_like
The x values to return the interpolate y values.
Returns
-------
nearest : ndarray
Rounds each `new_x` to nearest `x` and returns the corresponding `y`.
"""
shifted_x = np.concatenate((np.array([x[0]-1]), x[0:-1]))
midpoints_of_x = atleast_1d_and_contiguous(.5*(x + shifted_x))
new_x = atleast_1d_and_contiguous(new_x)
TINY = 1e-10
indices = np.searchsorted(midpoints_of_x, new_x+TINY)-1
indices = np.atleast_1d(np.clip(indices, 0, np.Inf).astype(int))
new_y = np.take(y, indices, axis=-1)
return new_y
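# Illustrative behaviour of nearest() (a sketch, not part of the original
# docs; the expected output follows from the midpoint logic above):
#
#   x = np.array([0., 1., 2.])
#   y = np.array([10., 20., 30.])
#   nearest(x, y, np.array([0.4, 1.6]))   # -> array([ 10.,  30.])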
def linear(x, y, new_x):
"""
Linearly interpolates values in new_x based on the values in x and y
Parameters
----------
x : array_like
Independent values
y : array_like
Dependent values
new_x : array_like
        The x values at which to return the interpolated y values.
"""
x = atleast_1d_and_contiguous(x, np.float64)
y = atleast_1d_and_contiguous(y, np.float64)
new_x = atleast_1d_and_contiguous(new_x, np.float64)
if y.ndim > 2:
raise ValueError("`linear` only works with 1-D or 2-D arrays.")
if len(y.shape) == 2:
new_y = np.zeros((y.shape[0], len(new_x)), np.float64)
for i in range(len(new_y)): # for each row
_interpolate.linear_dddd(x, y[i], new_x, new_y[i])
else:
new_y = np.zeros(len(new_x), np.float64)
_interpolate.linear_dddd(x, y, new_x, new_y)
return new_y
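# Illustrative usage of linear() (a sketch; requires the compiled
# _interpolate C extension to be importable):
#
#   x = np.array([0., 1., 2.])
#   y = np.array([0., 10., 20.])
#   linear(x, y, np.array([0.5, 1.5]))    # -> array([  5.,  15.])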
def logarithmic(x, y, new_x):
"""
    Linearly interpolates values in new_x in the log space of y.
Parameters
----------
x : array_like
Independent values.
y : array_like
Dependent values.
new_x : array_like
The x values to return interpolated y values at.
"""
x = atleast_1d_and_contiguous(x, np.float64)
y = atleast_1d_and_contiguous(y, np.float64)
new_x = atleast_1d_and_contiguous(new_x, np.float64)
if y.ndim > 2:
        raise ValueError("`logarithmic` only works with 1-D or 2-D arrays.")
if len(y.shape) == 2:
new_y = np.zeros((y.shape[0], len(new_x)), np.float64)
for i in range(len(new_y)):
_interpolate.loginterp_dddd(x, y[i], new_x, new_y[i])
else:
new_y = np.zeros(len(new_x), np.float64)
_interpolate.loginterp_dddd(x, y, new_x, new_y)
return new_y
def block_average_above(x, y, new_x):
"""
    Computes a block average of `y` above each value in `new_x`; values in
    `new_x` outside the range of `x` raise a ValueError (no extrapolation).
Parameters
----------
x : array_like
Independent values.
y : array_like
Dependent values.
new_x : array_like
        The x values at which to return block-averaged y values.
"""
bad_index = None
x = atleast_1d_and_contiguous(x, np.float64)
y = atleast_1d_and_contiguous(y, np.float64)
new_x = atleast_1d_and_contiguous(new_x, np.float64)
if y.ndim > 2:
        raise ValueError("`block_average_above` only works with 1-D or 2-D arrays.")
if len(y.shape) == 2:
new_y = np.zeros((y.shape[0], len(new_x)), np.float64)
for i in range(len(new_y)):
            bad_index = _interpolate.block_average_above_dddd(x, y[i],
new_x, new_y[i])
if bad_index is not None:
break
else:
new_y = np.zeros(len(new_x), np.float64)
bad_index = _interpolate.block_average_above_dddd(x, y, new_x, new_y)
if bad_index is not None:
msg = "block_average_above cannot extrapolate and new_x[%d]=%f "\
"is out of the x range (%f, %f)" % \
(bad_index, new_x[bad_index], x[0], x[-1])
raise ValueError(msg)
return new_y
def block(x, y, new_x):
"""
Essentially a step function.
    For each value in `new_x`, finds the largest j such that ``x[j] < new_x``,
    and returns ``y[j]``.
Parameters
----------
x : array_like
Independent values.
y : array_like
Dependent values.
new_x : array_like
The x values used to calculate the interpolated y.
Returns
-------
block : ndarray
        Return array, of same length as `new_x`.
"""
    # find index of values in x that precede values in new_x.
    # This code is a little strange -- we really want a routine that
    # returns, for each new_x value, the index j of the largest x[j] below it.
TINY = 1e-10
indices = np.searchsorted(x, new_x+TINY)-1
# If the value is at the front of the list, it'll have -1.
# In this case, we will use the first (0), element in the array.
# take requires the index array to be an Int
indices = np.atleast_1d(np.clip(indices, 0, np.Inf).astype(int))
new_y = np.take(y, indices, axis=-1)
return new_y
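# Illustrative behaviour of block() (a sketch): each new_x maps to the y of
# the largest preceding x, i.e. a left-continuous step function.
#
#   x = np.array([0., 1., 2.])
#   y = np.array([10., 20., 30.])
#   block(x, y, np.array([0.5, 1.7]))     # -> array([ 10.,  20.])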
|
b4n/geany | refs/heads/master | tests/ctags/bug1856363.py | 98 | #!/usr/bin/python
def main():
# A broken ctags will see a function "initely_not_a_function" here.
definitely_not_a_function = 0
return
if __name__ == '__main__':
main()
|
v0devil/JMeter-Control-Center | refs/heads/master | controller/tests.py | 24123 | from django.test import TestCase
# Create your tests here.
|
schlueter/ansible | refs/heads/devel | lib/ansible/modules/network/avi/avi_cloudproperties.py | 41 | #!/usr/bin/python
#
# @author: Gaurav Rastogi ([email protected])
# Eric Anderson ([email protected])
# module_check: supported
# Avi Version: 17.1.1
#
# Copyright: (c) 2017 Gaurav Rastogi, <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_cloudproperties
author: Gaurav Rastogi ([email protected])
short_description: Module for setup of CloudProperties Avi RESTful Object
description:
    - This module is used to configure the CloudProperties object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
cc_props:
description:
- Cloudconnector properties.
cc_vtypes:
description:
- Cloud types supported by cloudconnector.
- Enum options - CLOUD_NONE, CLOUD_VCENTER, CLOUD_OPENSTACK, CLOUD_AWS, CLOUD_VCA, CLOUD_APIC, CLOUD_MESOS, CLOUD_LINUXSERVER, CLOUD_DOCKER_UCP,
- CLOUD_RANCHER, CLOUD_OSHIFT_K8S, CLOUD_AZURE.
hyp_props:
description:
- Hypervisor properties.
info:
description:
- Properties specific to a cloud type.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create CloudProperties object
avi_cloudproperties:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_cloudproperties
"""
RETURN = '''
obj:
description: CloudProperties (api/cloudproperties) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
avi_api_update_method=dict(default='put',
choices=['put', 'patch']),
avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
cc_props=dict(type='dict',),
cc_vtypes=dict(type='list',),
hyp_props=dict(type='list',),
info=dict(type='list',),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'cloudproperties',
set([]))
if __name__ == '__main__':
main()
|
memtoko/django | refs/heads/master | tests/utils_tests/test_archive.py | 372 | import os
import shutil
import tempfile
import unittest
from django.utils._os import upath
from django.utils.archive import Archive, extract
TEST_DIR = os.path.join(os.path.dirname(upath(__file__)), 'archives')
class ArchiveTester(object):
archive = None
def setUp(self):
"""
Create temporary directory for testing extraction.
"""
self.old_cwd = os.getcwd()
self.tmpdir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, self.tmpdir)
self.archive_path = os.path.join(TEST_DIR, self.archive)
self.archive_lead_path = os.path.join(TEST_DIR, "leadpath_%s" % self.archive)
# Always start off in TEST_DIR.
os.chdir(TEST_DIR)
def tearDown(self):
os.chdir(self.old_cwd)
def test_extract_method(self):
with Archive(self.archive) as archive:
archive.extract(self.tmpdir)
self.check_files(self.tmpdir)
def test_extract_method_no_to_path(self):
os.chdir(self.tmpdir)
with Archive(self.archive_path) as archive:
archive.extract()
self.check_files(self.tmpdir)
def test_extract_function(self):
extract(self.archive_path, self.tmpdir)
self.check_files(self.tmpdir)
def test_extract_function_with_leadpath(self):
extract(self.archive_lead_path, self.tmpdir)
self.check_files(self.tmpdir)
def test_extract_function_no_to_path(self):
os.chdir(self.tmpdir)
extract(self.archive_path)
self.check_files(self.tmpdir)
def check_files(self, tmpdir):
self.assertTrue(os.path.isfile(os.path.join(self.tmpdir, '1')))
self.assertTrue(os.path.isfile(os.path.join(self.tmpdir, '2')))
self.assertTrue(os.path.isfile(os.path.join(self.tmpdir, 'foo', '1')))
self.assertTrue(os.path.isfile(os.path.join(self.tmpdir, 'foo', '2')))
self.assertTrue(os.path.isfile(os.path.join(self.tmpdir, 'foo', 'bar', '1')))
self.assertTrue(os.path.isfile(os.path.join(self.tmpdir, 'foo', 'bar', '2')))
class TestZip(ArchiveTester, unittest.TestCase):
archive = 'foobar.zip'
class TestTar(ArchiveTester, unittest.TestCase):
archive = 'foobar.tar'
class TestGzipTar(ArchiveTester, unittest.TestCase):
archive = 'foobar.tar.gz'
class TestBzip2Tar(ArchiveTester, unittest.TestCase):
archive = 'foobar.tar.bz2'
|
arjclark/cylc | refs/heads/master | lib/jinja2/meta.py | 222 | # -*- coding: utf-8 -*-
"""
jinja2.meta
~~~~~~~~~~~
This module implements various functions that exposes information about
templates that might be interesting for various kinds of applications.
:copyright: (c) 2017 by the Jinja Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from jinja2 import nodes
from jinja2.compiler import CodeGenerator
from jinja2._compat import string_types, iteritems
class TrackingCodeGenerator(CodeGenerator):
"""We abuse the code generator for introspection."""
def __init__(self, environment):
CodeGenerator.__init__(self, environment, '<introspection>',
'<introspection>')
self.undeclared_identifiers = set()
def write(self, x):
"""Don't write."""
def enter_frame(self, frame):
"""Remember all undeclared identifiers."""
CodeGenerator.enter_frame(self, frame)
for _, (action, param) in iteritems(frame.symbols.loads):
if action == 'resolve':
self.undeclared_identifiers.add(param)
def find_undeclared_variables(ast):
"""Returns a set of all variables in the AST that will be looked up from
the context at runtime. Because at compile time it's not known which
variables will be used depending on the path the execution takes at
runtime, all variables are returned.
>>> from jinja2 import Environment, meta
>>> env = Environment()
>>> ast = env.parse('{% set foo = 42 %}{{ bar + foo }}')
>>> meta.find_undeclared_variables(ast) == set(['bar'])
True
.. admonition:: Implementation
Internally the code generator is used for finding undeclared variables.
This is good to know because the code generator might raise a
:exc:`TemplateAssertionError` during compilation and as a matter of
fact this function can currently raise that exception as well.
"""
codegen = TrackingCodeGenerator(ast.environment)
codegen.visit(ast)
return codegen.undeclared_identifiers
def find_referenced_templates(ast):
"""Finds all the referenced templates from the AST. This will return an
iterator over all the hardcoded template extensions, inclusions and
imports. If dynamic inheritance or inclusion is used, `None` will be
yielded.
>>> from jinja2 import Environment, meta
>>> env = Environment()
>>> ast = env.parse('{% extends "layout.html" %}{% include helper %}')
>>> list(meta.find_referenced_templates(ast))
['layout.html', None]
This function is useful for dependency tracking. For example if you want
to rebuild parts of the website after a layout template has changed.
"""
for node in ast.find_all((nodes.Extends, nodes.FromImport, nodes.Import,
nodes.Include)):
if not isinstance(node.template, nodes.Const):
# a tuple with some non consts in there
if isinstance(node.template, (nodes.Tuple, nodes.List)):
for template_name in node.template.items:
# something const, only yield the strings and ignore
# non-string consts that really just make no sense
if isinstance(template_name, nodes.Const):
if isinstance(template_name.value, string_types):
yield template_name.value
# something dynamic in there
else:
yield None
# something dynamic we don't know about here
else:
yield None
continue
# constant is a basestring, direct template name
if isinstance(node.template.value, string_types):
yield node.template.value
# a tuple or list (latter *should* not happen) made of consts,
# yield the consts that are strings. We could warn here for
# non string values
elif isinstance(node, nodes.Include) and \
isinstance(node.template.value, (tuple, list)):
for template_name in node.template.value:
if isinstance(template_name, string_types):
yield template_name
# something else we don't care about, we could warn here
else:
yield None
|
blag/django-world-languages | refs/heads/master | world_languages/utils.py | 1 | from urllib.request import urlretrieve
from tqdm import tqdm
def urlopen_with_progress(url):
def my_hook(t):
"""
        Wraps a tqdm instance. Don't forget to close() or __exit__() the tqdm
        instance once you're done (easiest using a context manager, e.g. the
        `with` syntax).
Example
-------
>>> with tqdm(...) as t:
... reporthook = my_hook(t)
        ...     urlretrieve(..., reporthook=reporthook)
"""
last_b = [0]
def inner(b=1, bsize=1, tsize=None):
"""
b : int, optional Number of blocks just transferred [default: 1]
bsize : int, optional Size of each block (in tqdm units) [default: 1]
            tsize : int, optional Total size (in tqdm units). If None
                [default], the total remains unchanged.
"""
if tsize is not None:
t.total = tsize
t.update((b - last_b[0]) * bsize)
last_b[0] = b
return inner
with tqdm(unit='B', unit_scale=True, miniters=1,
desc="Downloading languages file...") as t:
filename, _ = urlretrieve(url, reporthook=my_hook(t))
with open(filename, 'r') as f:
return f.read()
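# Illustrative usage (a sketch; the URL is hypothetical):
#
#   text = urlopen_with_progress('https://example.com/languages.tab')
#   print(text[:80])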
|
sleshepic/G920T_OI1_kernel | refs/heads/master | tools/perf/tests/attr.py | 3174 | #! /usr/bin/python
import os
import sys
import glob
import optparse
import tempfile
import logging
import shutil
import ConfigParser
class Fail(Exception):
def __init__(self, test, msg):
self.msg = msg
self.test = test
def getMsg(self):
return '\'%s\' - %s' % (self.test.path, self.msg)
class Unsup(Exception):
def __init__(self, test):
self.test = test
def getMsg(self):
return '\'%s\'' % self.test.path
class Event(dict):
terms = [
'cpu',
'flags',
'type',
'size',
'config',
'sample_period',
'sample_type',
'read_format',
'disabled',
'inherit',
'pinned',
'exclusive',
'exclude_user',
'exclude_kernel',
'exclude_hv',
'exclude_idle',
'mmap',
'comm',
'freq',
'inherit_stat',
'enable_on_exec',
'task',
'watermark',
'precise_ip',
'mmap_data',
'sample_id_all',
'exclude_host',
'exclude_guest',
'exclude_callchain_kernel',
'exclude_callchain_user',
'wakeup_events',
'bp_type',
'config1',
'config2',
'branch_sample_type',
'sample_regs_user',
'sample_stack_user',
]
def add(self, data):
for key, val in data:
log.debug(" %s = %s" % (key, val))
self[key] = val
def __init__(self, name, data, base):
log.debug(" Event %s" % name);
self.name = name;
self.group = ''
self.add(base)
self.add(data)
def compare_data(self, a, b):
# Allow multiple values in assignment separated by '|'
a_list = a.split('|')
b_list = b.split('|')
for a_item in a_list:
for b_item in b_list:
if (a_item == b_item):
return True
elif (a_item == '*') or (b_item == '*'):
return True
return False
def equal(self, other):
for t in Event.terms:
log.debug(" [%s] %s %s" % (t, self[t], other[t]));
if not self.has_key(t) or not other.has_key(t):
return False
if not self.compare_data(self[t], other[t]):
return False
return True
def diff(self, other):
for t in Event.terms:
if not self.has_key(t) or not other.has_key(t):
continue
if not self.compare_data(self[t], other[t]):
log.warning("expected %s=%s, got %s" % (t, self[t], other[t]))
# Test file description needs to have following sections:
# [config]
# - just single instance in file
# - needs to specify:
# 'command' - perf command name
# 'args' - special command arguments
# 'ret' - expected command return value (0 by default)
#
# [eventX:base]
# - one or multiple instances in file
# - expected values assignments
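#
# An illustrative (hypothetical) test file following this layout:
#
#   [config]
#   command = record
#   args    = kill >/dev/null 2>&1
#   ret     = 1
#
#   [event:base-record]
#   sample_period = 4000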
class Test(object):
def __init__(self, path, options):
parser = ConfigParser.SafeConfigParser()
parser.read(path)
log.warning("running '%s'" % path)
self.path = path
self.test_dir = options.test_dir
self.perf = options.perf
self.command = parser.get('config', 'command')
self.args = parser.get('config', 'args')
try:
self.ret = parser.get('config', 'ret')
except:
self.ret = 0
self.expect = {}
self.result = {}
log.debug(" loading expected events");
self.load_events(path, self.expect)
def is_event(self, name):
if name.find("event") == -1:
return False
else:
return True
def load_events(self, path, events):
parser_event = ConfigParser.SafeConfigParser()
parser_event.read(path)
        # The event record section header contains the word 'event',
        # optionally followed by ':', allowing a 'parent event' to be
        # loaded first as a base
for section in filter(self.is_event, parser_event.sections()):
parser_items = parser_event.items(section);
base_items = {}
# Read parent event if there's any
if (':' in section):
base = section[section.index(':') + 1:]
parser_base = ConfigParser.SafeConfigParser()
parser_base.read(self.test_dir + '/' + base)
base_items = parser_base.items('event')
e = Event(section, parser_items, base_items)
events[section] = e
def run_cmd(self, tempdir):
cmd = "PERF_TEST_ATTR=%s %s %s -o %s/perf.data %s" % (tempdir,
self.perf, self.command, tempdir, self.args)
ret = os.WEXITSTATUS(os.system(cmd))
log.info(" '%s' ret %d " % (cmd, ret))
if ret != int(self.ret):
raise Unsup(self)
def compare(self, expect, result):
match = {}
log.debug(" compare");
# For each expected event find all matching
# events in result. Fail if there's not any.
for exp_name, exp_event in expect.items():
exp_list = []
log.debug(" matching [%s]" % exp_name)
for res_name, res_event in result.items():
log.debug(" to [%s]" % res_name)
if (exp_event.equal(res_event)):
exp_list.append(res_name)
log.debug(" ->OK")
else:
log.debug(" ->FAIL");
log.debug(" match: [%s] matches %s" % (exp_name, str(exp_list)))
        # we did not find any matching event - fail
if (not exp_list):
exp_event.diff(res_event)
raise Fail(self, 'match failure');
match[exp_name] = exp_list
# For each defined group in the expected events
# check we match the same group in the result.
for exp_name, exp_event in expect.items():
group = exp_event.group
if (group == ''):
continue
for res_name in match[exp_name]:
res_group = result[res_name].group
if res_group not in match[group]:
raise Fail(self, 'group failure')
log.debug(" group: [%s] matches group leader %s" %
(exp_name, str(match[group])))
log.debug(" matched")
def resolve_groups(self, events):
for name, event in events.items():
group_fd = event['group_fd'];
if group_fd == '-1':
continue;
for iname, ievent in events.items():
if (ievent['fd'] == group_fd):
event.group = iname
log.debug('[%s] has group leader [%s]' % (name, iname))
break;
def run(self):
tempdir = tempfile.mkdtemp();
try:
# run the test script
self.run_cmd(tempdir);
# load events expectation for the test
log.debug(" loading result events");
for f in glob.glob(tempdir + '/event*'):
self.load_events(f, self.result);
# resolve group_fd to event names
self.resolve_groups(self.expect);
self.resolve_groups(self.result);
# do the expectation - results matching - both ways
self.compare(self.expect, self.result)
self.compare(self.result, self.expect)
finally:
# cleanup
shutil.rmtree(tempdir)
def run_tests(options):
for f in glob.glob(options.test_dir + '/' + options.test):
try:
Test(f, options).run()
except Unsup, obj:
log.warning("unsupp %s" % obj.getMsg())
def setup_log(verbose):
global log
level = logging.CRITICAL
if verbose == 1:
level = logging.WARNING
if verbose == 2:
level = logging.INFO
if verbose >= 3:
level = logging.DEBUG
log = logging.getLogger('test')
log.setLevel(level)
ch = logging.StreamHandler()
ch.setLevel(level)
formatter = logging.Formatter('%(message)s')
ch.setFormatter(formatter)
log.addHandler(ch)
USAGE = '''%s [OPTIONS]
-d dir # tests dir
-p path # perf binary
-t test # single test
-v # verbose level
''' % sys.argv[0]
def main():
parser = optparse.OptionParser(usage=USAGE)
parser.add_option("-t", "--test",
action="store", type="string", dest="test")
parser.add_option("-d", "--test-dir",
action="store", type="string", dest="test_dir")
parser.add_option("-p", "--perf",
action="store", type="string", dest="perf")
parser.add_option("-v", "--verbose",
action="count", dest="verbose")
options, args = parser.parse_args()
if args:
parser.error('FAILED wrong arguments %s' % ' '.join(args))
return -1
setup_log(options.verbose)
if not options.test_dir:
print 'FAILED no -d option specified'
sys.exit(-1)
if not options.test:
options.test = 'test*'
try:
run_tests(options)
except Fail, obj:
print "FAILED %s" % obj.getMsg();
sys.exit(-1)
sys.exit(0)
if __name__ == '__main__':
main()
|
TNT-Samuel/Coding-Projects | refs/heads/master | DNS Server/Source/Lib/unittest/runner.py | 12 | """Running tests"""
import sys
import time
import warnings
from . import result
from .signals import registerResult
__unittest = True
class _WritelnDecorator(object):
"""Used to decorate file-like objects with a handy 'writeln' method"""
def __init__(self,stream):
self.stream = stream
def __getattr__(self, attr):
if attr in ('stream', '__getstate__'):
raise AttributeError(attr)
return getattr(self.stream,attr)
def writeln(self, arg=None):
if arg:
self.write(arg)
self.write('\n') # text-mode streams translate to \r\n if needed
class TextTestResult(result.TestResult):
"""A test result class that can print formatted text results to a stream.
Used by TextTestRunner.
"""
separator1 = '=' * 70
separator2 = '-' * 70
def __init__(self, stream, descriptions, verbosity):
super(TextTestResult, self).__init__(stream, descriptions, verbosity)
self.stream = stream
self.showAll = verbosity > 1
self.dots = verbosity == 1
self.descriptions = descriptions
def getDescription(self, test):
doc_first_line = test.shortDescription()
if self.descriptions and doc_first_line:
return '\n'.join((str(test), doc_first_line))
else:
return str(test)
def startTest(self, test):
super(TextTestResult, self).startTest(test)
if self.showAll:
self.stream.write(self.getDescription(test))
self.stream.write(" ... ")
self.stream.flush()
def addSuccess(self, test):
super(TextTestResult, self).addSuccess(test)
if self.showAll:
self.stream.writeln("ok")
elif self.dots:
self.stream.write('.')
self.stream.flush()
def addError(self, test, err):
super(TextTestResult, self).addError(test, err)
if self.showAll:
self.stream.writeln("ERROR")
elif self.dots:
self.stream.write('E')
self.stream.flush()
def addFailure(self, test, err):
super(TextTestResult, self).addFailure(test, err)
if self.showAll:
self.stream.writeln("FAIL")
elif self.dots:
self.stream.write('F')
self.stream.flush()
def addSkip(self, test, reason):
super(TextTestResult, self).addSkip(test, reason)
if self.showAll:
self.stream.writeln("skipped {0!r}".format(reason))
elif self.dots:
self.stream.write("s")
self.stream.flush()
def addExpectedFailure(self, test, err):
super(TextTestResult, self).addExpectedFailure(test, err)
if self.showAll:
self.stream.writeln("expected failure")
elif self.dots:
self.stream.write("x")
self.stream.flush()
def addUnexpectedSuccess(self, test):
super(TextTestResult, self).addUnexpectedSuccess(test)
if self.showAll:
self.stream.writeln("unexpected success")
elif self.dots:
self.stream.write("u")
self.stream.flush()
def printErrors(self):
if self.dots or self.showAll:
self.stream.writeln()
self.printErrorList('ERROR', self.errors)
self.printErrorList('FAIL', self.failures)
def printErrorList(self, flavour, errors):
for test, err in errors:
self.stream.writeln(self.separator1)
self.stream.writeln("%s: %s" % (flavour,self.getDescription(test)))
self.stream.writeln(self.separator2)
self.stream.writeln("%s" % err)
class TextTestRunner(object):
"""A test runner class that displays results in textual form.
It prints out the names of tests as they are run, errors as they
occur, and a summary of the results at the end of the test run.
"""
resultclass = TextTestResult
def __init__(self, stream=None, descriptions=True, verbosity=1,
failfast=False, buffer=False, resultclass=None, warnings=None,
*, tb_locals=False):
"""Construct a TextTestRunner.
Subclasses should accept **kwargs to ensure compatibility as the
interface changes.
"""
if stream is None:
stream = sys.stderr
self.stream = _WritelnDecorator(stream)
self.descriptions = descriptions
self.verbosity = verbosity
self.failfast = failfast
self.buffer = buffer
self.tb_locals = tb_locals
self.warnings = warnings
if resultclass is not None:
self.resultclass = resultclass
def _makeResult(self):
return self.resultclass(self.stream, self.descriptions, self.verbosity)
def run(self, test):
"Run the given test case or test suite."
result = self._makeResult()
registerResult(result)
result.failfast = self.failfast
result.buffer = self.buffer
result.tb_locals = self.tb_locals
with warnings.catch_warnings():
if self.warnings:
# if self.warnings is set, use it to filter all the warnings
warnings.simplefilter(self.warnings)
# if the filter is 'default' or 'always', special-case the
# warnings from the deprecated unittest methods to show them
# no more than once per module, because they can be fairly
# noisy. The -Wd and -Wa flags can be used to bypass this
# only when self.warnings is None.
if self.warnings in ['default', 'always']:
warnings.filterwarnings('module',
category=DeprecationWarning,
message=r'Please use assert\w+ instead.')
startTime = time.time()
startTestRun = getattr(result, 'startTestRun', None)
if startTestRun is not None:
startTestRun()
try:
test(result)
finally:
stopTestRun = getattr(result, 'stopTestRun', None)
if stopTestRun is not None:
stopTestRun()
stopTime = time.time()
timeTaken = stopTime - startTime
result.printErrors()
if hasattr(result, 'separator2'):
self.stream.writeln(result.separator2)
run = result.testsRun
self.stream.writeln("Ran %d test%s in %.3fs" %
(run, run != 1 and "s" or "", timeTaken))
self.stream.writeln()
expectedFails = unexpectedSuccesses = skipped = 0
try:
results = map(len, (result.expectedFailures,
result.unexpectedSuccesses,
result.skipped))
except AttributeError:
pass
else:
expectedFails, unexpectedSuccesses, skipped = results
infos = []
if not result.wasSuccessful():
self.stream.write("FAILED")
failed, errored = len(result.failures), len(result.errors)
if failed:
infos.append("failures=%d" % failed)
if errored:
infos.append("errors=%d" % errored)
else:
self.stream.write("OK")
if skipped:
infos.append("skipped=%d" % skipped)
if expectedFails:
infos.append("expected failures=%d" % expectedFails)
if unexpectedSuccesses:
infos.append("unexpected successes=%d" % unexpectedSuccesses)
if infos:
self.stream.writeln(" (%s)" % (", ".join(infos),))
else:
self.stream.write("\n")
return result
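# Illustrative usage (a sketch; unittest.main() normally constructs the
# runner, but it can also be driven directly):
#
#   import unittest
#   suite = unittest.defaultTestLoader.discover('tests')
#   unittest.TextTestRunner(verbosity=2).run(suite)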
|
SivagnanamCiena/robotframework | refs/heads/master | src/robot/running/runner.py | 17 | # Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.errors import ExecutionFailed, DataError, PassExecution
from robot.model import SuiteVisitor
from robot.result import TestSuite, Result
from robot.utils import get_timestamp, NormalizedDict
from robot.variables import VariableScopes
from .context import EXECUTION_CONTEXTS
from .keywordrunner import KeywordRunner
from .namespace import Namespace
from .status import SuiteStatus, TestStatus
from .timeouts import TestTimeout
# TODO: Some 'extract method' love needed here. Perhaps even 'extract class'.
class Runner(SuiteVisitor):
def __init__(self, output, settings):
self.result = None
self._output = output
self._settings = settings
self._variables = VariableScopes(settings)
self._suite = None
self._suite_status = None
self._executed_tests = None
@property
def _context(self):
return EXECUTION_CONTEXTS.current
def start_suite(self, suite):
result = TestSuite(source=suite.source,
name=suite.name,
doc=suite.doc,
metadata=suite.metadata,
starttime=get_timestamp())
if not self.result:
result.set_criticality(self._settings.critical_tags,
self._settings.non_critical_tags)
self.result = Result(root_suite=result)
self.result.configure(status_rc=self._settings.status_rc,
stat_config=self._settings.statistics_config)
else:
self._suite.suites.append(result)
self._suite = result
self._suite_status = SuiteStatus(self._suite_status,
self._settings.exit_on_failure,
self._settings.exit_on_error,
self._settings.skip_teardown_on_exit)
ns = Namespace(self._variables, result, suite.resource.keywords,
suite.resource.imports)
ns.start_suite()
ns.variables.set_from_variable_table(suite.resource.variables)
EXECUTION_CONTEXTS.start_suite(ns, self._output, self._settings.dry_run)
self._context.set_suite_variables(result)
if not self._suite_status.failures:
ns.handle_imports()
ns.variables.resolve_delayed()
result.doc = self._resolve_setting(result.doc)
result.metadata = [(self._resolve_setting(n), self._resolve_setting(v))
for n, v in result.metadata.items()]
self._context.set_suite_variables(result)
self._output.start_suite(ModelCombiner(result, suite,
tests=suite.tests,
suites=suite.suites,
test_count=suite.test_count))
self._output.register_error_listener(self._suite_status.error_occurred)
self._run_setup(suite.keywords.setup, self._suite_status)
self._executed_tests = NormalizedDict(ignore='_')
def _resolve_setting(self, value):
return self._variables.replace_string(value, ignore_errors=True)
def end_suite(self, suite):
self._suite.message = self._suite_status.message
self._context.report_suite_status(self._suite.status,
self._suite.full_message)
with self._context.suite_teardown():
failure = self._run_teardown(suite.keywords.teardown, self._suite_status)
if failure:
self._suite.suite_teardown_failed(unicode(failure))
if self._suite.statistics.critical.failed:
self._suite_status.critical_failure_occurred()
self._suite.endtime = get_timestamp()
self._suite.message = self._suite_status.message
self._context.end_suite(self._suite)
self._suite = self._suite.parent
self._suite_status = self._suite_status.parent
def visit_test(self, test):
if test.name in self._executed_tests:
self._output.warn("Multiple test cases with name '%s' executed in "
"test suite '%s'." % (test.name, self._suite.longname))
self._executed_tests[test.name] = True
result = self._suite.tests.create(name=test.name,
doc=self._resolve_setting(test.doc),
tags=test.tags,
starttime=get_timestamp(),
timeout=self._get_timeout(test))
status = TestStatus(self._suite_status, result.critical)
if not status.failures and not test.name:
status.test_failed('Test case name cannot be empty.')
if not status.failures and not test.keywords.normal:
status.test_failed('Test case contains no keywords.')
try:
result.tags = self._context.variables.replace_list(result.tags)
except DataError as err:
status.test_failed('Replacing variables from test tags failed: %s'
% unicode(err))
self._context.start_test(result)
self._output.start_test(ModelCombiner(result, test))
if status.exit:
self._add_exit_combine()
result.tags.add('robot-exit')
self._run_setup(test.keywords.setup, status, result)
try:
if not status.failures:
runner = KeywordRunner(self._context, bool(test.template))
runner.run_keywords(test.keywords.normal)
else:
status.test_failed(status.message)
except PassExecution as exception:
err = exception.earlier_failures
if err:
status.test_failed(err)
else:
result.message = exception.message
except ExecutionFailed as err:
status.test_failed(err)
result.status = status.status
result.message = status.message or result.message
if status.teardown_allowed:
with self._context.test_teardown(result):
failure = self._run_teardown(test.keywords.teardown, status,
result)
if failure and result.critical:
status.critical_failure_occurred()
if not status.failures and result.timeout and result.timeout.timed_out():
status.test_failed(result.timeout.get_message())
result.message = status.message
result.status = status.status
result.endtime = get_timestamp()
self._output.end_test(ModelCombiner(result, test))
self._context.end_test(result)
def _add_exit_combine(self):
exit_combine = ('NOT robot-exit', '')
if exit_combine not in self._settings['TagStatCombine']:
self._settings['TagStatCombine'].append(exit_combine)
def _get_timeout(self, test):
if not test.timeout:
return None
timeout = TestTimeout(test.timeout.value, test.timeout.message,
self._variables)
timeout.start()
return timeout
def _run_setup(self, setup, status, result=None):
if not status.failures:
exception = self._run_setup_or_teardown(setup)
status.setup_executed(exception)
if result and isinstance(exception, PassExecution):
result.message = exception.message
def _run_teardown(self, teardown, status, result=None):
if status.teardown_allowed:
exception = self._run_setup_or_teardown(teardown)
status.teardown_executed(exception)
failed = not isinstance(exception, PassExecution)
if result and exception:
result.message = status.message if failed else exception.message
return exception if failed else None
def _run_setup_or_teardown(self, data):
if not data:
return None
try:
name = self._variables.replace_string(data.name)
except DataError as err:
return err
if name.upper() in ('', 'NONE'):
return None
runner = KeywordRunner(self._context)
try:
runner.run_keyword(data, name=name)
except ExecutionFailed as err:
return err
class ModelCombiner(object):
def __init__(self, *models, **priority):
self.models = models
self.priority = priority
def __getattr__(self, name):
if name in self.priority:
return self.priority[name]
for model in self.models:
if hasattr(model, name):
return getattr(model, name)
raise AttributeError(name)
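# Illustrative behaviour of ModelCombiner (a sketch; names are hypothetical):
#
#   combined = ModelCombiner(result, data, tags=['smoke'])
#   combined.tags    # -> ['smoke'] (keyword overrides always win)
#   combined.name    # -> taken from the first model that has a 'name'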
|
vperron/sentry | refs/heads/master | src/sentry/search/models.py | 36 | """
sentry.search.models
~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
|
jimi-c/ansible | refs/heads/devel | lib/ansible/plugins/lookup/first_found.py | 11 | # (c) 2013, seth vidal <[email protected]> red hat, inc
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: first_found
author: Seth Vidal <[email protected]>
version_added: historical
short_description: return first file found from list
description:
- this lookup checks a list of files and paths and returns the full path to the first combination found.
        - As with all lookups, when fed relative paths it will try to use the current task's location first and go up the chain
          to the containing role/play/include/etc's location.
- The list of files has precedence over the paths searched.
          i.e., if a task in a role finds 'file1' in the play's relative path, that file is used; 'file2' in the role's relative path is not.
options:
_terms:
description: list of file names
required: True
paths:
description: list of paths in which to look for the files
"""
EXAMPLES = """
- name: show first existing file
debug: msg={{lookup('first_found', findme)}}
vars:
findme:
- "/path/to/foo.txt"
- "bar.txt" # will be looked in files/ dir relative to role and/or play
- "/path/to/biz.txt"
- name: |
copy first existing file found to /some/file,
looking in relative directories from where the task is defined and
including any play objects that contain it
copy: src={{lookup('first_found', findme)}} dest=/some/file
vars:
findme:
- foo
- "{{inventory_hostname}}"
- bar
- name: same copy but specific paths
copy: src={{lookup('first_found', params)}} dest=/some/file
vars:
params:
files:
- foo
- "{{inventory_hostname}}"
- bar
paths:
- /tmp/production
- /tmp/staging
- name: INTERFACES | Create Ansible header for /etc/network/interfaces
template:
src: "{{ lookup('first_found', findme)}}"
dest: "/etc/foo.conf"
vars:
findme:
- "{{ ansible_virtualization_type }}_foo.conf"
- "default_foo.conf"
- name: read vars from first file found, use 'vars/' relative subdir
include_vars: "{{lookup('first_found', params)}}"
vars:
params:
files:
- '{{ansible_os_distribution}}.yml'
- '{{ansible_os_family}}.yml'
- default.yml
paths:
- 'vars'
"""
RETURN = """
_raw:
description:
- path to file found
"""
import os
from jinja2.exceptions import UndefinedError
from ansible.errors import AnsibleFileNotFound, AnsibleLookupError, AnsibleUndefinedVariable
from ansible.module_utils.six import string_types
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.plugins.lookup import LookupBase
class LookupModule(LookupBase):
def run(self, terms, variables, **kwargs):
anydict = False
skip = False
for term in terms:
if isinstance(term, dict):
anydict = True
total_search = []
if anydict:
for term in terms:
if isinstance(term, dict):
files = term.get('files', [])
paths = term.get('paths', [])
skip = boolean(term.get('skip', False), strict=False)
filelist = files
if isinstance(files, string_types):
files = files.replace(',', ' ')
files = files.replace(';', ' ')
filelist = files.split(' ')
pathlist = paths
if paths:
if isinstance(paths, string_types):
paths = paths.replace(',', ' ')
paths = paths.replace(':', ' ')
paths = paths.replace(';', ' ')
pathlist = paths.split(' ')
if not pathlist:
total_search = filelist
else:
for path in pathlist:
for fn in filelist:
f = os.path.join(path, fn)
total_search.append(f)
else:
total_search.append(term)
else:
total_search = self._flatten(terms)
for fn in total_search:
try:
fn = self._templar.template(fn)
except (AnsibleUndefinedVariable, UndefinedError):
continue
# get subdir if set by task executor, default to files otherwise
subdir = getattr(self, '_subdir', 'files')
path = None
path = self.find_file_in_search_path(variables, subdir, fn, ignore_missing=True)
if path is not None:
return [path]
if skip:
return []
raise AnsibleLookupError("No file was found when using with_first_found. Use the 'skip: true' option to allow this task to be skipped if no "
"files are found")
|
jayoshih/kolibri | refs/heads/master | kolibri/auth/test/test_users.py | 9 | from __future__ import absolute_import, print_function, unicode_literals
# from django.db.utils import IntegrityError
from django.test import TestCase
from ..models import DeviceOwner, Facility, FacilityUser
class UserSanityTestCase(TestCase):
"""
Sanity checks basic functionality of user models.
"""
def setUp(self):
self.facility = Facility.objects.create()
self.user = FacilityUser.objects.create(
username="mike",
full_name="Mike Gallaspy",
password="###",
facility=self.facility,
)
self.do = DeviceOwner.objects.create(
username="bar",
password="###",
)
def test_facility_user(self):
self.assertFalse(self.user.is_superuser)
def test_device_owner(self):
self.assertTrue(self.do.is_superuser)
def test_short_name(self):
self.assertEqual(self.user.get_short_name(), "Mike")
|
timj/scons | refs/heads/master | src/engine/SCons/Defaults.py | 1 | """SCons.Defaults
Builders and other things for the local site. Here's where we'll
duplicate the functionality of autoconf until we move it into the
installation procedure or use something like qmconf.
The code that reads the registry to find MSVC components was borrowed
from distutils.msvccompiler.
"""
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from __future__ import division
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import os
import errno
import shutil
import stat
import time
import sys
import SCons.Action
import SCons.Builder
import SCons.CacheDir
import SCons.Environment
import SCons.Errors
import SCons.Node.FS
import SCons.PathList
import SCons.Subst
import SCons.Tool
import SCons.Util
# A placeholder for a default Environment (for fetching source files
# from source code management systems and the like). This must be
# initialized later, after the top-level directory is set by the calling
# interface.
_default_env = None
# Lazily instantiate the default environment so the overhead of creating
# it doesn't apply when it's not needed.
def _fetch_DefaultEnvironment(*args, **kw):
"""
Returns the already-created default construction environment.
"""
global _default_env
return _default_env
def DefaultEnvironment(*args, **kw):
"""
Initial public entry point for creating the default construction
Environment.
After creating the environment, we overwrite our name
(DefaultEnvironment) with the _fetch_DefaultEnvironment() function,
which more efficiently returns the initialized default construction
environment without checking for its existence.
(This function still exists with its _default_check because someone
else (*cough* Script/__init__.py *cough*) may keep a reference
to this function. So we can't use the fully functional idiom of
having the name originally be a something that *only* creates the
construction environment and then overwrites the name.)
"""
global _default_env
if not _default_env:
import SCons.Util
_default_env = SCons.Environment.Environment(*args, **kw)
if SCons.Util.md5:
_default_env.Decider('MD5')
else:
_default_env.Decider('timestamp-match')
global DefaultEnvironment
DefaultEnvironment = _fetch_DefaultEnvironment
_default_env._CacheDir_path = None
return _default_env
# Emitters for setting the shared attribute on object files,
# and an action for checking that all of the source files
# going into a shared library are, in fact, shared.
def StaticObjectEmitter(target, source, env):
for tgt in target:
tgt.attributes.shared = None
return (target, source)
def SharedObjectEmitter(target, source, env):
for tgt in target:
tgt.attributes.shared = 1
return (target, source)
def SharedFlagChecker(source, target, env):
same = env.subst('$STATIC_AND_SHARED_OBJECTS_ARE_THE_SAME')
if same == '0' or same == '' or same == 'False':
for src in source:
try:
shared = src.attributes.shared
except AttributeError:
shared = None
if not shared:
raise SCons.Errors.UserError("Source file: %s is static and is not compatible with shared target: %s" % (src, target[0]))
SharedCheck = SCons.Action.Action(SharedFlagChecker, None)
# Some people were using these variable names before we made
# SourceFileScanner part of the public interface. Don't break their
# SConscript files until we've given them some fair warning and a
# transition period.
CScan = SCons.Tool.CScanner
DScan = SCons.Tool.DScanner
LaTeXScan = SCons.Tool.LaTeXScanner
ObjSourceScan = SCons.Tool.SourceFileScanner
ProgScan = SCons.Tool.ProgramScanner
# These aren't really tool scanners, so they don't quite belong with
# the rest of those in Tool/__init__.py, but I'm not sure where else
# they should go. Leave them here for now.
import SCons.Scanner.Dir
DirScanner = SCons.Scanner.Dir.DirScanner()
DirEntryScanner = SCons.Scanner.Dir.DirEntryScanner()
# Actions for common languages.
CAction = SCons.Action.Action("$CCCOM", "$CCCOMSTR")
ShCAction = SCons.Action.Action("$SHCCCOM", "$SHCCCOMSTR")
CXXAction = SCons.Action.Action("$CXXCOM", "$CXXCOMSTR")
ShCXXAction = SCons.Action.Action("$SHCXXCOM", "$SHCXXCOMSTR")
DAction = SCons.Action.Action("$DCOM", "$DCOMSTR")
ShDAction = SCons.Action.Action("$SHDCOM", "$SHDCOMSTR")
ASAction = SCons.Action.Action("$ASCOM", "$ASCOMSTR")
ASPPAction = SCons.Action.Action("$ASPPCOM", "$ASPPCOMSTR")
LinkAction = SCons.Action.Action("$LINKCOM", "$LINKCOMSTR")
ShLinkAction = SCons.Action.Action("$SHLINKCOM", "$SHLINKCOMSTR")
LdModuleLinkAction = SCons.Action.Action("$LDMODULECOM", "$LDMODULECOMSTR")
# Common tasks that we allow users to perform in platform-independent
# ways by creating ActionFactory instances.
ActionFactory = SCons.Action.ActionFactory
def get_paths_str(dest):
# If dest is a list, we need to manually call str() on each element
if SCons.Util.is_List(dest):
elem_strs = []
for element in dest:
elem_strs.append('"' + str(element) + '"')
return '[' + ', '.join(elem_strs) + ']'
else:
return '"' + str(dest) + '"'
permission_dic = {
'u':{
'r':stat.S_IRUSR,
'w':stat.S_IWUSR,
'x':stat.S_IXUSR
},
'g':{
'r':stat.S_IRGRP,
'w':stat.S_IWGRP,
'x':stat.S_IXGRP
},
'o':{
'r':stat.S_IROTH,
'w':stat.S_IWOTH,
'x':stat.S_IXOTH
}
}
def chmod_func(dest, mode):
import SCons.Util
from string import digits
SCons.Node.FS.invalidate_node_memos(dest)
if not SCons.Util.is_List(dest):
dest = [dest]
    if SCons.Util.is_String(mode) and all(i in digits for i in mode):
mode = int(mode, 8)
if not SCons.Util.is_String(mode):
for element in dest:
os.chmod(str(element), mode)
else:
mode = str(mode)
for operation in mode.split(","):
if "=" in operation:
operator = "="
elif "+" in operation:
operator = "+"
elif "-" in operation:
operator = "-"
else:
raise SyntaxError("Could not find +, - or =")
operation_list = operation.split(operator)
            if len(operation_list) != 2:
raise SyntaxError("More than one operator found")
user = operation_list[0].strip().replace("a", "ugo")
permission = operation_list[1].strip()
new_perm = 0
for u in user:
for p in permission:
try:
new_perm = new_perm | permission_dic[u][p]
except KeyError:
raise SyntaxError("Unrecognized user or permission format")
for element in dest:
curr_perm = os.stat(str(element)).st_mode
if operator == "=":
os.chmod(str(element), new_perm)
elif operator == "+":
os.chmod(str(element), curr_perm | new_perm)
elif operator == "-":
os.chmod(str(element), curr_perm & ~new_perm)
def chmod_strfunc(dest, mode):
import SCons.Util
if not SCons.Util.is_String(mode):
return 'Chmod(%s, 0%o)' % (get_paths_str(dest), mode)
else:
return 'Chmod(%s, "%s")' % (get_paths_str(dest), str(mode))
Chmod = ActionFactory(chmod_func, chmod_strfunc)
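# Illustrative uses of the mode parser above (a sketch; Execute is part of
# the SCons script-level API):
#
#   Execute(Chmod('script.sh', 0o755))         # numeric (octal) mode
#   Execute(Chmod('script.sh', 'u+x,go-w'))    # symbolic mode string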
def copy_func(dest, src, symlinks=True):
"""
    If `symlinks` is true, then a symbolic link will be
shallow copied and recreated as a symbolic link; otherwise, copying
a symbolic link will be equivalent to copying the symbolic link's
final target regardless of symbolic link depth.
"""
dest = str(dest)
src = str(src)
SCons.Node.FS.invalidate_node_memos(dest)
if SCons.Util.is_List(src) and os.path.isdir(dest):
for file in src:
shutil.copy2(file, dest)
return 0
elif os.path.islink(src):
if symlinks:
return os.symlink(os.readlink(src), dest)
else:
return copy_func(dest, os.path.realpath(src))
elif os.path.isfile(src):
shutil.copy2(src, dest)
return 0
else:
return shutil.copytree(src, dest, symlinks)
Copy = ActionFactory(
copy_func,
lambda dest, src, symlinks=True: 'Copy("%s", "%s")' % (dest, src)
)
def delete_func(dest, must_exist=0):
SCons.Node.FS.invalidate_node_memos(dest)
if not SCons.Util.is_List(dest):
dest = [dest]
for entry in dest:
entry = str(entry)
# os.path.exists returns False with broken links that exist
entry_exists = os.path.exists(entry) or os.path.islink(entry)
if not entry_exists and not must_exist:
continue
# os.path.isdir returns True when entry is a link to a dir
if os.path.isdir(entry) and not os.path.islink(entry):
shutil.rmtree(entry, 1)
continue
os.unlink(entry)
def delete_strfunc(dest, must_exist=0):
return 'Delete(%s)' % get_paths_str(dest)
Delete = ActionFactory(delete_func, delete_strfunc)
def mkdir_func(dest):
SCons.Node.FS.invalidate_node_memos(dest)
if not SCons.Util.is_List(dest):
dest = [dest]
for entry in dest:
try:
os.makedirs(str(entry))
except os.error as e:
p = str(entry)
if (e.args[0] == errno.EEXIST or
(sys.platform=='win32' and e.args[0]==183)) \
and os.path.isdir(str(entry)):
pass # not an error if already exists
else:
raise
Mkdir = ActionFactory(mkdir_func,
lambda dir: 'Mkdir(%s)' % get_paths_str(dir))
def move_func(dest, src):
SCons.Node.FS.invalidate_node_memos(dest)
SCons.Node.FS.invalidate_node_memos(src)
shutil.move(src, dest)
Move = ActionFactory(move_func,
lambda dest, src: 'Move("%s", "%s")' % (dest, src),
convert=str)
def touch_func(dest):
SCons.Node.FS.invalidate_node_memos(dest)
if not SCons.Util.is_List(dest):
dest = [dest]
for file in dest:
file = str(file)
mtime = int(time.time())
if os.path.exists(file):
atime = os.path.getatime(file)
else:
open(file, 'w')
atime = mtime
os.utime(file, (atime, mtime))
Touch = ActionFactory(touch_func,
lambda file: 'Touch(%s)' % get_paths_str(file))
# Internal utility functions
def _concat(prefix, list, suffix, env, f=lambda x: x, target=None, source=None):
"""
Creates a new list from 'list' by first interpolating each element
in the list using the 'env' dictionary and then calling f on the
list, and finally calling _concat_ixes to concatenate 'prefix' and
'suffix' onto each element of the list.
"""
if not list:
return list
l = f(SCons.PathList.PathList(list).subst_path(env, target, source))
if l is not None:
list = l
return _concat_ixes(prefix, list, suffix, env)
def _concat_ixes(prefix, list, suffix, env):
"""
Creates a new list from 'list' by concatenating the 'prefix' and
'suffix' arguments onto each element of the list. A trailing space
on 'prefix' or leading space on 'suffix' will cause them to be put
into separate list elements rather than being concatenated.
"""
result = []
# ensure that prefix and suffix are strings
prefix = str(env.subst(prefix, SCons.Subst.SUBST_RAW))
suffix = str(env.subst(suffix, SCons.Subst.SUBST_RAW))
for x in list:
if isinstance(x, SCons.Node.FS.File):
result.append(x)
continue
x = str(x)
if x:
if prefix:
if prefix[-1] == ' ':
result.append(prefix[:-1])
elif x[:len(prefix)] != prefix:
x = prefix + x
result.append(x)
if suffix:
if suffix[0] == ' ':
result.append(suffix[1:])
elif x[-len(suffix):] != suffix:
result[-1] = result[-1]+suffix
return result
def _stripixes(prefix, itms, suffix, stripprefixes, stripsuffixes, env, c=None):
"""
This is a wrapper around _concat()/_concat_ixes() that checks for
the existence of prefixes or suffixes on list items and strips them
where it finds them. This is used by tools (like the GNU linker)
that need to turn something like 'libfoo.a' into '-lfoo'.
"""
if not itms:
return itms
if not callable(c):
env_c = env['_concat']
if env_c != _concat and callable(env_c):
# There's a custom _concat() method in the construction
# environment, and we've allowed people to set that in
# the past (see test/custom-concat.py), so preserve the
# backwards compatibility.
c = env_c
else:
c = _concat_ixes
stripprefixes = list(map(env.subst, SCons.Util.flatten(stripprefixes)))
stripsuffixes = list(map(env.subst, SCons.Util.flatten(stripsuffixes)))
stripped = []
for l in SCons.PathList.PathList(itms).subst_path(env, None, None):
if isinstance(l, SCons.Node.FS.File):
stripped.append(l)
continue
if not SCons.Util.is_String(l):
l = str(l)
for stripprefix in stripprefixes:
lsp = len(stripprefix)
if l[:lsp] == stripprefix:
l = l[lsp:]
# Do not strip more than one prefix
break
for stripsuffix in stripsuffixes:
lss = len(stripsuffix)
if l[-lss:] == stripsuffix:
l = l[:-lss]
# Do not strip more than one suffix
break
stripped.append(l)
return c(prefix, stripped, suffix, env)
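# Illustrative effect of _stripixes (a sketch; assumes `env` is a real
# construction environment whose affixes substitute to themselves):
#
#   _stripixes('-l', ['libfoo.a', 'bar'], '', ['lib'], ['.a'], env)
#   # -> ['-lfoo', '-lbar']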
def processDefines(defs):
"""process defines, resolving strings, lists, dictionaries, into a list of
strings
"""
if SCons.Util.is_List(defs):
l = []
for d in defs:
if d is None:
continue
elif SCons.Util.is_List(d) or isinstance(d, tuple):
if len(d) >= 2:
l.append(str(d[0]) + '=' + str(d[1]))
else:
l.append(str(d[0]))
elif SCons.Util.is_Dict(d):
for macro,value in d.items():
if value is not None:
l.append(str(macro) + '=' + str(value))
else:
l.append(str(macro))
elif SCons.Util.is_String(d):
l.append(str(d))
else:
raise SCons.Errors.UserError("DEFINE %s is not a list, dict, string or None."%repr(d))
elif SCons.Util.is_Dict(defs):
# The items in a dictionary are stored in random order, but
# if the order of the command-line options changes from
# invocation to invocation, then the signature of the command
# line will change and we'll get random unnecessary rebuilds.
# Consequently, we have to sort the keys to ensure a
# consistent order...
l = []
for k,v in sorted(defs.items()):
if v is None:
l.append(str(k))
else:
l.append(str(k) + '=' + str(v))
else:
l = [str(defs)]
return l
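# Illustrative mappings for processDefines (a sketch):
#
#   processDefines(['NDEBUG', ('VERSION', 2)])      # -> ['NDEBUG', 'VERSION=2']
#   processDefines({'VERSION': 2, 'NDEBUG': None})  # -> ['NDEBUG', 'VERSION=2']
#                                                   #    (dict keys are sorted)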
def _defines(prefix, defs, suffix, env, c=_concat_ixes):
"""A wrapper around _concat_ixes that turns a list or string
into a list of C preprocessor command-line definitions.
"""
return c(prefix, env.subst_path(processDefines(defs)), suffix, env)
class NullCmdGenerator(object):
"""This is a callable class that can be used in place of other
command generators if you don't want them to do anything.
The __call__ method for this class simply returns the thing
you instantiated it with.
Example usage:
env["DO_NOTHING"] = NullCmdGenerator
env["LINKCOM"] = "${DO_NOTHING('$LINK $SOURCES $TARGET')}"
"""
def __init__(self, cmd):
self.cmd = cmd
def __call__(self, target, source, env, for_signature=None):
return self.cmd
class Variable_Method_Caller(object):
"""A class for finding a construction variable on the stack and
calling one of its methods.
We use this to support "construction variables" in our string
eval()s that actually stand in for methods--specifically, use
of "RDirs" in call to _concat that should actually execute the
"TARGET.RDirs" method. (We used to support this by creating a little
"build dictionary" that mapped RDirs to the method, but this got in
the way of Memoizing construction environments, because we had to
create new environment objects to hold the variables.)
"""
def __init__(self, variable, method):
self.variable = variable
self.method = method
def __call__(self, *args, **kw):
try: 1//0
except ZeroDivisionError:
# Don't start iterating with the current stack-frame to
# prevent creating reference cycles (f_back is safe).
frame = sys.exc_info()[2].tb_frame.f_back
variable = self.variable
while frame:
if variable in frame.f_locals:
v = frame.f_locals[variable]
if v:
method = getattr(v, self.method)
return method(*args, **kw)
frame = frame.f_back
return None
# if $version_var is not empty, returns env[flags_var], otherwise returns None
def __libversionflags(env, version_var, flags_var):
try:
if env.subst('$'+version_var):
return env[flags_var]
except KeyError:
pass
return None
ConstructionEnvironment = {
'BUILDERS' : {},
'SCANNERS' : [ SCons.Tool.SourceFileScanner ],
'CONFIGUREDIR' : '#/.sconf_temp',
'CONFIGURELOG' : '#/config.log',
'CPPSUFFIXES' : SCons.Tool.CSuffixes,
'DSUFFIXES' : SCons.Tool.DSuffixes,
'ENV' : {},
'IDLSUFFIXES' : SCons.Tool.IDLSuffixes,
# 'LATEXSUFFIXES' : SCons.Tool.LaTeXSuffixes, # moved to the TeX tools generate functions
'_concat' : _concat,
'_defines' : _defines,
'_stripixes' : _stripixes,
'_LIBFLAGS' : '${_concat(LIBLINKPREFIX, LIBS, LIBLINKSUFFIX, __env__)}',
'_LIBDIRFLAGS' : '$( ${_concat(LIBDIRPREFIX, LIBPATH, LIBDIRSUFFIX, __env__, RDirs, TARGET, SOURCE)} $)',
'_CPPINCFLAGS' : '$( ${_concat(INCPREFIX, CPPPATH, INCSUFFIX, __env__, RDirs, TARGET, SOURCE)} $)',
'_CPPDEFFLAGS' : '${_defines(CPPDEFPREFIX, CPPDEFINES, CPPDEFSUFFIX, __env__)}',
'__libversionflags' : __libversionflags,
'__SHLIBVERSIONFLAGS' : '${__libversionflags(__env__,"SHLIBVERSION","_SHLIBVERSIONFLAGS")}',
'__LDMODULEVERSIONFLAGS' : '${__libversionflags(__env__,"LDMODULEVERSION","_LDMODULEVERSIONFLAGS")}',
'__DSHLIBVERSIONFLAGS' : '${__libversionflags(__env__,"DSHLIBVERSION","_DSHLIBVERSIONFLAGS")}',
'TEMPFILE' : NullCmdGenerator,
'Dir' : Variable_Method_Caller('TARGET', 'Dir'),
'Dirs' : Variable_Method_Caller('TARGET', 'Dirs'),
'File' : Variable_Method_Caller('TARGET', 'File'),
'RDirs' : Variable_Method_Caller('TARGET', 'RDirs'),
}
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
s20121035/rk3288_android5.1_repo | refs/heads/master | external/chromium_org/ppapi/native_client/tools/browser_tester/browser_tester.py | 69 | #!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import glob
import optparse
import os.path
import socket
import sys
import thread
import time
import urllib
# Allow the import of third party modules
script_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.join(script_dir, '../../../../third_party/'))
sys.path.insert(0, os.path.join(script_dir, '../../../../tools/valgrind/'))
sys.path.insert(0, os.path.join(script_dir, '../../../../testing/'))
import browsertester.browserlauncher
import browsertester.rpclistener
import browsertester.server
import memcheck_analyze
import tsan_analyze
import test_env
def BuildArgParser():
usage = 'usage: %prog [options]'
parser = optparse.OptionParser(usage)
parser.add_option('-p', '--port', dest='port', action='store', type='int',
default='0', help='The TCP port the server will bind to. '
'The default is to pick an unused port number.')
parser.add_option('--browser_path', dest='browser_path', action='store',
type='string', default=None,
help='Use the browser located here.')
parser.add_option('--map_file', dest='map_files', action='append',
type='string', nargs=2, default=[],
metavar='DEST SRC',
help='Add file SRC to be served from the HTTP server, '
'to be made visible under the path DEST.')
parser.add_option('--serving_dir', dest='serving_dirs', action='append',
type='string', default=[],
metavar='DIRNAME',
help='Add directory DIRNAME to be served from the HTTP '
'server to be made visible under the root.')
parser.add_option('--output_dir', dest='output_dir', action='store',
type='string', default=None,
metavar='DIRNAME',
help='Set directory DIRNAME to be the output directory '
'when POSTing data to the server. NOTE: if this flag is '
'not set, POSTs will fail.')
parser.add_option('--test_arg', dest='test_args', action='append',
type='string', nargs=2, default=[],
metavar='KEY VALUE',
help='Parameterize the test with a key/value pair.')
parser.add_option('--redirect_url', dest='map_redirects', action='append',
type='string', nargs=2, default=[],
metavar='DEST SRC',
help='Add a redirect to the HTTP server, '
'requests for SRC will result in a redirect (302) to DEST.')
parser.add_option('-f', '--file', dest='files', action='append',
type='string', default=[],
metavar='FILENAME',
help='Add a file to serve from the HTTP server, to be '
'made visible in the root directory. '
'"--file path/to/foo.html" is equivalent to '
'"--map_file foo.html path/to/foo.html"')
parser.add_option('--mime_type', dest='mime_types', action='append',
type='string', nargs=2, default=[], metavar='DEST SRC',
help='Map file extension SRC to MIME type DEST when '
'serving it from the HTTP server.')
parser.add_option('-u', '--url', dest='url', action='store',
type='string', default=None,
help='The webpage to load.')
parser.add_option('--ppapi_plugin', dest='ppapi_plugin', action='store',
type='string', default=None,
help='Use the browser plugin located here.')
parser.add_option('--ppapi_plugin_mimetype', dest='ppapi_plugin_mimetype',
action='store', type='string', default='application/x-nacl',
help='Associate this mimetype with the browser plugin. '
'Unused if --ppapi_plugin is not specified.')
parser.add_option('--sel_ldr', dest='sel_ldr', action='store',
type='string', default=None,
help='Use the sel_ldr located here.')
parser.add_option('--sel_ldr_bootstrap', dest='sel_ldr_bootstrap',
action='store', type='string', default=None,
help='Use the bootstrap loader located here.')
parser.add_option('--irt_library', dest='irt_library', action='store',
type='string', default=None,
help='Use the integrated runtime (IRT) library '
'located here.')
parser.add_option('--interactive', dest='interactive', action='store_true',
default=False, help='Do not quit after testing is done. '
'Handy for iterative development. Disables timeout.')
parser.add_option('--debug', dest='debug', action='store_true', default=False,
help='Request debugging output from browser.')
parser.add_option('--timeout', dest='timeout', action='store', type='float',
default=5.0,
help='The maximum amount of time to wait, in seconds, for '
'the browser to make a request. The timer resets with each '
'request.')
parser.add_option('--hard_timeout', dest='hard_timeout', action='store',
type='float', default=None,
help='The maximum amount of time to wait, in seconds, for '
'the entire test. This will kill runaway tests. ')
parser.add_option('--allow_404', dest='allow_404', action='store_true',
default=False,
help='Allow 404s to occur without failing the test.')
parser.add_option('-b', '--bandwidth', dest='bandwidth', action='store',
type='float', default='0.0',
help='The amount of bandwidth (megabits / second) to '
'simulate between the client and the server. This is used for '
'replies with file payloads. All other responses are '
'assumed to be short. Bandwidth values <= 0.0 are assumed '
'to mean infinite bandwidth.')
parser.add_option('--extension', dest='browser_extensions', action='append',
type='string', default=[],
help='Load the browser extensions located at the list of '
'paths. Note: this currently only works with the Chrome '
'browser.')
parser.add_option('--tool', dest='tool', action='store',
type='string', default=None,
help='Run tests under a tool.')
parser.add_option('--browser_flag', dest='browser_flags', action='append',
type='string', default=[],
help='Additional flags for the chrome command.')
parser.add_option('--enable_ppapi_dev', dest='enable_ppapi_dev',
action='store', type='int', default=1,
help='Enable/disable PPAPI Dev interfaces while testing.')
parser.add_option('--nacl_exe_stdin', dest='nacl_exe_stdin',
type='string', default=None,
help='Redirect standard input of NaCl executable.')
parser.add_option('--nacl_exe_stdout', dest='nacl_exe_stdout',
type='string', default=None,
help='Redirect standard output of NaCl executable.')
parser.add_option('--nacl_exe_stderr', dest='nacl_exe_stderr',
type='string', default=None,
help='Redirect standard error of NaCl executable.')
parser.add_option('--expect_browser_process_crash',
dest='expect_browser_process_crash',
action='store_true',
help='Do not signal a failure if the browser process '
'crashes')
parser.add_option('--enable_crash_reporter', dest='enable_crash_reporter',
action='store_true', default=False,
help='Force crash reporting on.')
parser.add_option('--enable_sockets', dest='enable_sockets',
action='store_true', default=False,
help='Pass --allow-nacl-socket-api=<host> to Chrome, where '
'<host> is the name of the browser tester\'s web server.')
return parser
def ProcessToolLogs(options, logs_dir):
if options.tool == 'memcheck':
analyzer = memcheck_analyze.MemcheckAnalyzer('', use_gdb=True)
logs_wildcard = 'xml.*'
elif options.tool == 'tsan':
analyzer = tsan_analyze.TsanAnalyzer(use_gdb=True)
logs_wildcard = 'log.*'
files = glob.glob(os.path.join(logs_dir, logs_wildcard))
retcode = analyzer.Report(files, options.url)
return retcode
# An exception that indicates a possible flake.
class RetryTest(Exception):
pass
def DumpNetLog(netlog):
sys.stdout.write('\n')
if not os.path.isfile(netlog):
sys.stdout.write('Cannot find netlog, did Chrome actually launch?\n')
else:
sys.stdout.write('Netlog exists (%d bytes).\n' % os.path.getsize(netlog))
sys.stdout.write('Dumping it to stdout.\n\n\n')
sys.stdout.write(open(netlog).read())
sys.stdout.write('\n\n\n')
# Try to discover the real IP address of this machine. If we can't figure it
# out, fall back to localhost.
# A Windows bug makes using the loopback interface flaky in rare cases.
# http://code.google.com/p/chromium/issues/detail?id=114369
def GetHostName():
host = 'localhost'
try:
host = socket.gethostbyname(socket.gethostname())
except Exception:
pass
if host == '0.0.0.0':
host = 'localhost'
return host
def RunTestsOnce(url, options):
# Set the default here so we're assured hard_timeout will be defined.
# Tests, such as run_inbrowser_trusted_crash_in_startup_test, may not use the
# RunFromCommandLine entry point - and otherwise get stuck in an infinite
# loop when something goes wrong and the hard timeout is not set.
# http://code.google.com/p/chromium/issues/detail?id=105406
if options.hard_timeout is None:
options.hard_timeout = options.timeout * 4
options.files.append(os.path.join(script_dir, 'browserdata', 'nacltest.js'))
# Setup the environment with the setuid sandbox path.
test_env.enable_sandbox_if_required(os.environ)
# Create server
host = GetHostName()
try:
server = browsertester.server.Create(host, options.port)
except Exception:
sys.stdout.write('Could not bind %r, falling back to localhost.\n' % host)
server = browsertester.server.Create('localhost', options.port)
# If port 0 has been requested, an arbitrary port will be bound so we need to
# query it. Older versions of Python do not set server_address correctly when
# the requested port is 0, so we need to break encapsulation and query the
# socket directly.
host, port = server.socket.getsockname()
file_mapping = dict(options.map_files)
for filename in options.files:
file_mapping[os.path.basename(filename)] = filename
for server_path, real_path in file_mapping.iteritems():
if not os.path.exists(real_path):
raise AssertionError('\'%s\' does not exist.' % real_path)
mime_types = {}
for ext, mime_type in options.mime_types:
mime_types['.' + ext] = mime_type
def ShutdownCallback():
server.TestingEnded()
close_browser = options.tool is not None and not options.interactive
return close_browser
listener = browsertester.rpclistener.RPCListener(ShutdownCallback)
server.Configure(file_mapping,
dict(options.map_redirects),
mime_types,
options.allow_404,
options.bandwidth,
listener,
options.serving_dirs,
options.output_dir)
browser = browsertester.browserlauncher.ChromeLauncher(options)
full_url = 'http://%s:%d/%s' % (host, port, url)
if len(options.test_args) > 0:
full_url += '?' + urllib.urlencode(options.test_args)
browser.Run(full_url, host, port)
server.TestingBegun(0.125)
# In Python 2.5, server.handle_request may block indefinitely. Serving pages
# is done in its own thread so the main thread can time out as needed.
def Serve():
while server.test_in_progress or options.interactive:
server.handle_request()
thread.start_new_thread(Serve, ())
tool_failed = False
time_started = time.time()
def HardTimeout(total_time):
return total_time >= 0.0 and time.time() - time_started >= total_time
try:
while server.test_in_progress or options.interactive:
if not browser.IsRunning():
if options.expect_browser_process_crash:
break
listener.ServerError('Browser process ended during test '
'(return code %r)' % browser.GetReturnCode())
# If Chrome exits prematurely without making a single request to the
# web server, this is probably a Chrome crash-on-launch bug not related
# to the test at hand. Retry, unless we're in interactive mode. In
# interactive mode the user may manually close the browser, so don't
# retry (it would just be annoying.)
if not server.received_request and not options.interactive:
raise RetryTest('Chrome failed to launch.')
else:
break
elif not options.interactive and server.TimedOut(options.timeout):
js_time = server.TimeSinceJSHeartbeat()
err = 'Did not hear from the test for %.1f seconds.' % options.timeout
err += '\nHeard from Javascript %.1f seconds ago.' % js_time
if js_time > 2.0:
err += '\nThe renderer probably hung or crashed.'
else:
err += '\nThe test probably did not get a callback that it expected.'
listener.ServerError(err)
if not server.received_request:
raise RetryTest('Chrome hung before running the test.')
break
elif not options.interactive and HardTimeout(options.hard_timeout):
listener.ServerError('The test took over %.1f seconds. This is '
'probably a runaway test.' % options.hard_timeout)
break
else:
# If Python 2.5 support is dropped, stick server.handle_request() here.
time.sleep(0.125)
if options.tool:
sys.stdout.write('##################### Waiting for the tool to exit\n')
browser.WaitForProcessDeath()
sys.stdout.write('##################### Processing tool logs\n')
tool_failed = ProcessToolLogs(options, browser.tool_log_dir)
finally:
try:
if listener.ever_failed and not options.interactive:
if not server.received_request:
sys.stdout.write('\nNo URLs were served by the test runner. It is '
'unlikely this test failure has anything to do with '
'this particular test.\n')
DumpNetLog(browser.NetLogName())
except Exception:
listener.ever_failed = 1
# Try to let the browser clean itself up normally before killing it.
sys.stdout.write('##################### Terminating the browser\n')
browser.WaitForProcessDeath()
if browser.IsRunning():
sys.stdout.write('##################### TERM failed, KILLING\n')
# Always call Cleanup; it kills the process, but also removes the
# user-data-dir.
browser.Cleanup()
# We avoid calling server.server_close() here because it causes
# the HTTP server thread to exit uncleanly with an EBADF error,
# which adds noise to the logs (though it does not cause the test
# to fail). server_close() does not attempt to tell the server
# loop to shut down before closing the socket FD it is
# select()ing. Since we are about to exit, we don't really need
# to close the socket FD.
if tool_failed:
return 2
elif listener.ever_failed:
return 1
else:
return 0
# This is an entrypoint for tests that treat the browser tester as a Python
# library rather than an opaque script.
# (e.g. run_inbrowser_trusted_crash_in_startup_test)
def Run(url, options):
result = 1
attempt = 1
while True:
try:
result = RunTestsOnce(url, options)
if result:
# Currently (2013/11/15) nacl_integration is fairly flaky and there is
# not enough time to look into it. Retry if the test fails for any
# reason. Note that in general this test runner tries to only retry
# when a known flake is encountered. (See the other raise
# RetryTest(..)s in this file.) This blanket retry means that those
# other cases could be removed without changing the behavior of the test
# runner, but it is hoped that this blanket retry will eventually be
# unnecessary and subsequently removed. The more precise retries have
# been left in place to preserve the knowledge.
raise RetryTest('HACK retrying failed test.')
break
except RetryTest:
# Only retry once.
if attempt < 2:
sys.stdout.write('\n@@@STEP_WARNINGS@@@\n')
sys.stdout.write('WARNING: suspected flake, retrying test!\n\n')
attempt += 1
continue
else:
sys.stdout.write('\nWARNING: failed too many times, not retrying.\n\n')
result = 1
break
return result
def RunFromCommandLine():
parser = BuildArgParser()
options, args = parser.parse_args()
if len(args) != 0:
print args
parser.error('Invalid arguments')
# Validate the URL
url = options.url
if url is None:
parser.error('Must specify a URL')
return Run(url, options)
if __name__ == '__main__':
sys.exit(RunFromCommandLine())
|
shubhamgupta123/erpnext | refs/heads/master | erpnext/hr/doctype/leave_encashment/test_leave_encashment.py | 10 | # -*- coding: utf-8 -*-
# Copyright (c) 2018, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
from frappe.utils import today, add_months
from erpnext.hr.doctype.employee.test_employee import make_employee
from erpnext.hr.doctype.salary_structure.test_salary_structure import make_salary_structure
from erpnext.hr.doctype.leave_period.test_leave_period import create_leave_period
test_dependencies = ["Leave Type"]
class TestLeaveEncashment(unittest.TestCase):
def setUp(self):
frappe.db.sql('''delete from `tabLeave Period`''')
def test_leave_balance_value_and_amount(self):
employee = "[email protected]"
leave_type = "_Test Leave Type Encashment"
# create the leave policy
leave_policy = frappe.get_doc({
"doctype": "Leave Policy",
"leave_policy_details": [{
"leave_type": leave_type,
"annual_allocation": 10
}]
}).insert()
leave_policy.submit()
# create employee, salary structure and assignment
employee = make_employee(employee)
frappe.db.set_value("Employee", employee, "leave_policy", leave_policy.name)
salary_structure = make_salary_structure("Salary Structure for Encashment", "Monthly", employee,
other_details={"leave_encashment_amount_per_day": 50})
# create the leave period and assign the leaves
leave_period = create_leave_period(add_months(today(), -3), add_months(today(), 3))
leave_period.grant_leave_allocation(employee=employee)
leave_encashment = frappe.get_doc(dict(
doctype = 'Leave Encashment',
employee = employee,
leave_type = leave_type,
leave_period = leave_period.name,
payroll_date = today()
)).insert()
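# Given the annual allocation of 10 leaves above and the salary structure's
# leave_encashment_amount_per_day of 50, the assertions below expect 5
# encashable days and therefore an encashment amount of 5 * 50 = 250.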
self.assertEqual(leave_encashment.leave_balance, 10)
self.assertEqual(leave_encashment.encashable_days, 5)
self.assertEqual(leave_encashment.encashment_amount, 250)
leave_encashment.submit()
self.assertTrue(frappe.db.get_value("Leave Encashment", leave_encashment.name, "additional_salary"))
|
felixbuenemann/sentry | refs/heads/master | src/sentry/api/endpoints/project_tagkey_values.py | 6 | from __future__ import absolute_import
from sentry.api.base import DocSection
from sentry.api.bases.project import ProjectEndpoint
from sentry.api.exceptions import ResourceDoesNotExist
from sentry.api.serializers import serialize
from sentry.models import TagKey, TagKeyStatus, TagValue
from sentry.utils.db import is_postgres
class ProjectTagKeyValuesEndpoint(ProjectEndpoint):
doc_section = DocSection.PROJECTS
def get(self, request, project, key):
"""
List a Tag's Values
```````````````````
Return a list of values associated with this key. The `query`
parameter can be used to perform a "starts with" match on
values.
:pparam string organization_slug: the slug of the organization.
:pparam string project_slug: the slug of the project.
:pparam string key: the tag key to look up.
:auth: required
"""
if key in ('release', 'user', 'filename', 'function'):
lookup_key = 'sentry:{0}'.format(key)
else:
lookup_key = key
try:
tagkey = TagKey.objects.get(
project=project,
key=lookup_key,
status=TagKeyStatus.VISIBLE,
)
except TagKey.DoesNotExist:
raise ResourceDoesNotExist
base_queryset = TagValue.objects.filter(
project=project,
key=tagkey.key,
)
query = request.GET.get('query')
if query:
if is_postgres():
# not quite optimal, but best we can do with ORM
queryset = TagValue.objects.filter(
id__in=base_queryset.order_by('-times_seen')[:10000]
)
else:
# MySQL can't handle an `IN` with a `LIMIT` clause
queryset = base_queryset
queryset = queryset.filter(value__istartswith=query)
else:
queryset = TagValue.objects.filter(
project=project,
key=tagkey.key,
)
return self.paginate(
request=request,
queryset=queryset,
order_by='-times_seen',
on_results=lambda x: serialize(x, request.user),
)
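# Illustrative request handled by this endpoint (the path shape comes from URL
# routing that is not defined in this file):
# GET /api/0/projects/{organization_slug}/{project_slug}/tags/{key}/values/?query=abc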
|
Cressidai/robotframework-selenium2library | refs/heads/master | demo/package.py | 64 | #!/usr/bin/env python
import os, sys
from time import localtime
from zipfile import ZipFile, ZIP_DEFLATED
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
execfile(os.path.join(THIS_DIR, '..', 'src', 'Selenium2Library', 'version.py'))
FILES = {
'': ['rundemo.py'],
'login_tests': ['valid_login.txt', 'invalid_login.txt', 'resource.txt'],
'demoapp': ['server.py'],
'demoapp/html': ['index.html', 'welcome.html', 'error.html', 'demo.css']
}
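# Resulting archive layout (illustrative): main() below writes every entry
# under a top-level 'robotframework-selenium2library-<VERSION>-demo/' prefix,
# e.g. .../rundemo.py and .../login_tests/valid_login.txt.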
def main():
cwd = os.getcwd()
try:
os.chdir(THIS_DIR)
name = 'robotframework-selenium2library-%s-demo' % VERSION
zipname = '%s.zip' % name
if os.path.exists(zipname):
os.remove(zipname)
zipfile = ZipFile(zipname, 'w', ZIP_DEFLATED)
for dirname in FILES:
for filename in FILES[dirname]:
path = os.path.join('.', dirname.replace('/', os.sep), filename)
print 'Adding: ', os.path.normpath(path)
zipfile.write(path, os.path.join(name, path))
zipfile.close()
target_path = os.path.join('..', 'dist', zipname)
if os.path.exists(target_path):
os.remove(target_path)
os.rename(zipname, target_path)
print 'Created: ', os.path.abspath(target_path)
finally:
os.chdir(cwd)
if __name__ == '__main__':
main()
|
cmcerove/pyvxl | refs/heads/develop | pyvxl/tests/conftest.py | 1 | #!/usr/bin/env python
"""Configuration file for tests in this folder.
pytest API reference: https://docs.pytest.org/en/latest/reference.html
hooks: https://docs.pytest.org/en/latest/reference.html#hooks
"""
import logging
from os import path, remove
from time import sleep
from glob import glob
def pytest_sessionstart(session):
"""Called before any tests are executed."""
# Remove old log files so tests can't reference them accidentally
tests_dir = path.dirname(path.realpath(__file__))
for log_file in glob(path.join(tests_dir, '*.asc')):
for tries in range(5):
try:
remove(log_file)
except PermissionError:
sleep(1)
else:
break
def pytest_sessionfinish(session, exitstatus):
"""Called after all tests have finished executing."""
# Remove log files if all tests pass
if not exitstatus:
tests_dir = path.dirname(path.realpath(__file__))
for log_file in glob(path.join(tests_dir, '*.asc')):
for tries in range(5):
try:
remove(log_file)
except PermissionError:
sleep(1)
else:
break
else:
logging.error('Failed deleting logs after 5s')
# with open('session_finish', 'w') as f:
# f.write('{} - {}'.format(time(), dir(session), exitstatus))
'''
pytest_collection_modifyitems(session, config, items):
"""Called after collection has been performed.
May filter or re-order the items in-place.
Parameters:
session (_pytest.main.Session) - the pytest session object
config (_pytest.config.Config) - pytest config object
items (List[_pytest.nodes.Item]) - list of item objects
"""
'''
|
Lochlan/phantomjs | refs/heads/master | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/comments.py | 148 | # Copyright (c) 2009 Google Inc. All rights reserved.
# Copyright (c) 2009 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# A tool for automating interactions with Bugzilla: posting patches,
# committing patches, etc.
from webkitpy.common.config import urls
def bug_comment_from_svn_revision(svn_revision):
return "Committed r%s: <%s>" % (svn_revision, urls.view_revision_url(svn_revision))
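# Illustrative example: bug_comment_from_svn_revision(12345) returns
# 'Committed r12345: <...>', with the URL supplied by urls.view_revision_url.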
def bug_comment_from_commit_text(scm, commit_text):
svn_revision = scm.svn_revision_from_commit_text(commit_text)
return bug_comment_from_svn_revision(svn_revision)
|
baylee/django | refs/heads/master | django/contrib/gis/db/backends/postgis/features.py | 345 | from django.contrib.gis.db.backends.base.features import BaseSpatialFeatures
from django.db.backends.postgresql.features import \
DatabaseFeatures as Psycopg2DatabaseFeatures
class DatabaseFeatures(BaseSpatialFeatures, Psycopg2DatabaseFeatures):
supports_3d_storage = True
supports_3d_functions = True
supports_left_right_lookups = True
supports_raster = True
|
sveetch/cmsplugin-feedparser | refs/heads/master | cmsplugin_feedparser/south_migrations/0001_initial.py | 1 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'FeedparserPluginModel'
db.create_table(u'cmsplugin_feedparser_feedparserpluginmodel', (
(u'cmsplugin_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['cms.CMSPlugin'], unique=True, primary_key=True)),
('url', self.gf('django.db.models.fields.CharField')(max_length=255)),
('renderer', self.gf('django.db.models.fields.CharField')(default='basic-xml', max_length=100)),
('template', self.gf('django.db.models.fields.CharField')(max_length=100)),
('expiration', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
))
db.send_create_signal(u'cmsplugin_feedparser', ['FeedparserPluginModel'])
def backwards(self, orm):
# Deleting model 'FeedparserPluginModel'
db.delete_table(u'cmsplugin_feedparser_feedparserpluginmodel')
models = {
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
u'cmsplugin_feedparser.feedparserpluginmodel': {
'Meta': {'object_name': 'FeedparserPluginModel', '_ormbases': ['cms.CMSPlugin']},
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'expiration': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'renderer': ('django.db.models.fields.CharField', [], {'default': "'basic-xml'", 'max_length': '100'}),
'template': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '255'})
}
}
complete_apps = ['cmsplugin_feedparser']
|
ethantang95/DIGITS | refs/heads/master | digits/dataset/tasks/analyze_db.py | 6 | # Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import os.path
import re
import sys
import digits
from digits.task import Task
from digits.utils import subclass, override
# NOTE: Increment this every time the pickled object changes.
PICKLE_VERSION = 1
@subclass
class AnalyzeDbTask(Task):
"""
Reads information from a database
"""
def __init__(self, database, purpose, **kwargs):
"""
Arguments:
database -- path to the database to analyze
purpose -- what is this database going to be used for
Keyword arguments:
force_same_shape -- if True, enforce that every entry in the database has the same shape
"""
self.force_same_shape = kwargs.pop('force_same_shape', False)
super(AnalyzeDbTask, self).__init__(**kwargs)
self.pickver_task_analyzedb = PICKLE_VERSION
self.database = database
self.purpose = purpose
self.backend = 'lmdb'
# Results
self.image_count = None
self.image_width = None
self.image_height = None
self.image_channels = None
self.analyze_db_log_file = 'analyze_db_%s.log' % '-'.join(p.lower() for p in self.purpose.split())
def __getstate__(self):
state = super(AnalyzeDbTask, self).__getstate__()
if 'analyze_db_log' in state:
del state['analyze_db_log']
return state
def __setstate__(self, state):
super(AnalyzeDbTask, self).__setstate__(state)
if not hasattr(self, 'backend') or self.backend is None:
self.backend = 'lmdb'
@override
def name(self):
return 'Analyze DB (%s)' % (self.purpose)
@override
def html_id(self):
return 'task-analyze-db-%s' % '-'.join(p.lower() for p in self.purpose.split())
@override
def offer_resources(self, resources):
key = 'analyze_db_task_pool'
if key not in resources:
return None
for resource in resources[key]:
if resource.remaining() >= 1:
return {key: [(resource.identifier, 1)]}
return None
@override
def task_arguments(self, resources, env):
args = [sys.executable, os.path.join(
os.path.dirname(os.path.abspath(digits.__file__)),
'tools', 'analyze_db.py'),
self.database,
]
if self.force_same_shape:
args.append('--force-same-shape')
else:
args.append('--only-count')
return args
@override
def before_run(self):
super(AnalyzeDbTask, self).before_run()
self.analyze_db_log = open(self.path(self.analyze_db_log_file), 'a')
@override
def process_output(self, line):
self.analyze_db_log.write('%s\n' % line)
self.analyze_db_log.flush()
timestamp, level, message = self.preprocess_output_digits(line)
if not message:
return False
# progress
match = re.match(r'Progress: (\d+)\/(\d+)', message)
if match:
self.progress = float(match.group(1)) / float(match.group(2))
self.emit_progress_update()
return True
# total count
match = re.match(r'Total entries: (\d+)', message)
if match:
self.image_count = int(match.group(1))
return True
# image dimensions
match = re.match(r'(\d+) entries found with shape ((\d+)x(\d+)x(\d+))', message)
if match:
# count = int(match.group(1))
dims = match.group(2)
self.image_width = int(match.group(3))
self.image_height = int(match.group(4))
self.image_channels = int(match.group(5))
self.logger.debug('Images are %s' % dims)
return True
if level == 'warning':
self.logger.warning('%s: %s' % (self.name(), message))
return True
if level in ['error', 'critical']:
self.logger.error('%s: %s' % (self.name(), message))
self.exception = message
return True
return True
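# Example lines this parser matches (formats inferred from the regexes above,
# not captured from actual tool output): 'Progress: 250/1000',
# 'Total entries: 1000', '1000 entries found with shape 28x28x1'.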
@override
def after_run(self):
super(AnalyzeDbTask, self).after_run()
self.analyze_db_log.close()
def image_type(self):
"""
Returns an easy-to-read version of self.image_channels
"""
if self.image_channels is None:
return None
elif self.image_channels == 1:
return 'GRAYSCALE'
elif self.image_channels == 3:
return 'COLOR'
else:
return '%s-channel' % self.image_channels
|
cuong-nguyenduy/django-learning | refs/heads/master | blog_project/mysite/blog/tests.py | 24123 | from django.test import TestCase
# Create your tests here.
|
rajadhva/servo | refs/heads/master | tests/wpt/css-tests/tools/html5lib/html5lib/tests/test_serializer.py | 451 | from __future__ import absolute_import, division, unicode_literals
import json
import unittest
from .support import get_data_files
try:
unittest.TestCase.assertEqual
except AttributeError:
unittest.TestCase.assertEqual = unittest.TestCase.assertEquals
import html5lib
from html5lib import constants
from html5lib.serializer import HTMLSerializer, serialize
from html5lib.treewalkers._base import TreeWalker
optionals_loaded = []
try:
from lxml import etree
optionals_loaded.append("lxml")
except ImportError:
pass
default_namespace = constants.namespaces["html"]
class JsonWalker(TreeWalker):
def __iter__(self):
for token in self.tree:
type = token[0]
if type == "StartTag":
if len(token) == 4:
namespace, name, attrib = token[1:4]
else:
namespace = default_namespace
name, attrib = token[1:3]
yield self.startTag(namespace, name, self._convertAttrib(attrib))
elif type == "EndTag":
if len(token) == 3:
namespace, name = token[1:3]
else:
namespace = default_namespace
name = token[1]
yield self.endTag(namespace, name)
elif type == "EmptyTag":
if len(token) == 4:
namespace, name, attrib = token[1:]
else:
namespace = default_namespace
name, attrib = token[1:]
for token in self.emptyTag(namespace, name, self._convertAttrib(attrib)):
yield token
elif type == "Comment":
yield self.comment(token[1])
elif type in ("Characters", "SpaceCharacters"):
for token in self.text(token[1]):
yield token
elif type == "Doctype":
if len(token) == 4:
yield self.doctype(token[1], token[2], token[3])
elif len(token) == 3:
yield self.doctype(token[1], token[2])
else:
yield self.doctype(token[1])
else:
raise ValueError("Unknown token type: " + type)
def _convertAttrib(self, attribs):
"""html5lib tree-walkers use a dict of (namespace, name): value for
attributes, but JSON cannot represent this. Convert from the format
in the serializer tests (a list of dicts with "namespace", "name",
and "value" as keys) to html5lib's tree-walker format."""
attrs = {}
for attrib in attribs:
name = (attrib["namespace"], attrib["name"])
assert name not in attrs
attrs[name] = attrib["value"]
return attrs
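# For instance (illustrative): [{"namespace": None, "name": "id",
# "value": "x"}] is converted to {(None, "id"): "x"}.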
def serialize_html(input, options):
options = dict([(str(k), v) for k, v in options.items()])
stream = JsonWalker(input)
serializer = HTMLSerializer(alphabetical_attributes=True, **options)
return serializer.render(stream, options.get("encoding", None))
def runSerializerTest(input, expected, options):
encoding = options.get("encoding", None)
if encoding:
encode = lambda x: x.encode(encoding)
expected = list(map(encode, expected))
result = serialize_html(input, options)
if len(expected) == 1:
assert expected[0] == result, "Expected:\n%s\nActual:\n%s\nOptions:\n%s" % (expected[0], result, str(options))
elif result not in expected:
assert False, "Expected: %s, Received: %s" % (expected, result)
class EncodingTestCase(unittest.TestCase):
def throwsWithLatin1(self, input):
self.assertRaises(UnicodeEncodeError, serialize_html, input, {"encoding": "iso-8859-1"})
def testDoctypeName(self):
self.throwsWithLatin1([["Doctype", "\u0101"]])
def testDoctypePublicId(self):
self.throwsWithLatin1([["Doctype", "potato", "\u0101"]])
def testDoctypeSystemId(self):
self.throwsWithLatin1([["Doctype", "potato", "potato", "\u0101"]])
def testCdataCharacters(self):
runSerializerTest([["StartTag", "http://www.w3.org/1999/xhtml", "style", {}], ["Characters", "\u0101"]],
["<style>ā"], {"encoding": "iso-8859-1"})
def testCharacters(self):
runSerializerTest([["Characters", "\u0101"]],
["ā"], {"encoding": "iso-8859-1"})
def testStartTagName(self):
self.throwsWithLatin1([["StartTag", "http://www.w3.org/1999/xhtml", "\u0101", []]])
def testEmptyTagName(self):
self.throwsWithLatin1([["EmptyTag", "http://www.w3.org/1999/xhtml", "\u0101", []]])
def testAttributeName(self):
self.throwsWithLatin1([["StartTag", "http://www.w3.org/1999/xhtml", "span", [{"namespace": None, "name": "\u0101", "value": "potato"}]]])
def testAttributeValue(self):
runSerializerTest([["StartTag", "http://www.w3.org/1999/xhtml", "span",
[{"namespace": None, "name": "potato", "value": "\u0101"}]]],
["<span potato=ā>"], {"encoding": "iso-8859-1"})
def testEndTagName(self):
self.throwsWithLatin1([["EndTag", "http://www.w3.org/1999/xhtml", "\u0101"]])
def testComment(self):
self.throwsWithLatin1([["Comment", "\u0101"]])
if "lxml" in optionals_loaded:
class LxmlTestCase(unittest.TestCase):
def setUp(self):
self.parser = etree.XMLParser(resolve_entities=False)
self.treewalker = html5lib.getTreeWalker("lxml")
self.serializer = HTMLSerializer()
def testEntityReplacement(self):
doc = """<!DOCTYPE html SYSTEM "about:legacy-compat"><html>β</html>"""
tree = etree.fromstring(doc, parser=self.parser).getroottree()
result = serialize(tree, tree="lxml", omit_optional_tags=False)
self.assertEqual("""<!DOCTYPE html SYSTEM "about:legacy-compat"><html>\u03B2</html>""", result)
def testEntityXML(self):
doc = """<!DOCTYPE html SYSTEM "about:legacy-compat"><html>></html>"""
tree = etree.fromstring(doc, parser=self.parser).getroottree()
result = serialize(tree, tree="lxml", omit_optional_tags=False)
self.assertEqual("""<!DOCTYPE html SYSTEM "about:legacy-compat"><html>></html>""", result)
def testEntityNoResolve(self):
doc = """<!DOCTYPE html SYSTEM "about:legacy-compat"><html>β</html>"""
tree = etree.fromstring(doc, parser=self.parser).getroottree()
result = serialize(tree, tree="lxml", omit_optional_tags=False,
resolve_entities=False)
self.assertEqual("""<!DOCTYPE html SYSTEM "about:legacy-compat"><html>β</html>""", result)
def test_serializer():
for filename in get_data_files('serializer', '*.test'):
with open(filename) as fp:
tests = json.load(fp)
for index, test in enumerate(tests['tests']):
yield runSerializerTest, test["input"], test["expected"], test.get("options", {})
|
duanhjlt/gyp | refs/heads/master | test/gyp-defines/gyptest-regyp.py | 268 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that when the same value is repeated for a gyp define, duplicates are
stripped from the regeneration rule.
"""
import os
import TestGyp
# Regenerating build files when a gyp file changes is currently only supported
# by the make generator.
test = TestGyp.TestGyp(formats=['make'])
os.environ['GYP_DEFINES'] = 'key=repeated_value key=value1 key=repeated_value'
test.run_gyp('defines.gyp')
test.build('defines.gyp')
# The last occurrence of a repeated set should take precedence over other
# values. See gyptest-multiple-values.py.
test.must_contain('action.txt', 'repeated_value')
# So the regeneration rule needs to use the correct order.
test.must_not_contain(
'Makefile', '"-Dkey=repeated_value" "-Dkey=value1" "-Dkey=repeated_value"')
test.must_contain('Makefile', '"-Dkey=value1" "-Dkey=repeated_value"')
# Sleep so that the changed gyp file will have a newer timestamp than the
# previously generated build files.
test.sleep()
os.utime("defines.gyp", None)
test.build('defines.gyp')
test.must_contain('action.txt', 'repeated_value')
test.pass_test()
|
Alwnikrotikz/stoqs | refs/heads/master | loaders/BEDS/loadBEDS_2013.py | 5 | #!/usr/bin/env python
__author__ = 'Mike McCann'
__copyright__ = '2013'
__license__ = 'GPL v3'
__contact__ = 'mccann at mbari.org'
__doc__ = '''
Master loader for all BEDS deployments.
The default is to load data with a stride of 1 into a database named stoqs_beds2013.
Execute with "./loadBEDS_2013.py 10 stoqs_beds2013" to load with a stride of 10.
Mike McCann
MBARI 13 May 2013
@var __date__: Date of last svn commit
@undocumented: __doc__ parser
@status: production
@license: GPL
'''
import os
import sys
import datetime
os.environ['DJANGO_SETTINGS_MODULE']='settings'
project_dir = os.path.dirname(__file__)
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../")) # settings.py is one dir up
from BEDS import BEDSLoader
bl = BEDSLoader('stoqs_beds2013', 'BEDS - 2013',
x3dTerrains= {
'http://dods.mbari.org/terrain/x3d/MontereyCanyonBeds_1m+5m_1x/MontereyCanyonBeds_1m+5m_1x.x3d': {
'position': '-2706054.97556 -4352297.32558 3798919.71875',
'orientation': '0.92863 -0.26237 -0.26231 1.59089',
'centerOfRotation': '-2700040.0076912297 -4342439.858864189 3798898.2847731174',
'VerticalExaggeration': '1',
},
##'/stoqs/static/x3d/Monterey25/Monterey25_10x-pop.x3d': {
## 'position': '-2822317.31255 -4438600.53640 3786150.85474',
## 'orientation': '0.89575 -0.31076 -0.31791 1.63772',
## 'centerOfRotation': '-2711557.9403829873 -4331414.329506527 3801353.4691465236',
## 'VerticalExaggeration': '10',
##}
}
)
# Base OPeNDAP server
bl.tdsBase = 'http://odss-test.shore.mbari.org/thredds/'
bl.dodsBase = bl.tdsBase + 'dodsC/'
# Files created by bed2nc.py from the BEDS SVN repository
bl.bed_base = bl.dodsBase + 'BEDS_2013/beds01/'
##bl.bed_files = ['BED%05d.nc' % i for i in range(1,234)]
bl.bed_files = ['BED00001.nc', 'BED00002.nc', 'BED00003.nc', 'BED00005.nc',
'BED00006.nc', 'BED00008.nc', 'BED00014.nc', 'BED00015.nc',
'BED00017.nc', 'BED00018.nc', 'BED00020.nc', 'BED00026.nc',
'BED00038.nc', 'BED00039.nc', 'BED00040.nc', 'BED00041.nc',
'BED00042.nc', 'BED00043.nc', 'BED00044.nc', 'BED00046.nc',
'BED00047.nc', 'BED00048.nc', 'BED00049.nc', 'BED00062.nc',
'BED00082.nc', 'BED00083.nc', 'BED00084.nc', 'BED00085.nc',
'BED00086.nc', 'BED00087.nc', 'BED00088.nc', 'BED00089.nc',
'BED00090.nc', 'BED00092.nc', 'BED00093.nc', 'BED00094.nc',
'BED00095.nc', 'BED00096.nc', 'BED00097.nc', 'BED00098.nc',
'BED00100.nc', 'BED00101.nc', 'BED00102.nc', 'BED00103.nc',
'BED00104.nc', 'BED00106.nc', 'BED00107.nc', 'BED00108.nc',
'BED00109.nc', 'BED00110.nc', 'BED00111.nc', 'BED00112.nc',
'BED00113.nc', 'BED00114.nc', 'BED00115.nc', 'BED00116.nc',
'BED00117.nc', 'BED00118.nc', 'BED00123.nc', 'BED00124.nc',
'BED00125.nc', 'BED00126.nc', 'BED00127.nc', 'BED00129.nc',
'BED00130.nc', 'BED00131.nc', 'BED00132.nc', 'BED00133.nc',
'BED00136.nc', 'BED00137.nc', 'BED00138.nc', 'BED00139.nc',
'BED00142.nc', 'BED00143.nc', 'BED00144.nc', 'BED00146.nc',
'BED00148.nc', 'BED00149.nc', 'BED00151.nc', 'BED00152.nc',
'BED00154.nc', 'BED00155.nc', 'BED00156.nc', 'BED00157.nc',
'BED00158.nc', 'BED00159.nc', 'BED00160.nc', 'BED00161.nc',
'BED00162.nc', 'BED00163.nc', 'BED00164.nc', 'BED00166.nc',
'BED00167.nc', 'BED00169.nc', 'BED00170.nc', 'BED00172.nc',
'BED00173.nc', 'BED00174.nc', 'BED00175.nc', 'BED00176.nc',
'BED00177.nc', 'BED00178.nc', 'BED00179.nc', 'BED00180.nc',
'BED00181.nc', 'BED00182.nc', 'BED00183.nc', 'BED00185.nc',
'BED00186.nc', 'BED00197.nc', 'BED00198.nc', 'BED00200.nc',
'BED00203.nc', 'BED00204.nc', 'BED00205.nc', 'BED00206.nc',
'BED00207.nc', 'BED00211.nc', 'BED00212.nc', 'BED00213.nc',
'BED00214.nc', 'BED00215.nc', 'BED00216.nc', 'BED00217.nc',
'BED00218.nc', 'BED00219.nc', 'BED00220.nc', 'BED00221.nc',
'BED00222.nc', 'BED00223.nc', 'BED00224.nc', 'BED00227.nc',
'BED00229.nc', 'BED00230.nc', 'BED00231.nc']
##bl.bed_parms = ['XA', 'XR', 'PRESS', 'BED_DEPTH']
bl.bed_parms = ['XA', 'YA', 'ZA', 'XR', 'YR', 'ZR', 'PRESS', 'BED_DEPTH']
# Execute the load
bl.process_command_line()
if bl.args.test:
bl.loadBEDS(stride=10)
elif bl.args.optimal_stride:
bl.loadBEDS(stride=1)
else:
bl.stride = bl.args.stride
bl.loadBEDS()
# Add any X3D Terrain information specified in the constructor to the database - must be done after a load is executed
bl.addTerrainResources()
print "All Done."
|
future-architect/eclipse-uroborosql-formatter | refs/heads/master | jp.co.future.eclipse.usqlfmt/resources/python/uroborosqlfmt/filters.py | 1 | # coding:utf-8
'''
@author: ota
'''
import math
import re
import sys
from sqlparse import sql, tokens as T, utils
from sqlparse.filters import StripWhitespaceFilter, ReindentFilter
from uroborosqlfmt import tokenutils as tu, grouping
from uroborosqlfmt.exceptions import SqlFormatterException
from uroborosqlfmt.sql import Phrase
class StripWhitespaceAndToTabFilter(StripWhitespaceFilter):
"""
Removes runs of consecutive whitespace, strips the whitespace around
punctuation, and normalizes the remaining whitespace to tab characters.
"""
def process(self, stack, stmt, depth=0):
SqlFormatterException.wrap_try_except(
super(StripWhitespaceAndToTabFilter, self).process,
stmt,
stack,
stmt,
depth
)
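# A rough illustration (assuming sqlparse's default tokenization): this filter
# turns the token stream for "SELECT  A , B" into "SELECT\tA,B", since runs of
# whitespace collapse to a single tab and the spaces around the comma are
# dropped.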
def __custom_stripws_tokenlist(self, tlist):
"""
Remove the whitespace between a comment and the line comment that follows it.
"""
last_token = None
ws_tokens = []
for token in tlist.tokens[:]:
if tu.is_line_comment(token) and last_token and tu.is_comment(last_token):
for tws in ws_tokens:
tlist.tokens.remove(tws)
last_token = token
ws_tokens = []
elif token.is_whitespace():
ws_tokens.append(token)
else:
last_token = token
ws_tokens = []
def _stripws_default(self, tlist):
last_was_ws = False
last_ws_token = None
last_was_punctuation = False
for token in tlist.tokens[:]:
if token.is_whitespace():
if last_was_ws or last_was_punctuation: # drop whitespace following whitespace or punctuation
tlist.tokens.remove(token)
continue
else:
token.value = "\t"
if tu.is_punctuation(token):
if last_ws_token:
tlist.tokens.remove(last_ws_token) # remove the whitespace preceding the punctuation
last_was_ws = token.is_whitespace()
last_ws_token = token if last_was_ws else None
last_was_punctuation = tu.is_punctuation(token)
self.__custom_stripws_tokenlist(tlist)
def _stripws_identifierlist(self, tlist):
super(StripWhitespaceAndToTabFilter, self)._stripws_identifierlist(tlist)
self.__custom_stripws_tokenlist(tlist)
def _stripws_parenthesis(self, tlist):
super(StripWhitespaceAndToTabFilter, self)._stripws_parenthesis(tlist)
self.__custom_stripws_tokenlist(tlist)
class GroupFilter(object):
"""
Grouping-related processing.
"""
def process(self, _, stmt):
grouping.re_group(stmt)
grouping.group(stmt)
class LineDescriptionLineCommentFilter(object):
"""
Determines whether a line comment describes the line it sits on or the
line that follows, and tags it accordingly.
"""
class Comment(sql.Comment):
"""
Wrapper comment class used to replace plain line comments.
"""
__slots__ = ('is_line_description')
def __init__(self, local_config):
self.local_config = local_config
self.is_line_description = False
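# For example (illustrative): in "SELECT a -- name" the trailing comment
# describes the line it sits on (is_line_description becomes True), whereas a
# line comment that starts on its own line describes the line that follows
# (is_line_description stays False).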
def process(self, _, stmt):
def custom_flaten(token):
"""
A flatten that does not descend into comments.
"""
if isinstance(token, sql.TokenList) and not tu.is_comment(token):
for tkn in token.tokens:
for item in custom_flaten(tkn):
yield item
else:
yield token
is_prev_cr = True
for token in custom_flaten(stmt):
if tu.is_plain_line_comment(token, self.local_config.comment_syntax):
# Swap in the wrapper comment class
parent = token.parent
index = parent.tokens.index(token)
comment = LineDescriptionLineCommentFilter.Comment(token.tokens)
for tkn in token.tokens:
tkn.parent = comment
comment.parent = parent
parent.tokens[index] = comment
# Set the flag
comment.is_line_description = not is_prev_cr # pylint: disable=attribute-defined-outside-init
elif token.is_whitespace():
if is_inc_cr(token):
is_prev_cr = True
else:
is_prev_cr = False
class AdjustGroupFilter(object):
"""
Adjusts grouping.
"""
def __init__(self, local_config):
self.comment_syntax = local_config.comment_syntax
def process(self, _, stmt):
grouping.adj_group(stmt, self.comment_syntax)
class _LineObject(object):
def _rstrip(self, target_tokens, base_token):
for tkn in target_tokens[::-1]:
if tkn.is_whitespace():
base_token.tokens.remove(tkn)
target_tokens.remove(tkn)
else:
break
def _remove_indent(self, lines, indent):
for i, line in enumerate(lines):
for cval in line[:indent]:
if cval == "\t":
lines[i] = lines[i][1:]
else:
break
def _right_tokens_between(self, base_token, separetor, line_comment):
start = base_token.token_next(separetor) if separetor else base_token.tokens[0]
if not line_comment:
return base_token.tokens_between(start, base_token.tokens[-1])
else:
return base_token.tokens_between(start, line_comment, exclude_end=True)
def _get_linecomment(self, base_token, comment_syntax):
first_line_comment = None
for tkn in base_token.tokens[::-1]:
if tu.is_line_description_line_comment(tkn, comment_syntax):
first_line_comment = tkn
continue
if tkn.is_whitespace():
continue
if tu.is_comment(tkn):
continue
return first_line_comment
return None
class _BaseIdentifierObject(_LineObject):
"""
Object used to adjust indentation inside an Identifier.
"""
def __init__(self, token, indent, local_config):
self.token = token
self.center_token = None
self.line_comment = None
self.left_tokens = []
self.right_tokens = []
self.left_lines = []
self.right_lines = []
self.width_left = 0
self.width_right = 0
self.line_comment = self._get_linecomment(token, local_config.comment_syntax)
separetor = self._get_separetor_token(token)
if separetor:
self.center_token = self._get_center_token(token)
self.left_tokens = token.tokens_between(token.tokens[0], separetor, exclude_end=True)
self.right_tokens = self._right_tokens_between(token, separetor, self.line_comment)
self._rstrip(self.left_tokens, token)
self._rstrip(self.right_tokens, token)
lefts = utils.split_unquoted_newlines("".join([str(t) for t in self.left_tokens]))
rights = utils.split_unquoted_newlines("".join([str(t) for t in self.right_tokens]))
self._remove_indent(lefts, indent)
self._remove_indent(rights, indent)
self.width_left = get_need_tab_char_width(lefts[-1])
self.width_right = get_need_tab_char_width(rights[-1])
self.left_lines = lefts
self.right_lines = rights
else:
self.left_tokens = self._right_tokens_between(token, None, self.line_comment)
self._rstrip(self.left_tokens, token)
lefts = utils.split_unquoted_newlines("".join([str(t) for t in self.left_tokens]))
self._remove_indent(lefts, indent)
self.width_left = get_need_tab_char_width(lefts[-1])
self.left_lines = lefts
def _get_center_token(self, token):
pass
def _get_separetor_token(self, token):
pass
def __str__(self, *args, **kwargs):
left = "".join(str(t) for t in self.left_tokens)
right = "".join(str(t) for t in self.right_tokens)
comment = str(self.line_comment)
return "left:" + left + "\nright:" + right + "\ncomment:" + comment
class _IdentifierObject(_BaseIdentifierObject):
def _get_separetor_token(self, token):
alias = self._get_alias(token)
if alias:
as_token = token.token_next_match(0, T.Keyword, "AS")
if as_token:
return as_token
else:
return token.token_prev(alias, skip_ws=False)
return None
def _get_center_token(self, token):
return token.token_next_match(0, T.Keyword, "AS")
def _get_alias(self, token):
tkw = token.token_next_match(0, T.Keyword, 'AS')
if tkw is not None:
return tu.token_next_enable(token, tkw)
left = tu.token_next_enable(token)
if not left:
return None
def is_space(tkn):
return tkn.is_whitespace() and tkn.value
spl = token.token_matching(token.token_index(left), [is_space])
if spl:
return tu.token_next_enable(token, spl)
if tu.is_parenthesis(left):
tkn = tu.token_next_enable(token, left)
if tkn and (tu.is_identifier(tkn) or (tkn.ttype in T.Name)):
# the "(...)ALIAS" case
space = sql.Token(T.Whitespace, "\t") # append a separating space
token.insert_after(left, space)
return tkn
return None
class _UpdIdentifierObject(_BaseIdentifierObject):
def _get_separetor_token(self, token):
comp = token.token_next_match(0, T.Comparison, "=")
if comp:
return comp
else:
second = tu.token_next_enable(token, tu.token_next_enable(token))
return token.token_prev(second, skip_ws=False)
return None
def _get_center_token(self, token):
return token.token_next_match(0, T.Comparison, "=")
class _ComparisonObject(_LineObject):
"""
Object used to adjust indentation inside a Comparison.
"""
def __init__(self, token, indent, local_config):
self.token = token
self.line_comment = None
self.left_tokens = []
self.right_tokens = []
self.operator_tokens = []
self.left_lines = []
self.right_lines = []
self.operator_string = ""
self.width_left = 0
self.width_right = 0
self.line_comment = self._get_linecomment(token, local_config.comment_syntax)
op_tokens = tu.find_comparison_operator_words(token.tokens)
if len(op_tokens) > 1:
self.operator_tokens = token.tokens_between(op_tokens[0], op_tokens[1])
for tkn in self.operator_tokens[1:-1]:
if tkn.is_whitespace():
tkn.value = " "
else:
self.operator_tokens = op_tokens
for tkn in self.operator_tokens:
if tkn.is_whitespace():
tkn.value = " "
self.operator_string = "".join(str(tkn) for tkn in self.operator_tokens)
tws = token.token_next(self.operator_tokens[-1], skip_ws=False)
if tws.is_whitespace():
# remove the whitespace adjacent to the comparison operator
token.tokens.remove(tws)
self.left_tokens = token.tokens_between(token.tokens[0], self.operator_tokens[0], exclude_end=True)
self.right_tokens = self._right_tokens_between(token, self.operator_tokens[-1], self.line_comment)
self._rstrip(self.left_tokens, token)
self._rstrip(self.right_tokens, token)
lefts = utils.split_unquoted_newlines("".join([str(tkn) for tkn in self.left_tokens]))
rights = utils.split_unquoted_newlines("".join([str(tkn) for tkn in self.right_tokens]))
self._remove_indent(lefts, indent)
self._remove_indent(rights, indent)
self.width_left = get_need_tab_char_width(lefts[-1])
self.width_right = get_need_tab_char_width(rights[-1])
self.width_operator = get_need_tab_char_width(self.operator_string)
self.left_lines = lefts
self.right_lines = rights
def __str__(self, *args, **kwargs):
left = "".join(str(t) for t in self.left_tokens)
right = "".join(str(t) for t in self.right_tokens)
comment = str(self.line_comment)
operator = str(self.operator_string)
return "left:" + left + "\noperator:" + operator + "\nright:" + right + "\ncomment:" + comment
class OperatorFilter(object):
"""
Normalizes comparison operators.
"""
def __init__(self):
self._process = SqlFormatterException.to_wrap_try_except(self._process, 0)
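# e.g. (illustrative, assuming sqlparse tokenizes these as single operators):
# "WHERE a <> b" is rewritten to "WHERE a != b", and "^=" likewise becomes "!=".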
def process(self, _, stmt):
self._process(stmt)
def _process(self, tlist): # pylint: disable=method-hidden
[self._process(sgroup) for sgroup in tlist.get_sublists()]
for token in tlist.tokens:
if tu.is_operator(token) and token.value in ("<>", "^="):
token.value = "!="
class MoveCommaFilter(object):
"""
Adjusts comma placement.
"""
def __init__(self, local_config):
self._process = SqlFormatterException.to_wrap_try_except(self._process, 0)
self.local_config = local_config
def process(self, _, stmt):
self._process(stmt, [])
def _process(self, tlist, parents): # pylint: disable=method-hidden
tps = [tlist] + parents
[self._process(sgroup, tps) for sgroup in tlist.get_sublists()]
for token in tlist.tokens[:]:
if tu.is_comma(token):
nxt = tlist.token_next(token)
if nxt:
prv = tlist.token_prev(token)
comment = hit_first(
nxt,
lambda t: tu.is_line_description_line_comment(t, self.local_config.comment_syntax)
)
if comment and not (prv \
and hit_last(
prv,
lambda t: tu.is_line_description_line_comment(t, self.local_config.comment_syntax)
)
):
self.__move_token(tlist, token, comment)
def __move_token(self, parent, token_a, token_b):
if token_b in parent.tokens:
idxa = parent.tokens.index(token_a)
idxb = parent.tokens.index(token_b)
parent.tokens[idxa] = token_b
parent.tokens[idxb] = token_a
else:
def remove_token(parent, token):
if token in parent.tokens:
parent.tokens.remove(token)
return True
else:
for tkn in parent.tokens:
if tkn.is_group() and remove_token(tkn, token):
return True
return False
remove_token(parent, token_b)
parent.insert_before(token_a, token_b)
token_b.parent = parent
class CustomReindentFilter(ReindentFilter):
"""
Indentation handling.
"""
def __init__(self, local_config):
super(CustomReindentFilter, self).__init__(1, "\t")
self.local_config = local_config
def process(self, stack, stmt):
super(CustomReindentFilter, self).process(stack, stmt)
flatten = list(tu.flatten(stmt))
if not flatten:
return
if tu.is_semicolon_punctuation(flatten[-1]):
tcr = self.cr()
flatten[-1].parent.insert_before(flatten[-1], tcr)
flatten = flatten[:-1] + [tcr] + flatten[-1:]
# Remove duplicated line breaks
# First, merge adjacent whitespace tokens
pre = flatten[0]
for token in flatten[1:]:
if token.is_whitespace():
if pre.is_whitespace():
pre.value += token.value
token.value = ""
continue
pre = token
# Strip the duplicated line breaks
pre = None
for token in flatten:
if token.is_whitespace():
text = str(token)
if not text:
continue
white_lines = utils.split_unquoted_newlines(str(token))
while len(white_lines) > 2:
del white_lines[1]
if pre:
if len(white_lines) > 1 and pre.parent and tu.is_line_comment(pre.parent):
# directly after a line comment, so drop this token's own newline
del white_lines[0]
else:
if len(white_lines) > 1:
# remove the first newline
del white_lines[0]
token.value = "\n".join(white_lines)
pre = token
def _process(self, tlist):
SqlFormatterException.wrap_try_except(super(CustomReindentFilter, self)._process, tlist)
def __within_with_section(self, token):
return tu.within_with_section(self._curr_stmt, token)
def __within_select_statement(self, token):
return tu.within_select_statement(self._curr_stmt, token)
def __within_insert_statement(self, token):
return tu.within_insert_statement(self._curr_stmt, token)
def __within_merge_statement(self, token):
return tu.within_merge_statement(self._curr_stmt, token)
def __within_update_statement(self, token):
return tu.within_update_statement(self._curr_stmt, token)
def __within_update_set_section(self, token):
return tu.within_update_set_section(self._curr_stmt, token)
def __within_insert_values_section(self, token):
return tu.within_insert_values_section(self._curr_stmt, token)
def __within_insert_into_columns_section(self, token):
return tu.within_insert_into_columns_section(self._curr_stmt, token)
def __custom_process_list(self, tlist):
for token in tlist.tokens[:]:
if tu.is_dml(token):
tlist.insert_before(token, self.nl())
tlist.insert_after(token, self.nl())
elif tu.is_from_keyword(token):
tlist.insert_before(token, self.nl()) # for DELETE(?) _split_kwds undoes one newline, so insert two
tlist.insert_before(token, self.nl())
tlist.insert_after(token, self.cr())
elif tu.is_wildcard(token) or tu.is_literal(token) or tu.is_function(token):
prev = tlist.token_prev(token, skip_ws=False)
if prev and tu.is_param_comment(prev, token, self.local_config.comment_syntax):
target = prev
else:
target = token
prev = tu.token_prev_enable(tlist, target)
if prev and hit_last(prev, tu.is_dml):
tlist.insert_before(target, self.nl_with_indent(1))
else:
tlist.insert_before(target, self.indent_space())
elif tu.is_identifier(token) or tu.is_parenthesis(token):
prev = tu.token_prev_enable(tlist, token)
def is_need_indent(tkn):
if tu.is_from_keyword(tkn):
return True
if tu.is_by_keyword(tkn):
return True
if tu.is_select_dml(tkn):
return True
if tu.is_update_dml(tkn):
return True
if tu.is_into_keyword(tkn) and (
self.__within_insert_statement(tkn) \
or self.__within_merge_statement(tkn)
):
return True
if tu.is_using_keyword(tkn) and self.__within_merge_statement(tkn):
return True
if tu.is_set_keyword(tkn) and self.__within_update_statement(tkn):
return True
return False
if prev and hit_last(prev, is_need_indent):
self.offset += 1
self.__custom_process_identifier(tlist, token)
self.offset -= 1
else:
self.__custom_process_identifier(tlist, token)
elif tu.is_distinct_keyword(token):
tlist.insert_before(token, self.nl_with_indent(1))
elif tu.is_into_keyword(token) and (
self.__within_insert_statement(token) \
or self.__within_merge_statement(token)
):
tlist.insert_before(token, self.nl())
elif tu.is_using_keyword(token) and self.__within_merge_statement(token):
tlist.insert_before(token, self.nl())
elif tu.is_keyword(token) and tu.endswith_ignore_case(token.value, "COUNT"): # To be safe, only COUNT is handled for now
# Handle identifiers that were tokenized as keywords
prev = tu.token_prev_enable(tlist, token)
def is_need_indent(tkn):
if tu.is_from_keyword(tkn):
return True
if tu.is_by_keyword(tkn):
return True
if tu.is_select_dml(tkn):
return True
if tu.is_update_dml(tkn):
return True
if tu.is_into_keyword(tkn) and (
self.__within_insert_statement(tkn) \
or self.__within_merge_statement(tkn)
):
return True
if tu.is_using_keyword(tkn) and self.__within_merge_statement(tkn):
return True
if tu.is_set_keyword(tkn) and self.__within_update_statement(tkn):
return True
return False
if prev and hit_last(prev, is_need_indent):
self.offset += 1
tlist.insert_before(token, self.nl())
self.offset -= 1
def __custom_process_identifier(self, parent, identifier):
parent.insert_before(identifier, self.nl())
# If there is only one, the newline + indent is unnecessary
# if tu.is_line_comment(identifier.tokens[-1]):
# identifier.tokens[-1].tokens[-1].value = identifier.tokens[-1].tokens[-1].value.rstrip("\t")
# else:
# parent.insert_after(identifier, self.nl_with_indent(-1))
def __custom_process_line_comment(self, comment):
tcm = comment.token_next_by_type(0, T.Comment)
text = tcm.value[2:]
text = text.strip()
tcm.value = "-- " + text + "\n"
def __custom_process_block_comment(self, comment, is_hint):
start = comment.token_matching(0, [lambda t: t.ttype in T.Comment and t.value == "/*"])
end = comment.token_matching(
comment.token_index(start) + 1,
[lambda t: t.ttype in T.Comment and t.value == "*/"]
)
tokens = comment.tokens_between(start, end)[1:-1]
if not tokens:
return
comment.insert_before(comment.tokens[0], self.nl())
text_token = tokens[0]
while len(tokens) > 1:
# If the comment is split across nodes, merge them into one
tgt = tokens[1]
text_token.value += tgt.value
comment.tokens.remove(tgt)
tokens.remove(tgt)
if is_hint:
text_token.value = text_token.value[1:] # Strip the '+' for now (restored later for hints)
text = str(text_token)
lines = utils.split_unquoted_newlines(text)
def is_doc_comment(lines):
"""
/*
* この形式のコメントかどうか?
*/
"""
end = len(lines) if lines[-1].strip() else -1
tgtlines = lines[1:end]
if not tgtlines:
return False
for line in tgtlines:
if not line.lstrip().startswith("*"):
return False
return True
def is_separator_line_comment(lines):
"""
/******************/
"""
return len(lines) == 1 and lines[0] == ("*" * len(lines[0]))
def is_lines_doc_comment(lines):
"""
/******************
この形式のコメント
****************/
"""
if len(lines) < 2:
return False
fst = lines[0].strip()
if (not fst) or (fst != ("*" * len(fst))):
return False
lst = lines[-1].strip()
if (not lst) or (lst != ("*" * len(lst))):
return False
return True
def remove_blank_lines(lines):
# Remove leading and trailing blank lines
while not lines[0].strip():
del lines[0]
while not lines[-1].strip():
del lines[-1]
def format_doc(lines):
remove_blank_lines(lines)
for i, value in enumerate(lines):
text = value.lstrip()
if not text.startswith("*"):
text = "* " + text
elif not text.startswith("* "):
text = text[0] + " " + text[1:]
lines[i] = str(self.indent_space()) + " " + text
lines += [str(self.indent_space()) + " "]
return "\n" + "\n".join(lines)
def format_hint(lines):
# For hint clauses, restore the '+'
return "+" + format_normal(lines)
def format_normal(lines):
remove_blank_lines(lines)
for i, value in enumerate(lines):
lines[i] = str(self.indent_space(1)) + value.lstrip()
lines += [str(self.indent_space())]
return "\n" + "\n".join(lines)
def format_oneline(lines):
return "".join(lines)
def format_lines_doc(lines):
for i, value in enumerate(lines[:-1]):
if i:
lines[i] = str(self.indent_space(1)) + value.lstrip()
lines[-1] = str(self.indent_space()) + lines[-1].strip()
lines[0] = lines[0].lstrip()
return "\n".join(lines)
if is_doc_comment(lines):
text_token.value = format_doc(lines)
elif is_hint:
text_token.value = format_hint(lines)
elif is_separator_line_comment(lines):
text_token.value = format_oneline(lines)
elif is_lines_doc_comment(lines):
text_token.value = format_lines_doc(lines)
else:
text_token.value = format_normal(lines)
comment.insert_after(comment.tokens[-1], self.nl())
def __custom_process_inorder_function(self, function):
name_token = tu.token_next_enable(function)
parenthesis = tu.token_next_enable(function, name_token)
spaces = function.tokens_between(name_token, parenthesis)[1:-1]
for tkn in spaces:
if tkn.is_whitespace():
function.tokens.remove(tkn)
self.__custom_process_parenthesis_order(parenthesis)
def __custom_process_parenthesis_order(self, parenthesis):
open_punc = parenthesis.token_next_match(0, T.Punctuation, '(')
close_punc = parenthesis.token_next_match(open_punc, T.Punctuation, ')')
self.indent += 2
parenthesis.insert_after(open_punc, self.nl())
for token in parenthesis.tokens_between(open_punc, close_punc)[1:-1]:
if isinstance(token, Phrase):
parenthesis.insert_before(token, self.nl())
self._process_phrase(token, kwds=False)
parenthesis.insert_after(token, self.nl_with_indent(1))
elif isinstance(token, sql.Identifier) and len(token.tokens) == 1 and isinstance(token.tokens[0], Phrase):
# An Identifier whose only child is a Phrase
child_token = token.tokens[0]
parenthesis.insert_before(token, self.nl())
self._process_phrase(child_token, kwds=False)
parenthesis.insert_after(token, self.nl_with_indent(1))
elif token.is_group():
self._process(token)
self.indent -= 1
parenthesis.insert_before(close_punc, self.nl())
self.indent -= 1
def __custom_process_insert_values_lr(self, tlist):
# For INSERT, force a single space before and after VALUES
values_token = tlist.token_next_match(0, T.Keyword, "VALUES")
if values_token:
prv = tlist.token_prev(values_token, skip_ws=False)
if prv and prv.is_whitespace():
prv.value = " "
prv = tlist.token_prev(prv, skip_ws=False)
while prv and prv.is_whitespace():
prv.value = ""
prv = tlist.token_prev(prv, skip_ws=False)
else:
tlist.insert_before(values_token, sql.Token(T.Whitespace, " "))
nxt = tlist.token_next(values_token, skip_ws=False)
if nxt and nxt.is_whitespace():
nxt.value = " "
nxt = tlist.token_next(nxt, skip_ws=False)
while nxt and nxt.is_whitespace():
nxt.value = ""
nxt = tlist.token_next(nxt, skip_ws=False)
else:
tlist.insert_after(values_token, sql.Token(T.Whitespace, " "))
def _process_statement(self, tlist):
self.__custom_process_list(tlist)
self._process_default(tlist)
tkn = tu.token_next_enable(tlist)
if tkn and tu.is_insert_dml(tkn):
self.__custom_process_insert_values_lr(tlist)
def _process_comment(self, tlist):
if tu.is_block_comment(tlist):
usql = tu.get_comment_type(tlist, self.local_config.comment_syntax)
if usql == tu.EngineComment.param:
pass
elif usql == tu.EngineComment.syntax:
tlist.insert_before(tlist.tokens[0], self.nl())
tlist.insert_after(tlist.tokens[-1], self.nl())
elif usql == tu.EngineComment.sql_identifier:
# Remove preceding newlines and leave a single space
whitespaces = []
for tkn in self._flatten_tokens_prev(tlist):
if tkn.is_whitespace():
whitespaces.append(tkn)
else:
break
if whitespaces:
whitespaces[-1].value = " "
for tws in whitespaces[:-1]:
tws.value = ""
tlist.insert_after(tlist.tokens[-1], self.nl())
elif tu.is_hint_block_comment(tlist):
self.__custom_process_block_comment(tlist, True)
else:
self.__custom_process_block_comment(tlist, False)
elif tu.is_line_comment(tlist):
self.__custom_process_line_comment(tlist)
for tkn in tlist.tokens[:]:
if tkn.is_whitespace():
tlist.tokens.remove(tkn)
usql = tu.get_comment_type(tlist, self.local_config.comment_syntax)
if usql != tu.EngineComment.none:
# A uroboroSQL-syntax line comment, so insert a newline before it
tlist.insert_before(tlist.tokens[0], self.nl())
elif not tu.is_line_description_line_comment(tlist, self.local_config.comment_syntax):
# The line comment originally followed a newline, so reinsert one
tlist.insert_before(tlist.tokens[0], self.nl())
tlist.insert_after(tlist.tokens[-1], self.nl())
self._process_default(tlist)
def _process_identifierlist(self, tlist):
self._process_default(tlist)
if not self._is_format_target_identifire_list(tlist):
return
identifiers = list(tlist.get_identifiers())
self._adjust_identifiers_indent(identifiers)
if identifiers:
self.offset += 1
first = identifiers[0]
tlist.insert_before(first, self.nl())
tlist.insert_after(first, self.nl_with_indent(-1))
if len(identifiers) > 1:
for token in identifiers[1:-1]:
tlist.insert_before(token, self.one_indent_space())
tlist.insert_after(token, self.nl_with_indent(-1))
last = identifiers[-1]
tlist.insert_before(last, self.one_indent_space())
self.offset -= 1
def _process_when(self, tlist):
token = tlist.token_next_match(0, T.Keyword, 'WHEN')
try:
tlist.insert_before(token, self.nl())
tlist.insert_after(token, self.nl_with_indent(1))
except ValueError: # issue121, errors in statement
pass
self._process_default(tlist)
self._adjust_comparisons_indent(self._get_comparisons(tlist))
def _process_where(self, tlist):
token = tlist.token_next_match(0, T.Keyword, 'WHERE')
try:
tlist.insert_before(token, self.nl())
tlist.insert_after(token, self.nl_with_indent(1))
except ValueError: # issue121, errors in statement
pass
self._process_default(tlist)
self._adjust_comparisons_indent(self._get_comparisons(tlist))
def _process_having(self, tlist):
self._process_default(tlist)
token = tlist.token_next_match(0, T.Keyword, 'HAVING')
try:
tlist.insert_before(token, self.nl())
tlist.insert_after(token, self.nl_with_indent(1))
except ValueError: # issue121, errors in statement
pass
self._adjust_comparisons_indent(self._get_comparisons(tlist))
def _process_comparison(self, tlist):
# tlist.insert_before(tlist.tokens[0], self.indent_space(1))
self._process_default(tlist)
def _process_parenthesis(self, tlist):
def is_insert_parenthesis(tlist):
if not self.__within_insert_statement(tlist):
return False
if self.__within_insert_values_section(tlist): # the INSERT VALUES section
return True
if self.__within_insert_into_columns_section(tlist): # the INSERT INTO column list
return True
return False
def is_include_join(tlist):
token = tu.tokens_parenthesis_inner(tlist)
for tkn in token:
if tu.is_join(tkn):
return True
return False
def is_with_query_cols(tlist):
"""
Decide whether these are the WITH query column-name parentheses
"""
parent = tlist.parent
if parent and tu.is_identifier(parent):
nametoken = tu.token_prev_enable(parent, tlist)
if not nametoken:
return False
if not tu.is_identifier(nametoken) and not nametoken.ttype in T.Name:
return False
parent = parent.parent
if parent and tu.is_identifier_list(parent):
parent = parent.parent
if parent and tu.is_with(parent):
return True
return False
def is_need_shift(tlist):
"""
Shift when there is a newline before the closing ')',
but not when there is also a newline before the opening '('.
"""
# Is there a newline before the opening '('?
open_punc = tlist.token_next_match(0, T.Punctuation, '(')
exists = False
spaces = ""
for token in self._flatten_tokens_prev(open_punc):
exists = True
if token.is_whitespace():
spaces += token.value
if is_inc_cr(token):
if spaces.count("\tkn") == self.indent:
return False
# fall through to the close-paren check
break
else:
# fall through to the close-paren check
break
if not exists:
return False
# Is there a newline before the closing ')'?
close_punc = tlist.token_next_match(open_punc, T.Punctuation, ')')
for tkn in tlist.tokens_between(open_punc, close_punc)[1:-1][::-1]:
for token in list(tu.flatten(tkn))[::-1]:
if token.is_whitespace():
if is_inc_cr(token):
return True
else:
return False
return False
if tu.is_dmlddl_parenthesis(tlist): # DML inside the parentheses
self.__process_parenthesis_for_dmlddl(tlist)
elif tu.is_comparisons_parenthesis(tlist): # parentheses around conditions
self.__process_parenthesis_for_complist(tlist)
elif is_insert_parenthesis(tlist): # INSERT clause parentheses
self.__process_parenthesis_for_insert(tlist)
elif is_include_join(tlist): # JOIN clause parentheses
self.__process_parenthesis_for_jointables(tlist)
elif is_with_query_cols(tlist): # WITH clause parentheses
self.__process_parenthesis_for_with_query_cols(tlist)
elif tu.is_enum_parenthesis(tlist):
if self._is_include_format_target_identifire_list_parenthesis(tlist):
self.__process_parenthesis_for_identifier_list(tlist) # expects identifier-list formatting
else:
self.__process_parenthesis_for_enum(tlist) # for a value list, only fix the spaces after commas
else:
self._process_default(tlist, stmts=True)
# If there is a newline before the closing ')', shift the contents right (indent them)
if is_need_shift(tlist):
self.__indent_shift(tlist)
def __process_parenthesis_for_identifier_list(self, tlist):
open_punc = tlist.token_next_match(0, T.Punctuation, '(')
self.indent += 1
tlist.insert_after(open_punc, self.nl_with_indent(1))
self._process_default(tlist)
close_punc = tlist.token_next_match(open_punc, T.Punctuation, ')')
tlist.insert_before(close_punc, self.nl())
self.indent -= 1
def __process_parenthesis_for_enum(self, parenthesis):
def proc_parenthesis(tokens, parent):
for token in tokens:
if tu.is_comma(token):
next_token = parent.token_next(token, skip_ws=False)
if next_token and next_token.is_whitespace():
next_token.value = " "
else:
parent.insert_after(token, sql.Token(T.Whitespace, " "))
elif tu.is_identifier_list(token):
proc_parenthesis(token.tokens[:], token)
elif token.is_group():
self._process(token)
proc_parenthesis(tu.tokens_parenthesis_inner(parenthesis), parenthesis)
def __process_parenthesis_for_join_using(self, tlist):
open_punc = tlist.token_next_match(0, T.Punctuation, '(')
tlist.insert_after(open_punc, self.nl_with_indent(1))
self._process_default(tlist)
close_punc = tlist.token_next_match(open_punc, T.Punctuation, ')')
tlist.insert_before(close_punc, self.nl())
def __process_parenthesis_for_jointables(self, tlist):
open_punc = tlist.token_next_match(0, T.Punctuation, '(')
self.indent += 1
tlist.insert_after(open_punc, self.nl_with_indent(1))
self._process_default(tlist)
close_punc = tlist.token_next_match(open_punc, T.Punctuation, ')')
tlist.insert_before(close_punc, self.nl())
self.indent -= 1
def __process_parenthesis_for_with_query_cols(self, tlist):
"""
WITH query column names
"""
open_punc = tlist.token_next_match(0, T.Punctuation, '(')
self.indent += 1
tlist.insert_after(open_punc, self.nl())
self._process_default(tlist)
close_punc = tlist.token_next_match(open_punc, T.Punctuation, ')')
tlist.insert_before(close_punc, self.nl())
self.indent -= 1
def __process_parenthesis_for_insert(self, tlist):
open_punc = tlist.token_next_match(0, T.Punctuation, '(')
tlist.insert_after(open_punc, self.nl())
self._process_default(tlist)
close_punc = tlist.token_next_match(open_punc, T.Punctuation, ')')
tlist.insert_before(close_punc, self.nl())
def __process_parenthesis_for_complist(self, tlist):
open_punc = tlist.token_next_match(0, T.Punctuation, '(')
self.indent += 1
tlist.insert_after(open_punc, self.nl())
self._process_default(tlist)
comps = self._get_comparisons(tlist)
tlist.insert_before(comps[0], sql.Token(T.Whitespace, "\t"))
self._adjust_comparisons_indent(comps)
close_punc = tlist.token_next_match(open_punc, T.Punctuation, ')')
tlist.insert_before(close_punc, self.nl())
self.indent -= 1
def __process_parenthesis_for_dmlddl(self, tlist):
open_punc = tlist.token_next_match(0, T.Punctuation, '(')
def calc_dmlddl_indent(tlist):
exists = False
for tkn in self._flatten_tokens_prev(tlist):
if tu.is_enable(tkn):
if tu.is_open_punctuation(tkn):
"""
Inside parentheses:
(<--|
|(
|--->SELECT
|--->--->*
|--->FROM
|--->--->TBL
|)
)<--|
"""
return (1, 0)
if tu.is_union(tkn.parent):
return (1, 0)
exists = True
break
if not exists:
"""
No preceding token:
|(
|--->SELECT
|--->--->*
|--->FROM
|--->--->TBL
|)
"""
return (1, 0)
"""
Normally:
|AND AAA = (
|--->--->SELECT
|--->--->--->*
|--->--->FROM
|--->--->--->TBL
|--->)
"""
return (2, 1)
dmlddl_indent, dmlddl_close_indent = calc_dmlddl_indent(tlist)
self.indent += dmlddl_indent
tlist.insert_after(open_punc, self.nl())
self._process_default(tlist, stmts=False)
self.__custom_process_list(tlist)
self.indent -= (dmlddl_indent - dmlddl_close_indent)
close_punc = tlist.token_next_match(open_punc, T.Punctuation, ')')
tlist.insert_before(close_punc, self.nl())
self.indent -= dmlddl_close_indent
def _process_case(self, tlist):
def is_prev_comma(token):
for prev in self._flatten_tokens_prev(token):
if not tu.is_enable(prev):
continue
return tu.is_comma(prev)
return False
comma_next = is_prev_comma(tlist)
cases = tu.get_cases(tlist)
if not comma_next:
case = tlist.tokens[0]
tlist.insert_before(case, self.nl_with_indent(1))
self.offset += 2
is_first = True
for cond, value in cases:
if is_first:
is_first = False
if not value:
if cond:
tlist.insert_before(cond[0], self.nl_with_indent(-1))
continue
if cond:
tlist.insert_before(cond[0], self.nl())
tlist.insert_after(cond[0], self.nl_with_indent(1))
if value:
tlist.insert_before(value[0], self.nl())
tlist.insert_after(value[0], self.nl_with_indent(1))
self._process_default(tlist)
self.offset -= 2
end = tlist.token_next_match(0, T.Keyword, 'END')
tlist.insert_before(end, self.nl_with_indent(1))
if not comma_next:
tlist.insert_after(end, self.nl())
def _process_function(self, tlist):
func_name = tlist.token_next(-1)
idx = tlist.token_index(func_name)
tkn = tlist.token_next(idx, skip_ws=False)
while tkn and tkn.is_whitespace():
tlist.tokens.remove(tkn)
tkn = tlist.token_next(idx, skip_ws=False)
self._process_default(tlist)
def _process_withingroupfunctions(self, tlist):
for token in tlist.tokens:
if token.is_whitespace():
token.value = " "
tgp = tlist.get_group()
if tgp:
tkn = tlist.token_next(tgp, skip_ws=False)
while tkn and tkn.is_whitespace():
tlist.tokens.remove(tkn)
tkn = tlist.token_next(tgp, skip_ws=False)
self._process_function(tlist.get_main_function())
self.__custom_process_inorder_function(tlist.get_group())
def _process_phrase(self, tlist, kwds=True):
self._process_default(tlist, kwds=kwds)
for tkn in tlist.tokens_words():
if tkn.is_whitespace():
tkn.value = " "
def _process_ascdesc(self, tlist):
self._process_default(tlist)
for tkn in tlist.tokens_words():
if tkn.is_whitespace():
tkn.value = " "
def _process_offsetfetch(self, tlist):
self._process_default(tlist)
for tkn in tlist.tokens_words():
if tkn.is_whitespace():
tkn.value = " "
tlist.insert_before(tlist.tokens[0], self.nl())
def _process_limitoffset(self, tlist):
def remove_whitespace(tokens, parent): # remove whitespace tokens
tokens = tokens[:]
for token in tokens:
if token.is_whitespace():
parent.tokens.remove(token)
def proc_csv(tokens, parent): # handle the comma-separated form
tokens = tokens[:]
for token in tokens:
if tu.is_comma(token):
next_token = parent.token_next(token, skip_ws=False)
if next_token and next_token.is_whitespace():
next_token.value = " "
else:
parent.insert_after(token, sql.Token(T.Whitespace, " "))
self._process_default(tlist)
has_comma = False
identifier_list = None
for tkn in tlist.tokens_words():
if tu.is_comma(tkn):
has_comma = True
break
if tu.is_identifier_list(tkn):
identifier_list = tkn
break
if has_comma: # LIMIT num, num form
remove_whitespace(tlist.tokens, tlist)
proc_csv(tlist.tokens, tlist)
for tkn in tlist.tokens_words():
if tu.is_keyword(tkn):
tlist.insert_after(tkn, sql.Token(T.Whitespace, " "))
tlist.insert_before(tkn, self.nl())
elif identifier_list: # LIMIT num, num form (parsed as an identifier list)
remove_whitespace(tlist.tokens, tlist)
remove_whitespace(identifier_list.tokens, identifier_list)
proc_csv(identifier_list.tokens, identifier_list)
for tkn in tlist.tokens_words():
if tu.is_keyword(tkn):
tlist.insert_after(tkn, sql.Token(T.Whitespace, " "))
tlist.insert_before(tkn, self.nl())
else: # LIMIT num / LIMIT num OFFSET num form
for tkn in tlist.tokens_words():
if tkn.is_whitespace():
tkn.value = " "
elif tu.is_keyword(tkn):
tlist.insert_before(tkn, self.nl())
def _process_mergewhen(self, tlist):
self._process_default(tlist)
for tkn in tlist.tokens_words():
if tkn.is_whitespace():
tkn.value = " "
tlist.insert_before(tlist.tokens[0], self.nl())
def _process_overfunctions(self, tlist):
"""
Functions that take OVER, such as ROW_NUMBER
"""
for token in tlist.tokens:
if token.is_whitespace():
token.value = " "
self._process_function(tlist.get_main_function())
self.__custom_process_inorder_function(tlist.get_over())
def _process_keepfunctions(self, tlist):
"""
Functions that take KEEP
"""
for token in tlist.tokens:
if token.is_whitespace():
token.value = " "
self._process_function(tlist.get_main_function())
self.__custom_process_inorder_function(tlist.get_keep())
def _process_forupdate(self, tlist, kwds=True):
self.__custom_process_list(tlist)
self._process_default(tlist, kwds=kwds)
tlist.insert_before(tlist.get_for(), self.nl())
if tlist.is_in_identifier():
prev = None
for tkn in tlist.tokens_between(tlist.get_for(), tlist.get_of()):
if tkn.is_whitespace():
if prev and prev.is_whitespace():
tlist.tokens.remove(tkn)
else:
tkn.value = " "
prev = tkn
tlist.insert_after(tlist.get_of(), self.nl_with_indent(1))
if tlist.get_wait_or_nowait():
tlist.insert_before(tlist.get_wait_or_nowait(), self.nl())
else:
prev = None
for tkn in tlist.tokens_between(tlist.get_for(), tlist.get_target_tokens()[-1]):
if tkn.is_whitespace():
if prev and prev.is_whitespace():
tlist.tokens.remove(tkn)
else:
tkn.value = " "
prev = tkn
tlist.insert_after(tlist.get_target_tokens()[-1], self.nl())
def _process_waitornowait(self, tlist, _=True):
for tkn in tlist.tokens_words():
if tkn.is_whitespace():
tkn.value = " "
def _process_union(self, tlist, kwds=True):
self._process_phrase(tlist, kwds)
tlist.insert_before(tlist.tokens[0], self.nl())
tlist.insert_after(tlist.tokens[-1], self.nl())
def _process_join(self, tlist):
"""
JOIN clauses
"""
tlist.insert_before(tlist.jointoken, self.nl())
tlist.insert_before(tlist.identifiertoken, self.nl_with_indent(1))
self._process_default(tlist.identifiertoken)
if tlist.usingtoken:
tlist.insert_before(tlist.usingtoken, self.nl())
tokens = tlist.tokens_between(tlist.usingtoken, tlist.usingparenthesistoken)[1:-1]
for tkn in tokens:
if tkn.is_whitespace():
tkn.value = ''
self.indent += 1
self.__process_parenthesis_for_join_using(tlist.usingparenthesistoken)
self.indent -= 1
def _process_on(self, tlist):
"""
ON clause
"""
token = tlist.token_next_match(0, T.Keyword, 'ON')
try:
tlist.insert_before(token, self.nl())
tlist.insert_after(token, self.nl_with_indent(1))
except ValueError: # issue121, errors in statement
pass
self._process_default(tlist)
self._adjust_comparisons_indent(self._get_comparisons(tlist))
def _process_mergeupdateinsertclause(self, tlist):
"""
UPDATE / INSERT clauses inside MERGE
"""
self.indent += 1
self.__custom_process_list(tlist)
self._process_default(tlist)
self.indent -= 1
tkn = tu.token_next_enable(tlist)
if tkn and tu.is_insert_dml(tkn):
self.__custom_process_insert_values_lr(tlist)
def _process_connectby(self, tlist):
"""
CONNECT BY clause
"""
token = tlist.token_matching(0, (tu.is_phrase,))
try:
tlist.insert_before(token, self.nl())
tlist.insert_after(token, self.nl_with_indent(1))
except ValueError: # issue121, errors in statement
pass
self._process_default(tlist)
self._adjust_comparisons_indent(self._get_comparisons(tlist))
def _process_startwith(self, tlist):
"""
START WITH clause
"""
token = tlist.token_matching(0, (tu.is_phrase,))
try:
tlist.insert_before(token, self.nl())
tlist.insert_after(token, self.nl_with_indent(1))
except ValueError: # issue121, errors in statement
pass
self._process_default(tlist)
self._adjust_comparisons_indent(self._get_comparisons(tlist))
def _process_with(self, tlist):
"""
WITH clause
"""
with_token = tlist.token_with()
tlist.insert_before(with_token, self.nl())
tlist.insert_after(with_token, self.nl_with_indent(1))
self._process_default(tlist)
def _process_specialfunctionparameter(self, tlist):
"""
Special function parameters
"""
for tkn in tlist.tokens_words():
if tkn.is_whitespace():
tkn.value = " "
elif tkn.is_group():
self._process(tkn)
def nl_with_indent(self, offset):
count = ((self.indent * self.width) + self.offset + offset)
if count < 0:
count = 0
space = "\t" * count
sws = '\n' + space
return sql.Token(T.Whitespace, sws)
def cr(self):
return sql.Token(T.Whitespace, '\n')
def indent_space(self, offset=0):
space = ("\t" * ((self.indent * self.width) + self.offset + offset))
return sql.Token(T.Whitespace, space)
def one_indent_space(self):
return sql.Token(T.Whitespace, "\t")
def _adjust_identifiers_indent(self, identifiers):
"""
Adjust the internal indentation of Identifiers
"""
if not identifiers:
return
def is_update_set_identifiers(token):
if not self.__within_update_set_section(token):
return False
while token.parent:
parent = token.parent
if tu.is_parenthesis(parent):
return False
if (not parent.parent) or (not self.__within_update_set_section(parent)):
return True
token = parent
return True
fst = identifiers[0]
ids = []
if is_update_set_identifiers(fst):
# UPDATE SET clause
for token in identifiers:
if not token.is_group():
continue
ids.append(_UpdIdentifierObject(token, self.indent + self.offset, self.local_config))
else:
for token in identifiers:
if not token.is_group():
continue
ids.append(_IdentifierObject(token, self.indent + self.offset, self.local_config))
max_width_left = 0
max_width_right = 0
has_center_token = False
for identifier in ids:
max_width_left = max(max_width_left, identifier.width_left)
max_width_right = max(max_width_right, identifier.width_right)
has_center_token = has_center_token or identifier.center_token
left_offset = 0 if has_center_token else -1
for identifier in ids:
if identifier.right_tokens:
left = identifier.left_lines[-1]
left_space = "\t" * int(calc_tab_padding_count(left, max_width_left) + left_offset)
if len(identifier.left_lines) > 1:
left_space += "\t"
identifier.token.insert_after(identifier.left_tokens[-1], sql.Token(T.Whitespace, left_space))
if identifier.line_comment:
right = identifier.right_lines[-1]
right_space = "\t" * int(calc_tab_padding_count(right, max_width_right))
if len(identifier.right_lines) > 1:
right_space += "\t\t" + ("\t" * int(calc_tab_padding_count("", max_width_left) + left_offset))
identifier.token.insert_after(identifier.right_tokens[-1], sql.Token(T.Whitespace, right_space))
elif identifier.line_comment:
left = identifier.left_lines[-1]
left_space = "\t" * int(calc_tab_padding_count(left, max_width_left) + left_offset) \
+ "\t" \
+ "\t" * int(calc_tab_padding_count("", max_width_right))
if len(identifier.left_lines) > 1:
left_space += "\t"
identifier.token.insert_after(identifier.left_tokens[-1], sql.Token(T.Whitespace, left_space))
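# A rough illustration of the alignment the method above produces (added for
# explanation only; actual widths come from calc_tab_padding_count, assuming
# the tab stop of 4 used throughout this module):
#
# SELECT
# COL_A <tabs> AS ALIAS_A <tabs> -- line comment
# , COL_LONGER_NAME AS ALIAS_B <tabs> -- line comment
#
# Every identifier's left part is tab-padded out to the widest left part, and
# right parts are padded only when a trailing line comment must line up.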
def _adjust_comparisons_indent(self, comparisons):
"""
Adjust the internal indentation of Comparisons
"""
ids = []
for token in comparisons:
if not token.is_group():
continue
if tu.is_comparison(token):
ids.append(_ComparisonObject(token, self.indent + self.offset, self.local_config))
max_width_left = 0
max_width_operator = 0
max_width_right = 0
for comparison in ids:
max_width_left = max(max_width_left, comparison.width_left)
max_width_operator = max(max_width_operator, comparison.width_operator)
max_width_right = max(max_width_right, comparison.width_right)
for comparison in ids:
if comparison.right_tokens:
left = comparison.left_lines[-1]
left_space = "\t" * int(calc_tab_padding_count(left, max_width_left))
if len(comparison.left_lines) > 1:
left_space += "\t"
comparison.token.insert_after(comparison.left_tokens[-1], sql.Token(T.Whitespace, left_space))
op_space = "\t" * int(calc_tab_padding_count(comparison.operator_string, max_width_operator))
comparison.token.insert_after(comparison.operator_tokens[-1], sql.Token(T.Whitespace, op_space))
if comparison.line_comment:
right = comparison.right_lines[-1]
right_space = "\t" * int(calc_tab_padding_count(right, max_width_right))
if len(comparison.right_lines) > 1:
right_space += "\t\t" + ("\t" * int(calc_tab_padding_count("", max_width_left)))
comparison.token.insert_after(comparison.right_tokens[-1], sql.Token(T.Whitespace, right_space))
def _flatten_tokens_prev(self, token):
return tu.flatten_tokens_prev(self._curr_stmt, token)
def _flatten_tokens_next(self, token):
return tu.flatten_tokens_next(self._curr_stmt, token)
def _get_comparisons(self, token):
return list(x for x in token.tokens if tu.is_comparison(x) or tu.is_parenthesis(x) or tu.is_exists_function(x))
def _is_include_format_target_identifire_list_parenthesis(self, parenthesis):
"""
Whether the parentheses contain an identifier list that needs formatting
"""
def find_identifire_list(token):
if tu.is_identifier_list(token):
return token
if isinstance(token, sql.TokenList):
for tkn in token.tokens:
til = find_identifire_list(tkn)
if til:
return til
return None
def is_include_line_comment(identifier):
for tkn in identifier.tokens:
if tu.is_line_comment(tkn):
return True
return False
til = find_identifire_list(parenthesis)
if not til:
return False
identifiers = list(til.get_identifiers())
# Not a target unless a line comment is present
for identifier in identifiers:
if is_include_line_comment(identifier):
return True
return False
def _is_format_target_identifire_list(self, identifirelist):
"""
Whether this identifier list needs formatting
"""
identifiers = list(identifirelist.get_identifiers())
if not identifiers:
return False
func_token = tu.within_function(self._curr_stmt, identifirelist)
if not func_token:
# Process when not inside a function
return True
if tu.is_exists_function(func_token) or tu.is_over_function(func_token):
# The contents of EXISTS and OVER are processed
return True
parenthesis = tu.within_parenthesis(self._curr_stmt, identifirelist)
if tu.is_dmlddl_parenthesis(parenthesis):
return True
if tu.is_comparisons_parenthesis(parenthesis):
return True
if self._is_include_format_target_identifire_list_parenthesis(parenthesis):
return True
return False
def __indent_shift(self, tlist, shift=1):
for token in tu.flatten(tlist):
if is_inc_cr(token):
token.value += "\t" * shift
def hit_last(token, func, skip_ws=True):
if func(token):
return token
if token.is_group():
for tkn in token.tokens[::-1]:
if hit_last(tkn, func):
return tkn
if skip_ws and tkn.is_whitespace():
continue
else:
break
return None
def hit_first(token, func, skip_ws=True):
if func(token):
return token
if token.is_group():
for tkn in token.tokens[:]:
if hit_first(tkn, func):
return tkn
if skip_ws and tkn.is_whitespace():
continue
else:
break
return None
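# Note on these two helpers (a summary added for clarity, not original text):
# hit_first()/hit_last() test the token itself, then walk its children from
# the front/back, skipping whitespace and descending into groups, and return
# the matching token or None. Example (illustrative):
# hit_first(tok, tu.is_comma) finds a comma that begins a group such as an
# identifier list.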
def calc_tab_padding_count(text, size):
width = get_text_char_width(text)
if size < width:
# size exceeded
return 1
count = 0
mod = width % 4
if mod > 0:
width += 4 - mod
count += 1
count += math.ceil((size - width) / 4)
return count
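# Worked example (illustrative, assuming the tab stop of 4 used throughout):
# calc_tab_padding_count("ab", 8): width("ab") == 2, 2 % 4 == 2, so the width
# is rounded up to 4 and count becomes 1; then ceil((8 - 4) / 4) adds 1 more,
# so two tabs pad "ab" out to a display size of 8.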
def calc_tab_pad_size(size):
mod = size % 4
if mod == 0:
return size + 4
else:
return size + 4 - mod
def get_need_tab_char_width(text):
return calc_tab_pad_size(get_text_char_width(text))
def get_text_char_width(text):
if sys.version_info[0] < 3 and isinstance(text, str):
text = text.decode('utf-8')
width = 0
for cval in text:
if cval =="\t":
width = calc_tab_pad_size(width)
elif is_zen(cval):
width += 2
else:
width += 1
return width
def is_zen(cval):
regexp = re.compile(r'(?:\xEF\xBD[\xA1-\xBF]|\xEF\xBE[\x80-\x9F])|[\x20-\x7E]')
result = regexp.search(cval)
return not result
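# Example (illustrative): is_zen(u"A") is False, so ASCII counts as width 1
# in get_text_char_width above, while is_zen(u"\u3042") -- hiragana "a" --
# is True, so full-width characters are counted as width 2.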
def is_inc_cr(token):
return token.is_whitespace() and ("\n" in token.value or "\r" in token.value)
'''
ReservedWordCaseFilter converts the case of reserved words
supplied by the user. The filter compares each token value
against that reserved-word list.
'''
class ReservedWordCaseFilter():
ttype = None
def __init__(self, local_config):
if local_config.reserved_case == 'upper':
self.input_reserved_words = [word.upper() for word in local_config.input_reserved_words]
elif local_config.reserved_case == 'lower':
self.input_reserved_words = [word.lower() for word in local_config.input_reserved_words]
elif local_config.reserved_case == 'capitalize':
self.input_reserved_words = [word.capitalize() for word in local_config.input_reserved_words]
self.reserved_case = local_config.reserved_case
# for jython str.upper()
# self.convert = getattr(str, case)
def get_convert():
import sys
if sys.version_info[0] < 3:
unicodecase = getattr(unicode, local_config.reserved_case)
def convert(s):
if isinstance(s, str):
return unicodecase(s.decode('utf-8')).encode('utf-8')
else:
return unicodecase(s)
return convert
else:
return getattr(str, local_config.reserved_case)
self.convert = get_convert()
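# Sketch of the stream contract (an explanatory note, matching how sqlparse
# runs filters): process() receives (ttype, value) pairs and must yield every
# pair back, converting the value only when it matches the user-supplied
# reserved-word list under the configured case.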
def process(self, stack, stream):
if self.reserved_case == 'upper':
for ttype, value in stream:
if value.upper() in self.input_reserved_words:
value = self.convert(value)
yield ttype, value
if self.reserved_case == 'lower':
for ttype, value in stream:
if value.lower() in self.input_reserved_words:
value = self.convert(value)
yield ttype, value
if self.reserved_case == 'capitalize':
for ttype, value in stream:
if value.capitalize() in self.input_reserved_words:
value = self.convert(value)
yield ttype, value |
vv1133/home_web | refs/heads/master | tests/m2m_through/models.py | 115 | from datetime import datetime
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
# M2M described on one of the models
@python_2_unicode_compatible
class Person(models.Model):
name = models.CharField(max_length=128)
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Group(models.Model):
name = models.CharField(max_length=128)
members = models.ManyToManyField(Person, through='Membership')
custom_members = models.ManyToManyField(Person, through='CustomMembership', related_name="custom")
nodefaultsnonulls = models.ManyToManyField(Person, through='TestNoDefaultsOrNulls', related_name="testnodefaultsnonulls")
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Membership(models.Model):
person = models.ForeignKey(Person)
group = models.ForeignKey(Group)
date_joined = models.DateTimeField(default=datetime.now)
invite_reason = models.CharField(max_length=64, null=True)
class Meta:
ordering = ('date_joined', 'invite_reason', 'group')
def __str__(self):
return "%s is a member of %s" % (self.person.name, self.group.name)
@python_2_unicode_compatible
class CustomMembership(models.Model):
person = models.ForeignKey(Person, db_column="custom_person_column", related_name="custom_person_related_name")
group = models.ForeignKey(Group)
weird_fk = models.ForeignKey(Membership, null=True)
date_joined = models.DateTimeField(default=datetime.now)
def __str__(self):
return "%s is a member of %s" % (self.person.name, self.group.name)
class Meta:
db_table = "test_table"
class TestNoDefaultsOrNulls(models.Model):
person = models.ForeignKey(Person)
group = models.ForeignKey(Group)
nodefaultnonull = models.CharField(max_length=5)
@python_2_unicode_compatible
class PersonSelfRefM2M(models.Model):
name = models.CharField(max_length=5)
friends = models.ManyToManyField('self', through="Friendship", symmetrical=False)
def __str__(self):
return self.name
class Friendship(models.Model):
first = models.ForeignKey(PersonSelfRefM2M, related_name="rel_from_set")
second = models.ForeignKey(PersonSelfRefM2M, related_name="rel_to_set")
date_friended = models.DateTimeField()
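# Usage sketch (illustrative, not part of the test models): rows in a through
# table with extra required fields are created explicitly, e.g.
# Membership.objects.create(person=p, group=g, invite_reason='fan'),
# because group.members.add(p) is not supported for such intermediates on the
# Django versions these models target.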
|
Bysmyyr/chromium-crosswalk | refs/heads/master | chrome/test/chromedriver/test/webserver.py | 17 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import BaseHTTPServer
import os
import ssl
import threading
class Responder(object):
"""Sends a HTTP response. Used with TestWebServer."""
def __init__(self, handler):
self._handler = handler
def SendResponse(self, headers, body):
"""Sends OK response with body."""
self.SendHeaders(headers, len(body))
self.SendBody(body)
def SendResponseFromFile(self, path):
"""Sends OK response with the given file as the body."""
with open(path, 'r') as f:
self.SendResponse({}, f.read())
def SendHeaders(self, headers={}, content_length=None):
"""Sends headers for OK response."""
self._handler.send_response(200)
for field, value in headers.iteritems():
self._handler.send_header(field, value)
if content_length:
self._handler.send_header('Content-Length', content_length)
self._handler.end_headers()
def SendError(self, code):
"""Sends response for the given HTTP error code."""
self._handler.send_error(code)
def SendBody(self, body):
"""Just sends the body, no headers."""
self._handler.wfile.write(body)
class Request(object):
"""An HTTP request."""
def __init__(self, handler):
self._handler = handler
def GetPath(self):
return self._handler.path
def GetHeader(self, name):
return self._handler.headers.getheader(name)
class _BaseServer(BaseHTTPServer.HTTPServer):
"""Internal server that throws if timed out waiting for a request."""
def __init__(self, on_request, server_cert_and_key_path=None):
"""Starts the server.
It is an HTTP server if parameter server_cert_and_key_path is not provided.
Otherwise, it is an HTTPS server.
Args:
server_cert_and_key_path: path to a PEM file containing the cert and key.
If it is None, the server is started as plain HTTP.
"""
class _Handler(BaseHTTPServer.BaseHTTPRequestHandler):
"""Internal handler that just asks the server to handle the request."""
def do_GET(self):
if self.path.endswith('favicon.ico'):
self.send_error(404)
return
on_request(Request(self), Responder(self))
def log_message(self, *args, **kwargs):
"""Overriddes base class method to disable logging."""
pass
BaseHTTPServer.HTTPServer.__init__(self, ('127.0.0.1', 0), _Handler)
if server_cert_and_key_path is not None:
self._is_https_enabled = True
self.socket = ssl.wrap_socket(
self.socket, certfile=server_cert_and_key_path,
server_side=True)
else:
self._is_https_enabled = False
def handle_timeout(self):
"""Overridden from SocketServer."""
raise RuntimeError('Timed out waiting for http request')
def GetUrl(self):
"""Returns the base URL of the server."""
postfix = '://127.0.0.1:%s' % self.server_port
if self._is_https_enabled:
return 'https' + postfix
return 'http' + postfix
class WebServer(object):
"""An HTTP or HTTPS server that serves on its own thread.
Serves files from a given directory but may use custom data for specific paths.
"""
def __init__(self, root_dir, server_cert_and_key_path=None):
"""Starts the server.
It is an HTTP server if parameter server_cert_and_key_path is not provided.
Otherwise, it is an HTTPS server.
Args:
root_dir: root path to serve files from. This parameter is required.
server_cert_and_key_path: path to a PEM file containing the cert and key.
If it is None, the server is started as plain HTTP.
"""
self._root_dir = os.path.abspath(root_dir)
self._server = _BaseServer(self._OnRequest, server_cert_and_key_path)
self._thread = threading.Thread(target=self._server.serve_forever)
self._thread.daemon = True
self._thread.start()
self._path_data_map = {}
self._path_callback_map = {}
self._path_maps_lock = threading.Lock()
def _OnRequest(self, request, responder):
path = request.GetPath().split('?')[0]
# Serve from path -> callback and data maps.
self._path_maps_lock.acquire()
try:
if path in self._path_callback_map:
headers, body = self._path_callback_map[path](request)
if body:
responder.SendResponse(headers, body)
else:
responder.SendError(503)
return
if path in self._path_data_map:
responder.SendResponse({}, self._path_data_map[path])
return
finally:
self._path_maps_lock.release()
# Serve from file.
path = os.path.normpath(
os.path.join(self._root_dir, *path.split('/')))
if not path.startswith(self._root_dir):
responder.SendError(403)
return
if not os.path.exists(path):
responder.SendError(404)
return
responder.SendResponseFromFile(path)
def SetDataForPath(self, path, data):
self._path_maps_lock.acquire()
try:
self._path_data_map[path] = data
finally:
self._path_maps_lock.release()
def SetCallbackForPath(self, path, func):
self._path_maps_lock.acquire()
try:
self._path_callback_map[path] = func
finally:
self._path_maps_lock.release()
def GetUrl(self):
"""Returns the base URL of the server."""
return self._server.GetUrl()
def Shutdown(self):
"""Shuts down the server synchronously."""
self._server.shutdown()
self._thread.join()
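# Minimal usage sketch (an assumption about a typical caller, not part of the
# original module):
#
# server = WebServer('/path/to/root')
# server.SetDataForPath('/hello', 'hi there')
# url = server.GetUrl() + '/hello' # fetch this from the browser under test
# server.Shutdown()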
class SyncWebServer(object):
"""WebServer for testing.
Incoming requests are blocked until explicitly handled.
This was designed for single thread use. All requests should be handled on
the same thread.
"""
def __init__(self):
self._server = _BaseServer(self._OnRequest)
# Recognized by SocketServer.
self._server.timeout = 10
self._on_request = None
def _OnRequest(self, request, responder):
self._on_request(responder)
self._on_request = None
def Respond(self, on_request):
"""Blocks until request comes in, then calls given handler function.
Args:
on_request: Function that handles the request. Invoked with single
parameter, an instance of Responder.
"""
if self._on_request:
raise RuntimeError('Must handle 1 request at a time.')
self._on_request = on_request
while self._on_request:
# Don't use handle_one_request, because it won't work with the timeout.
self._server.handle_request()
def RespondWithContent(self, content):
"""Blocks until request comes in, then handles it with the given content."""
def SendContent(responder):
responder.SendResponse({}, content)
self.Respond(SendContent)
def GetUrl(self):
return self._server.GetUrl()
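# Minimal usage sketch (illustrative): after kicking off a navigation that
# will hit GetUrl(), the test thread calls
# server.RespondWithContent('<html>done</html>')
# which blocks until that single request arrives and is answered.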
|
erikr/django | refs/heads/master | tests/syndication_tests/models.py | 281 | from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Entry(models.Model):
title = models.CharField(max_length=200)
updated = models.DateTimeField()
published = models.DateTimeField()
class Meta:
ordering = ('updated',)
def __str__(self):
return self.title
def get_absolute_url(self):
return "/blog/%s/" % self.pk
@python_2_unicode_compatible
class Article(models.Model):
title = models.CharField(max_length=200)
entry = models.ForeignKey(Entry, models.CASCADE)
def __str__(self):
return self.title
|
duanhjlt/gyp | refs/heads/master | test/standalone-static-library/gyptest-standalone-static-library.py | 186 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies build of a static_library with the standalone_static_library flag set.
"""
import os
import subprocess
import sys
import TestGyp
# standalone_static_library currently means two things: a specific output
# location for the built target and non-thin archive files. The Android gyp
# generator leaves both decisions to the Android build system, so this test
# doesn't work for that format.
test = TestGyp.TestGyp(formats=['!android'])
# Verify that types other than static_library cause a failure.
test.run_gyp('invalid.gyp', status=1, stderr=None)
target_str = 'invalid.gyp:bad#target'
err = ['gyp: Target %s has type executable but standalone_static_library flag '
'is only valid for static_library type.' % target_str]
test.must_contain_all_lines(test.stderr(), err)
# Build a valid standalone_static_library.
test.run_gyp('mylib.gyp')
test.build('mylib.gyp', target='prog')
# Verify that the static library is copied to the correct location.
# We expect the library to be copied to $PRODUCT_DIR.
standalone_static_library_dir = test.EXECUTABLE
path_to_lib = os.path.split(
test.built_file_path('mylib', type=standalone_static_library_dir))[0]
lib_name = test.built_file_basename('mylib', type=test.STATIC_LIB)
path = os.path.join(path_to_lib, lib_name)
test.must_exist(path)
# Verify that the program runs properly.
expect = 'hello from mylib.c\n'
test.run_built_executable('prog', stdout=expect)
# Verify that libmylib.a contains symbols. "ar -x" fails on a 'thin' archive.
supports_thick = ('make', 'ninja', 'cmake')
if test.format in supports_thick and sys.platform.startswith('linux'):
retcode = subprocess.call(['ar', '-x', path])
assert retcode == 0
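# Background note (an assumption about binutils behavior, added for clarity):
# a 'thin' archive records only paths to its member objects, so 'ar -x'
# cannot extract real members from it; a nonzero exit here would mean the
# library was built thin rather than standalone.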
test.pass_test()
|
vertigo235/Sick-Beard-XEM | refs/heads/master | lib/requests/packages/chardet/cp949prober.py | 2800 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCKRDistributionAnalysis
from .mbcssm import CP949SMModel
class CP949Prober(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(CP949SMModel)
# NOTE: CP949 is a superset of EUC-KR, so the distribution should not be
# different.
self._mDistributionAnalyzer = EUCKRDistributionAnalysis()
self.reset()
def get_charset_name(self):
return "CP949"
|
westinedu/similarinterest | refs/heads/master | settings.py | 1 | # Initialize App Engine and import the default settings (DB backend, etc.).
# If you want to use a different backend you have to remove all occurences
# of "djangoappengine" from this file.
try:
from djangoappengine.settings_base import *
has_djangoappengine = True
except ImportError:
has_djangoappengine = False
DEBUG = True
TEMPLATE_DEBUG = DEBUG
# Fall back to MongoDB if App Engine isn't used (note that other backends
# including SQL should work, too)
#DATABASES = {
# 'default': {
# 'ENGINE': 'django_mongodb_engine',
# 'NAME': 'test',
# 'USER': '',
# 'PASSWORD': '',
# 'HOST': 'localhost',
# 'PORT': 27017,
# }
#}
import os
# If True, the south application will be automatically added to the
# INSTALLED_APPS setting.
USE_SOUTH = True
# Use the App Engine High Replication datastore for the default database.
DATABASES['default']['HIGH_REPLICATION'] = True
# Load the dbindexer index definitions from indexes.py.
AUTOLOAD_SITECONF = 'indexes'
SECRET_KEY = "8cc1cc06-22fa-4a69-a9ea-e7ebee6d2567e7608c56-f019-4e9a-a440-a554197d5f2318c89510-cf34-42d5-9b24-6b7a117b22ee"
INSTALLED_APPS = (
'djangoappengine',
'django.contrib.auth',
'django.contrib.admin',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.comments',
'django.contrib.messages',
"django.contrib.sitemaps",
"django.contrib.staticfiles",
"mezzanine.boot",
"mezzanine.conf",
"mezzanine.core",
"mezzanine.generic",
"mezzanine.blog",
"mezzanine.forms",
"mezzanine.pages",
"mezzanine.galleries",
"mezzanine.twitter",
#"mezzanine.accounts",
#"mezzanine.mobile",
'djangotoolbox',
'autoload',
# djangoappengine should come last, so it can override a few manage.py commands
'bookmarks',
'handlers'
)
MIDDLEWARE_CLASSES = (
"mezzanine.core.middleware.UpdateCacheMiddleware",
# This loads the index definitions, so it has to come first
'autoload.middleware.AutoloadMiddleware',
"django.contrib.redirects.middleware.RedirectFallbackMiddleware",
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
#'django.middleware.locale.LocaleMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"mezzanine.core.request.CurrentRequestMiddleware",
"mezzanine.core.middleware.TemplateForDeviceMiddleware",
"mezzanine.core.middleware.TemplateForHostMiddleware",
"mezzanine.core.middleware.AdminLoginInterfaceSelectorMiddleware",
# Uncomment the following if using any of the SSL settings:
# "mezzanine.core.middleware.SSLRedirectMiddleware",
"mezzanine.pages.middleware.PageMiddleware",
"mezzanine.core.middleware.FetchFromCacheMiddleware",
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.request',
'django.core.context_processors.media',
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.static",
"django.core.context_processors.tz",
"mezzanine.conf.context_processors.settings",
'django.contrib.messages.context_processors.messages'
)
AUTHENTICATION_BACKENDS = ("mezzanine.core.auth_backends.MezzanineBackend",)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Store these package names here as they may change in the future since
# at the moment we are using custom forks of them.
PACKAGE_NAME_FILEBROWSER = "filebrowser_safe"
PACKAGE_NAME_GRAPPELLI = "grappelli_safe"
#########################
# OPTIONAL APPLICATIONS #
#########################
# These will be added to ``INSTALLED_APPS``, only if available.
OPTIONAL_APPS = (
"debug_toolbar",
"django_extensions",
"compressor",
PACKAGE_NAME_FILEBROWSER,
PACKAGE_NAME_GRAPPELLI,
)
DEBUG_TOOLBAR_CONFIG = {"INTERCEPT_REDIRECTS": False}
# This test runner captures stdout and associates tracebacks with their
# corresponding output. Helps a lot with print-debugging.
TEST_RUNNER = 'djangotoolbox.test.CapturingTestSuiteRunner'
#ADMIN_MEDIA_PREFIX = '/media/admin/'
#PROJECT_DIR = os.path.dirname(__file__) # this is not Django setting.
#TEMPLATE_DIRS = (
# os.path.join(PROJECT_DIR, "templates"),
# "static"# here you can add another templates directory if you wish.
#)
#
#ROOT_URLCONF = 'urls'
# Full filesystem path to the project.
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
# Name of the directory for the project.
PROJECT_DIRNAME = PROJECT_ROOT.split(os.sep)[-1]
# Every cache key will get prefixed with this value - here we set it to
# the name of the directory the project is in to try and use something
# project specific.
CACHE_MIDDLEWARE_KEY_PREFIX = PROJECT_DIRNAME
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = "/static/"
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PROJECT_ROOT, STATIC_URL.strip("/"))
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = STATIC_URL + "media/"
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, *MEDIA_URL.strip("/").split("/"))
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = STATIC_URL + "grappelli/"
# Package/module name to import the root urlpatterns from for the project.
ROOT_URLCONF = "%s.urls" % PROJECT_DIRNAME
###################
# DEPLOY SETTINGS #
###################
# These settings are used by the default fabfile.py provided.
# Check fabfile.py for defaults.
# FABRIC = {
# "SSH_USER": "", # SSH username
# "SSH_PASS": "", # SSH password (consider key-based authentication)
# "SSH_KEY_PATH": "", # Local path to SSH key file, for key-based auth
# "HOSTS": [], # List of hosts to deploy to
# "VIRTUALENV_HOME": "", # Absolute remote path for virtualenvs
# "PROJECT_NAME": "", # Unique identifier for project
# "REQUIREMENTS_PATH": "", # Path to pip requirements, relative to project
# "GUNICORN_PORT": 8000, # Port gunicorn will listen on
# "LOCALE": "en_US.UTF-8", # Should end with ".UTF-8"
# "LIVE_HOSTNAME": "www.example.com", # Host for public site.
# "REPO_URL": "", # Git or Mercurial remote repo URL for the project
# "DB_PASS": "", # Live database password
# "ADMIN_PASS": "", # Live admin user password
# }
##################
# LOCAL SETTINGS #
##################
# Allow any settings to be defined in local_settings.py which should be
# ignored in your version control system allowing for settings to be
# defined per machine.
try:
from local_settings import *
except ImportError:
pass
####################
# DYNAMIC SETTINGS #
####################
# set_dynamic_settings() will rewrite globals based on what has been
# defined so far, in order to provide some better defaults where
# applicable.
from mezzanine.utils.conf import set_dynamic_settings
set_dynamic_settings(globals())
# Activate django-dbindexer if available
try:
import dbindexer
DATABASES['native'] = DATABASES['default']
DATABASES['default'] = {'ENGINE': 'dbindexer',
'NAME': 'mezzaninedb',
'TARGET': 'native'}
INSTALLED_APPS += ('dbindexer',)
except ImportError:
pass
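# With the aliasing above (an explanatory note), ORM queries against the
# 'default' alias are routed through dbindexer, which rewrites lookups the
# datastore cannot run natively using the index definitions loaded via
# AUTOLOAD_SITECONF.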
|
murphyke/avocado | refs/heads/master | avocado/south_migrations/0018_auto__del_field_datafield_data_modified__add_field_datafield_data_vers.py | 3 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'DataField.data_modified'
db.delete_column('avocado_datafield', 'data_modified')
# Adding field 'DataField.data_version'
db.add_column('avocado_datafield', 'data_version',
self.gf('django.db.models.fields.IntegerField')(default=1),
keep_default=False)
def backwards(self, orm):
# Adding field 'DataField.data_modified'
db.add_column('avocado_datafield', 'data_modified',
self.gf('django.db.models.fields.DateTimeField')(null=True),
keep_default=False)
# Deleting field 'DataField.data_version'
db.delete_column('avocado_datafield', 'data_version')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'avocado.datacategory': {
'Meta': {'ordering': "('-parent__id', 'order', 'name')", 'object_name': 'DataCategory'},
'archived': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keywords': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'order': ('django.db.models.fields.FloatField', [], {'null': 'True', 'db_column': "'_order'", 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['avocado.DataCategory']"}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'avocado.dataconcept': {
'Meta': {'ordering': "('order',)", 'object_name': 'DataConcept'},
'archived': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['avocado.DataCategory']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'fields': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'concepts'", 'symmetrical': 'False', 'through': "orm['avocado.DataConceptField']", 'to': "orm['avocado.DataField']"}),
'formatter_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'concepts+'", 'null': 'True', 'to': "orm['auth.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'internal': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'keywords': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'name_plural': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'order': ('django.db.models.fields.FloatField', [], {'null': 'True', 'db_column': "'_order'", 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'queryable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'concepts+'", 'blank': 'True', 'to': "orm['sites.Site']"}),
'sortable': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'avocado.dataconceptfield': {
'Meta': {'ordering': "('order',)", 'object_name': 'DataConceptField'},
'concept': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'concept_fields'", 'to': "orm['avocado.DataConcept']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'field': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'concept_fields'", 'to': "orm['avocado.DataField']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name_plural': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'order': ('django.db.models.fields.FloatField', [], {'null': 'True', 'db_column': "'_order'", 'blank': 'True'})
},
'avocado.datacontext': {
'Meta': {'object_name': 'DataContext'},
'archived': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_column': "'_count'"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'json': ('jsonfield.fields.JSONField', [], {'default': '{}', 'null': 'True', 'blank': 'True'}),
'keywords': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'session': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'template': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'datacontext+'", 'null': 'True', 'to': "orm['auth.User']"})
},
'avocado.datafield': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_name', 'model_name', 'field_name'),)", 'object_name': 'DataField'},
'app_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'archived': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['avocado.DataCategory']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'data_version': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'enumerable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'field_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'fields+'", 'null': 'True', 'to': "orm['auth.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'keywords': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'model_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'name_plural': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'order': ('django.db.models.fields.FloatField', [], {'null': 'True', 'db_column': "'_order'", 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'fields+'", 'blank': 'True', 'to': "orm['sites.Site']"}),
'translator': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'unit': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'unit_plural': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'})
},
'avocado.dataquery': {
'Meta': {'object_name': 'DataQuery'},
'archived': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'context_json': ('jsonfield.fields.JSONField', [], {'default': '{}', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keywords': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'session': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'template': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'dataquery+'", 'null': 'True', 'to': "orm['auth.User']"}),
'view_json': ('jsonfield.fields.JSONField', [], {'default': '{}', 'null': 'True', 'blank': 'True'})
},
'avocado.dataview': {
'Meta': {'object_name': 'DataView'},
'archived': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_column': "'_count'"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'json': ('jsonfield.fields.JSONField', [], {'default': '{}', 'null': 'True', 'blank': 'True'}),
'keywords': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'session': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'template': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'dataview+'", 'null': 'True', 'to': "orm['auth.User']"})
},
'avocado.log': {
'Meta': {'object_name': 'Log'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'data': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
'event': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['avocado'] |
bwrsandman/OpenUpgrade | refs/heads/8.0 | addons/claim_from_delivery/__openerp__.py | 261 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name' : 'Claim on Deliveries',
'version' : '1.0',
'author' : 'OpenERP SA',
'category' : 'Warehouse Management',
'depends' : ['base', 'crm_claim', 'stock'],
'demo' : [],
'description': """
Create a claim from a delivery order.
=====================================
Adds a Claim link to the delivery order.
""",
'data' : [
'claim_delivery_view.xml',
'claim_delivery_data.xml',],
'auto_install': False,
'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
avichalp/django-push-notifications | refs/heads/master | push_notifications/settings.py | 24 | from django.conf import settings
PUSH_NOTIFICATIONS_SETTINGS = getattr(settings, "PUSH_NOTIFICATIONS_SETTINGS", {})
# GCM
PUSH_NOTIFICATIONS_SETTINGS.setdefault("GCM_POST_URL", "https://android.googleapis.com/gcm/send")
PUSH_NOTIFICATIONS_SETTINGS.setdefault("GCM_MAX_RECIPIENTS", 1000)
# APNS
PUSH_NOTIFICATIONS_SETTINGS.setdefault("APNS_PORT", 2195)
PUSH_NOTIFICATIONS_SETTINGS.setdefault("APNS_FEEDBACK_PORT", 2196)
PUSH_NOTIFICATIONS_SETTINGS.setdefault("APNS_ERROR_TIMEOUT", None)
PUSH_NOTIFICATIONS_SETTINGS.setdefault("APNS_MAX_NOTIFICATION_SIZE", 2048)
if settings.DEBUG:
PUSH_NOTIFICATIONS_SETTINGS.setdefault("APNS_HOST", "gateway.sandbox.push.apple.com")
PUSH_NOTIFICATIONS_SETTINGS.setdefault("APNS_FEEDBACK_HOST", "feedback.sandbox.push.apple.com")
else:
PUSH_NOTIFICATIONS_SETTINGS.setdefault("APNS_HOST", "gateway.push.apple.com")
PUSH_NOTIFICATIONS_SETTINGS.setdefault("APNS_FEEDBACK_HOST", "feedback.push.apple.com")
|
Glottotopia/aagd | refs/heads/master | moin/local/moin/build/lib.linux-x86_64-2.6/MoinMoin/script/maint/cleanpage.py | 2 | # -*- coding: iso-8859-1 -*-
"""
MoinMoin - display unused or trash page directories in data/pages
@copyright: 2005-2006 MoinMoin:ThomasWaldmann
@license: GNU GPL, see COPYING for details.
"""
import os
from MoinMoin.script import MoinScript
class PluginScript(MoinScript):
"""\
Purpose:
========
This tool outputs a shell script which upon execution will remove unused or
trashed pages from the wiki.
Detailed Instructions:
======================
General syntax: moin [options] maint cleanpage [cleanpage-options]
[options] usually should be:
--config-dir=/path/to/my/cfg/ --wiki-url=http://wiki.example.org/
[cleanpage-options] see below:
0. Verify the generated shell script before running it.
1. This script takes no command line arguments.
"""
def __init__(self, argv, def_values):
MoinScript.__init__(self, argv, def_values)
def qualify(self, p):
""" look at page directory p and return its state """
dir = os.listdir(p)
if not dir:
return 'empty'
# check if we have something of potential value
revs = []
if 'revisions' in dir:
revs = os.listdir(os.path.join(p, 'revisions'))
atts = []
if 'attachments' in dir:
atts = os.listdir(os.path.join(p, 'attachments'))
if not revs and not atts:
return 'trash'
if 'current-locked' in dir:
return 'current-locked'
elif 'current' in dir:
try:
current = open(os.path.join(p, 'current')).read().strip()
int(current)
except (IOError, ValueError):
return 'current damaged'
if current not in revs:
return 'deleted'
else:
return 'no current'
return 'ok'
def mainloop(self):
self.init_request()
base = self.request.cfg.data_dir
pagesdir = os.path.join(base, 'pages')
for p in os.listdir(pagesdir):
pagedir = os.path.join(pagesdir, p)
status = self.qualify(pagedir)
if status in ['trash', 'empty', ]:
print "mv '%s' trash # %s" % (pagedir, status)
elif status in ['deleted', ]:
print "mv '%s' deleted # %s" % (pagedir, status)
else:
print "# %s: '%s'" % (status, pagedir)
|
kingvuplus/nn-gui | refs/heads/master | lib/python/Components/Renderer/Picon.py | 1 | ##
## Picon renderer by Gruffy .. some speedups by Ghost
##
from Components.config import config
from Renderer import Renderer
from enigma import ePixmap, eEnv, iServiceInformation, iPlayableService, iPlayableServicePtr
from Tools.Directories import fileExists, SCOPE_SKIN_IMAGE, SCOPE_CURRENT_SKIN, resolveFilename
class Picon(Renderer):
def __init__(self):
Renderer.__init__(self)
self.path = "picon"
self.nameCache = { }
self.pngname = ""
def getServiceInfoValue(self, info, what, ref=None):
v = ref and info.getInfo(ref, what) or info.getInfo(what)
if v != iServiceInformation.resIsString:
return "N/A"
return ref and info.getInfoString(ref, what) or info.getInfoString(what)
def applySkin(self, desktop, parent):
attribs = [ ]
for (attrib, value) in self.skinAttributes:
if attrib == "path":
self.path = value
else:
attribs.append((attrib,value))
self.skinAttributes = attribs
return Renderer.applySkin(self, desktop, parent)
GUI_WIDGET = ePixmap
def changed(self, what):
if self.instance:
pngname = ""
if what[0] != self.CHANGED_CLEAR:
service = self.source.service
if isinstance(service, iPlayableServicePtr):
info = service and service.info()
ref = None
else: # reference
info = service and self.source.info
ref = service
if info is None:
return ""
if self.path == "picon":
piconType = config.nemesis.picontype.value
else:
piconType = config.nemesis.piconlcdtype.value
if piconType == "Reference":
sname = self.getServiceInfoValue(info, iServiceInformation.sServiceref, ref)
# strip all after last :
pos = sname.rfind(':')
if pos != -1:
sname = sname[:pos].rstrip(':').replace(':','_')
else:
name = ref and info.getName(ref)
if name is None:
name = info.getName()
sname = name.replace('\xc2\x86', '').replace('\xc2\x87', '')
pngname = self.nameCache.get(sname, "")
if pngname == "":
pngname = self.findPicon(sname)
if pngname != "":
self.nameCache[sname] = pngname
if pngname == "": # no picon for service found
pngname = self.nameCache.get("default", "")
if pngname == "": # no default yet in cache..
pngname = self.findPicon("picon_default")
if pngname == "":
tmp = resolveFilename(SCOPE_CURRENT_SKIN, "picon_default.png")
if fileExists(tmp):
pngname = tmp
else:
pngname = resolveFilename(SCOPE_SKIN_IMAGE, "skin_default/picon_default.png")
self.nameCache["default"] = pngname
if self.pngname != pngname:
self.instance.setPixmapFromFile(pngname)
self.pngname = pngname
def findPicon(self, serviceName):
if config.nemesis.usepiconinhdd.value:
searchPaths = ('/media/hdd/%s/', eEnv.resolve('${datadir}/enigma2/%s/'),'/media/cf/%s/','/media/usb/%s/')
else:
searchPaths = (eEnv.resolve('${datadir}/enigma2/%s/'),'/media/usb/%s/','/media/cf/%s/')
for path in searchPaths:
pngname = (path % self.path) + serviceName + ".png"
if fileExists(pngname):
return pngname
return ""
|
bliti/django-nonrel-1.5 | refs/heads/nonrel-1.5 | django/utils/ipv6.py | 113 | # This code was mostly based on ipaddr-py
# Copyright 2007 Google Inc. http://code.google.com/p/ipaddr-py/
# Licensed under the Apache License, Version 2.0 (the "License").
from django.core.exceptions import ValidationError
from django.utils.six.moves import xrange
def clean_ipv6_address(ip_str, unpack_ipv4=False,
error_message="This is not a valid IPv6 address"):
"""
Cleans an IPv6 address string.
Validity is checked by calling is_valid_ipv6_address() - if an
invalid address is passed, ValidationError is raised.
Replaces the longest continuous zero-sequence with "::", removes
leading zeroes and makes sure all hextets are lowercase.
Args:
ip_str: A valid IPv6 address.
unpack_ipv4: if an IPv4-mapped address is found,
return the plain IPv4 address (default=False).
error_message: An error message for the ValidationError.
Returns:
A compressed IPv6 address, or the same value if no compression was possible.
"""
best_doublecolon_start = -1
best_doublecolon_len = 0
doublecolon_start = -1
doublecolon_len = 0
if not is_valid_ipv6_address(ip_str):
raise ValidationError(error_message)
# This algorithm can only handle fully exploded
# IP strings
ip_str = _explode_shorthand_ip_string(ip_str)
ip_str = _sanitize_ipv4_mapping(ip_str)
# If needed, unpack the IPv4 and return straight away
# - no need in running the rest of the algorithm
if unpack_ipv4:
ipv4_unpacked = _unpack_ipv4(ip_str)
if ipv4_unpacked:
return ipv4_unpacked
hextets = ip_str.split(":")
for index in range(len(hextets)):
# Remove leading zeroes
hextets[index] = hextets[index].lstrip('0')
if not hextets[index]:
hextets[index] = '0'
# Determine best hextet to compress
if hextets[index] == '0':
doublecolon_len += 1
if doublecolon_start == -1:
# Start of a sequence of zeros.
doublecolon_start = index
if doublecolon_len > best_doublecolon_len:
# This is the longest sequence of zeros so far.
best_doublecolon_len = doublecolon_len
best_doublecolon_start = doublecolon_start
else:
doublecolon_len = 0
doublecolon_start = -1
# Compress the most suitable hextet
if best_doublecolon_len > 1:
best_doublecolon_end = (best_doublecolon_start +
best_doublecolon_len)
# For zeros at the end of the address.
if best_doublecolon_end == len(hextets):
hextets += ['']
hextets[best_doublecolon_start:best_doublecolon_end] = ['']
# For zeros at the beginning of the address.
if best_doublecolon_start == 0:
hextets = [''] + hextets
result = ":".join(hextets)
return result.lower()
def _sanitize_ipv4_mapping(ip_str):
"""
Sanitize IPv4 mapping in an expanded IPv6 address.
This converts ::ffff:0a0a:0a0a to ::ffff:10.10.10.10.
If there is nothing to sanitize, returns an unchanged
string.
Args:
ip_str: A string, the expanded IPv6 address.
Returns:
The sanitized output string, if applicable.
"""
if not ip_str.lower().startswith('0000:0000:0000:0000:0000:ffff:'):
# not an ipv4 mapping
return ip_str
hextets = ip_str.split(':')
if '.' in hextets[-1]:
# already sanitized
return ip_str
ipv4_address = "%d.%d.%d.%d" % (
int(hextets[6][0:2], 16),
int(hextets[6][2:4], 16),
int(hextets[7][0:2], 16),
int(hextets[7][2:4], 16),
)
result = ':'.join(hextets[0:6])
result += ':' + ipv4_address
return result
def _unpack_ipv4(ip_str):
"""
Unpack an IPv4 address that was mapped in a compressed IPv6 address.
This converts 0000:0000:0000:0000:0000:ffff:10.10.10.10 to 10.10.10.10.
If there is nothing to sanitize, returns None.
Args:
ip_str: A string, the expanded IPv6 address.
Returns:
The unpacked IPv4 address, or None if there was nothing to unpack.
"""
if not ip_str.lower().startswith('0000:0000:0000:0000:0000:ffff:'):
return None
hextets = ip_str.split(':')
return hextets[-1]
def is_valid_ipv6_address(ip_str):
"""
Ensure we have a valid IPv6 address.
Args:
ip_str: A string, the IPv6 address.
Returns:
A boolean, True if this is a valid IPv6 address.
"""
from django.core.validators import validate_ipv4_address
# We need to have at least one ':'.
if ':' not in ip_str:
return False
# We can only have one '::' shortener.
if ip_str.count('::') > 1:
return False
# '::' should be encompassed by start, digits or end.
if ':::' in ip_str:
return False
# A single colon can neither start nor end an address.
if ((ip_str.startswith(':') and not ip_str.startswith('::')) or
(ip_str.endswith(':') and not ip_str.endswith('::'))):
return False
# We can never have more than 7 ':' (1::2:3:4:5:6:7:8 is invalid)
if ip_str.count(':') > 7:
return False
# If we have no concatenation, we need to have 8 fields with 7 ':'.
if '::' not in ip_str and ip_str.count(':') != 7:
# We might have an IPv4 mapped address.
if ip_str.count('.') != 3:
return False
ip_str = _explode_shorthand_ip_string(ip_str)
# Now that we have that all squared away, let's check that each of the
# hextets are between 0x0 and 0xFFFF.
for hextet in ip_str.split(':'):
if hextet.count('.') == 3:
# If we have an IPv4 mapped address, the IPv4 portion has to
# be at the end of the IPv6 portion.
if not ip_str.split(':')[-1] == hextet:
return False
try:
validate_ipv4_address(hextet)
except ValidationError:
return False
else:
try:
# a value error here means that we got a bad hextet,
# something like 0xzzzz
if int(hextet, 16) < 0x0 or int(hextet, 16) > 0xFFFF:
return False
except ValueError:
return False
return True
def _explode_shorthand_ip_string(ip_str):
"""
Expand a shortened IPv6 address.
Args:
ip_str: A string, the IPv6 address.
Returns:
A string, the expanded IPv6 address.
"""
if not _is_shorthand_ip(ip_str):
# We've already got a longhand ip_str.
return ip_str
new_ip = []
hextet = ip_str.split('::')
# If there is a ::, we need to expand it with zeroes
# to get to 8 hextets - unless there is a dot in the last hextet,
# meaning we're doing v4-mapping
if '.' in ip_str.split(':')[-1]:
fill_to = 7
else:
fill_to = 8
if len(hextet) > 1:
sep = len(hextet[0].split(':')) + len(hextet[1].split(':'))
new_ip = hextet[0].split(':')
for _ in xrange(fill_to - sep):
new_ip.append('0000')
new_ip += hextet[1].split(':')
else:
new_ip = ip_str.split(':')
# Now need to make sure every hextet is 4 lower case characters.
# If a hextet is < 4 characters, we've got missing leading 0's.
ret_ip = []
for hextet in new_ip:
ret_ip.append(('0' * (4 - len(hextet)) + hextet).lower())
return ':'.join(ret_ip)
def _is_shorthand_ip(ip_str):
"""Determine if the address is shortened.
Args:
ip_str: A string, the IPv6 address.
Returns:
A boolean, True if the address is shortened.
"""
if ip_str.count('::') == 1:
return True
if any(len(x) < 4 for x in ip_str.split(':')):
return True
return False
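
# Hedged examples (mirroring the behaviour documented by Django's tests; the
# settings.configure() call is only needed when running this module directly).
if __name__ == '__main__':
    from django.conf import settings
    settings.configure()
    print(clean_ipv6_address('2001:0::0:01'))                        # 2001::1
    print(clean_ipv6_address('::ffff:0a0a:0a0a', unpack_ipv4=True))  # 10.10.10.10
    print(is_valid_ipv6_address('12345::'))                          # False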
|
boberfly/gaffer | refs/heads/master | python/GafferUI/ViewUI.py | 7 | ##########################################################################
#
# Copyright (c) 2015, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import Gaffer
import GafferUI
Gaffer.Metadata.registerNode(
GafferUI.View,
"nodeToolbar:top:type", "GafferUI.StandardNodeToolbar.top",
plugs = {
"*" : [
"toolbarLayout:section", "Top",
],
"in" : [
"plugValueWidget:type", "",
],
"editScope" : [
# Most Views don't yet have any functionality that
# uses EditScopes, so we'll opt in to showing the
# widget on specific subclasses.
"plugValueWidget:type", "",
],
"user" : [
"plugValueWidget:type", "",
],
}
)
|
emonty/deb-vhd-util | refs/heads/master | tools/python/xen/xm/migrate.py | 43 | #============================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
# Copyright (C) 2004, 2005 Mike Wray <[email protected]>
# Copyright (c) 2005 XenSource Ltd.
#============================================================================
"""Domain migration.
"""
import sys
from xen.xm.opts import *
from main import server, serverType, get_single_vm, SERVER_XEN_API
gopts = Opts(use="""[options] DOM HOST
Migrate domain DOM to host HOST.
Xend must be running on the local host and on HOST.
""")
gopts.opt('help', short='h',
fn=set_true, default=0,
use="Print this help.")
gopts.opt('live', short='l',
fn=set_true, default=0,
use="Use live migration.")
gopts.opt('port', short='p', val='portnum',
fn=set_int, default=0,
use="Use specified port for migration.")
gopts.opt('node', short='n', val='nodenum',
fn=set_int, default=-1,
use="Use specified NUMA node on target.")
gopts.opt('ssl', short='s',
fn=set_true, default=None,
use="Use ssl connection for migration.")
gopts.opt('change_home_server', short='c',
fn=set_true, default=0,
use="Change home server for managed domains.")
def help():
return str(gopts)
def main(argv):
opts = gopts
opts.reset()
args = opts.parse(argv)
if len(args) != 2:
raise OptionError('Invalid number of arguments')
dom = args[0]
dst = args[1]
if serverType == SERVER_XEN_API:
vm_ref = get_single_vm(dom)
other_config = {
"port": opts.vals.port,
"node": opts.vals.node,
"ssl": opts.vals.ssl,
"change_home_server": opts.vals.change_home_server
}
server.xenapi.VM.migrate(vm_ref, dst, bool(opts.vals.live),
other_config)
else:
server.xend.domain.migrate(dom, dst, opts.vals.live,
opts.vals.port,
opts.vals.node,
opts.vals.ssl,
opts.vals.change_home_server)
|
lamkeewei/battleships | refs/heads/master | server/controllers/view_helper.py | 8 | ### Add the Jinja2 templating language ###
# Jinja2 is included in the App Engine SDK, so it doesn't need to be bundled here
import jinja2
# configure the Jinja2 templating library to read templates under server/views
JINJA_ENV = jinja2.Environment(
# path is relative to top level project
loader=jinja2.FileSystemLoader('server/views'),
extensions=['jinja2.ext.autoescape'])
### Add custom jinja filters below ###
def please_format(value):
"""Prepend 'please do' to value. """
return u'please give me %s' % value
JINJA_ENV.filters['sayplease'] = please_format
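
# Hedged usage sketch (illustrative): exercising the filter registered above
# via an inline template; Environment.from_string() is standard Jinja2 API.
if __name__ == '__main__':
    template = JINJA_ENV.from_string(u"{{ 'a pony' | sayplease }}")
    print(template.render())  # -> please give me a pony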
|
goodwillcoding/RIDE | refs/heads/master | rtest/__init__.py | 12133432 | |
atul-bhouraskar/django | refs/heads/master | tests/gis_tests/relatedapp/__init__.py | 12133432 | |
mbaijal/incubator-mxnet | refs/heads/master | example/ssd/__init__.py | 12133432 | |
tessercat/ddj | refs/heads/master | controllers/about.py | 1 | def index():
""" Return a dict for the about view. """
def about_page():
import os
from gluon.contrib.markdown import markdown2
md_dir = '/opt/ddj/book/markdown'
page = {}
with open(os.path.join(md_dir, 'about.md')) as fd:
page['about'] = markdown2.markdown(fd.read())
with open(os.path.join(md_dir, 'about-me.md')) as fd:
page['me'] = markdown2.markdown(fd.read())
with open(os.path.join(md_dir, 'app-resources.md')) as fd:
page['resources'] = markdown2.markdown(fd.read())
with open(os.path.join(md_dir, 'copyright.md')) as fd:
page['copyright'] = markdown2.markdown(fd.read())
with open(os.path.join(md_dir, 'references.md')) as fd:
page['references'] = markdown2.markdown(fd.read())
with open(os.path.join(md_dir, 'contact.md')) as fd:
page['contact'] = markdown2.markdown(fd.read())
return page
response.title = 'About Daoistic'
return cache.ram('about', lambda: about_page())
|
guibson91/curso-ionic2 | refs/heads/master | node_modules/node-gyp/gyp/pylib/gyp/MSVSProject.py | 2736 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio project reader/writer."""
import gyp.common
import gyp.easy_xml as easy_xml
#------------------------------------------------------------------------------
class Tool(object):
"""Visual Studio tool."""
def __init__(self, name, attrs=None):
"""Initializes the tool.
Args:
name: Tool name.
attrs: Dict of tool attributes; may be None.
"""
self._attrs = attrs or {}
self._attrs['Name'] = name
def _GetSpecification(self):
"""Creates an element for the tool.
Returns:
The specification for the tool, as a list in easy_xml format.
"""
return ['Tool', self._attrs]
class Filter(object):
"""Visual Studio filter - that is, a virtual folder."""
def __init__(self, name, contents=None):
"""Initializes the folder.
Args:
name: Filter (folder) name.
contents: List of filenames and/or Filter objects contained.
"""
self.name = name
self.contents = list(contents or [])
#------------------------------------------------------------------------------
class Writer(object):
"""Visual Studio XML project writer."""
def __init__(self, project_path, version, name, guid=None, platforms=None):
"""Initializes the project.
Args:
project_path: Path to the project file.
version: Format version to emit.
name: Name of the project.
guid: GUID to use for project, if not None.
platforms: Array of strings, the supported platforms. If None, defaults to ['Win32'].
"""
self.project_path = project_path
self.version = version
self.name = name
self.guid = guid
# Default to Win32 for platforms.
if not platforms:
platforms = ['Win32']
# Initialize the specifications of the various sections.
self.platform_section = ['Platforms']
for platform in platforms:
self.platform_section.append(['Platform', {'Name': platform}])
self.tool_files_section = ['ToolFiles']
self.configurations_section = ['Configurations']
self.files_section = ['Files']
# Keep a dict keyed on filename to speed up access.
self.files_dict = dict()
def AddToolFile(self, path):
"""Adds a tool file to the project.
Args:
path: Relative path from project to tool file.
"""
self.tool_files_section.append(['ToolFile', {'RelativePath': path}])
def _GetSpecForConfiguration(self, config_type, config_name, attrs, tools):
"""Returns the specification for a configuration.
Args:
config_type: Type of configuration node.
config_name: Configuration name.
attrs: Dict of configuration attributes; may be None.
tools: List of tools (strings or Tool objects); may be None.
Returns:
  The specification for the configuration node, as a list in easy_xml format.
"""
# Handle defaults
if not attrs:
attrs = {}
if not tools:
tools = []
# Add configuration node and its attributes
node_attrs = attrs.copy()
node_attrs['Name'] = config_name
specification = [config_type, node_attrs]
# Add tool nodes and their attributes
if tools:
for t in tools:
if isinstance(t, Tool):
specification.append(t._GetSpecification())
else:
specification.append(Tool(t)._GetSpecification())
return specification
def AddConfig(self, name, attrs=None, tools=None):
"""Adds a configuration to the project.
Args:
name: Configuration name.
attrs: Dict of configuration attributes; may be None.
tools: List of tools (strings or Tool objects); may be None.
"""
spec = self._GetSpecForConfiguration('Configuration', name, attrs, tools)
self.configurations_section.append(spec)
def _AddFilesToNode(self, parent, files):
"""Adds files and/or filters to the parent node.
Args:
parent: Destination node
files: A list of Filter objects and/or relative paths to files.
Will call itself recursively, if the files list contains Filter objects.
"""
for f in files:
if isinstance(f, Filter):
node = ['Filter', {'Name': f.name}]
self._AddFilesToNode(node, f.contents)
else:
node = ['File', {'RelativePath': f}]
self.files_dict[f] = node
parent.append(node)
def AddFiles(self, files):
"""Adds files to the project.
Args:
files: A list of Filter objects and/or relative paths to files.
This makes a copy of the file/filter tree at the time of this call. If you
later add files to a Filter object which was passed into a previous call
to AddFiles(), it will not be reflected in this project.
"""
self._AddFilesToNode(self.files_section, files)
# TODO(rspangler) This also doesn't handle adding files to an existing
# filter. That is, it doesn't merge the trees.
def AddFileConfig(self, path, config, attrs=None, tools=None):
"""Adds a configuration to a file.
Args:
path: Relative path to the file.
config: Name of configuration to add.
attrs: Dict of configuration attributes; may be None.
tools: List of tools (strings or Tool objects); may be None.
Raises:
ValueError: Relative path does not match any file added via AddFiles().
"""
# Find the file node with the right relative path
parent = self.files_dict.get(path)
if not parent:
raise ValueError('AddFileConfig: file "%s" not in project.' % path)
# Add the config to the file node
spec = self._GetSpecForConfiguration('FileConfiguration', config, attrs,
tools)
parent.append(spec)
def WriteIfChanged(self):
"""Writes the project file."""
# First create XML content definition
content = [
'VisualStudioProject',
{'ProjectType': 'Visual C++',
'Version': self.version.ProjectVersion(),
'Name': self.name,
'ProjectGUID': self.guid,
'RootNamespace': self.name,
'Keyword': 'Win32Proj'
},
self.platform_section,
self.tool_files_section,
self.configurations_section,
['References'], # empty section
self.files_section,
['Globals'] # empty section
]
easy_xml.WriteXmlIfChanged(content, self.project_path,
encoding="Windows-1252")
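
# Hedged usage sketch (illustrative only): a stub stands in for the MSVS
# version object, which normally supplies ProjectVersion(); the GUID and file
# names are made up.
class _StubVersion(object):
  def ProjectVersion(self):
    return '9.00'

if __name__ == '__main__':
  writer = Writer('example.vcproj', _StubVersion(), 'example',
                  guid='{00000000-0000-0000-0000-000000000001}')
  writer.AddConfig('Debug|Win32', attrs={'ConfigurationType': '1'},
                   tools=[Tool('VCCLCompilerTool', {'Optimization': '0'})])
  writer.AddFiles([Filter('src', ['main.cc'])])
  writer.WriteIfChanged()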
|
bbc/ebu-tt-live-toolkit | refs/heads/master | ebu_tt_live/twisted/base.py | 2 |
from zope.interface import Interface
class IBroadcaster(Interface):
def broadcast(self, channel, msg):
"""
Broadcast message to all connected clients.
:param channel:
:param msg:
:return:
"""
raise NotImplementedError()
def register(self, client):
"""
Register new client on connection opening.
:param client:
:return:
"""
raise NotImplementedError()
def unregister(self, client):
"""
Remove client from clients list on connection loss.
:param client:
:return:
"""
raise NotImplementedError()
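
# Hedged sketch (not part of the original module): a minimal in-memory
# implementation of the interface above. The sendMessage() call on clients is
# an assumption borrowed from typical websocket protocol objects.
from zope.interface import implementer

@implementer(IBroadcaster)
class InMemoryBroadcaster(object):

    def __init__(self):
        self._clients = set()

    def register(self, client):
        # Track the client so later broadcasts reach it.
        self._clients.add(client)

    def unregister(self, client):
        # Drop the client once its connection is lost.
        self._clients.discard(client)

    def broadcast(self, channel, msg):
        # Naive fan-out; assumes each client exposes sendMessage(msg).
        for client in self._clients:
            client.sendMessage(msg)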
|
GbalsaC/bitnamiP | refs/heads/master | venv/lib/python2.7/site-packages/django_ses/tests/utils.py | 7 | import sys
def unload_django_ses():
del sys.modules['django_ses.settings']
del sys.modules['django_ses']
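
# Hedged companion sketch (illustrative): unloading forces the module-level
# settings lookups to re-run, so tests can swap settings and re-import.
def reload_django_ses():
    unload_django_ses()
    import django_ses  # re-executes the module-level settings reads
    return django_ses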
|
vicnet/weboob | refs/heads/master | modules/vicseccard/browser.py | 2 | # -*- coding: utf-8 -*-
# Copyright(C) 2015 Oleg Plakhotniuk
#
# This file is part of a weboob module.
#
# This weboob module is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This weboob module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this weboob module. If not, see <http://www.gnu.org/licenses/>.
from datetime import datetime
from requests.exceptions import ConnectionError, Timeout
from weboob.browser import URL, LoginBrowser, need_login
from weboob.browser.exceptions import ServerError
from weboob.browser.pages import HTMLPage
from weboob.capabilities.bank import Account, AccountNotFound, Transaction
from weboob.exceptions import BrowserIncorrectPassword
from weboob.tools.capabilities.bank.transactions import AmericanTransaction as AmTr
from weboob.tools.compat import unicode
__all__ = ['VicSecCard']
class SomePage(HTMLPage):
@property
def logged(self):
return bool(self.doc.xpath(u'//span[text()="Sign Out"]'))
class LoginPage(SomePage):
def login(self, username, password):
form = self.get_form(name='frmLogin')
form['username_input'] = username
form['userName'] = username
form['password_input'] = password
form['hiddenPassword'] = password
form['btnLogin'] = 'btnLogin'
form.submit()
class HomePage(SomePage):
def account(self):
id_ = self.doc.xpath(u'//strong[contains(text(),'
u'"Credit Card account ending in")]/text()')[0].strip()[-4:]
balance = self.doc.xpath(
u'//span[@class="description" and text()="Current Balance"]/../span[@class="total"]/text()')[0].strip()
cardlimit = self.doc.xpath(u'//span[contains(text(),"Credit limit")]'
u'/text()')[0].split()[-1]
paymin = self.doc.xpath(u'//section[@id=" account_summary"]'
u'//strong[text()="Minimum Payment Due"]/../../span[2]/text()'
)[0].strip()
a = Account()
a.id = id_
a.label = u'ACCOUNT ENDING IN %s' % id_
a.currency = Account.get_currency(balance)
a.balance = -AmTr.decimal_amount(balance)
a.type = Account.TYPE_CARD
a.cardlimit = AmTr.decimal_amount(cardlimit)
a.paymin = AmTr.decimal_amount(paymin)
# TODO: Add paydate.
# Oleg: I don't have an account with scheduled payment.
# Need to wait for a while...
return a
class RecentPage(SomePage):
def iter_transactions(self):
for li in self.doc.xpath('//section[@class="transactions"]//div/li'):
date = li.xpath('p[@data-type="date"]//text()')[0].strip()
label = li.xpath('p[@data-type="description"]//text()')[0].strip()
amount = li.xpath('p[@data-type="amount"]//text()')[0].strip()
t = Transaction()
t.date = datetime.strptime(date, '%m/%d/%Y')
t.rdate = datetime.strptime(date, '%m/%d/%Y')
t.type = Transaction.TYPE_UNKNOWN
t.raw = unicode(label)
t.label = unicode(label)
t.amount = -AmTr.decimal_amount(amount)
yield t
class VicSecCard(LoginBrowser):
BASEURL = 'https://c.comenity.net'
MAX_RETRIES = 10
TIMEOUT = 30.0
login = URL(r'/victoriassecret/$', LoginPage)
home = URL(r'/victoriassecret/secure/SecureHome.xhtml', HomePage)
recent = URL(r'/victoriassecret/secure/accountactivity/Transactions.xhtml',
RecentPage)
unknown = URL('.*', SomePage)
def get_account(self, id_):
a = next(self.iter_accounts())
if a.id != id_:
raise AccountNotFound()
return a
@need_login
def iter_accounts(self):
yield self.home.stay_or_go().account()
@need_login
def iter_history(self, account):
for trans in self.recent.stay_or_go().iter_transactions():
yield trans
def do_login(self):
self.session.cookies.clear()
self.login.go().login(self.username, self.password)
if not self.page.logged:
raise BrowserIncorrectPassword()
def location(self, *args, **kwargs):
for i in range(self.MAX_RETRIES):
try:
return super(VicSecCard, self).location(*args, **kwargs)
except (ServerError, Timeout, ConnectionError) as e:
last_error = e
raise last_error
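
# Hedged standalone sketch (not weboob-specific): the retry pattern used by
# location() above, generalized. The retry count and exception tuple are
# illustrative; callers must pass retries >= 1.
def retry_call(fn, retries=10, exceptions=(Exception,)):
    last_error = None
    for _ in range(retries):
        try:
            return fn()
        except exceptions as e:
            last_error = e
    raise last_error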
|
Darkmoth/python-django-4 | refs/heads/master | Thing/env/Lib/site-packages/django/contrib/gis/gdal/feature.py | 439 | from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import GDALException, OGRIndexError
from django.contrib.gis.gdal.field import Field
from django.contrib.gis.gdal.geometries import OGRGeometry, OGRGeomType
from django.contrib.gis.gdal.prototypes import ds as capi, geom as geom_api
from django.utils import six
from django.utils.encoding import force_bytes, force_text
from django.utils.six.moves import range
# For more information, see the OGR C API source code:
# http://www.gdal.org/ogr/ogr__api_8h.html
#
# The OGR_F_* routines are relevant here.
class Feature(GDALBase):
"""
This class that wraps an OGR Feature, needs to be instantiated
from a Layer object.
"""
def __init__(self, feat, layer):
"""
Initializes Feature from a pointer and its Layer object.
"""
if not feat:
raise GDALException('Cannot create OGR Feature, invalid pointer given.')
self.ptr = feat
self._layer = layer
def __del__(self):
"Releases a reference to this object."
if self._ptr and capi:
capi.destroy_feature(self._ptr)
def __getitem__(self, index):
"""
Gets the Field object at the specified index, which may be either
an integer or the Field's string label. Note that the Field object
is not the field's _value_ -- use the `get` method to retrieve
the value (e.g. an integer) instead of a Field instance.
"""
if isinstance(index, six.string_types):
i = self.index(index)
else:
if index < 0 or index >= self.num_fields:
raise OGRIndexError('index out of range')
i = index
return Field(self, i)
def __iter__(self):
"Iterates over each field in the Feature."
for i in range(self.num_fields):
yield self[i]
def __len__(self):
"Returns the count of fields in this feature."
return self.num_fields
def __str__(self):
"The string name of the feature."
return 'Feature FID %d in Layer<%s>' % (self.fid, self.layer_name)
def __eq__(self, other):
"Does equivalence testing on the features."
return bool(capi.feature_equal(self.ptr, other._ptr))
# #### Feature Properties ####
@property
def encoding(self):
return self._layer._ds.encoding
@property
def fid(self):
"Returns the feature identifier."
return capi.get_fid(self.ptr)
@property
def layer_name(self):
"Returns the name of the layer for the feature."
name = capi.get_feat_name(self._layer._ldefn)
return force_text(name, self.encoding, strings_only=True)
@property
def num_fields(self):
"Returns the number of fields in the Feature."
return capi.get_feat_field_count(self.ptr)
@property
def fields(self):
"Returns a list of fields in the Feature."
return [capi.get_field_name(capi.get_field_defn(self._layer._ldefn, i))
for i in range(self.num_fields)]
@property
def geom(self):
"Returns the OGR Geometry for this Feature."
# Retrieving the geometry pointer for the feature.
geom_ptr = capi.get_feat_geom_ref(self.ptr)
return OGRGeometry(geom_api.clone_geom(geom_ptr))
@property
def geom_type(self):
"Returns the OGR Geometry Type for this Feture."
return OGRGeomType(capi.get_fd_geom_type(self._layer._ldefn))
# #### Feature Methods ####
def get(self, field):
"""
Returns the value of the field, instead of an instance of the Field
object. May take a string of the field name or a Field object as
parameters.
"""
field_name = getattr(field, 'name', field)
return self[field_name].value
def index(self, field_name):
"Returns the index of the given field name."
i = capi.get_field_index(self.ptr, force_bytes(field_name))
if i < 0:
raise OGRIndexError('invalid OFT field name given: "%s"' % field_name)
return i
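
# Hedged usage sketch (the shapefile path is hypothetical): Feature objects
# are normally obtained by iterating a Layer from a DataSource.
if __name__ == '__main__':
    from django.contrib.gis.gdal import DataSource
    ds = DataSource('/tmp/cities.shp')  # hypothetical path
    layer = ds[0]
    for feat in layer:
        print('%s: %s' % (feat.fid, feat.get(layer.fields[0])))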
|
rmanoni/mi-instrument | refs/heads/master | mi/instrument/mclane/driver.py | 1 | """
@package mi.instrument.mclane.driver
@file marine-integrations/mi/instrument/mclane/driver.py
@author Dan Mergens
@brief Driver base class for McLane instruments
Release notes:
initial version
"""
import datetime
__author__ = 'Dan Mergens'
__license__ = 'Apache 2.0'
import re
import time
from mi.core.log import get_logger
log = get_logger()
from mi.core.common import BaseEnum
from mi.core.util import dict_equal
from mi.core.exceptions import SampleException, \
InstrumentParameterException, \
InstrumentProtocolException, \
InstrumentTimeoutException
from mi.core.instrument.instrument_protocol import \
CommandResponseInstrumentProtocol, \
RE_PATTERN, \
DEFAULT_CMD_TIMEOUT
from mi.core.instrument.instrument_fsm import ThreadSafeFSM
from mi.core.instrument.chunker import StringChunker
from mi.core.instrument.instrument_driver import \
DriverEvent, \
DriverAsyncEvent, \
DriverProtocolState, \
DriverParameter, \
ResourceAgentState
from mi.core.instrument.data_particle import \
DataParticle, \
DataParticleKey, \
CommonDataParticleType
from mi.core.instrument.driver_dict import DriverDictKey
from mi.core.instrument.protocol_param_dict import ProtocolParameterDict, \
ParameterDictType, \
ParameterDictVisibility
NEWLINE = '\r\n'
CONTROL_C = '\x03'
NUM_PORTS = 24 # number of collection bags
# default timeout.
INTER_CHARACTER_DELAY = .2 # works
# INTER_CHARACTER_DELAY = .02 - too fast
# INTER_CHARACTER_DELAY = .04
PUMP_RATE_ERROR = 1.15  # PPS is off in its flow rate measurement by 14.5% - TODO - check RAS data
####
# Driver Constant Definitions
####
class ScheduledJob(BaseEnum):
CLOCK_SYNC = 'clock_sync'
class ProtocolState(BaseEnum):
"""
Instrument protocol states
"""
UNKNOWN = DriverProtocolState.UNKNOWN
COMMAND = DriverProtocolState.COMMAND
DIRECT_ACCESS = DriverProtocolState.DIRECT_ACCESS
FLUSH = 'DRIVER_STATE_FLUSH'
FILL = 'DRIVER_STATE_FILL'
CLEAR = 'DRIVER_STATE_CLEAR'
RECOVERY = 'DRIVER_STATE_RECOVERY' # for recovery after pump failure
class ProtocolEvent(BaseEnum):
"""
Protocol events
"""
ENTER = DriverEvent.ENTER
EXIT = DriverEvent.EXIT
DISCOVER = DriverEvent.DISCOVER
INIT_PARAMS = DriverEvent.INIT_PARAMS
EXECUTE_DIRECT = DriverEvent.EXECUTE_DIRECT
START_DIRECT = DriverEvent.START_DIRECT
STOP_DIRECT = DriverEvent.STOP_DIRECT
GET = DriverEvent.GET
SET = DriverEvent.SET
ACQUIRE_SAMPLE = DriverEvent.ACQUIRE_SAMPLE
# ACQUIRE_STATUS = DriverEvent.ACQUIRE_STATUS
CLOCK_SYNC = DriverEvent.CLOCK_SYNC
FLUSH = 'DRIVER_EVENT_FLUSH'
FILL = 'DRIVER_EVENT_FILL'
CLEAR = 'DRIVER_EVENT_CLEAR'
PUMP_STATUS = 'DRIVER_EVENT_PUMP_STATUS'
INSTRUMENT_FAILURE = 'DRIVER_EVENT_INSTRUMENT_FAILURE'
class Capability(BaseEnum):
"""
Protocol events that should be exposed to users (subset of above).
"""
GET = ProtocolEvent.GET
SET = ProtocolEvent.SET
CLOCK_SYNC = ProtocolEvent.CLOCK_SYNC
ACQUIRE_SAMPLE = ProtocolEvent.ACQUIRE_SAMPLE
# ACQUIRE_STATUS = ProtocolEvent.ACQUIRE_STATUS
CLEAR = ProtocolEvent.CLEAR
START_DIRECT = DriverEvent.START_DIRECT
STOP_DIRECT = DriverEvent.STOP_DIRECT
class Parameter(DriverParameter):
"""
Device specific parameters.
"""
FLUSH_VOLUME = "flush_volume"
FLUSH_FLOWRATE = "flush_flowrate"
FLUSH_MINFLOW = "flush_minflow"
FILL_VOLUME = "fill_volume"
FILL_FLOWRATE = "fill_flowrate"
FILL_MINFLOW = "fill_minflow"
CLEAR_VOLUME = "clear_volume"
CLEAR_FLOWRATE = "clear_flowrate"
CLEAR_MINFLOW = "clear_minflow"
class McLaneCommand(BaseEnum):
"""
Instrument command strings - case insensitive
"""
GO = NEWLINE
CONTROL_C = CONTROL_C
CLOCK = 'clock' # set the clock date and time
BATTERY = 'battery' # display battery voltage
HOME = 'home' # set the port to the home port (0)
FORWARD = 'forward' # start forward pump operation < volume flowrate minflow [time] >
REVERSE = 'reverse' # reverse pump operation < volume flowrate minflow [time] >
PORT = 'port' # display current port or set valve to supplied position
CAPACITY = 'capacity' # pump max flow rate mL/min
COPYRIGHT = 'copyright' # display version, release and copyright notice
class Prompt(BaseEnum):
"""
Device i/o prompts.
"""
CR_NL = '\r\n'
PERIOD = '.'
SUSPENDED = 'Suspended ... '
ENTER_CTRL_C = 'Enter ^C now to wake up ...'
COMMAND_INPUT = '>'
UNRECOGNIZED_COMMAND = '] unrecognized command'
class McLaneResponse(BaseEnum):
"""
Expected device response strings
"""
HOME = re.compile(r'Port: 00')
PORT = re.compile(r'Port: (\d+)') # e.g. Port: 01
# e.g. 03/25/14 20:24:02 PPS ML13003-01>
READY = re.compile(r'(\d+/\d+/\d+\s+\d+:\d+:\d+\s+)(RAS|PPS)\s+(.*)>')
# Result 00 | 75 100 25 4 | 77.2 98.5 99.1 47 031514 001813 | 29.8 1
# Result 00 | 10 100 75 60 | 10.0 85.5 100.0 7 032814 193855 | 30.0 1
PUMP = re.compile(r'(Status|Result).*(\d+)' + NEWLINE)
# Battery: 30.1V [Alkaline, 18V minimum]
BATTERY = re.compile(r'Battery:\s+(\d*\.\d+)V\s+\[.*\]') # battery voltage
# Capacity: Maxon 250mL
CAPACITY = re.compile(r'Capacity:\s(Maxon|Pittman)\s+(\d+)mL') # pump make and capacity
# McLane Research Laboratories, Inc.
# CF2 Adaptive Water Transfer System
# Version 2.02 of Jun 7 2013 18:17
# Configured for: Maxon 250ml pump
VERSION = re.compile(
r'McLane .*$' + NEWLINE +
r'CF2 .*$' + NEWLINE +
r'Version\s+(\S+)\s+of\s+(.*)$' + NEWLINE + # version and release date
r'.*$'
)
class Timeout(BaseEnum):
"""
Timeouts for commands # TODO - calculate based on flow rate & volume
"""
HOME = 30
PORT = 10 + 2 # average time to advance to next port is 10 seconds, any more indicates skipping of a port
FLUSH = 103 + 5
FILL = 2728 + 30
CLEAR = 68 + 5
CLOCK = INTER_CHARACTER_DELAY * 30 + 1
#####
# Codes for pump termination
TerminationCodes = {
0: 'Pumping in progress',
1: 'Volume reached',
2: 'Time limit reached',
3: 'Min flow reached',
4: 'Low battery',
5: 'Stopped by user',
6: 'Pump would not start',
7: 'Sudden flow obstruction',
8: 'Sudden obstruction with slip',
9: 'Sudden pressure release'
}
class TerminationCodeEnum(BaseEnum):
PUMP_IN_PROGRESS = 0
VOLUME_REACHED = 1
TIME_LIMIT_REACHED = 2
MIN_FLOW_REACHED = 3
LOW_BATTERY = 4
STOPPED_BY_USER = 5
PUMP_WOULD_NOT_START = 6
SUDDEN_FLOW_OBSTRUCTION = 7
SUDDEN_OBSTRUCTION_WITH_SLIP = 8
SUDDEN_PRESSURE_RELEASE = 9
class McLaneDataParticleType(BaseEnum):
"""
Data particle types produced by this driver
"""
# TODO - define which commands will be published to user
RAW = CommonDataParticleType.RAW
MCLANE_PARSED = 'mclane_parsed'
PUMP_STATUS = 'pump_status'
VOLTAGE_STATUS = 'battery'
VERSION_INFO = 'version'
###############################################################################
# Data Particles
###############################################################################
class McLaneSampleDataParticleKey(BaseEnum):
PORT = 'port_number'
VOLUME_COMMANDED = 'commanded_volume'
FLOW_RATE_COMMANDED = 'commanded_flowrate'
MIN_FLOW_COMMANDED = 'commanded_min_flowrate'
TIME_LIMIT = 'commanded_timelimit'
VOLUME_ACTUAL = 'cumulative_volume'
FLOW_RATE_ACTUAL = 'flowrate'
MIN_FLOW_ACTUAL = 'minimum_flowrate'
TIMER = 'elapsed_time'
TIME = 'date_time_string'
BATTERY = 'battery_voltage'
CODE = 'sampling_status_code'
# data particle for forward, reverse, and result commands
# e.g.:
# --- command --- -------- result -------------
# Result port | vol flow minf tlim | vol flow minf secs date-time | batt code
# Status 00 | 75 100 25 4 | 1.5 90.7 90.7* 1 031514 001727 | 29.9 0
class McLaneSampleDataParticle(DataParticle):
@staticmethod
def regex():
"""
get the compiled regex pattern
@return: compiled re
"""
exp = str(r'(?P<status>Status|Result)' + # status is incremental, result is the last return from the command
'\s*(?P<port>\d+)\s*\|' + # PORT
'\s*(?P<commanded_volume>\d+)' + # VOLUME_COMMANDED
'\s*(?P<commanded_flow_rate>\d+)' + # FLOW RATE COMMANDED
'\s*(?P<commanded_min_flowrate>\d+)' + # MIN RATE COMMANDED
'\s*(?P<time_limit>\d+)\s*\|' + # TLIM - TODO
'\s*(?P<volume>\d*\.?\d+)' + # VOLUME (actual)
'\s*(?P<flow_rate>\d*\.?\d+)' + # FLOW RATE (actual)
'\s*(?P<min_flow>\d*\.?\d+)' + # MIN RATE (actual)
'\*?' +
'\s*(?P<timer>\d+)' + # elapsed time (seconds)
'\s*(?P<time>\d+\s*\d+)\s*\|' + # MMDDYY HHMMSS (current date and time)
'\s*(?P<voltage>\d*\.?\d+)' + # voltage (battery)
'\s*(?P<code>\d+)' + # code enumeration
'\s*' + NEWLINE)
return exp
@staticmethod
def regex_compiled():
"""
get the compiled regex pattern
@return: compiled re
"""
return re.compile(McLaneSampleDataParticle.regex())
def _build_parsed_values(self):
match = McLaneSampleDataParticle.regex_compiled().match(self.raw_data)
if not match:
raise SampleException("McLaneSampleDataParticle: No regex match of parsed sample data: [%s]" % self.raw_data)
result = [
{DataParticleKey.VALUE_ID: McLaneSampleDataParticleKey.PORT,
DataParticleKey.VALUE: int(match.group('port'))},
{DataParticleKey.VALUE_ID: McLaneSampleDataParticleKey.VOLUME_COMMANDED,
DataParticleKey.VALUE: int(match.group('commanded_volume'))},
{DataParticleKey.VALUE_ID: McLaneSampleDataParticleKey.FLOW_RATE_COMMANDED,
DataParticleKey.VALUE: int(match.group('commanded_flow_rate'))},
{DataParticleKey.VALUE_ID: McLaneSampleDataParticleKey.MIN_FLOW_COMMANDED,
DataParticleKey.VALUE: int(match.group('commanded_min_flowrate'))},
{DataParticleKey.VALUE_ID: McLaneSampleDataParticleKey.TIME_LIMIT,
DataParticleKey.VALUE: int(match.group('time_limit'))},
{DataParticleKey.VALUE_ID: McLaneSampleDataParticleKey.VOLUME_ACTUAL,
DataParticleKey.VALUE: float(match.group('volume'))},
{DataParticleKey.VALUE_ID: McLaneSampleDataParticleKey.FLOW_RATE_ACTUAL,
DataParticleKey.VALUE: float(match.group('flow_rate'))},
{DataParticleKey.VALUE_ID: McLaneSampleDataParticleKey.MIN_FLOW_ACTUAL,
DataParticleKey.VALUE: float(match.group('min_flow'))},
{DataParticleKey.VALUE_ID: McLaneSampleDataParticleKey.TIMER,
DataParticleKey.VALUE: int(match.group('timer'))},
{DataParticleKey.VALUE_ID: McLaneSampleDataParticleKey.TIME,
DataParticleKey.VALUE: str(match.group('time'))},
{DataParticleKey.VALUE_ID: McLaneSampleDataParticleKey.BATTERY,
DataParticleKey.VALUE: float(match.group('voltage'))},
{DataParticleKey.VALUE_ID: McLaneSampleDataParticleKey.CODE,
DataParticleKey.VALUE: int(match.group('code'))}]
return result
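# Illustrative only (values hypothetical; NEWLINE is the driver's line
# terminator defined elsewhere in this module):
#   m = McLaneSampleDataParticle.regex_compiled().match(
#       'Result 00 |  75 100  25   4 |  75.0 100.0  75.0  45 031514 001727 | 29.9  1' + NEWLINE)
#   m.group('status')     # -> 'Result'
#   int(m.group('code'))  # -> 1, i.e. TerminationCodeEnum.VOLUME_REACHED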
###########################################################################
# Protocol
###########################################################################
# noinspection PyMethodMayBeStatic,PyUnusedLocal
class McLaneProtocol(CommandResponseInstrumentProtocol):
"""
Instrument protocol class
Subclasses CommandResponseInstrumentProtocol
"""
# __metaclass__ = get_logging_metaclass(log_level='debug')
def __init__(self, prompts, newline, driver_event):
"""
Protocol constructor.
@param prompts A BaseEnum class containing instrument prompts.
@param newline The newline.
@param driver_event Driver process event callback.
"""
# Construct protocol superclass.
CommandResponseInstrumentProtocol.__init__(self, prompts, newline, driver_event)
# Build protocol state machine.
self._protocol_fsm = ThreadSafeFSM(ProtocolState, ProtocolEvent, ProtocolEvent.ENTER, ProtocolEvent.EXIT)
# Add event handlers for protocol state machine.
handlers = {
ProtocolState.UNKNOWN: [
(ProtocolEvent.ENTER, self._handler_unknown_enter),
(ProtocolEvent.DISCOVER, self._handler_unknown_discover),
],
ProtocolState.COMMAND: [
(ProtocolEvent.ENTER, self._handler_command_enter),
(ProtocolEvent.INIT_PARAMS, self._handler_command_init_params),
(ProtocolEvent.START_DIRECT, self._handler_command_start_direct),
(ProtocolEvent.CLOCK_SYNC, self._handler_sync_clock),
(ProtocolEvent.ACQUIRE_SAMPLE, self._handler_command_acquire),
# (ProtocolEvent.ACQUIRE_STATUS, self._handler_command_status),
(ProtocolEvent.CLEAR, self._handler_command_clear),
(ProtocolEvent.GET, self._handler_get),
(ProtocolEvent.SET, self._handler_command_set),
],
ProtocolState.FLUSH: [
(ProtocolEvent.ENTER, self._handler_flush_enter),
(ProtocolEvent.FLUSH, self._handler_flush_flush),
(ProtocolEvent.PUMP_STATUS, self._handler_flush_pump_status),
(ProtocolEvent.INSTRUMENT_FAILURE, self._handler_all_failure),
],
ProtocolState.FILL: [
(ProtocolEvent.ENTER, self._handler_fill_enter),
(ProtocolEvent.FILL, self._handler_fill_fill),
(ProtocolEvent.PUMP_STATUS, self._handler_fill_pump_status),
(ProtocolEvent.INSTRUMENT_FAILURE, self._handler_all_failure),
],
ProtocolState.CLEAR: [
(ProtocolEvent.ENTER, self._handler_clear_enter),
(ProtocolEvent.CLEAR, self._handler_clear_clear),
(ProtocolEvent.PUMP_STATUS, self._handler_clear_pump_status),
(ProtocolEvent.INSTRUMENT_FAILURE, self._handler_all_failure),
],
ProtocolState.RECOVERY: [
(ProtocolEvent.ENTER, self._handler_recovery_enter),
],
ProtocolState.DIRECT_ACCESS: [
(ProtocolEvent.ENTER, self._handler_direct_access_enter),
(ProtocolEvent.EXECUTE_DIRECT, self._handler_direct_access_execute_direct),
(ProtocolEvent.STOP_DIRECT, self._handler_direct_access_stop_direct),
],
}
for state in handlers:
for event, handler in handlers[state]:
self._protocol_fsm.add_handler(state, event, handler)
# Add build handlers for device commands - we are only using simple commands
for cmd in McLaneCommand.list():
self._add_build_handler(cmd, self._build_command)
# Add response handlers for device commands.
# self._add_response_handler(McLaneCommand.BATTERY, self._parse_battery_response)
# self._add_response_handler(McLaneCommand.CLOCK, self._parse_clock_response)
# self._add_response_handler(McLaneCommand.PORT, self._parse_port_response)
# Construct the parameter dictionary containing device parameters,
# current parameter values, and set formatting functions.
self._build_param_dict()
self._build_command_dict()
self._build_driver_dict()
self._chunker = StringChunker(McLaneProtocol.sieve_function)
self._add_scheduler_event(ScheduledJob.CLOCK_SYNC, ProtocolEvent.CLOCK_SYNC)
# Start state machine in UNKNOWN state.
self._protocol_fsm.start(ProtocolState.UNKNOWN)
self._sent_cmds = None
# TODO - reset next_port on mechanical refresh of the PPS filters - how is the driver notified?
# TODO - need to persist state for next_port to save driver restart
self.next_port = 1 # next available port
self._second_attempt = False
@staticmethod
def sieve_function(raw_data):
"""
        The method that splits samples and status lines out of the raw data stream
"""
matchers = []
return_list = []
matchers.append(McLaneSampleDataParticle.regex_compiled())
for matcher in matchers:
for match in matcher.finditer(raw_data):
return_list.append((match.start(), match.end()))
return return_list
def _filter_capabilities(self, events):
"""
Return a list of currently available capabilities.
"""
return [x for x in events if Capability.has(x)]
########################################################################
# implement virtual methods from base class.
########################################################################
def _set_params(self, *args, **kwargs):
"""
Issue commands to the instrument to set various parameters. If
startup is set to true that means we are setting startup values
and immutable parameters can be set. Otherwise only READ_WRITE
parameters can be set.
@param params dictionary containing parameter name and value pairs
@param startup flag - true indicates initializing, false otherwise
"""
params = args[0]
# check for attempt to set readonly parameters (read-only or immutable set outside startup)
self._verify_not_readonly(*args, **kwargs)
old_config = self._param_dict.get_config()
for (key, val) in params.iteritems():
log.debug("KEY = " + str(key) + " VALUE = " + str(val))
self._param_dict.set_value(key, val)
new_config = self._param_dict.get_config()
log.debug('new config: %s\nold config: %s', new_config, old_config)
# check for parameter change
if not dict_equal(old_config, new_config):
self._driver_event(DriverAsyncEvent.CONFIG_CHANGE)
def apply_startup_params(self):
"""
Apply startup parameters
"""
# fn = "apply_startup_params"
# config = self.get_startup_config()
# log.debug("%s: startup config = %s", fn, config)
#
# for param in Parameter.list():
# if param in config:
# self._param_dict.set_value(param, config[param])
#
# log.debug("%s: new parameters", fn)
# for x in config:
# log.debug(" parameter %s: %s", x, config[x])
if self.get_current_state() != DriverProtocolState.COMMAND:
raise InstrumentProtocolException('cannot set parameters outside command state')
self._set_params(self.get_startup_config(), True)
########################################################################
# Instrument commands.
########################################################################
def _do_cmd_resp(self, cmd, *args, **kwargs):
"""
Perform a command-response on the device. Overrides the base class so it will
return the regular expression groups without concatenating them into a string.
@param cmd The command to execute.
@param args positional arguments to pass to the build handler.
        @param write_delay not used; INTER_CHARACTER_DELAY is always inserted
        between characters when the command is sent.
@param timeout optional wakeup and command timeout via kwargs.
@param response_regex kwarg with a compiled regex for the response to
match. Groups that match will be returned as a tuple.
@retval response The parsed response result.
@raises InstrumentTimeoutException if the response did not occur in time.
@raises InstrumentProtocolException if command could not be built or if response
was not recognized.
"""
# Get timeout and initialize response.
timeout = kwargs.get('timeout', DEFAULT_CMD_TIMEOUT)
response_regex = kwargs.get('response_regex', None) # required argument
write_delay = INTER_CHARACTER_DELAY
retval = None
if not response_regex:
raise InstrumentProtocolException('missing required keyword argument "response_regex"')
        if not isinstance(response_regex, RE_PATTERN):
raise InstrumentProtocolException('Response regex is not a compiled pattern!')
# Get the build handler.
build_handler = self._build_handlers.get(cmd, None)
if not build_handler:
raise InstrumentProtocolException('Cannot build command: %s' % cmd)
cmd_line = build_handler(cmd, *args)
# Wakeup the device, pass up exception if timeout
prompt = self._wakeup(timeout)
# Clear line and prompt buffers for result.
self._linebuf = ''
self._promptbuf = ''
# Send command.
log.debug('_do_cmd_resp: %s, timeout=%s, write_delay=%s, response_regex=%s',
repr(cmd_line), timeout, write_delay, response_regex)
for char in cmd_line:
self._connection.send(char)
time.sleep(write_delay)
# Wait for the prompt, prepare result and return, timeout exception
return self._get_response(timeout, response_regex=response_regex)
def _do_cmd_home(self):
"""
Move valve to the home port
@retval True if successful, False if unable to return home
"""
        log.debug('--- djm --- command home')
port = int(self._do_cmd_resp(McLaneCommand.PORT, response_regex=McLaneResponse.PORT)[0])
log.debug('--- djm --- at port: %d', port)
if port != 0:
log.debug('--- djm --- going home')
self._do_cmd_resp(McLaneCommand.HOME, response_regex=McLaneResponse.HOME, timeout=Timeout.HOME)
port = int(self._do_cmd_resp(McLaneCommand.PORT, response_regex=McLaneResponse.PORT)[0])
if port != 0:
log.error('Unable to return to home port')
return False
return True
def _do_cmd_flush(self, *args, **kwargs):
"""
Flush the home port in preparation for collecting a sample. This clears the intake port so that
the sample taken will be new.
This only starts the flush. The remainder of the flush is monitored by got_chunk.
"""
flush_volume = self._param_dict.get(Parameter.FLUSH_VOLUME)
flush_flowrate = self._param_dict.get(Parameter.FLUSH_FLOWRATE)
flush_minflow = self._param_dict.get(Parameter.FLUSH_MINFLOW)
if not self._do_cmd_home():
self._async_raise_fsm_event(ProtocolEvent.INSTRUMENT_FAILURE)
        log.debug('--- djm --- flushing home port, %d %d %d',
                  flush_volume, flush_flowrate, flush_minflow)
self._do_cmd_no_resp(McLaneCommand.FORWARD, flush_volume, flush_flowrate, flush_minflow)
def _do_cmd_fill(self, *args, **kwargs):
"""
Fill the sample at the next available port
"""
fill_volume = self._param_dict.get(Parameter.FILL_VOLUME)
fill_flowrate = self._param_dict.get(Parameter.FILL_FLOWRATE)
fill_minflow = self._param_dict.get(Parameter.FILL_MINFLOW)
log.debug('--- djm --- collecting sample in port %d', self.next_port)
reply = self._do_cmd_resp(McLaneCommand.PORT, self.next_port, response_regex=McLaneResponse.PORT)
log.debug('--- djm --- port returned:\n%r', reply)
self.next_port += 1 # succeed or fail, we can't use this port again
# TODO - commit next_port to the agent for persistent data store
self._do_cmd_no_resp(McLaneCommand.FORWARD, fill_volume, fill_flowrate, fill_minflow)
def _do_cmd_clear(self, *args, **kwargs):
"""
Clear the home port
"""
self._do_cmd_home()
clear_volume = self._param_dict.get(Parameter.CLEAR_VOLUME)
clear_flowrate = self._param_dict.get(Parameter.CLEAR_FLOWRATE)
clear_minflow = self._param_dict.get(Parameter.CLEAR_MINFLOW)
log.debug('--- djm --- clearing home port, %d %d %d',
clear_volume, clear_flowrate, clear_minflow)
self._do_cmd_no_resp(McLaneCommand.REVERSE, clear_volume, clear_flowrate, clear_minflow)
########################################################################
# Generic handlers.
########################################################################
def _handler_pass(self, *args, **kwargs):
pass
def _handler_all_failure(self, *args, **kwargs):
log.error('Instrument failure detected. Entering recovery mode.')
return ProtocolState.RECOVERY, ResourceAgentState.BUSY
########################################################################
# Unknown handlers.
########################################################################
def _handler_unknown_enter(self, *args, **kwargs):
"""
Enter unknown state.
"""
# Tell driver superclass to send a state change event.
# Superclass will query the state.
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
# TODO - read persistent data (next port)
def _handler_unknown_discover(self, *args, **kwargs):
"""
Discover current state; can only be COMMAND (instrument has no AUTOSAMPLE mode).
@retval (next_state, result), (ProtocolState.COMMAND, None) if successful.
"""
# force to command mode, this instrument has no autosample mode
return ProtocolState.COMMAND, ResourceAgentState.IDLE
########################################################################
# Flush
########################################################################
def _handler_flush_enter(self, *args, **kwargs):
"""
Enter the flush state. Trigger FLUSH event.
"""
log.debug('--- djm --- entering FLUSH state')
self._second_attempt = False
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
self._async_raise_fsm_event(ProtocolEvent.FLUSH)
def _handler_flush_flush(self, *args, **kwargs):
"""
Begin flushing the home port. Subsequent flushing will be monitored and sent to the flush_pump_status
handler.
"""
log.debug('--- djm --- in FLUSH state')
# 2. Set to home port
# 3. flush intake (home port)
# 4. wait 30 seconds
# 1. Get next available port (if no available port, bail)
log.debug('--- djm --- Flushing home port')
self._do_cmd_flush()
return None, (ResourceAgentState.BUSY, None)
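    # Flow of control (sketch): the FORWARD command started above emits periodic
    # 'Status' lines and a final 'Result' line; the chunker matches them with
    # McLaneSampleDataParticle.regex_compiled(), and the resulting PUMP_STATUS
    # events are consumed by _handler_flush_pump_status below to choose the
    # next state.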
def _handler_flush_pump_status(self, *args, **kwargs):
"""
        Manage pump status updates during flush. Status updates indicate continued pumping; Result updates
        indicate completion of the command. Check the termination code for success.
@args match object containing the regular expression match of the status line.
"""
match = args[0]
pump_status = match.group('status')
code = int(match.group('code'))
next_state = None
next_agent_state = None
log.debug('--- djm --- received pump status: pump status: %s, code: %d', pump_status, code)
if pump_status == 'Result':
log.debug('--- djm --- flush completed - %s', TerminationCodes[code])
if code == TerminationCodeEnum.SUDDEN_FLOW_OBSTRUCTION:
log.info('Encountered obstruction during flush, attempting to clear')
self._async_raise_fsm_event(ProtocolEvent.CLEAR)
else:
next_state = ProtocolState.FILL
next_agent_state = ResourceAgentState.BUSY
# elif pump_status == 'Status':
return next_state, next_agent_state
def _handler_flush_clear(self, *args, **kwargs):
"""
Attempt to clear home port after stoppage has occurred during flush.
This is only performed once. On the second stoppage, the driver will enter recovery mode.
"""
log.debug('--- djm --- handling clear request during flush')
if self._second_attempt:
return ProtocolState.RECOVERY, ResourceAgentState.BUSY
self._second_attempt = True
self._do_cmd_clear()
return None, None
########################################################################
# Fill
########################################################################
def _handler_fill_enter(self, *args, **kwargs):
"""
Enter the fill state. Trigger FILL event.
"""
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
self._async_raise_fsm_event(ProtocolEvent.FILL)
def _handler_fill_fill(self, *args, **kwargs):
"""
Send the fill command and process the first response
"""
next_state = None
next_agent_state = None
result = None
        log.debug('--- djm --- handling FILL event')
# 5. switch to collection port (next available)
# 6. collect sample (4000 ml)
# 7. wait 2 minutes
if self.next_port > NUM_PORTS:
log.error('Unable to collect RAS sample - %d containers full', NUM_PORTS)
next_state = ProtocolState.COMMAND
next_agent_state = ResourceAgentState.COMMAND
else:
self._do_cmd_fill()
return next_state, (next_agent_state, result)
def _handler_fill_pump_status(self, *args, **kwargs):
"""
Process pump status updates during filter collection.
"""
next_state = None
next_agent_state = None
match = args[0]
pump_status = match.group('status')
code = int(match.group('code'))
        if pump_status == 'Result':
            if code != TerminationCodeEnum.VOLUME_REACHED:
                next_state = ProtocolState.RECOVERY
            else:
                next_state = ProtocolState.CLEAR  # all done
# if pump_status == 'Status':
# TODO - check for bag rupture (> 93% flow rate near end of sample collect- RAS only)
return next_state, next_agent_state
########################################################################
# Clear
########################################################################
def _handler_clear_enter(self, *args, **kwargs):
"""
Enter the clear state. Trigger the CLEAR event.
"""
self._second_attempt = False
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
self._async_raise_fsm_event(ProtocolEvent.CLEAR)
def _handler_clear_clear(self, *args, **kwargs):
"""
Send the clear command. If there is an obstruction trigger a FLUSH, otherwise place driver in RECOVERY mode.
"""
log.debug('--- djm --- clearing home port')
# 8. return to home port
# 9. reverse flush 75 ml to pump water from exhaust line through intake line
self._do_cmd_clear()
return None, None
def _handler_clear_pump_status(self, *args, **kwargs):
"""
Parse pump status during clear action.
"""
next_state = None
next_agent_state = None
match = args[0]
pump_status = match.group('status')
code = int(match.group('code'))
if pump_status == 'Result':
if code != TerminationCodeEnum.VOLUME_REACHED:
log.error('Encountered obstruction during clear. Attempting flush...')
self._async_raise_fsm_event(ProtocolEvent.FLUSH)
else:
log.debug('--- djm --- clear complete')
next_state = ProtocolState.COMMAND
next_agent_state = ResourceAgentState.COMMAND
# if Status, nothing to do
return next_state, next_agent_state
def _handler_clear_flush(self, *args, **kwargs):
"""
Attempt to recover from failed attempt to clear by flushing home port. Only try once.
"""
log.info('Attempting to flush main port during clear')
if self._second_attempt:
return ProtocolState.RECOVERY, ResourceAgentState.BUSY
self._second_attempt = True
self._do_cmd_flush()
return None, None
########################################################################
# Command handlers.
# just implemented to make DA possible, instrument has no actual command mode
########################################################################
def _handler_command_enter(self, *args, **kwargs):
"""
Enter command state.
"""
# Command device to update parameters and send a config change event if needed.
self._update_params()
self._protocol_fsm.on_event(ProtocolEvent.INIT_PARAMS)
# Tell driver superclass to send a state change event.
# Superclass will query the state.
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
def _handler_command_init_params(self, *args, **kwargs):
"""
Setup initial parameters.
"""
self._init_params()
return None, None
def _handler_command_set(self, *args, **kwargs):
"""
Set instrument parameters
"""
log.debug('handler command set called')
startup = False
try:
params = args[0]
except IndexError:
raise InstrumentParameterException('set command requires a parameter dictionary.')
try:
startup = args[1]
except IndexError:
pass
if not isinstance(params, dict):
raise InstrumentParameterException('set parameters is not a dictionary')
self._set_params(params, startup)
return None, None
# changed = False
# for key, value in params.items():
# log.info('Command:set - setting parameter %s to %s', key, value)
# if not Parameter.has(key):
# raise InstrumentProtocolException('Attempt to set undefined parameter: %s', key)
# old_value = self._param_dict.get(key)
# if old_value != value:
# changed = True
# self._param_dict.set_value(key, value)
#
# if changed:
# self._driver_event(DriverAsyncEvent.CONFIG_CHANGE)
#
# next_state = None
# result = None
# return next_state, result
def _handler_command_start_direct(self, *args, **kwargs):
"""
Start direct access.
"""
log.debug('--- djm --- entered _handler_command_start_direct with args: %s', args)
result = None
next_state = ProtocolState.DIRECT_ACCESS
next_agent_state = ResourceAgentState.DIRECT_ACCESS
return next_state, (next_agent_state, result)
########################################################################
# Recovery handlers.
########################################################################
# TODO - not sure how to determine how to exit from this state. Probably requires a driver reset.
def _handler_recovery_enter(self, *args, **kwargs):
"""
        Error recovery mode. The instrument failed to respond to a command and now requires the user to perform
        diagnostics and correct the problem before proceeding.
"""
log.debug('--- djm --- entered recovery mode')
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
########################################################################
# Direct access handlers.
########################################################################
def _handler_direct_access_enter(self, *args, **kwargs):
"""
Enter direct access state.
"""
# Tell driver superclass to send a state change event.
# Superclass will query the state.
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
self._sent_cmds = []
def _handler_direct_access_execute_direct(self, data):
self._do_cmd_direct(data)
return None, None
def _handler_direct_access_stop_direct(self, *args, **kwargs):
return ProtocolState.COMMAND, (ResourceAgentState.COMMAND, None)
########################################################################
# general handlers.
########################################################################
def get_timestamp_delayed(self, fmt, delay=0):
"""
        Return a formatted date string of the current UTC time,
        but delay the return until the next second
        transition.
Formatting:
http://docs.python.org/library/time.html#time.strftime
@param fmt: strftime() format string
@return: formatted date string
@raise ValueError if format is None
"""
if not fmt:
raise ValueError
now = datetime.datetime.utcnow() + datetime.timedelta(seconds=delay)
time.sleep((1e6 - now.microsecond) / 1e6)
now = datetime.datetime.utcnow() + datetime.timedelta(seconds=delay)
return now.strftime(fmt)
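    # Illustrative call (hypothetical values): formatting a timestamp aligned
    # to the next second edge, as _handler_sync_clock does below:
    #   str_val = self.get_timestamp_delayed("%m/%d/%Y %H:%M:%S", delay=0.5)
    #   # -> e.g. '03/20/2014 17:14:55'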
def _handler_sync_clock(self, *args, **kwargs):
"""
sync clock close to a second edge
        @retval (next_state, (next_agent_state, result)) tuple, (None, (None, {'time': ras_time})).
        @throws InstrumentTimeoutException if device does not respond correctly.
@throws InstrumentProtocolException if command could not be built or misunderstood.
"""
cmd_len = len('clock 03/20/2014 17:14:55' + NEWLINE)
delay = cmd_len * INTER_CHARACTER_DELAY
time_format = "%m/%d/%Y %H:%M:%S"
str_val = self.get_timestamp_delayed(time_format, delay)
# str_val = time.strftime(time_format, time.gmtime(time.time() + self._clock_set_offset))
log.debug("Setting instrument clock to '%s'", str_val)
ras_time = self._do_cmd_resp(McLaneCommand.CLOCK, str_val, response_regex=McLaneResponse.READY)[0]
return None, (None, {'time': ras_time})
def _handler_command_acquire(self, *args, **kwargs):
self._handler_sync_clock()
return ProtocolState.FLUSH, ResourceAgentState.BUSY
# def _handler_command_status(self, *args, **kwargs):
# # get the following:
# # - VERSION
# # - CAPACITY (pump flow)
# # - BATTERY
# # - CODES (termination codes)
# # - COPYRIGHT (termination codes)
# return None, ResourceAgentState.COMMAND
def _handler_command_clear(self, *args, **kwargs):
return ProtocolState.CLEAR, ResourceAgentState.BUSY
########################################################################
# Private helpers.
########################################################################
def _wakeup(self, wakeup_timeout=10, response_timeout=3):
"""
        Overridden because waking this instrument up is a multi-step process with
two different requests required
@param wakeup_timeout The timeout to wake the device.
@param response_timeout The time to look for response to a wakeup attempt.
@throw InstrumentTimeoutException if the device could not be woken.
"""
sleep_time = .1
command = McLaneCommand.GO
# Grab start time for overall wakeup timeout.
starttime = time.time()
while True:
# Clear the prompt buffer.
log.debug("_wakeup: clearing promptbuf: %s", self._promptbuf)
self._promptbuf = ''
# Send a command and wait delay amount for response.
log.debug('_wakeup: Sending command %s, delay=%s', command.encode("hex"), response_timeout)
for char in command:
self._connection.send(char)
time.sleep(INTER_CHARACTER_DELAY)
sleep_amount = 0
while True:
time.sleep(sleep_time)
if self._promptbuf.find(Prompt.COMMAND_INPUT) != -1:
# instrument is awake
log.debug('_wakeup: got command input prompt %s', Prompt.COMMAND_INPUT)
# add inter-character delay which _do_cmd_resp() incorrectly doesn't add to
# the start of a transmission
time.sleep(INTER_CHARACTER_DELAY)
return Prompt.COMMAND_INPUT
if self._promptbuf.find(Prompt.ENTER_CTRL_C) != -1:
command = McLaneCommand.CONTROL_C
break
if self._promptbuf.find(Prompt.PERIOD) == 0:
command = McLaneCommand.CONTROL_C
break
sleep_amount += sleep_time
if sleep_amount >= response_timeout:
log.debug("_wakeup: expected response not received, buffer=%s", self._promptbuf)
break
if time.time() > starttime + wakeup_timeout:
                raise InstrumentTimeoutException(
                    "_wakeup(): instrument failed to wake up in %d seconds" % wakeup_timeout)
def _build_command(self, cmd, *args):
return cmd + ' ' + ' '.join([str(x) for x in args]) + NEWLINE
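    # e.g. (illustrative, assuming McLaneCommand.PORT is the string 'port'):
    #   self._build_command(McLaneCommand.PORT, 2)  # -> 'port 2' + NEWLINE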
def _build_driver_dict(self):
"""
Populate the driver dictionary with options
"""
self._driver_dict.add(DriverDictKey.VENDOR_SW_COMPATIBLE, False)
def _build_command_dict(self):
"""
        Populate the command dictionary with commands.
"""
self._cmd_dict.add(Capability.CLOCK_SYNC, display_name="Synchronize Clock")
def _build_param_dict(self):
"""
        Populate the parameter dictionary with McLane pump parameters.
For each parameter key add value formatting function for set commands.
"""
# The parameter dictionary.
self._param_dict = ProtocolParameterDict()
# Add parameter handlers to parameter dictionary for instrument configuration parameters.
self._param_dict.add(Parameter.FLUSH_VOLUME,
r'Flush Volume: (.*)mL',
None,
self._int_to_string,
type=ParameterDictType.INT,
# default_value=150,
default_value=10, # djm - fast test value
units='mL',
startup_param=True,
display_name="flush_volume",
visibility=ParameterDictVisibility.IMMUTABLE)
self._param_dict.add(Parameter.FLUSH_FLOWRATE,
r'Flush Flow Rate: (.*)mL/min',
None,
self._int_to_string,
type=ParameterDictType.INT,
default_value=100,
units='mL/min',
startup_param=True,
display_name="flush_flow_rate",
visibility=ParameterDictVisibility.IMMUTABLE)
self._param_dict.add(Parameter.FLUSH_MINFLOW,
r'Flush Min Flow: (.*)mL/min',
None,
self._int_to_string,
type=ParameterDictType.INT,
default_value=75,
units='mL/min',
startup_param=True,
display_name="flush_min_flow",
visibility=ParameterDictVisibility.IMMUTABLE)
self._param_dict.add(Parameter.FILL_VOLUME,
r'Fill Volume: (.*)mL',
None,
self._int_to_string,
type=ParameterDictType.INT,
# default_value=4000,
default_value=10, # djm - fast test value
units='mL',
startup_param=True,
display_name="fill_volume",
visibility=ParameterDictVisibility.IMMUTABLE)
self._param_dict.add(Parameter.FILL_FLOWRATE,
r'Fill Flow Rate: (.*)mL/min',
None,
self._int_to_string,
type=ParameterDictType.INT,
default_value=100,
units='mL/min',
startup_param=True,
display_name="fill_flow_rate",
visibility=ParameterDictVisibility.IMMUTABLE)
self._param_dict.add(Parameter.FILL_MINFLOW,
r'Fill Min Flow: (.*)mL/min',
None,
self._int_to_string,
type=ParameterDictType.INT,
default_value=75,
units='mL/min',
startup_param=True,
display_name="fill_min_flow",
visibility=ParameterDictVisibility.IMMUTABLE)
self._param_dict.add(Parameter.CLEAR_VOLUME,
r'Reverse Volume: (.*)mL',
None,
self._int_to_string,
type=ParameterDictType.INT,
# default_value=100,
default_value=10, # djm - fast test value
units='mL',
startup_param=True,
display_name="clear_volume",
visibility=ParameterDictVisibility.IMMUTABLE)
self._param_dict.add(Parameter.CLEAR_FLOWRATE,
r'Reverse Flow Rate: (.*)mL/min',
None,
self._int_to_string,
type=ParameterDictType.INT,
default_value=100,
units='mL/min',
startup_param=True,
display_name="clear_flow_rate",
visibility=ParameterDictVisibility.IMMUTABLE)
self._param_dict.add(Parameter.CLEAR_MINFLOW,
r'Reverse Min Flow: (.*)mL/min',
None,
self._int_to_string,
type=ParameterDictType.INT,
default_value=75,
units='mL/min',
startup_param=True,
display_name="clear_min_flow",
visibility=ParameterDictVisibility.IMMUTABLE)
def _update_params(self):
"""
Update the parameter dictionary.
"""
log.debug("_update_params:")
# def _parse_battery_response(self, response, prompt):
# """
# Parse handler for battery command.
# @param response command response string.
# @param prompt prompt following command response.
# @throws InstrumentProtocolException if battery command misunderstood.
# """
# log.debug("_parse_battery_response: response=%s, prompt=%s", response, prompt)
# if prompt == Prompt.UNRECOGNIZED_COMMAND:
# raise InstrumentProtocolException('battery command not recognized: %s.' % response)
#
# if not self._param_dict.update(response):
# raise InstrumentProtocolException('battery command not parsed: %s.' % response)
#
# return
#
# def _parse_clock_response(self, response, prompt):
# """
# Parse handler for clock command.
# @param response command response string.
# @param prompt prompt following command response.
# @throws InstrumentProtocolException if clock command misunderstood.
# @retval the joined string from the regular expression match
# """
# # extract current time from response
# log.debug('--- djm --- parse_clock_response: response: %r', response)
# ras_time_string = ' '.join(response.split()[:2])
# time_format = "%m/%d/%y %H:%M:%S"
# ras_time = time.strptime(ras_time_string, time_format)
# ras_time = list(ras_time)
# ras_time[-1] = 0 # tm_isdst field set to 0 - using GMT, no DST
#
# return tuple(ras_time)
#
# def _parse_port_response(self, response, prompt):
# """
# Parse handler for port command.
# @param response command response string.
# @param prompt prompt following command response.
# @throws InstrumentProtocolException if port command misunderstood.
# @retval the joined string from the regular expression match
# """
# # extract current port from response
# log.debug('--- djm --- parse_port_response: response: %r', response)
# port = int(response)
#
# return port
|
eshijia/SUR | refs/heads/SURmagnum | magnum/common/pythonk8sclient/client/models/V1beta3_EndpointSubset.py | 15 | #!/usr/bin/env python
"""
Copyright 2015 Reverb Technologies, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class V1beta3_EndpointSubset(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually."""
def __init__(self):
"""
Attributes:
swaggerTypes (dict): The key is attribute name and the value is attribute type.
attributeMap (dict): The key is attribute name and the value is json key in definition.
"""
self.swaggerTypes = {
'addresses': 'list[V1beta3_EndpointAddress]',
'ports': 'list[V1beta3_EndpointPort]'
}
self.attributeMap = {
'addresses': 'addresses',
'ports': 'ports'
}
#IP addresses which offer the related ports
self.addresses = None # list[V1beta3_EndpointAddress]
#port numbers available on the related IP addresses
self.ports = None # list[V1beta3_EndpointPort]
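# Illustrative sketch (not part of the generated code): a deserializer could
# use the swagger maps above roughly like this, where 'data' is a hypothetical
# parsed-JSON dict:
#   subset = V1beta3_EndpointSubset()
#   for attr, json_key in subset.attributeMap.items():
#       setattr(subset, attr, data.get(json_key))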
|
imsparsh/python-for-android | refs/heads/master | python3-alpha/python3-src/Lib/ctypes/test/test_functions.py | 45 | """
Here is probably the place to write the docs, since the test-cases
show how the types behave.
Later...
"""
from ctypes import *
import sys, unittest
try:
WINFUNCTYPE
except NameError:
# fake to enable this test on Linux
WINFUNCTYPE = CFUNCTYPE
import _ctypes_test
dll = CDLL(_ctypes_test.__file__)
if sys.platform == "win32":
windll = WinDLL(_ctypes_test.__file__)
class POINT(Structure):
_fields_ = [("x", c_int), ("y", c_int)]
class RECT(Structure):
_fields_ = [("left", c_int), ("top", c_int),
("right", c_int), ("bottom", c_int)]
class FunctionTestCase(unittest.TestCase):
def test_mro(self):
        # in Python 2.3, this raises TypeError: MRO conflict among base classes,
# in Python 2.2 it works.
#
# But in early versions of _ctypes.c, the result of tp_new
# wasn't checked, and it even crashed Python.
# Found by Greg Chapman.
try:
class X(object, Array):
_length_ = 5
_type_ = "i"
except TypeError:
pass
from _ctypes import _Pointer
try:
class X(object, _Pointer):
pass
except TypeError:
pass
from _ctypes import _SimpleCData
try:
class X(object, _SimpleCData):
_type_ = "i"
except TypeError:
pass
try:
class X(object, Structure):
_fields_ = []
except TypeError:
pass
def test_wchar_parm(self):
try:
c_wchar
except NameError:
return
f = dll._testfunc_i_bhilfd
f.argtypes = [c_byte, c_wchar, c_int, c_long, c_float, c_double]
result = f(1, "x", 3, 4, 5.0, 6.0)
self.assertEqual(result, 139)
self.assertEqual(type(result), int)
def test_wchar_result(self):
try:
c_wchar
except NameError:
return
f = dll._testfunc_i_bhilfd
f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_double]
f.restype = c_wchar
result = f(0, 0, 0, 0, 0, 0)
self.assertEqual(result, '\x00')
def test_voidresult(self):
f = dll._testfunc_v
f.restype = None
f.argtypes = [c_int, c_int, POINTER(c_int)]
result = c_int()
self.assertEqual(None, f(1, 2, byref(result)))
self.assertEqual(result.value, 3)
def test_intresult(self):
f = dll._testfunc_i_bhilfd
f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_double]
f.restype = c_int
result = f(1, 2, 3, 4, 5.0, 6.0)
self.assertEqual(result, 21)
self.assertEqual(type(result), int)
result = f(-1, -2, -3, -4, -5.0, -6.0)
self.assertEqual(result, -21)
self.assertEqual(type(result), int)
# If we declare the function to return a short,
# is the high part split off?
f.restype = c_short
result = f(1, 2, 3, 4, 5.0, 6.0)
self.assertEqual(result, 21)
self.assertEqual(type(result), int)
result = f(1, 2, 3, 0x10004, 5.0, 6.0)
self.assertEqual(result, 21)
self.assertEqual(type(result), int)
# You cannot assign character format codes as restype any longer
self.assertRaises(TypeError, setattr, f, "restype", "i")
def test_floatresult(self):
f = dll._testfunc_f_bhilfd
f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_double]
f.restype = c_float
result = f(1, 2, 3, 4, 5.0, 6.0)
self.assertEqual(result, 21)
self.assertEqual(type(result), float)
result = f(-1, -2, -3, -4, -5.0, -6.0)
self.assertEqual(result, -21)
self.assertEqual(type(result), float)
def test_doubleresult(self):
f = dll._testfunc_d_bhilfd
f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_double]
f.restype = c_double
result = f(1, 2, 3, 4, 5.0, 6.0)
self.assertEqual(result, 21)
self.assertEqual(type(result), float)
result = f(-1, -2, -3, -4, -5.0, -6.0)
self.assertEqual(result, -21)
self.assertEqual(type(result), float)
def test_longdoubleresult(self):
f = dll._testfunc_D_bhilfD
f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_longdouble]
f.restype = c_longdouble
result = f(1, 2, 3, 4, 5.0, 6.0)
self.assertEqual(result, 21)
self.assertEqual(type(result), float)
result = f(-1, -2, -3, -4, -5.0, -6.0)
self.assertEqual(result, -21)
self.assertEqual(type(result), float)
def test_longlongresult(self):
try:
c_longlong
except NameError:
return
f = dll._testfunc_q_bhilfd
f.restype = c_longlong
f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_double]
result = f(1, 2, 3, 4, 5.0, 6.0)
self.assertEqual(result, 21)
f = dll._testfunc_q_bhilfdq
f.restype = c_longlong
f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_double, c_longlong]
result = f(1, 2, 3, 4, 5.0, 6.0, 21)
self.assertEqual(result, 42)
def test_stringresult(self):
f = dll._testfunc_p_p
f.argtypes = None
f.restype = c_char_p
result = f(b"123")
self.assertEqual(result, b"123")
result = f(None)
self.assertEqual(result, None)
def test_pointers(self):
f = dll._testfunc_p_p
f.restype = POINTER(c_int)
f.argtypes = [POINTER(c_int)]
# This only works if the value c_int(42) passed to the
# function is still alive while the pointer (the result) is
# used.
v = c_int(42)
self.assertEqual(pointer(v).contents.value, 42)
result = f(pointer(v))
self.assertEqual(type(result), POINTER(c_int))
self.assertEqual(result.contents.value, 42)
        # This one works...
result = f(pointer(v))
self.assertEqual(result.contents.value, v.value)
p = pointer(c_int(99))
result = f(p)
self.assertEqual(result.contents.value, 99)
arg = byref(v)
result = f(arg)
self.assertNotEqual(result.contents, v.value)
self.assertRaises(ArgumentError, f, byref(c_short(22)))
# It is dangerous, however, because you don't control the lifetime
# of the pointer:
result = f(byref(c_int(99)))
self.assertNotEqual(result.contents, 99)
def test_errors(self):
f = dll._testfunc_p_p
f.restype = c_int
class X(Structure):
_fields_ = [("y", c_int)]
self.assertRaises(TypeError, f, X()) #cannot convert parameter
################################################################
def test_shorts(self):
f = dll._testfunc_callback_i_if
args = []
expected = [262144, 131072, 65536, 32768, 16384, 8192, 4096, 2048,
1024, 512, 256, 128, 64, 32, 16, 8, 4, 2, 1]
def callback(v):
args.append(v)
return v
CallBack = CFUNCTYPE(c_int, c_int)
cb = CallBack(callback)
f(2**18, cb)
self.assertEqual(args, expected)
################################################################
def test_callbacks(self):
f = dll._testfunc_callback_i_if
f.restype = c_int
MyCallback = CFUNCTYPE(c_int, c_int)
def callback(value):
#print "called back with", value
return value
cb = MyCallback(callback)
result = f(-10, cb)
self.assertEqual(result, -18)
# test with prototype
f.argtypes = [c_int, MyCallback]
cb = MyCallback(callback)
result = f(-10, cb)
self.assertEqual(result, -18)
AnotherCallback = WINFUNCTYPE(c_int, c_int, c_int, c_int, c_int)
# check that the prototype works: we call f with wrong
# argument types
cb = AnotherCallback(callback)
self.assertRaises(ArgumentError, f, -10, cb)
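        # General ctypes caveat (not specific to this test): a CFUNCTYPE
        # instance must stay referenced for as long as C code may invoke it;
        # binding it to a local name such as 'cb' keeps it alive here.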
def test_callbacks_2(self):
# Can also use simple datatypes as argument type specifiers
# for the callback function.
# In this case the call receives an instance of that type
f = dll._testfunc_callback_i_if
f.restype = c_int
MyCallback = CFUNCTYPE(c_int, c_int)
f.argtypes = [c_int, MyCallback]
def callback(value):
#print "called back with", value
self.assertEqual(type(value), int)
return value
cb = MyCallback(callback)
result = f(-10, cb)
self.assertEqual(result, -18)
def test_longlong_callbacks(self):
f = dll._testfunc_callback_q_qf
f.restype = c_longlong
MyCallback = CFUNCTYPE(c_longlong, c_longlong)
f.argtypes = [c_longlong, MyCallback]
def callback(value):
self.assertTrue(isinstance(value, int))
return value & 0x7FFFFFFF
cb = MyCallback(callback)
self.assertEqual(13577625587, f(1000000000000, cb))
    def test_dll_errors(self):
        # renamed from a second 'test_errors' definition, which shadowed the
        # structure-argument test of the same name above
        self.assertRaises(AttributeError, getattr, dll, "_xxx_yyy")
        self.assertRaises(ValueError, c_int.in_dll, dll, "_xxx_yyy")
def test_byval(self):
# without prototype
ptin = POINT(1, 2)
ptout = POINT()
# EXPORT int _testfunc_byval(point in, point *pout)
result = dll._testfunc_byval(ptin, byref(ptout))
got = result, ptout.x, ptout.y
expected = 3, 1, 2
self.assertEqual(got, expected)
# with prototype
ptin = POINT(101, 102)
ptout = POINT()
dll._testfunc_byval.argtypes = (POINT, POINTER(POINT))
dll._testfunc_byval.restype = c_int
result = dll._testfunc_byval(ptin, byref(ptout))
got = result, ptout.x, ptout.y
expected = 203, 101, 102
self.assertEqual(got, expected)
def test_struct_return_2H(self):
class S2H(Structure):
_fields_ = [("x", c_short),
("y", c_short)]
dll.ret_2h_func.restype = S2H
dll.ret_2h_func.argtypes = [S2H]
inp = S2H(99, 88)
s2h = dll.ret_2h_func(inp)
self.assertEqual((s2h.x, s2h.y), (99*2, 88*3))
if sys.platform == "win32":
def test_struct_return_2H_stdcall(self):
class S2H(Structure):
_fields_ = [("x", c_short),
("y", c_short)]
windll.s_ret_2h_func.restype = S2H
windll.s_ret_2h_func.argtypes = [S2H]
s2h = windll.s_ret_2h_func(S2H(99, 88))
self.assertEqual((s2h.x, s2h.y), (99*2, 88*3))
def test_struct_return_8H(self):
class S8I(Structure):
_fields_ = [("a", c_int),
("b", c_int),
("c", c_int),
("d", c_int),
("e", c_int),
("f", c_int),
("g", c_int),
("h", c_int)]
dll.ret_8i_func.restype = S8I
dll.ret_8i_func.argtypes = [S8I]
inp = S8I(9, 8, 7, 6, 5, 4, 3, 2)
s8i = dll.ret_8i_func(inp)
self.assertEqual((s8i.a, s8i.b, s8i.c, s8i.d, s8i.e, s8i.f, s8i.g, s8i.h),
(9*2, 8*3, 7*4, 6*5, 5*6, 4*7, 3*8, 2*9))
if sys.platform == "win32":
def test_struct_return_8H_stdcall(self):
class S8I(Structure):
_fields_ = [("a", c_int),
("b", c_int),
("c", c_int),
("d", c_int),
("e", c_int),
("f", c_int),
("g", c_int),
("h", c_int)]
windll.s_ret_8i_func.restype = S8I
windll.s_ret_8i_func.argtypes = [S8I]
inp = S8I(9, 8, 7, 6, 5, 4, 3, 2)
s8i = windll.s_ret_8i_func(inp)
self.assertEqual((s8i.a, s8i.b, s8i.c, s8i.d, s8i.e, s8i.f, s8i.g, s8i.h),
(9*2, 8*3, 7*4, 6*5, 5*6, 4*7, 3*8, 2*9))
def test_sf1651235(self):
# see http://www.python.org/sf/1651235
proto = CFUNCTYPE(c_int, RECT, POINT)
def callback(*args):
return 0
callback = proto(callback)
self.assertRaises(ArgumentError, lambda: callback((1, 2, 3, 4), POINT()))
if __name__ == '__main__':
unittest.main()
|
Gitlab11/odoo | refs/heads/8.0 | addons/l10n_et/__init__.py | 438 | #-*- coding:utf-8 -*-
##############################################################################
#
# Copyright (C) 2012 Michael Telahun Makonnen <[email protected]>.
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
|
kevinmel2000/sl4a | refs/heads/master | python/src/Tools/scripts/reindent-rst.py | 50 | #!/usr/bin/env python
# Make a reST file compliant with our pre-commit hook.
# Currently just remove trailing whitespace.
from __future__ import with_statement
import sys, re, shutil
ws_re = re.compile(r'\s+(\r?\n)$')
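# e.g. (illustrative): ws_re.sub(r'\1', 'text  \t\r\n') -> 'text\r\n'
# (trailing blanks/tabs are removed while the original line ending is kept)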
def main(argv=sys.argv):
rv = 0
for filename in argv[1:]:
try:
with open(filename, 'rb') as f:
lines = f.readlines()
new_lines = [ws_re.sub(r'\1', line) for line in lines]
if new_lines != lines:
print 'Fixing %s...' % filename
shutil.copyfile(filename, filename + '.bak')
with open(filename, 'wb') as f:
f.writelines(new_lines)
except Exception, err:
print 'Cannot fix %s: %s' % (filename, err)
rv = 1
return rv
if __name__ == '__main__':
sys.exit(main())
|
AccelAI/accel.ai | refs/heads/master | flask-aws/lib/python2.7/site-packages/sqlalchemy/orm/deprecated_interfaces.py | 33 | # orm/deprecated_interfaces.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from .. import event, util
from .interfaces import EXT_CONTINUE
@util.langhelpers.dependency_for("sqlalchemy.orm.interfaces")
class MapperExtension(object):
"""Base implementation for :class:`.Mapper` event hooks.
.. note::
:class:`.MapperExtension` is deprecated. Please
refer to :func:`.event.listen` as well as
:class:`.MapperEvents`.
New extension classes subclass :class:`.MapperExtension` and are specified
using the ``extension`` mapper() argument, which is a single
:class:`.MapperExtension` or a list of such::
from sqlalchemy.orm.interfaces import MapperExtension
class MyExtension(MapperExtension):
def before_insert(self, mapper, connection, instance):
print "instance %s before insert !" % instance
m = mapper(User, users_table, extension=MyExtension())
A single mapper can maintain a chain of ``MapperExtension``
objects. When a particular mapping event occurs, the
corresponding method on each ``MapperExtension`` is invoked
serially, and each method has the ability to halt the chain
from proceeding further::
m = mapper(User, users_table, extension=[ext1, ext2, ext3])
Each ``MapperExtension`` method returns the symbol
EXT_CONTINUE by default. This symbol generally means "move
to the next ``MapperExtension`` for processing". For methods
that return objects like translated rows or new object
instances, EXT_CONTINUE means the result of the method
should be ignored. In some cases it's required for a
default mapper activity to be performed, such as adding a
new instance to a result list.
The symbol EXT_STOP has significance within a chain
of ``MapperExtension`` objects that the chain will be stopped
when this symbol is returned. Like EXT_CONTINUE, it also
has additional significance in some cases that a default
mapper activity will not be performed.
"""
@classmethod
def _adapt_instrument_class(cls, self, listener):
cls._adapt_listener_methods(self, listener, ('instrument_class',))
@classmethod
def _adapt_listener(cls, self, listener):
cls._adapt_listener_methods(
self, listener,
(
'init_instance',
'init_failed',
'translate_row',
'create_instance',
'append_result',
'populate_instance',
'reconstruct_instance',
'before_insert',
'after_insert',
'before_update',
'after_update',
'before_delete',
'after_delete'
))
@classmethod
def _adapt_listener_methods(cls, self, listener, methods):
for meth in methods:
me_meth = getattr(MapperExtension, meth)
ls_meth = getattr(listener, meth)
if not util.methods_equivalent(me_meth, ls_meth):
if meth == 'reconstruct_instance':
def go(ls_meth):
def reconstruct(instance, ctx):
ls_meth(self, instance)
return reconstruct
event.listen(self.class_manager, 'load',
go(ls_meth), raw=False, propagate=True)
elif meth == 'init_instance':
def go(ls_meth):
def init_instance(instance, args, kwargs):
ls_meth(self, self.class_,
self.class_manager.original_init,
instance, args, kwargs)
return init_instance
event.listen(self.class_manager, 'init',
go(ls_meth), raw=False, propagate=True)
elif meth == 'init_failed':
def go(ls_meth):
def init_failed(instance, args, kwargs):
util.warn_exception(
ls_meth, self, self.class_,
self.class_manager.original_init,
instance, args, kwargs)
return init_failed
event.listen(self.class_manager, 'init_failure',
go(ls_meth), raw=False, propagate=True)
else:
event.listen(self, "%s" % meth, ls_meth,
raw=False, retval=True, propagate=True)
def instrument_class(self, mapper, class_):
"""Receive a class when the mapper is first constructed, and has
applied instrumentation to the mapped class.
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
return EXT_CONTINUE
def init_instance(self, mapper, class_, oldinit, instance, args, kwargs):
"""Receive an instance when its constructor is called.
This method is only called during a userland construction of
an object. It is not called when an object is loaded from the
database.
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
return EXT_CONTINUE
def init_failed(self, mapper, class_, oldinit, instance, args, kwargs):
"""Receive an instance when its constructor has been called,
and raised an exception.
This method is only called during a userland construction of
an object. It is not called when an object is loaded from the
database.
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
return EXT_CONTINUE
def translate_row(self, mapper, context, row):
"""Perform pre-processing on the given result row and return a
new row instance.
This is called when the mapper first receives a row, before
the object identity or the instance itself has been derived
from that row. The given row may or may not be a
``RowProxy`` object - it will always be a dictionary-like
object which contains mapped columns as keys. The
returned object should also be a dictionary-like object
which recognizes mapped columns as keys.
If the ultimate return value is EXT_CONTINUE, the row
is not translated.
"""
return EXT_CONTINUE
def create_instance(self, mapper, selectcontext, row, class_):
"""Receive a row when a new object instance is about to be
created from that row.
The method can choose to create the instance itself, or it can return
EXT_CONTINUE to indicate normal object creation should take place.
mapper
The mapper doing the operation
selectcontext
The QueryContext generated from the Query.
row
The result row from the database
class\_
The class we are mapping.
return value
A new object instance, or EXT_CONTINUE
"""
return EXT_CONTINUE
def append_result(self, mapper, selectcontext, row, instance,
result, **flags):
"""Receive an object instance before that instance is appended
to a result list.
If this method returns EXT_CONTINUE, result appending will proceed
        normally. If this method returns any other value or None,
result appending will not proceed for this instance, giving
this extension an opportunity to do the appending itself, if
desired.
mapper
The mapper doing the operation.
selectcontext
The QueryContext generated from the Query.
row
The result row from the database.
instance
The object instance to be appended to the result.
result
List to which results are being appended.
\**flags
extra information about the row, same as criterion in
``create_row_processor()`` method of
:class:`~sqlalchemy.orm.interfaces.MapperProperty`
"""
return EXT_CONTINUE
def populate_instance(self, mapper, selectcontext, row,
instance, **flags):
"""Receive an instance before that instance has
its attributes populated.
This usually corresponds to a newly loaded instance but may
also correspond to an already-loaded instance which has
unloaded attributes to be populated. The method may be called
many times for a single instance, as multiple result rows are
used to populate eagerly loaded collections.
If this method returns EXT_CONTINUE, instance population will
proceed normally. If any other value or None is returned,
instance population will not proceed, giving this extension an
opportunity to populate the instance itself, if desired.
.. deprecated:: 0.5
Most usages of this hook are obsolete. For a
generic "object has been newly created from a row" hook, use
``reconstruct_instance()``, or the ``@orm.reconstructor``
decorator.
"""
return EXT_CONTINUE
def reconstruct_instance(self, mapper, instance):
"""Receive an object instance after it has been created via
``__new__``, and after initial attribute population has
occurred.
This typically occurs when the instance is created based on
incoming result rows, and is only called once for that
instance's lifetime.
Note that during a result-row load, this method is called upon
the first row received for this instance. Note that some
attributes and collections may or may not be loaded or even
initialized, depending on what's present in the result rows.
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
return EXT_CONTINUE
def before_insert(self, mapper, connection, instance):
"""Receive an object instance before that instance is inserted
into its table.
This is a good place to set up primary key values and such
that aren't handled otherwise.
Column-based attributes can be modified within this method
which will result in the new value being inserted. However
*no* changes to the overall flush plan can be made, and
manipulation of the ``Session`` will not have the desired effect.
To manipulate the ``Session`` within an extension, use
``SessionExtension``.
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
return EXT_CONTINUE
def after_insert(self, mapper, connection, instance):
"""Receive an object instance after that instance is inserted.
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
return EXT_CONTINUE
def before_update(self, mapper, connection, instance):
"""Receive an object instance before that instance is updated.
Note that this method is called for all instances that are marked as
"dirty", even those which have no net changes to their column-based
attributes. An object is marked as dirty when any of its column-based
attributes have a "set attribute" operation called or when any of its
collections are modified. If, at update time, no column-based
attributes have any net changes, no UPDATE statement will be issued.
This means that an instance being sent to before_update is *not* a
guarantee that an UPDATE statement will be issued (although you can
affect the outcome here).
To detect if the column-based attributes on the object have net
changes, and will therefore generate an UPDATE statement, use
``object_session(instance).is_modified(instance,
include_collections=False)``.
Column-based attributes can be modified within this method
which will result in the new value being updated. However
*no* changes to the overall flush plan can be made, and
manipulation of the ``Session`` will not have the desired effect.
To manipulate the ``Session`` within an extension, use
``SessionExtension``.
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
return EXT_CONTINUE
def after_update(self, mapper, connection, instance):
"""Receive an object instance after that instance is updated.
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
return EXT_CONTINUE
def before_delete(self, mapper, connection, instance):
"""Receive an object instance before that instance is deleted.
Note that *no* changes to the overall flush plan can be made
here; and manipulation of the ``Session`` will not have the
desired effect. To manipulate the ``Session`` within an
extension, use ``SessionExtension``.
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
return EXT_CONTINUE
def after_delete(self, mapper, connection, instance):
"""Receive an object instance after that instance is deleted.
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
return EXT_CONTINUE
@util.langhelpers.dependency_for("sqlalchemy.orm.interfaces")
class SessionExtension(object):
"""Base implementation for :class:`.Session` event hooks.
.. note::
:class:`.SessionExtension` is deprecated. Please
refer to :func:`.event.listen` as well as
:class:`.SessionEvents`.
Subclasses may be installed into a :class:`.Session` (or
:class:`.sessionmaker`) using the ``extension`` keyword
argument::
from sqlalchemy.orm.interfaces import SessionExtension
class MySessionExtension(SessionExtension):
def before_commit(self, session):
print "before commit!"
Session = sessionmaker(extension=MySessionExtension())
The same :class:`.SessionExtension` instance can be used
with any number of sessions.
"""
@classmethod
def _adapt_listener(cls, self, listener):
for meth in [
'before_commit',
'after_commit',
'after_rollback',
'before_flush',
'after_flush',
'after_flush_postexec',
'after_begin',
'after_attach',
'after_bulk_update',
'after_bulk_delete',
]:
me_meth = getattr(SessionExtension, meth)
ls_meth = getattr(listener, meth)
if not util.methods_equivalent(me_meth, ls_meth):
event.listen(self, meth, getattr(listener, meth))
def before_commit(self, session):
"""Execute right before commit is called.
Note that this may not be per-flush if a longer running
transaction is ongoing."""
def after_commit(self, session):
"""Execute after a commit has occurred.
Note that this may not be per-flush if a longer running
transaction is ongoing."""
def after_rollback(self, session):
"""Execute after a rollback has occurred.
Note that this may not be per-flush if a longer running
transaction is ongoing."""
def before_flush(self, session, flush_context, instances):
"""Execute before flush process has started.
`instances` is an optional list of objects which were passed to
the ``flush()`` method. """
def after_flush(self, session, flush_context):
"""Execute after flush has completed, but before commit has been
called.
Note that the session's state is still in pre-flush, i.e. 'new',
'dirty', and 'deleted' lists still show pre-flush state as well
as the history settings on instance attributes."""
def after_flush_postexec(self, session, flush_context):
"""Execute after flush has completed, and after the post-exec
state occurs.
This will be when the 'new', 'dirty', and 'deleted' lists are in
their final state. An actual commit() may or may not have
occurred, depending on whether or not the flush started its own
transaction or participated in a larger transaction. """
def after_begin(self, session, transaction, connection):
"""Execute after a transaction is begun on a connection
`transaction` is the SessionTransaction. This method is called
after an engine level transaction is begun on a connection. """
def after_attach(self, session, instance):
"""Execute after an instance is attached to a session.
This is called after an add, delete or merge. """
def after_bulk_update(self, session, query, query_context, result):
"""Execute after a bulk update operation to the session.
This is called after a session.query(...).update()
`query` is the query object that this update operation was
called on. `query_context` was the query context object.
`result` is the result object returned from the bulk operation.
"""
def after_bulk_delete(self, session, query, query_context, result):
"""Execute after a bulk delete operation to the session.
This is called after a session.query(...).delete()
`query` is the query object that this delete operation was
called on. `query_context` was the query context object.
`result` is the result object returned from the bulk operation.
"""
@util.langhelpers.dependency_for("sqlalchemy.orm.interfaces")
class AttributeExtension(object):
"""Base implementation for :class:`.AttributeImpl` event hooks, events
that fire upon attribute mutations in user code.
.. note::
:class:`.AttributeExtension` is deprecated. Please
refer to :func:`.event.listen` as well as
:class:`.AttributeEvents`.
:class:`.AttributeExtension` is used to listen for set,
remove, and append events on individual mapped attributes.
It is established on an individual mapped attribute using
the `extension` argument, available on
:func:`.column_property`, :func:`.relationship`, and
others::
from sqlalchemy.orm.interfaces import AttributeExtension
from sqlalchemy.orm import mapper, relationship, column_property
class MyAttrExt(AttributeExtension):
def append(self, state, value, initiator):
print "append event !"
return value
def set(self, state, value, oldvalue, initiator):
print "set event !"
return value
mapper(SomeClass, sometable, properties={
'foo':column_property(sometable.c.foo, extension=MyAttrExt()),
'bar':relationship(Bar, extension=MyAttrExt())
})
Note that the :class:`.AttributeExtension` methods
:meth:`~.AttributeExtension.append` and
:meth:`~.AttributeExtension.set` need to return the
``value`` parameter. The returned value is used as the
effective value, and allows the extension to change what is
ultimately persisted.
AttributeExtension is assembled within the descriptors associated
with a mapped class.
"""
active_history = True
"""indicates that the set() method would like to receive the 'old' value,
even if it means firing lazy callables.
Note that ``active_history`` can also be set directly via
:func:`.column_property` and :func:`.relationship`.
"""
@classmethod
def _adapt_listener(cls, self, listener):
event.listen(self, 'append', listener.append,
active_history=listener.active_history,
raw=True, retval=True)
event.listen(self, 'remove', listener.remove,
active_history=listener.active_history,
raw=True, retval=True)
event.listen(self, 'set', listener.set,
active_history=listener.active_history,
raw=True, retval=True)
def append(self, state, value, initiator):
"""Receive a collection append event.
The returned value will be used as the actual value to be
appended.
"""
return value
def remove(self, state, value, initiator):
"""Receive a remove event.
No return value is defined.
"""
pass
def set(self, state, value, oldvalue, initiator):
"""Receive a set event.
The returned value will be used as the actual value to be
set.
"""
return value
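# Hedged sketch, not part of SQLAlchemy itself: the event-API equivalent of
# _adapt_listener above. retval=True makes the listener's return value replace
# the value being set, mirroring AttributeExtension.set:
#     from sqlalchemy import event
#     def uppercase_set(target, value, oldvalue, initiator):
#         return value.upper()
#     event.listen(SomeClass.foo, 'set', uppercase_set, retval=True)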
|
talishte/ctigre | refs/heads/master | env/lib/python2.7/site-packages/django/template/loaders/app_directories.py | 105 | """
Wrapper for loading templates from "templates" directories in INSTALLED_APPS
packages.
"""
import os
import sys
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.template.base import TemplateDoesNotExist
from django.template.loader import BaseLoader
from django.utils._os import safe_join
from django.utils.importlib import import_module
from django.utils import six
# At compile time, cache the directories to search.
if six.PY2:
fs_encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
app_template_dirs = []
for app in settings.INSTALLED_APPS:
try:
mod = import_module(app)
except ImportError as e:
raise ImproperlyConfigured('ImportError %s: %s' % (app, e.args[0]))
template_dir = os.path.join(os.path.dirname(mod.__file__), 'templates')
if os.path.isdir(template_dir):
if six.PY2:
template_dir = template_dir.decode(fs_encoding)
app_template_dirs.append(template_dir)
# It won't change, so convert it to a tuple to save memory.
app_template_dirs = tuple(app_template_dirs)
class Loader(BaseLoader):
is_usable = True
def get_template_sources(self, template_name, template_dirs=None):
"""
Returns the absolute paths to "template_name", when appended to each
directory in "template_dirs". Any paths that don't lie inside one of the
template dirs are excluded from the result set, for security reasons.
"""
if not template_dirs:
template_dirs = app_template_dirs
for template_dir in template_dirs:
try:
yield safe_join(template_dir, template_name)
except UnicodeDecodeError:
# The template dir name was a bytestring that wasn't valid UTF-8.
raise
except ValueError:
# The joined path was located outside of template_dir.
pass
def load_template_source(self, template_name, template_dirs=None):
for filepath in self.get_template_sources(template_name, template_dirs):
try:
with open(filepath, 'rb') as fp:
return (fp.read().decode(settings.FILE_CHARSET), filepath)
except IOError:
pass
raise TemplateDoesNotExist(template_name)
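# Hedged usage sketch, not part of Django itself: this loader is activated via
# the TEMPLATE_LOADERS setting, after which template lookups also search each
# installed app's "templates" directory:
#     TEMPLATE_LOADERS = (
#         'django.template.loaders.filesystem.Loader',
#         'django.template.loaders.app_directories.Loader',
#     )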
|
fusionpig/ansible | refs/heads/devel | lib/ansible/utils/module_docs_fragments/openstack.py | 97 | # Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
# Standard openstack documentation fragment
DOCUMENTATION = '''
options:
cloud:
description:
- Named cloud to operate against. Provides default values for I(auth) and
I(auth_type). This parameter is not needed if I(auth) is provided or if
OpenStack OS_* environment variables are present.
required: false
auth:
description:
- Dictionary containing auth information as needed by the cloud's auth
plugin strategy. For the default I(password) plugin, this would contain
I(auth_url), I(username), I(password), I(project_name) and any
information about domains if the cloud supports them. For other plugins,
this param will need to contain whatever parameters that auth plugin
requires. This parameter is not needed if a named cloud is provided or
OpenStack OS_* environment variables are present.
required: false
auth_type:
description:
- Name of the auth plugin to use. If the cloud uses something other than
password authentication, the name of the plugin should be indicated here
and the contents of the I(auth) parameter should be updated accordingly.
required: false
default: password
region_name:
description:
- Name of the region.
required: false
availability_zone:
description:
- Name of the availability zone.
required: false
wait:
description:
- Should ansible wait until the requested resource is complete.
required: false
default: "yes"
choices: ["yes", "no"]
timeout:
description:
- How long should ansible wait for the requested resource.
required: false
default: 180
api_timeout:
description:
- How long should the socket layer wait before timing out for API calls.
If this is omitted, nothing will be passed to the requests library.
required: false
default: None
validate_certs:
description:
- Whether or not SSL API requests should be verified.
required: false
default: True
aliases: ['verify']
cacert:
description:
- A path to a CA Cert bundle that can be used as part of verifying
SSL API requests.
required: false
default: None
cert:
description:
      - A path to a client certificate to use as part of the SSL transaction.
required: false
default: None
key:
description:
      - A path to a client key to use as part of the SSL transaction.
required: false
default: None
endpoint_type:
description:
- Endpoint URL type to fetch from the service catalog.
choices: [public, internal, admin]
required: false
default: public
requirements:
- python >= 2.7
- shade
notes:
- The standard OpenStack environment variables, such as C(OS_USERNAME)
    may be used instead of providing explicit values.
- Auth information is driven by os-client-config, which means that values
can come from a yaml config file in /etc/ansible/openstack.yaml,
/etc/openstack/clouds.yaml or ~/.config/openstack/clouds.yaml, then from
standard environment variables, then finally by explicit parameters in
plays. More information can be found at
U(http://docs.openstack.org/developer/os-client-config)
'''
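# Hedged usage sketch, not part of this file: a module opts into the shared
# options above by naming this fragment in its own DOCUMENTATION block
# (the module name below is hypothetical):
#     DOCUMENTATION = '''
#     module: os_example
#     short_description: Example OpenStack module
#     extends_documentation_fragment: openstack
#     '''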
|
ThomasYeoLab/CBIG | refs/heads/master | stable_projects/fMRI_dynamics/Kong2021_pMFM/part2_pMFM_control_analysis/Constant_I/scripts/CBIG_pMFM_step1_training_conI.py | 1 | # /usr/bin/env python
'''
Written by Kong Xiaolu and CBIG under MIT license:
https://github.com/ThomasYeoLab/CBIG/blob/master/LICENSE.md
'''
import os
import numpy as np
import time
import torch
import CBIG_pMFM_basic_functions as fc
import warnings
def get_init(myelin_data, gradient_data, highest_order, init_para):
    '''
    Compute the initial parametrized coefficients by least-squares
    fitting init_para against a polynomial basis built from the myelin
    and RSFC gradient data.
    '''
n_node = myelin_data.shape[0]
amatrix = np.zeros((n_node, highest_order + 1))
bmatrix = np.zeros((n_node, highest_order + 1))
for i in range(highest_order + 1):
amatrix[:, i] = myelin_data**(i)
bmatrix[:, i] = gradient_data**(i)
cmatrix = np.hstack((amatrix, bmatrix[:, 1:highest_order + 1]))
para = np.linalg.inv(cmatrix.T @ cmatrix) @ cmatrix.T @ init_para
return para, cmatrix
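def _example_get_init():
    '''Hedged, illustrative sketch (not part of the original pipeline).
    With highest_order=1 and exactly linear synthetic data, get_init
    recovers coefficients such that cmatrix @ para reproduces init_para.
    The node count below is an assumption for illustration only.'''
    n_node = 68
    myelin = np.linspace(0.5, 1.5, n_node)
    gradient = np.linspace(-1.0, 1.0, n_node)
    init_para = 0.3 + 0.2 * myelin - 0.1 * gradient
    para, cmatrix = get_init(myelin, gradient, 1, init_para)
    # The basis exactly spans the synthetic data, so this returns True.
    return np.allclose(cmatrix @ para, init_para)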
def CBIG_mfm_optimization_desikan_main(gpu_index=0, random_seed=1):
    '''
    Implement the optimization process of the mean field model.
    The objective function is the sum of the FC correlation cost and
    the FCD KS statistics cost. The optimization process is highly
    automated and generates 500 candidate parameter sets for the
    main results.
Args:
gpu_index: index of gpu used for optimization
random_seed: random seed for optimization
Returns:
None
'''
# Setting random seed and GPU
torch.cuda.set_device(gpu_index)
random_seed_cuda = random_seed
random_seed_np = random_seed
torch.manual_seed(random_seed_cuda)
rng = np.random.Generator(np.random.PCG64(random_seed_np))
# Create output folders
output_path = '../output/step1_training_results/'
if not os.path.isdir(output_path):
os.makedirs(output_path)
# Initializing input parameters
highest_order = 1
N = 2 * (2 * highest_order + 1) + 2
myelin_data = fc.csv_matrix_read('../input/myelin.csv')
myelin_data = myelin_data[:, 0]
gradient_data = fc.csv_matrix_read('../input/rsfc_gradient.csv')
gradient_data = gradient_data[:, 0]
n_node = myelin_data.shape[0]
dim = n_node * 2 + 2
search_range = np.zeros((dim, 2))
search_range[:n_node, :] = [0, 1]
search_range[n_node, :] = [0, 0.5]
search_range[n_node + 1, :] = [1, 10]
search_range[n_node + 2:, :] = [0.0005, 0.01]
init_para = rng.uniform(0, 1, dim) * (
search_range[:, 1] - search_range[:, 0]) + search_range[:, 0]
start_point_w, template_mat = get_init(myelin_data, gradient_data,
highest_order, init_para[0:n_node])
start_point_s, template_mat = get_init(
myelin_data, gradient_data, highest_order, init_para[n_node + 2:])
# Initializing childrens
xmean = np.zeros(N)
xmean[0:2 * highest_order + 1] = start_point_w
xmean[2 * highest_order + 1:(2 * highest_order + 1) +
1] = init_para[n_node]
xmean[(2 * highest_order + 1) + 1] = init_para[n_node + 1]
xmean[(2 * highest_order + 1) + 2:N] = start_point_s
# Initializing optimization hyper-parameters
sigma = 0.15
sigmaS = 0.15
stoppoint = 0.3
maxloop = 500
n_dup = 3
# CMA-ES parameters setting
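    # Lambda is the population size and mu the number of selected parents;
    # weights are log-decreasing recombination weights, and mueff is the
    # variance-effective selection mass 1 / sum(weights ** 2).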
Lambda = 500
mu = 40
weights = np.log(mu + 1 / 2) - np.log(np.arange(1, mu + 1))
weights = weights / np.sum(weights)
mueff = 1 / np.sum(weights**2)
# Strategy parameter setting: adaptation
cc = (4 + mueff / N) / (N + 4 + 2 * mueff / N)
cs = (mueff + 2) / (N + mueff + 5)
c1 = 2 / ((N + 1.3)**2 + mueff)
cmu = np.minimum(1 - c1,
2 * (mueff - 2 + 1 / mueff) / ((N + 2)**2 + mueff))
damps = 1 + 2 * np.maximum(0, np.sqrt((mueff - 1) / (N + 1)) - 1) + cs
    # Initializing dynamic strategy parameters and constants
pc = np.zeros(N)
ps = np.zeros(N)
B = np.eye(N)
D = np.zeros(N)
D[0:2 * highest_order + 1] = start_point_w[0] / 2
D[2 * highest_order + 1:(2 * highest_order + 1) + 1] = init_para[n_node]
D[(2 * highest_order + 1) + 1] = 0.4
D[(2 * highest_order + 1) + 2:N] = 0.001 / 2
C = np.dot(np.dot(B, np.diag(np.power(D, 2))), B.T)
invsqrtC = np.dot(np.dot(B, np.diag(np.power(D, -1))), B.T)
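    # chiN approximates E||N(0, I)||, the expected norm of an N-dimensional
    # standard normal vector, used below in the step-size control.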
    chiN = N**0.5 * (1 - 1 / (4 * N) + 1 / (21 * N**2))
# Evolution loop
countloop = 0
arx = np.zeros([N, Lambda])
input_para = np.zeros((dim, Lambda))
xmin = np.zeros([N + 3, maxloop])
stop_count = 0
while countloop < maxloop:
start_time = time.time()
# Generating lambda offspring
arx[:, 0] = xmean
j = 0
while j < Lambda:
arx[:, j] = xmean + sigma * np.dot(B, (D * rng.standard_normal(N)))
input_para[0:n_node, j] = template_mat @ arx[0:(
2 * highest_order + 1), j]
input_para[n_node, j] = arx[(2 * highest_order + 1), j]
input_para[n_node + 1, j] = arx[(2 * highest_order + 1) + 1, j]
input_para[n_node + 2:, j] = template_mat @ arx[
(2 * highest_order + 1) + 2:N, j]
if (input_para[:, j] < search_range[:, 0]).any() or (
input_para[:, j] > search_range[:, 1]).any():
j = j - 1
j = j + 1
# Calculating costs of offspring
input_para_m = np.zeros((n_node * 3 + 1, input_para.shape[1]))
input_para_m[0:n_node, :] = input_para[0:n_node, :]
input_para_m[n_node:2 * n_node, :] = np.repeat(
input_para[n_node:n_node + 1, :], n_node, 0)
input_para_m[2 * n_node:, :] = input_para[n_node + 1:, :]
total_cost, fc_cost, fcd_cost = fc.CBIG_combined_cost_train(
input_para_m, n_dup)
countloop = countloop + 1
# Sort by total cost and compute weighted mean
arfitsort = np.sort(total_cost)
arindex = np.argsort(total_cost)
xold = xmean
xmean = np.dot(arx[:, arindex[0:mu]], weights)
xshow = xmean - xold
# Cumulation
ps = (1 - cs) * ps + np.sqrt(cs * (2 - cs) * mueff) * np.dot(
invsqrtC, xshow) / sigma
hsig = (np.linalg.norm(ps) / np.sqrt(1 - (1 - cs)**
(2 * countloop)) / chiN <
(1.4 + 2 / (N + 1))) * 1
pc = (1 - cc) * pc + hsig * np.sqrt(cc *
(2 - cc) * mueff) * xshow / sigma
# Adapting covariance matrix C
artmp = (1 / sigma) * (
arx[:, arindex[0:mu]] - np.tile(xold, [mu, 1]).T)
C = (1 - c1 - cmu) * C + c1 * (
np.outer(pc, pc) + (1 - hsig) * cc * (2 - cc) * C) + cmu * np.dot(
artmp, np.dot(np.diag(weights), artmp.T))
# Adapting step size
sigma = sigma * np.exp((cs / damps) * (np.linalg.norm(ps) / chiN - 1))
sigma = min(sigma, sigmaS)
# Decomposition
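        # With these hyper-parameters 1 / (c1 + cmu) / N / 10 < 1, so the
        # eigendecomposition of C is refreshed on every generation.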
if 1 > 1 / (c1 + cmu) / N / 10:
C = np.triu(C, k=1) + np.triu(C).T
if np.isnan(C).any():
break
D, B = np.linalg.eigh(C)
D = D.real
B = B.real
D = np.sqrt(D)
invsqrtC = np.dot(B, np.dot(np.diag(D**(-1)), B.T))
# Monitoring the evolution status
ps_norm = np.linalg.norm(ps)
print('******** Generation: ' + str(countloop) + ' ********')
print('Norm of P-sigma: ', ps_norm)
print('The mean of total cost: ', np.mean(arfitsort[0:mu]))
print('Sigma: ', sigma)
xmin[0:N, countloop - 1] = arx[:, arindex[0]]
xmin[N, countloop - 1] = fc_cost[arindex[0]]
xmin[N + 1, countloop - 1] = fcd_cost[arindex[0]]
xmin[N + 2, countloop - 1] = np.min(total_cost)
print('Best total cost: ', np.min(total_cost))
print('FC correlation cost: ', fc_cost[arindex[0]])
print('FCD KS statistics cost: ', fcd_cost[arindex[0]])
elapsed_time = time.time() - start_time
print('Elapsed time for this evolution is : ', elapsed_time)
print('******************************************')
# break
if arfitsort[0] < stoppoint and ps_norm < 11:
stop_count = stop_count + 1
if stop_count >= 5 or sigma < 0.001:
break
save_name = [output_path] + ['random_seed_', str(random_seed), '.csv']
np.savetxt(''.join(save_name), xmin, delimiter=',')
if __name__ == "__main__":
warnings.filterwarnings("ignore", category=RuntimeWarning)
for i in range(1, 11):
CBIG_mfm_optimization_desikan_main(random_seed=i)
|
cancan101/tensorflow | refs/heads/master | tensorflow/contrib/labeled_tensor/python/ops/ops_test.py | 32 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import range # pylint: disable=redefined-builtin
from tensorflow.contrib.labeled_tensor.python.ops import core
from tensorflow.contrib.labeled_tensor.python.ops import ops
from tensorflow.contrib.labeled_tensor.python.ops import test_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test as test_lib
class Base(test_util.Base):
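  """Shared fixture: a 4-D LabeledTensor with axes x, channel, z and probs.
  x, channel and probs carry tick labels; z is a bare (unlabeled) axis. setUp
  also derives x_probs_lt and channel_probs_lt slices reused by tests below."""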
def setUp(self):
super(Base, self).setUp()
self.x_size = 7
self.channel_size = 3
self.z_size = 4
self.probs_size = 11
tensor = math_ops.range(0, self.x_size * self.channel_size * self.z_size *
self.probs_size)
tensor = array_ops.reshape(
tensor, [self.x_size, self.channel_size, self.z_size, self.probs_size])
a0 = ('x', range(self.x_size))
a1 = ('channel', ['red', 'green', 'blue'])
a2 = 'z'
a3 = ('probs', np.linspace(0.0, 1.0, self.probs_size))
self.tensor = tensor
self.a0 = a0
self.a1 = a1
self.a2 = a2
self.a2_resolved = ('z', self.z_size)
self.a3 = a3
self.original_lt = core.LabeledTensor(tensor, [a0, a1, a2, a3])
self.x_probs_lt = core.slice_function(self.original_lt, {'z': 0})
self.x_probs_lt = ops.select(self.x_probs_lt, {'channel': 'red'})
self.channel_probs_lt = core.slice_function(self.original_lt,
{'x': 3,
'z': 0})
class SelectTest(Base):
def test_name(self):
select_lt = ops.select(self.original_lt, {'channel': 'green'})
self.assertIn('lt_select', select_lt.name)
def test_scalar(self):
select_lt = ops.select(self.original_lt, {'channel': 'green'})
golden_lt = core.LabeledTensor(self.tensor[:, 1, :, :],
[self.a0, self.a2, self.a3])
self.assertLabeledTensorsEqual(select_lt, golden_lt)
def test_slice(self):
select_lt = ops.select(self.original_lt, {'channel': slice('red', 'green')})
a1_sliced = ('channel', ['red', 'green'])
golden_lt = core.LabeledTensor(self.tensor[:, :2, :, :],
[self.a0, a1_sliced, self.a2, self.a3])
self.assertLabeledTensorsEqual(select_lt, golden_lt)
def test_slices(self):
select_lt = ops.select(self.original_lt,
{'x': slice(1, 4),
'channel': slice('green', None)})
a0_sliced = ('x', range(1, 5))
a1_sliced = ('channel', ['green', 'blue'])
golden_lt = core.LabeledTensor(self.tensor[1:5, 1:, :, :],
[a0_sliced, a1_sliced, self.a2, self.a3])
self.assertLabeledTensorsEqual(select_lt, golden_lt)
def test_list(self):
select_lt = ops.select(self.original_lt, {'channel': ['red', 'green']})
a1_sliced = ('channel', ['red', 'green'])
golden_lt = core.LabeledTensor(self.tensor[:, :2, :, :],
[self.a0, a1_sliced, self.a2, self.a3])
self.assertLabeledTensorsEqual(select_lt, golden_lt)
def test_list_one_item(self):
select_lt = ops.select(self.original_lt, {'channel': ['red']})
a1_sliced = ('channel', ['red'])
golden_lt = core.LabeledTensor(self.tensor[:, :1, :, :],
[self.a0, a1_sliced, self.a2, self.a3])
self.assertLabeledTensorsEqual(select_lt, golden_lt)
def test_list_zero_items(self):
select_lt = ops.select(self.original_lt, {'channel': []})
golden_lt = core.LabeledTensor(self.tensor[:, :0, :, :],
[self.a0, 'channel', self.a2, self.a3])
self.assertLabeledTensorsEqual(select_lt, golden_lt)
def test_scalars(self):
select_lt = ops.select(self.original_lt, {'x': 1, 'channel': 'green'})
golden_lt = core.LabeledTensor(self.tensor[1, 1, :, :], [self.a2, self.a3])
self.assertLabeledTensorsEqual(select_lt, golden_lt)
def test_invalid_input(self):
with self.assertRaises(ValueError):
ops.select(self.original_lt, {'foo': 1})
with self.assertRaises(ValueError):
ops.select(self.original_lt, {'z': 1})
with self.assertRaises(KeyError):
ops.select(self.original_lt, {'channel': 'purple'})
with self.assertRaises(KeyError):
ops.select(self.original_lt, {'channel': ['red', 'purple']})
with self.assertRaises(NotImplementedError):
ops.select(self.original_lt, {'channel': ['red'], 'x': [1]})
with self.assertRaises(NotImplementedError):
ops.select(self.original_lt, {'channel': ['red'], 'x': 1})
with self.assertRaises(NotImplementedError):
ops.select(self.original_lt, {'channel': slice('red', 'green', 2)})
class ConcatTest(Base):
def setUp(self):
super(ConcatTest, self).setUp()
self.red_lt = ops.select(self.original_lt, {'channel': ['red']})
self.green_lt = ops.select(self.original_lt, {'channel': ['green']})
self.blue_lt = ops.select(self.original_lt, {'channel': ['blue']})
def test_name(self):
concat_lt = ops.concat([self.red_lt, self.blue_lt], 'channel')
self.assertIn('lt_concat', concat_lt.name)
def test(self):
concat_lt = ops.concat([self.red_lt, self.green_lt], 'channel')
golden_lt = ops.select(self.original_lt, {'channel': ['red', 'green']})
self.assertLabeledTensorsEqual(concat_lt, golden_lt)
def test_transposed(self):
green_transposed = core.transpose(self.green_lt,
['probs', 'channel', 'z', 'x'])
with self.assertRaises(ValueError):
ops.concat([self.red_lt, green_transposed], 'channel')
def test_invalid_input(self):
with self.assertRaises(ValueError):
ops.concat([], 'channel')
with self.assertRaises(ValueError):
ops.concat([self.red_lt, self.red_lt], 'channel')
with self.assertRaises(ValueError):
ops.concat([self.red_lt, self.red_lt], 'foo')
class PackTest(Base):
def test_name(self):
pack_lt = ops.pack([self.original_lt, self.original_lt], 'batch')
self.assertIn('lt_pack', pack_lt.name)
def test(self):
pack_lt = ops.pack([self.original_lt, self.original_lt], 'batch')
golden_lt = core.LabeledTensor(
array_ops.stack([self.original_lt.tensor, self.original_lt.tensor]),
['batch', self.a0, self.a1, self.a2, self.a3])
self.assertLabeledTensorsEqual(pack_lt, golden_lt)
def test_axis(self):
pack_lt = ops.pack(
[self.original_lt, self.original_lt], new_axis='batch', axis_position=4)
golden_lt = core.LabeledTensor(
array_ops.stack(
[self.original_lt.tensor, self.original_lt.tensor], axis=4),
[self.a0, self.a1, self.a2, self.a3, 'batch'])
self.assertLabeledTensorsEqual(pack_lt, golden_lt)
def test_invalid_input(self):
with self.assertRaises(ValueError):
ops.pack([self.original_lt, self.original_lt], 'channel')
class UnpackTest(Base):
def test_name(self):
unpack_lts = ops.unpack(self.original_lt)
for t in unpack_lts:
self.assertIn('lt_unpack', t.name)
def test(self):
unpack_lt = ops.unpack(self.original_lt)[0]
golden_lt = core.LabeledTensor(
array_ops.unstack(self.original_lt.tensor)[0],
[self.a1, self.a2, self.a3])
self.assertLabeledTensorsEqual(unpack_lt, golden_lt)
def test_axis(self):
unpack_lt = ops.unpack(self.original_lt, axis_name='z')[0]
golden_lt = core.LabeledTensor(
array_ops.unstack(
self.original_lt.tensor, axis=2)[0], [self.a0, self.a1, self.a3])
self.assertLabeledTensorsEqual(unpack_lt, golden_lt)
def test_invalid_input(self):
with self.assertRaises(ValueError):
ops.unpack(self.original_lt, axis_name='not_found')
class ReshapeTest(Base):
def test_name(self):
reshape_lt = ops.reshape(self.original_lt, ['channel'], ['foo'])
self.assertIn('lt_reshape', reshape_lt.name)
def test_identity(self):
reshape_lt = ops.reshape(self.original_lt,
self.original_lt.axes.keys(),
self.original_lt.axes.values())
self.assertLabeledTensorsEqual(reshape_lt, self.original_lt)
def test_known_size(self):
new_dim_size = self.channel_size * self.z_size * self.probs_size
reshape_lt = ops.reshape(self.original_lt, ['channel', 'z', 'probs'],
[('new_dim', new_dim_size)])
golden_lt = core.LabeledTensor(
array_ops.reshape(self.original_lt.tensor, [self.x_size, -1]),
[self.original_lt.axes['x'], 'new_dim'])
self.assertLabeledTensorsEqual(reshape_lt, golden_lt)
def test_unknown_size(self):
reshape_lt = ops.reshape(self.original_lt, ['channel', 'z', 'probs'],
['new_dim'])
golden_lt = core.LabeledTensor(
array_ops.reshape(self.original_lt.tensor, [self.x_size, -1]),
[self.original_lt.axes['x'], 'new_dim'])
self.assertLabeledTensorsEqual(reshape_lt, golden_lt)
def test_unknown_dimension(self):
orig_lt = core.LabeledTensor(
array_ops.placeholder(dtypes.float32, [None]), ['x'])
reshape_lt = ops.reshape(orig_lt, ['x'], ['y', ('z', 1)])
self.assertEqual(reshape_lt.axes, core.Axes([('y', None), ('z', 1)]))
with self.test_session() as sess:
result = sess.run(reshape_lt, feed_dict={orig_lt.tensor: [1, 2]})
np.testing.assert_array_equal(result, [[1], [2]])
def test_with_labels(self):
new_dim_size = self.channel_size * self.z_size * self.probs_size
reshape_lt = ops.reshape(self.original_lt, ['channel', 'z', 'probs'],
[('new_dim', range(new_dim_size))])
golden_lt = core.LabeledTensor(
array_ops.reshape(self.original_lt.tensor, [self.x_size, -1]),
[self.original_lt.axes['x'], ('new_dim', range(new_dim_size))])
self.assertLabeledTensorsEqual(reshape_lt, golden_lt)
def test_invalid_input(self):
with self.assertRaisesRegexp(ValueError, 'not contained in the set'):
ops.reshape(self.original_lt, ['foo'], ['bar'])
with self.assertRaisesRegexp(core.AxisOrderError,
'not a slice of axis names'):
ops.reshape(self.original_lt, ['probs', 'z'], ['bar'])
with self.assertRaisesRegexp(ValueError, 'at most one axis in new_axes'):
ops.reshape(self.original_lt, ['probs'], ['foo', 'bar'])
class RenameAxisTest(Base):
def test_name(self):
rename_axis_lt = ops.rename_axis(self.original_lt, 'channel', 'foo')
self.assertIn('lt_rename_axis', rename_axis_lt.name)
def test_identity(self):
rename_axis_lt = ops.rename_axis(self.original_lt, 'channel', 'channel')
self.assertLabeledTensorsEqual(rename_axis_lt, self.original_lt)
def test_new_name(self):
rename_axis_lt = ops.rename_axis(self.original_lt, 'channel', 'foo')
expected_axes = [(name if name != 'channel' else 'foo', axis.value)
for name, axis in self.original_lt.axes.items()]
expected_lt = core.LabeledTensor(self.original_lt.tensor, expected_axes)
self.assertLabeledTensorsEqual(rename_axis_lt, expected_lt)
def test_invalid_input(self):
with self.assertRaisesRegexp(ValueError, 'not contained in the set'):
ops.rename_axis(self.original_lt, 'foo', 'bar')
class BatchTest(Base):
def setUp(self):
super(BatchTest, self).setUp()
tensors = []
for i in range(10):
offset_lt = core.LabeledTensor(constant_op.constant(i), [])
tensors.append(core.add(self.original_lt, offset_lt))
self.pack_lt = ops.pack(tensors, 'batch')
def test_name(self):
batch_ops = ops.batch(
[self.pack_lt, self.pack_lt], batch_size=2, enqueue_many=True)
for bo in batch_ops:
self.assertIn('lt_batch', bo.name)
def test_enqueue_many(self):
[batch_2_op] = ops.batch([self.pack_lt], batch_size=2, enqueue_many=True)
self.assertEqual(len(batch_2_op.axes['batch']), 2)
[batch_10_op] = ops.batch([batch_2_op], batch_size=10, enqueue_many=True)
self.assertLabeledTensorsEqual(self.pack_lt, batch_10_op)
def test_no_enqueue_many(self):
[batch_2_op] = ops.batch([self.original_lt], batch_size=2)
self.assertEqual(len(batch_2_op.axes['batch']), 2)
[batch_10_op] = ops.batch([batch_2_op], batch_size=10, enqueue_many=True)
self.assertLabeledTensorsEqual(
ops.pack(10 * [self.original_lt], 'batch'), batch_10_op)
def test_invalid_input(self):
with self.assertRaises(ValueError):
ops.batch([self.original_lt], 3, enqueue_many=True)
def test_allow_smaller_final_batch(self):
[batch_2_op] = ops.batch(
[self.original_lt], batch_size=2, allow_smaller_final_batch=True)
self.assertEqual(batch_2_op.axes['batch'].size, None)
class ShuffleBatchTest(Base):
def setUp(self):
super(ShuffleBatchTest, self).setUp()
tensors = []
for i in range(10):
offset_lt = core.LabeledTensor(constant_op.constant(i), [])
tensors.append(core.add(self.original_lt, offset_lt))
self.pack_lt = ops.pack(tensors, 'batch')
def test_name(self):
batch_lts = ops.shuffle_batch(
[self.pack_lt, self.pack_lt], batch_size=2, enqueue_many=True)
for blt in batch_lts:
self.assertIn('lt_shuffle_batch', blt.name)
def test_enqueue_many(self):
[batch_2_lt] = ops.shuffle_batch(
[self.pack_lt],
batch_size=2,
enqueue_many=True,
min_after_dequeue=8,
seed=0)
self.assertEqual(len(batch_2_lt.axes['batch']), 2)
[batch_10_lt] = ops.batch([batch_2_lt], batch_size=10, enqueue_many=True)
self.assertEqual(batch_10_lt.axes, self.pack_lt.axes)
[batch_10, pack] = self.eval([batch_10_lt.tensor, self.pack_lt.tensor])
self.assertFalse((batch_10 == pack).all())
def test_allow_smaller_final_batch(self):
[batch_2_op] = ops.shuffle_batch(
[self.original_lt], batch_size=2, allow_smaller_final_batch=True)
self.assertEqual(batch_2_op.axes['batch'].size, None)
class RandomCropTest(Base):
def test_name(self):
crop_lt = ops.random_crop(self.original_lt, {'probs': 3})
self.assertIn('lt_random_crop', crop_lt.name)
def test_single(self):
crop_lt = ops.random_crop(self.original_lt, {'probs': 3})
self.assertEqual(
core.Axes([self.a0, self.a1, self.a2_resolved, ('probs', 3)]),
crop_lt.axes)
def test_double(self):
crop_lt = ops.random_crop(self.original_lt, {'probs': 3, 'channel': 2})
self.assertEqual(
core.Axes([self.a0, ('channel', 2), self.a2_resolved, ('probs', 3)]),
crop_lt.axes)
def test_size1(self):
crop_lt = ops.random_crop(self.original_lt, {'probs': 1})
self.assertEqual(
core.Axes([self.a0, self.a1, self.a2_resolved, ('probs', 1)]),
crop_lt.axes)
def test_different_seeds(self):
crop_0_lt = ops.random_crop(
self.original_lt, {'probs': 3,
'channel': 2}, seed=0)
crop_1_lt = ops.random_crop(
self.original_lt, {'probs': 3,
'channel': 2}, seed=1)
self.assertEqual(crop_0_lt.axes, crop_1_lt.axes)
[crop_0, crop_1] = self.eval([crop_0_lt.tensor, crop_1_lt.tensor])
self.assertFalse((crop_0 == crop_1).all())
def test_identical_seeds(self):
crop_0_lt = ops.random_crop(
self.original_lt, {'probs': 3,
'channel': 2}, seed=0)
crop_1_lt = ops.random_crop(
self.original_lt, {'probs': 3,
'channel': 2}, seed=0)
self.assertLabeledTensorsEqual(crop_0_lt, crop_1_lt)
def test_crop_idempotent(self):
crop_0_lt = ops.random_crop(
self.original_lt, {'probs': 3,
'channel': 2}, seed=0)
crop_1_lt = ops.random_crop(crop_0_lt, {'probs': 3, 'channel': 2}, seed=1)
self.assertLabeledTensorsEqual(crop_0_lt, crop_1_lt)
def test_invalid_input(self):
with self.assertRaises(ValueError):
ops.random_crop(self.original_lt, {'foobar': 2})
class MapFnTest(Base):
def test_name(self):
map_lt = ops.map_fn(core.identity, self.original_lt)
self.assertIn('lt_map_fn', map_lt.name)
def test_identity(self):
map_lt = ops.map_fn(core.identity, self.original_lt)
self.assertLabeledTensorsEqual(map_lt, self.original_lt)
def test_callable_object(self):
class Identity(object):
def __call__(self, other):
return other
map_lt = ops.map_fn(Identity(), self.original_lt)
self.assertLabeledTensorsEqual(map_lt, self.original_lt)
def test_slice(self):
map_lt = ops.map_fn(lambda t: core.slice_function(t, {'channel': 1}),
self.original_lt)
slice_lt = core.slice_function(self.original_lt, {'channel': 1})
self.assertLabeledTensorsEqual(map_lt, slice_lt)
def test_string(self):
def fn(entry_lt):
op = string_ops.string_join([entry_lt, 'world'])
return core.LabeledTensor(op, [])
tensor_lt = ops.constant(['hi', 'bye'], axes=['batch'])
map_lt = ops.map_fn(fn, tensor_lt)
golden_lt = ops.constant(['hiworld', 'byeworld'], axes=['batch'])
self.assertLabeledTensorsEqual(map_lt, golden_lt)
class FoldlTest(Base):
def test_name(self):
foldl_lt = ops.foldl(core.add, self.original_lt,
core.slice_function(self.original_lt, {'x': 0}))
self.assertIn('lt_foldl', foldl_lt.name)
def test_sum(self):
initializer_lt = ops.constant([0, 10], axes=['y'])
tensor_lt = ops.constant([[1, 2], [3, 4], [5, 6]], axes=['x', 'y'])
foldl_lt = ops.foldl(core.add, tensor_lt, initializer_lt)
golden_lt = ops.constant([9, 22], axes=['y'])
self.assertLabeledTensorsEqual(foldl_lt, golden_lt)
class SqueezeTest(Base):
def setUp(self):
super(SqueezeTest, self).setUp()
self.squeezable_lt = core.slice_function(
self.original_lt, {'channel': slice(0, 1),
'probs': slice(0, 1)})
def test_name(self):
squeeze_lt = ops.squeeze(self.squeezable_lt)
self.assertIn('lt_squeeze', squeeze_lt.name)
def test_none(self):
none_lt = ops.squeeze(self.squeezable_lt, None)
axes_lt = ops.squeeze(self.squeezable_lt, ['channel', 'probs'])
self.assertLabeledTensorsEqual(none_lt, axes_lt)
def test(self):
squeeze_lt = ops.squeeze(self.squeezable_lt, ['probs'])
golden_lt = core.slice_function(self.squeezable_lt, {'probs': 0})
self.assertLabeledTensorsEqual(squeeze_lt, golden_lt)
def test_invalid_input(self):
with self.assertRaises(ValueError):
ops.squeeze(self.original_lt, ['channel'])
with self.assertRaises(ValueError):
ops.squeeze(self.squeezable_lt, ['foo'])
class MatMulTest(Base):
def test_name(self):
x_lt = core.LabeledTensor(array_ops.ones((3,)), ['x'])
matmul_lt = ops.matmul(x_lt, x_lt)
self.assertIn('lt_matmul', matmul_lt.name)
def test_vector_vector(self):
x_lt = core.LabeledTensor(math_ops.range(3), ['x'])
matmul_lt = ops.matmul(x_lt, x_lt)
golden_lt = core.convert_to_labeled_tensor(5)
self.assertLabeledTensorsEqual(matmul_lt, golden_lt)
def test_matrix_vector(self):
xy_lt = core.LabeledTensor(
array_ops.reshape(math_ops.range(6), (2, 3)), ['x', 'y'])
y_lt = core.LabeledTensor(math_ops.range(3), ['y'])
matmul_lt = ops.matmul(xy_lt, y_lt)
golden_lt = core.LabeledTensor(
math_ops.matmul(xy_lt.tensor, array_ops.reshape(y_lt.tensor,
(-1, 1)))[:, 0], ['x'])
self.assertLabeledTensorsEqual(matmul_lt, golden_lt)
matmul_lt = ops.matmul(y_lt, xy_lt)
self.assertLabeledTensorsEqual(matmul_lt, golden_lt)
def test_matrix_matrix(self):
xy_lt = core.LabeledTensor(
array_ops.reshape(math_ops.range(6), (2, 3)), ['x', 'y'])
yz_lt = core.LabeledTensor(
array_ops.reshape(math_ops.range(12), (3, 4)), ['y', 'z'])
matmul_lt = ops.matmul(xy_lt, yz_lt)
golden_lt = core.LabeledTensor(
math_ops.matmul(xy_lt.tensor, yz_lt.tensor), ['x', 'z'])
self.assertLabeledTensorsEqual(matmul_lt, golden_lt)
transpose = lambda x: core.transpose(x, list(x.axes.keys())[::-1])
matmul_lt = ops.matmul(xy_lt, transpose(yz_lt))
self.assertLabeledTensorsEqual(matmul_lt, golden_lt)
matmul_lt = ops.matmul(transpose(xy_lt), yz_lt)
self.assertLabeledTensorsEqual(matmul_lt, golden_lt)
matmul_lt = ops.matmul(transpose(xy_lt), transpose(yz_lt))
self.assertLabeledTensorsEqual(matmul_lt, golden_lt)
matmul_lt = ops.matmul(yz_lt, xy_lt)
self.assertLabeledTensorsEqual(matmul_lt, transpose(golden_lt))
def test_matrix_matrix_axis_order(self):
xy_lt = core.LabeledTensor(
array_ops.reshape(math_ops.range(6), (2, 3)), ['x', 'y'])
yz_lt = core.LabeledTensor(
array_ops.reshape(math_ops.range(12), (3, 4)), ['y', 'z'])
golden_lt = core.LabeledTensor(
math_ops.matmul(xy_lt.tensor, yz_lt.tensor), ['x', 'z'])
with core.axis_order_scope(['x', 'y', 'z']):
matmul_lt = ops.matmul(xy_lt, yz_lt)
self.assertLabeledTensorsEqual(matmul_lt, golden_lt)
matmul_lt = ops.matmul(yz_lt, xy_lt)
self.assertLabeledTensorsEqual(matmul_lt, golden_lt)
def test_invalid(self):
scalar_lt = core.LabeledTensor(array_ops.ones(()), [])
x_lt = core.LabeledTensor(array_ops.ones((2,)), ['x'])
x2_lt = core.LabeledTensor(array_ops.ones((3,)), ['x'])
y_lt = core.LabeledTensor(array_ops.ones((3,)), ['y'])
xy_lt = core.LabeledTensor(array_ops.ones((2, 3)), ['x', 'y'])
xyz_lt = core.LabeledTensor(array_ops.ones((2, 3, 1)), ['x', 'y', 'z'])
with self.assertRaisesRegexp(ValueError, 'inputs with at least rank'):
ops.matmul(x_lt, scalar_lt)
with self.assertRaises(NotImplementedError):
ops.matmul(x_lt, xyz_lt)
with self.assertRaisesRegexp(ValueError, 'exactly one axis in common'):
ops.matmul(x_lt, y_lt)
with self.assertRaises(NotImplementedError):
ops.matmul(xy_lt, xy_lt)
with self.assertRaisesRegexp(ValueError, 'does not match'):
ops.matmul(x_lt, x2_lt)
class ReduceSumTest(Base):
def test_name(self):
sum_lt = ops.reduce_sum(self.original_lt, {'channel'})
self.assertIn('lt_reduce_sum', sum_lt.name)
def test_drop_axis(self):
sum_lt = ops.reduce_sum(self.original_lt, {'channel'})
golden_lt = core.LabeledTensor(
math_ops.reduce_sum(self.original_lt.tensor, 1),
[self.a0, self.a2, self.a3])
self.assertLabeledTensorsEqual(sum_lt, golden_lt)
def test_drop_scalar_axis(self):
sum_lt = ops.reduce_sum(self.original_lt, 'channel')
golden_lt = core.LabeledTensor(
math_ops.reduce_sum(self.original_lt.tensor, 1),
[self.a0, self.a2, self.a3])
self.assertLabeledTensorsEqual(sum_lt, golden_lt)
def test_keep_axis(self):
sum_lt = ops.reduce_sum(self.original_lt, {('channel', 'hihowareyou')})
golden_lt = core.LabeledTensor(
math_ops.reduce_sum(
self.original_lt.tensor, 1, keep_dims=True),
[self.a0, ('channel', ['hihowareyou']), self.a2, self.a3])
self.assertLabeledTensorsEqual(sum_lt, golden_lt)
def test_keep_scalar_axis(self):
sum_lt = ops.reduce_sum(self.original_lt, ('channel', 'hihowareyou'))
golden_lt = core.LabeledTensor(
math_ops.reduce_sum(
self.original_lt.tensor, 1, keep_dims=True),
[self.a0, ('channel', ['hihowareyou']), self.a2, self.a3])
self.assertLabeledTensorsEqual(sum_lt, golden_lt)
def test_scalar(self):
scalar_lt = core.LabeledTensor(constant_op.constant(42), [])
reduce_lt = ops.reduce_sum(scalar_lt, [])
self.assertLabeledTensorsEqual(reduce_lt, scalar_lt)
def test_empty_list(self):
reduce_lt = ops.reduce_sum(self.original_lt, [])
self.assertLabeledTensorsEqual(reduce_lt, self.original_lt)
def test_none(self):
sum_lt = ops.reduce_sum(self.original_lt)
golden_lt = core.LabeledTensor(
math_ops.reduce_sum(self.original_lt.tensor), [])
self.assertLabeledTensorsEqual(sum_lt, golden_lt)
def test_function_docstring_and_name(self):
self.assertIn('tf.reduce_sum', ops.reduce_sum.__doc__)
self.assertEqual('reduce_sum', ops.reduce_sum.__name__)
class ReduceMeanTest(Base):
def test_name(self):
actual_lt = ops.reduce_mean(self.original_lt, {'channel'})
self.assertIn('lt_reduce_mean', actual_lt.name)
def test(self):
actual_lt = ops.reduce_mean(self.original_lt, {'channel'})
golden_lt = core.LabeledTensor(
math_ops.reduce_mean(self.original_lt.tensor, 1),
[self.a0, self.a2, self.a3])
self.assertLabeledTensorsEqual(actual_lt, golden_lt)
class ReduceProdTest(Base):
def test_name(self):
result_lt = ops.reduce_prod(self.original_lt, {'channel'})
self.assertIn('lt_reduce_prod', result_lt.name)
def test(self):
result_lt = ops.reduce_prod(self.original_lt, {'channel'})
golden_lt = core.LabeledTensor(
math_ops.reduce_prod(self.original_lt.tensor, 1),
[self.a0, self.a2, self.a3])
self.assertLabeledTensorsEqual(result_lt, golden_lt)
class ReduceMinTest(Base):
def test_name(self):
result_lt = ops.reduce_min(self.original_lt, {'channel'})
self.assertIn('lt_reduce_min', result_lt.name)
def test(self):
result_lt = ops.reduce_min(self.original_lt, {'channel'})
golden_lt = core.LabeledTensor(
math_ops.reduce_min(self.original_lt.tensor, 1),
[self.a0, self.a2, self.a3])
self.assertLabeledTensorsEqual(result_lt, golden_lt)
class ReduceMaxTest(Base):
def test_name(self):
result_lt = ops.reduce_max(self.original_lt, {'channel'})
self.assertIn('lt_reduce_max', result_lt.name)
def test(self):
result_lt = ops.reduce_max(self.original_lt, {'channel'})
golden_lt = core.LabeledTensor(
math_ops.reduce_max(self.original_lt.tensor, 1),
[self.a0, self.a2, self.a3])
self.assertLabeledTensorsEqual(result_lt, golden_lt)
class BaseReduceBoolean(Base):
def setUp(self):
super(BaseReduceBoolean, self).setUp()
self.bool_tensor = math_ops.cast(self.original_lt.tensor > 5, dtypes.bool)
self.bool_lt = core.LabeledTensor(self.bool_tensor, self.original_lt.axes)
class ReduceAllTest(BaseReduceBoolean):
def test_name(self):
result_lt = ops.reduce_all(self.bool_lt, {'channel'})
self.assertIn('lt_reduce_all', result_lt.name)
def test(self):
result_lt = ops.reduce_all(self.bool_lt, {'channel'})
golden_lt = core.LabeledTensor(
math_ops.reduce_all(self.bool_tensor, 1), [self.a0, self.a2, self.a3])
self.assertLabeledTensorsEqual(result_lt, golden_lt)
class ReduceAnyTest(BaseReduceBoolean):
def test_name(self):
result_lt = ops.reduce_any(self.bool_lt, {'channel'})
self.assertIn('lt_reduce_any', result_lt.name)
def test(self):
result_lt = ops.reduce_any(self.bool_lt, {'channel'})
golden_lt = core.LabeledTensor(
math_ops.reduce_any(self.bool_tensor, 1), [self.a0, self.a2, self.a3])
self.assertLabeledTensorsEqual(result_lt, golden_lt)
class TileTest(Base):
def test_name(self):
tile_lt = ops.tile(self.original_lt, {'z': 2})
self.assertIn('lt_tile', tile_lt.name)
def test(self):
for multiple in [2, constant_op.constant(2)]:
tile_lt = ops.tile(self.original_lt, {'z': multiple})
golden_op = array_ops.tile(self.original_lt.tensor, [1, 1, multiple, 1])
golden_axes = [
'z' if axis.name == 'z' else axis
for axis in self.original_lt.axes.values()
]
golden_lt = core.LabeledTensor(golden_op, golden_axes)
self.assertLabeledTensorsEqual(tile_lt, golden_lt)
def test_invalid_input(self):
with self.assertRaisesRegexp(ValueError, 'are not contained in the set'):
ops.tile(self.original_lt, {'foo': 5})
with self.assertRaisesRegexp(ValueError, 'axes with tick labels'):
ops.tile(self.original_lt, {'x': 5})
class PadTest(Base):
def test_name(self):
pad_lt = ops.pad(self.original_lt,
{'x': (1, 1),
'channel': ([], ['alpha'])})
self.assertIn('lt_pad', pad_lt.name)
def test(self):
pad_lt = ops.pad(self.original_lt,
{'x': (1, 1),
'channel': ([], ['alpha'])})
golden_op = array_ops.pad(self.original_lt.tensor, [[1, 1], [0, 1], [0, 0],
[0, 0]])
golden_axes = [('x', self.x_size + 2),
('channel', ['red', 'green', 'blue', 'alpha']), self.a2,
self.a3]
golden_lt = core.LabeledTensor(golden_op, golden_axes)
self.assertLabeledTensorsEqual(pad_lt, golden_lt)
def test_invalid_input(self):
with self.assertRaisesRegexp(ValueError, 'are not contained in the set'):
ops.pad(self.original_lt, {'foo': (1, 1), 'channel': ([], ['alpha'])})
class ConstantTest(Base):
def test_name(self):
constant_lt = ops.constant(1)
self.assertIn('lt_constant', constant_lt.name)
def test_scalar(self):
constant_lt = ops.constant(1)
golden_lt = core.LabeledTensor(constant_op.constant(1), [])
self.assertLabeledTensorsEqual(constant_lt, golden_lt)
def test_infer_shape(self):
constant_lt = ops.constant([1, 2], axes=['x'])
golden_lt = core.LabeledTensor(constant_op.constant([1, 2]), ['x'])
self.assertLabeledTensorsEqual(constant_lt, golden_lt)
def test_specify_shape(self):
constant_lt = ops.constant(1, axes=[('x', 3)])
golden_lt = core.LabeledTensor(constant_op.constant(1, shape=(3,)), ['x'])
self.assertLabeledTensorsEqual(constant_lt, golden_lt)
def test_existing_axes(self):
golden_lt = core.LabeledTensor(constant_op.constant([1, 2]), ['x'])
constant_lt = ops.constant([1, 2], axes=golden_lt.axes)
self.assertLabeledTensorsEqual(constant_lt, golden_lt)
class ZerosLikeTest(Base):
def test_name(self):
like_lt = ops.zeros_like(self.original_lt)
self.assertIn('lt_zeros_like', like_lt.name)
def test(self):
like_lt = ops.zeros_like(self.original_lt)
golden_lt = core.LabeledTensor(
array_ops.zeros_like(self.original_lt.tensor), self.original_lt.axes)
self.assertLabeledTensorsEqual(like_lt, golden_lt)
class OnesLikeTest(Base):
def test_name(self):
like_lt = ops.ones_like(self.original_lt)
self.assertIn('lt_ones_like', like_lt.name)
def test(self):
like_lt = ops.ones_like(self.original_lt)
golden_lt = core.LabeledTensor(
array_ops.ones_like(self.original_lt.tensor), self.original_lt.axes)
self.assertLabeledTensorsEqual(like_lt, golden_lt)
class CastTest(Base):
def test_name(self):
cast_lt = ops.cast(self.original_lt, dtypes.float16)
self.assertIn('lt_cast', cast_lt.name)
def test(self):
cast_lt = ops.cast(self.original_lt, dtypes.float16)
golden_lt = core.LabeledTensor(
math_ops.cast(self.original_lt.tensor, dtypes.float16),
self.original_lt.axes)
self.assertLabeledTensorsEqual(cast_lt, golden_lt)
class VerifyTensorAllFiniteTest(Base):
def setUp(self):
super(VerifyTensorAllFiniteTest, self).setUp()
self.finite_lt = core.LabeledTensor(constant_op.constant(42.0), [])
self.nan_lt = core.LabeledTensor(constant_op.constant(np.nan), [])
self.checked_finite_lt = ops.verify_tensor_all_finite(self.finite_lt, '')
self.checked_nan_lt = ops.verify_tensor_all_finite(self.nan_lt, '')
def test_name(self):
self.assertIn('lt_verify_tensor_all_finite', self.checked_finite_lt.name)
self.assertIn('lt_verify_tensor_all_finite', self.checked_nan_lt.name)
def test_finite(self):
self.assertLabeledTensorsEqual(self.finite_lt, self.checked_finite_lt)
def test_nan(self):
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
'Tensor had NaN values'):
self.eval([self.checked_nan_lt])
class BooleanMaskTest(Base):
def test_name(self):
mask = core.LabeledTensor(math_ops.range(7) > 3, [self.a0])
masked_lt = ops.boolean_mask(self.original_lt, mask)
self.assertIn('lt_boolean_mask', masked_lt.name)
def test(self):
mask = core.LabeledTensor(math_ops.range(7) > 3, [self.a0])
masked_lt = ops.boolean_mask(self.original_lt, mask)
golden_lt = core.LabeledTensor(
array_ops.boolean_mask(self.original_lt.tensor, mask.tensor),
['x', self.a1, self.a2, self.a3])
self.assertLabeledTensorsEqual(masked_lt, golden_lt)
def test_invalid_rank(self):
mask = core.LabeledTensor(array_ops.ones((7, 3)) > 3, [self.a0, self.a1])
with self.assertRaises(NotImplementedError):
ops.boolean_mask(self.original_lt, mask)
def test_mismatched_axis(self):
mask = core.LabeledTensor(math_ops.range(7) > 3, ['foo'])
with self.assertRaisesRegexp(ValueError, 'not equal'):
ops.boolean_mask(self.original_lt, mask)
class WhereTest(Base):
def test_name(self):
condition = core.LabeledTensor(math_ops.range(5) < 3, ['x'])
where_lt = ops.where(condition, condition, condition)
self.assertIn('lt_where', where_lt.name)
def test(self):
condition = core.LabeledTensor(math_ops.range(5) < 3, ['x'])
x = core.LabeledTensor(array_ops.ones(5), ['x'])
y = core.LabeledTensor(array_ops.zeros(5), ['x'])
where_lt = ops.where(condition, x, y)
golden_lt = core.LabeledTensor(
array_ops.concat([array_ops.ones(3), array_ops.zeros(2)], 0), ['x'])
self.assertLabeledTensorsEqual(where_lt, golden_lt)
def test_mismatched_axes(self):
condition = core.LabeledTensor(math_ops.range(5) < 3, ['x'])
with self.assertRaisesRegexp(ValueError, 'equal axes'):
ops.where(condition, condition[:3], condition)
with self.assertRaisesRegexp(ValueError, 'equal axes'):
ops.where(condition, condition, condition[:3])
if __name__ == '__main__':
test_lib.main()
|
ybellavance/python-for-android | refs/heads/master | python-modules/twisted/twisted/application/app.py | 49 | # -*- test-case-name: twisted.test.test_application,twisted.test.test_twistd -*-
# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
import sys, os, pdb, getpass, traceback, signal, warnings
from twisted.python import runtime, log, usage, failure, util, logfile
from twisted.python.versions import Version
from twisted.python.reflect import qual
from twisted.python.deprecate import deprecated
from twisted.python.log import ILogObserver
from twisted.persisted import sob
from twisted.application import service, reactors
from twisted.internet import defer
from twisted import copyright
# Expose the new implementation of installReactor at the old location.
from twisted.application.reactors import installReactor
from twisted.application.reactors import NoSuchReactor
class _BasicProfiler(object):
"""
@ivar saveStats: if C{True}, save the stats information instead of the
human readable format
@type saveStats: C{bool}
@ivar profileOutput: the name of the file use to print profile data.
@type profileOutput: C{str}
"""
def __init__(self, profileOutput, saveStats):
self.profileOutput = profileOutput
self.saveStats = saveStats
def _reportImportError(self, module, e):
"""
Helper method to report an import error with a profile module. This
has to be explicit because some of these modules are removed by
distributions due to them being non-free.
"""
s = "Failed to import module %s: %s" % (module, e)
s += """
This is most likely caused by your operating system not including
the module due to it being non-free. Either do not use the option
--profile, or install the module; your operating system vendor
may provide it in a separate package.
"""
raise SystemExit(s)
class ProfileRunner(_BasicProfiler):
"""
Runner for the standard profile module.
"""
def run(self, reactor):
"""
Run reactor under the standard profiler.
"""
try:
import profile
except ImportError, e:
self._reportImportError("profile", e)
p = profile.Profile()
p.runcall(reactor.run)
if self.saveStats:
p.dump_stats(self.profileOutput)
else:
tmp, sys.stdout = sys.stdout, open(self.profileOutput, 'a')
try:
p.print_stats()
finally:
sys.stdout, tmp = tmp, sys.stdout
tmp.close()
class HotshotRunner(_BasicProfiler):
"""
Runner for the hotshot profile module.
"""
def run(self, reactor):
"""
Run reactor under the hotshot profiler.
"""
try:
import hotshot.stats
except (ImportError, SystemExit), e:
# Certain versions of Debian (and Debian derivatives) raise
# SystemExit when importing hotshot if the "non-free" profiler
# module is not installed. Someone eventually recognized this
# as a bug and changed the Debian packaged Python to raise
# ImportError instead. Handle both exception types here in
# order to support the versions of Debian which have this
# behavior. The bug report which prompted the introduction of
# this highly undesirable behavior should be available online at
# <http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=334067>.
# There seems to be no corresponding bug report which resulted
# in the behavior being removed. -exarkun
self._reportImportError("hotshot", e)
# this writes stats straight out
p = hotshot.Profile(self.profileOutput)
p.runcall(reactor.run)
if self.saveStats:
# stats are automatically written to file, nothing to do
return
else:
s = hotshot.stats.load(self.profileOutput)
s.strip_dirs()
s.sort_stats(-1)
if getattr(s, 'stream', None) is not None:
# Python 2.5 and above supports a stream attribute
s.stream = open(self.profileOutput, 'w')
s.print_stats()
s.stream.close()
else:
# But we have to use a trick for Python < 2.5
tmp, sys.stdout = sys.stdout, open(self.profileOutput, 'w')
try:
s.print_stats()
finally:
sys.stdout, tmp = tmp, sys.stdout
tmp.close()
class CProfileRunner(_BasicProfiler):
"""
Runner for the cProfile module.
"""
def run(self, reactor):
"""
Run reactor under the cProfile profiler.
"""
try:
import cProfile, pstats
except ImportError, e:
self._reportImportError("cProfile", e)
p = cProfile.Profile()
p.runcall(reactor.run)
if self.saveStats:
p.dump_stats(self.profileOutput)
else:
stream = open(self.profileOutput, 'w')
s = pstats.Stats(p, stream=stream)
s.strip_dirs()
s.sort_stats(-1)
s.print_stats()
stream.close()
class AppProfiler(object):
"""
Class which selects a specific profile runner based on configuration
options.
@ivar profiler: the name of the selected profiler.
@type profiler: C{str}
"""
profilers = {"profile": ProfileRunner, "hotshot": HotshotRunner,
"cprofile": CProfileRunner}
def __init__(self, options):
saveStats = options.get("savestats", False)
profileOutput = options.get("profile", None)
self.profiler = options.get("profiler", "hotshot").lower()
if self.profiler in self.profilers:
profiler = self.profilers[self.profiler](profileOutput, saveStats)
self.run = profiler.run
else:
raise SystemExit("Unsupported profiler name: %s" % (self.profiler,))
class AppLogger(object):
"""
    Class managing the logging facility of the application.
@ivar _logfilename: The name of the file to which to log, if other than the
default.
@type _logfilename: C{str}
@ivar _observer: log observer added at C{start} and removed at C{stop}.
@type _observer: C{callable}
"""
_observer = None
def __init__(self, options):
self._logfilename = options.get("logfile", "")
def start(self, application):
"""
Initialize the logging system.
If an L{ILogObserver} component has been set on C{application}, then
it will be used as the log observer. Otherwise a log observer will be
created based on the command-line options.
@param application: The application on which to check for an
L{ILogObserver}.
"""
observer = application.getComponent(ILogObserver, None)
if observer is None:
observer = self._getLogObserver()
self._observer = observer
log.startLoggingWithObserver(self._observer)
self._initialLog()
def _initialLog(self):
"""
Print twistd start log message.
"""
from twisted.internet import reactor
log.msg("twistd %s (%s %s) starting up." % (copyright.version,
sys.executable,
runtime.shortPythonVersion()))
log.msg('reactor class: %s.' % (qual(reactor.__class__),))
def _getLogObserver(self):
"""
Create a log observer to be added to the logging system before running
this application.
"""
if self._logfilename == '-' or not self._logfilename:
logFile = sys.stdout
else:
logFile = logfile.LogFile.fromFullPath(self._logfilename)
return log.FileLogObserver(logFile).emit
def stop(self):
"""
Print twistd stop log message.
"""
log.msg("Server Shut Down.")
if self._observer is not None:
log.removeObserver(self._observer)
self._observer = None
def fixPdb():
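    # Monkey-patch pdb.Pdb: add a "stop" command that resumes execution and
    # then stops the reactor cleanly, and make "quit" exit the process hard.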
def do_stop(self, arg):
self.clear_all_breaks()
self.set_continue()
from twisted.internet import reactor
reactor.callLater(0, reactor.stop)
return 1
def help_stop(self):
print """stop - Continue execution, then cleanly shutdown the twisted reactor."""
def set_quit(self):
os._exit(0)
pdb.Pdb.set_quit = set_quit
pdb.Pdb.do_stop = do_stop
pdb.Pdb.help_stop = help_stop
def runReactorWithLogging(config, oldstdout, oldstderr, profiler=None, reactor=None):
"""
Start the reactor, using profiling if specified by the configuration, and
log any error happening in the process.
@param config: configuration of the twistd application.
@type config: L{ServerOptions}
@param oldstdout: initial value of C{sys.stdout}.
@type oldstdout: C{file}
@param oldstderr: initial value of C{sys.stderr}.
@type oldstderr: C{file}
@param profiler: object used to run the reactor with profiling.
@type profiler: L{AppProfiler}
@param reactor: The reactor to use. If C{None}, the global reactor will
be used.
"""
if reactor is None:
from twisted.internet import reactor
try:
if config['profile']:
if profiler is not None:
profiler.run(reactor)
elif config['debug']:
sys.stdout = oldstdout
sys.stderr = oldstderr
if runtime.platformType == 'posix':
signal.signal(signal.SIGUSR2, lambda *args: pdb.set_trace())
signal.signal(signal.SIGINT, lambda *args: pdb.set_trace())
fixPdb()
pdb.runcall(reactor.run)
else:
reactor.run()
except:
if config['nodaemon']:
file = oldstdout
else:
file = open("TWISTD-CRASH.log",'a')
traceback.print_exc(file=file)
file.flush()
def getPassphrase(needed):
if needed:
return getpass.getpass('Passphrase: ')
else:
return None
def getSavePassphrase(needed):
if needed:
passphrase = util.getPassword("Encryption passphrase: ")
else:
return None
class ApplicationRunner(object):
"""
An object which helps running an application based on a config object.
Subclass me and implement preApplication and postApplication
methods. postApplication generally will want to run the reactor
after starting the application.
@ivar config: The config object, which provides a dict-like interface.
@ivar application: Available in postApplication, but not
preApplication. This is the application object.
@ivar profilerFactory: Factory for creating a profiler object, able to
profile the application if options are set accordingly.
@ivar profiler: Instance provided by C{profilerFactory}.
@ivar loggerFactory: Factory for creating object responsible for logging.
@ivar logger: Instance provided by C{loggerFactory}.
"""
profilerFactory = AppProfiler
loggerFactory = AppLogger
def __init__(self, config):
self.config = config
self.profiler = self.profilerFactory(config)
self.logger = self.loggerFactory(config)
def run(self):
"""
Run the application.
"""
self.preApplication()
self.application = self.createOrGetApplication()
self.logger.start(self.application)
self.postApplication()
self.logger.stop()
def startReactor(self, reactor, oldstdout, oldstderr):
"""
Run the reactor with the given configuration. Subclasses should
probably call this from C{postApplication}.
@see: L{runReactorWithLogging}
"""
runReactorWithLogging(
self.config, oldstdout, oldstderr, self.profiler, reactor)
def preApplication(self):
"""
Override in subclass.
This should set up any state necessary before loading and
running the Application.
"""
raise NotImplementedError()
def postApplication(self):
"""
Override in subclass.
This will be called after the application has been loaded (so
the C{application} attribute will be set). Generally this
should start the application and run the reactor.
"""
raise NotImplementedError()
def createOrGetApplication(self):
"""
Create or load an Application based on the parameters found in the
given L{ServerOptions} instance.
If a subcommand was used, the L{service.IServiceMaker} that it
represents will be used to construct a service to be added to
a newly-created Application.
Otherwise, an application will be loaded based on parameters in
the config.
"""
if self.config.subCommand:
# If a subcommand was given, it's our responsibility to create
# the application, instead of load it from a file.
# loadedPlugins is set up by the ServerOptions.subCommands
# property, which is iterated somewhere in the bowels of
# usage.Options.
plg = self.config.loadedPlugins[self.config.subCommand]
ser = plg.makeService(self.config.subOptions)
application = service.Application(plg.tapname)
ser.setServiceParent(application)
else:
passphrase = getPassphrase(self.config['encrypted'])
application = getApplication(self.config, passphrase)
return application
def getApplication(config, passphrase):
s = [(config[t], t)
for t in ['python', 'source', 'file'] if config[t]][0]
filename, style = s[0], {'file':'pickle'}.get(s[1],s[1])
try:
log.msg("Loading %s..." % filename)
application = service.loadApplication(filename, style, passphrase)
log.msg("Loaded.")
except Exception, e:
s = "Failed to load application: %s" % e
if isinstance(e, KeyError) and e.args[0] == "application":
s += """
Could not find 'application' in the file. To use 'twistd -y', your .tac
file must create a suitable object (e.g., by calling service.Application())
and store it in a variable named 'application'. twistd loads your .tac file
and scans the global variables for one of this name.
Please read the 'Using Application' HOWTO for details.
"""
traceback.print_exc(file=log.logfile)
log.msg(s)
log.deferr()
sys.exit('\n' + s + '\n')
return application
def _reactorZshAction():
return "(%s)" % " ".join([r.shortName for r in reactors.getReactorTypes()])
class ReactorSelectionMixin:
"""
Provides options for selecting a reactor to install.
If a reactor is installed, the short name which was used to locate it is
saved as the value for the C{"reactor"} key.
"""
zsh_actions = {"reactor" : _reactorZshAction}
messageOutput = sys.stdout
def opt_help_reactors(self):
"""
Display a list of possibly available reactor names.
"""
for r in reactors.getReactorTypes():
self.messageOutput.write(' %-4s\t%s\n' %
(r.shortName, r.description))
raise SystemExit(0)
def opt_reactor(self, shortName):
"""
Which reactor to use (see --help-reactors for a list of possibilities)
"""
# Actually actually actually install the reactor right at this very
# moment, before any other code (for example, a sub-command plugin)
# runs and accidentally imports and installs the default reactor.
#
# This could probably be improved somehow.
try:
installReactor(shortName)
except NoSuchReactor:
msg = ("The specified reactor does not exist: '%s'.\n"
"See the list of available reactors with "
"--help-reactors" % (shortName,))
raise usage.UsageError(msg)
except Exception, e:
msg = ("The specified reactor cannot be used, failed with error: "
"%s.\nSee the list of available reactors with "
"--help-reactors" % (e,))
raise usage.UsageError(msg)
else:
self["reactor"] = shortName
opt_r = opt_reactor
class ServerOptions(usage.Options, ReactorSelectionMixin):
longdesc = ("twistd reads a twisted.application.service.Application out "
"of a file and runs it.")
optFlags = [['savestats', None,
"save the Stats object rather than the text output of "
"the profiler."],
['no_save','o', "do not save state on shutdown"],
['encrypted', 'e',
"The specified tap/aos file is encrypted."]]
optParameters = [['logfile','l', None,
"log to a specified file, - for stdout"],
['profile', 'p', None,
"Run in profile mode, dumping results to specified file"],
['profiler', None, "hotshot",
"Name of the profiler to use (%s)." %
", ".join(AppProfiler.profilers)],
['file','f','twistd.tap',
"read the given .tap file"],
['python','y', None,
"read an application from within a Python file "
"(implies -o)"],
['source', 's', None,
"Read an application from a .tas file (AOT format)."],
['rundir','d','.',
'Change to a supplied directory before running'],
['report-profile', None, None,
'E-mail address to use when reporting dynamic execution '
'profiler stats. This should not be combined with '
'other profiling options. This will only take effect '
'if the application to be run has an application '
'name.']]
#zsh_altArgDescr = {"foo":"use this description for foo instead"}
#zsh_multiUse = ["foo", "bar"]
zsh_mutuallyExclusive = [("file", "python", "source")]
zsh_actions = {"file":'_files -g "*.tap"',
"python":'_files -g "*.(tac|py)"',
"source":'_files -g "*.tas"',
"rundir":"_dirs"}
#zsh_actionDescr = {"logfile":"log file name", "random":"random seed"}
def __init__(self, *a, **kw):
self['debug'] = False
usage.Options.__init__(self, *a, **kw)
def opt_debug(self):
"""
run the application in the Python Debugger (implies nodaemon),
sending SIGUSR2 will drop into debugger
"""
defer.setDebugging(True)
failure.startDebugMode()
self['debug'] = True
opt_b = opt_debug
def opt_spew(self):
"""Print an insanely verbose log of everything that happens.
Useful when debugging freezes or locks in complex code."""
sys.settrace(util.spewer)
try:
import threading
except ImportError:
return
threading.settrace(util.spewer)
def opt_report_profile(self, value):
"""
DEPRECATED.
Manage --report-profile option, which does nothing currently.
"""
warnings.warn("--report-profile option is deprecated and a no-op "
"since Twisted 8.0.", category=DeprecationWarning)
def parseOptions(self, options=None):
if options is None:
options = sys.argv[1:] or ["--help"]
usage.Options.parseOptions(self, options)
def postOptions(self):
if self.subCommand or self['python']:
self['no_save'] = True
def subCommands(self):
from twisted import plugin
plugins = plugin.getPlugins(service.IServiceMaker)
self.loadedPlugins = {}
for plug in plugins:
self.loadedPlugins[plug.tapname] = plug
yield (plug.tapname, None, lambda: plug.options(), plug.description)
subCommands = property(subCommands)
def run(runApp, ServerOptions):
config = ServerOptions()
try:
config.parseOptions()
except usage.error, ue:
print config
print "%s: %s" % (sys.argv[0], ue)
else:
runApp(config)
def initialLog():
AppLogger({})._initialLog()
initialLog = deprecated(Version("Twisted", 8, 2, 0))(initialLog)
def convertStyle(filein, typein, passphrase, fileout, typeout, encrypt):
application = service.loadApplication(filein, typein, passphrase)
sob.IPersistable(application).setStyle(typeout)
passphrase = getSavePassphrase(encrypt)
if passphrase:
fileout = None
sob.IPersistable(application).save(filename=fileout, passphrase=passphrase)
def startApplication(application, save):
from twisted.internet import reactor
service.IService(application).startService()
if save:
p = sob.IPersistable(application)
reactor.addSystemEventTrigger('after', 'shutdown', p.save, 'shutdown')
reactor.addSystemEventTrigger('before', 'shutdown',
service.IService(application).stopService)
|
carlos-jenkins/ops-topology-lib-vtysh | refs/heads/master | lib/topology_lib_vtysh/exceptions.py | 1 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2016 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Vtysh auto-generated typed exceptions module.
.. warning::
This is auto-generated, do not modify manually!!
"""
from __future__ import unicode_literals, absolute_import
from __future__ import print_function, division
from re import match
from collections import OrderedDict
class VtyshException(Exception):
"""
Base exception class for vtysh shell errors.
:param str output: The shell output that triggered this exception.
"""
def __init__(self, output):
super(VtyshException, self).__init__()
self.output = output
class UnknownVtyshException(VtyshException):
"""
Generic exception raised when the specific exception could not be
determined.
"""
class UnknownCommandException(VtyshException):
"""
This is a typed exception that will be raised when any of the following
regular expressions match the output of a command:
::
Unknown command
"""
class IncompleteCommandException(VtyshException):
"""
This is a typed exception that will be raised when any of the following
regular expressions match the output of a command:
::
Command incomplete
"""
VTYSH_EXCEPTIONS = OrderedDict([
(
UnknownCommandException,
[
'Unknown command',
]
),
(
IncompleteCommandException,
[
'Command incomplete',
]
),
])
def determine_exception(output):
"""
Determine which exception to raise from shell error message.
:param str output: The shell output error.
:rtype: VtyshException subclass.
:return: The corresponding exception class for given message.
"""
for exc, matches in VTYSH_EXCEPTIONS.items():
for expression in matches:
if match(expression, output):
return exc
return UnknownVtyshException
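# Illustrative usage: map a shell error message to its typed exception and
# raise it with the offending output attached.
#
#   exc = determine_exception('Unknown command.')
#   assert exc is UnknownCommandException
#   raise exc('Unknown command.')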
__all__ = [
'VtyshException',
'UnknownVtyshException',
'UnknownCommandException',
'IncompleteCommandException',
'VTYSH_EXCEPTIONS',
'determine_exception'
]
|
ecbftw/nanown | refs/heads/master | test/services/multipacket-echo.py | 1 | #!/usr/bin/env python3
#
# Copyright (C) 2015 Blindspot Security LLC
# by Timothy D. Morgan
# twits: @ecbftw
import sys
import time
import socketserver
import http.server
class EchoHandler(http.server.BaseHTTPRequestHandler):
"""
"""
def do_GET(self):
#resolution = time.clock_getres(time.CLOCK_MONOTONIC)
received = int(time.monotonic()*1000000000)
wait_time = 0
if 't=' in self.path:
wait_time = int(self.path.split('t=', 1)[1], 10)
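        # wait_time is expressed in nanoseconds (time.monotonic() is scaled
        # by 1e9 below), so e.g. GET /?t=1000000 requests a ~1 ms delay
        # before the body is written.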
self.send_response(200)
self.send_header('Content-Type','text/plain; charset=UTF-8')
self.end_headers()
self.wfile.write(b'header\n')
self.wfile.flush()
# Use a busy-wait with monotonic clock. More accurate than time.sleep()
finish = received + wait_time
now = int(time.monotonic()*1000000000)
while now < finish:
now = int(time.monotonic()*1000000000)
self.wfile.write(b'body\n')
self.wfile.flush()
self.wfile.write(b'more content\n')
self.wfile.flush()
self.wfile.write(b'footer\n')
self.wfile.flush()
if __name__ == "__main__":
HOST, PORT = "0.0.0.0", 3240
server = socketserver.TCPServer((HOST, PORT), EchoHandler)
server.serve_forever()
|
codecollision/DropboxToFlickr | refs/heads/master | django/core/management/commands/sqlreset.py | 313 | from optparse import make_option
from django.core.management.base import AppCommand
from django.core.management.sql import sql_reset
from django.db import connections, DEFAULT_DB_ALIAS
class Command(AppCommand):
help = "Prints the DROP TABLE SQL, then the CREATE TABLE SQL, for the given app name(s)."
option_list = AppCommand.option_list + (
make_option('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS, help='Nominates a database to print the '
'SQL for. Defaults to the "default" database.'),
)
output_transaction = True
def handle_app(self, app, **options):
return u'\n'.join(sql_reset(app, self.style, connections[options.get('database', DEFAULT_DB_ALIAS)])).encode('utf-8')
|
nirmeshk/oh-mainline | refs/heads/master | vendor/packages/django-debug-toolbar/tests/panels/test_logging.py | 26 | from __future__ import absolute_import, unicode_literals
import logging
from debug_toolbar.panels.logging import (
collector, MESSAGE_IF_STRING_REPRESENTATION_INVALID)
from ..base import BaseTestCase
class LoggingPanelTestCase(BaseTestCase):
def setUp(self):
super(LoggingPanelTestCase, self).setUp()
self.panel = self.toolbar.get_panel_by_id('LoggingPanel')
self.logger = logging.getLogger(__name__)
collector.clear_collection()
def test_happy_case(self):
self.logger.info('Nothing to see here, move along!')
self.panel.process_response(self.request, self.response)
records = self.panel.get_stats()['records']
self.assertEqual(1, len(records))
self.assertEqual('Nothing to see here, move along!',
records[0]['message'])
def test_formatting(self):
self.logger.info('There are %d %s', 5, 'apples')
self.panel.process_response(self.request, self.response)
records = self.panel.get_stats()['records']
self.assertEqual(1, len(records))
self.assertEqual('There are 5 apples',
records[0]['message'])
def test_failing_formatting(self):
class BadClass(object):
def __str__(self):
raise Exception('Please not stringify me!')
# should not raise exception, but fail silently
self.logger.debug('This class is misbehaving: %s', BadClass())
self.panel.process_response(self.request, self.response)
records = self.panel.get_stats()['records']
self.assertEqual(1, len(records))
self.assertEqual(MESSAGE_IF_STRING_REPRESENTATION_INVALID,
records[0]['message'])
|
tmeits/pybrain | refs/heads/master | pybrain/structure/connections/permutation.py | 25 | # -*- coding: utf-8 -_*-
__author__ = 'Justin Bayer, [email protected]'
__version__ = '$Id$'
from scipy import array
from pybrain.structure.connections.connection import Connection
from pybrain.utilities import permute
class PermutationConnection(Connection):
"""Connection that permutes the input by a given permutation."""
def __init__(self, inmod, outmod, permutation, blocksize, *args, **kwargs):
Connection.__init__(self, inmod, outmod, *args, **kwargs)
if self.indim != self.outdim:
raise ValueError("Indim (%i) does not equal outdim (%i)" % (
self.indim, self.outdim))
if len(permutation) * blocksize != self.indim:
raise ValueError(
"Permutation has wrong size: should be %i but is %i." %(
(self.indim // blocksize), len(permutation)))
self.permutation = array(permutation)
self.invpermutation = permute(list(range(len(permutation))), permutation)
self.blocksize = blocksize
def _forwardImplementation(self, inbuf, outbuf):
inbuf = inbuf.reshape(self.indim // self.blocksize, self.blocksize)
inbuf = permute(inbuf, self.permutation)
inbuf.shape = self.indim,
outbuf += inbuf
def _backwardImplementation(self, outerr, inerr, inbuf):
outerr = outerr.reshape(self.indim // self.blocksize, self.blocksize)
outerr = permute(outerr, self.invpermutation)
outerr.shape = self.indim,
inerr += outerr
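# Illustrative example (hypothetical modules): with blocksize=2 and
# permutation=[1, 0], a four-unit input is forwarded with its two 2-unit
# blocks swapped, and the backward pass undoes the swap via invpermutation.
#
#   conn = PermutationConnection(inmod, outmod, [1, 0], 2)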
|
KaranToor/MA450 | refs/heads/master | google-cloud-sdk/platform/bq/third_party/google/apputils/file_util.py | 25 | #!/usr/bin/env python
# Copyright 2007 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple file system utilities."""
__author__ = ('[email protected] (Evan LaForge)',
'[email protected] (Matthew Blecker)')
import errno
import os
import pwd
import shutil
import stat
import tempfile
class PasswdError(Exception):
"""Exception class for errors loading a password from a file."""
def ListDirPath(dir_name):
"""Like os.listdir with prepended dir_name, which is often more convenient."""
return [os.path.join(dir_name, fn) for fn in os.listdir(dir_name)]
def Read(filename):
"""Read entire contents of file with name 'filename'."""
fp = open(filename)
try:
return fp.read()
finally:
fp.close()
def Write(filename, contents, overwrite_existing=True, mode=0666):
"""Create a file 'filename' with 'contents', with the mode given in 'mode'.
The 'mode' is modified by the umask, as in open(2). If
'overwrite_existing' is False, the file will be opened in O_EXCL mode.
Args:
filename: str; the name of the file
contents: str; the data to write to the file
overwrite_existing: bool; whether or not to allow the write if the file
already exists
mode: int; permissions with which to create the file (default is 0666 octal)
"""
flags = os.O_WRONLY | os.O_TRUNC | os.O_CREAT
if not overwrite_existing:
flags |= os.O_EXCL
fd = os.open(filename, flags, mode)
try:
os.write(fd, contents)
finally:
os.close(fd)
def AtomicWrite(filename, contents, mode=0666):
"""Create a file 'filename' with 'contents' atomically.
As in Write, 'mode' is modified by the umask. This creates and moves
a temporary file, and errors doing the above will be propagated normally,
though it will try to clean up the temporary file in that case.
This is very similar to the prodlib function with the same name.
Args:
filename: str; the name of the file
contents: str; the data to write to the file
mode: int; permissions with which to create the file (default is 0666 octal)
"""
(fd, tmp_filename) = tempfile.mkstemp(dir=os.path.dirname(filename))
try:
os.write(fd, contents)
finally:
os.close(fd)
try:
os.chmod(tmp_filename, mode)
os.rename(tmp_filename, filename)
except OSError, exc:
try:
os.remove(tmp_filename)
except OSError, e:
exc = OSError('%s. Additional errors cleaning up: %s' % (exc, e))
raise exc
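# Illustrative usage (hypothetical path): the temporary file is written,
# chmod'ed, then renamed over the destination, so readers never observe a
# partially written file.
#
#   AtomicWrite('/tmp/example.conf', 'key=value\n', mode=0644)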
def MkDirs(directory, force_mode=None):
"""Makes a directory including its parent directories.
This function is equivalent to os.makedirs() but it avoids a race
condition that os.makedirs() has. The race is between os.mkdir() and
os.path.exists() which fail with errors when run in parallel.
Args:
directory: str; the directory to make
force_mode: optional octal, chmod dir to get rid of umask interaction
Raises:
Whatever os.mkdir() raises when it fails for any reason EXCLUDING
"dir already exists". If a directory already exists, it does not
raise anything. This behaviour is different than os.makedirs()
"""
name = os.path.normpath(directory)
dirs = name.split(os.path.sep)
for i in range(0, len(dirs)):
path = os.path.sep.join(dirs[:i+1])
try:
if path:
os.mkdir(path)
# only chmod if we created
if force_mode is not None:
os.chmod(path, force_mode)
except OSError, exc:
if not (exc.errno == errno.EEXIST and os.path.isdir(path)):
raise
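# Illustrative usage (hypothetical path): safe to call when some parent
# directories already exist; force_mode chmods each created directory to
# bypass umask interaction.
#
#   MkDirs('/tmp/a/b/c', force_mode=0755)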
def RmDirs(dir_name):
"""Removes dir_name and every non-empty directory in dir_name.
Unlike os.removedirs and shutil.rmtree, this function doesn't raise an error
if the directory does not exist.
Args:
dir_name: Directory to be removed.
"""
try:
shutil.rmtree(dir_name)
except OSError, err:
if err.errno != errno.ENOENT:
raise
try:
parent_directory = os.path.dirname(dir_name)
while parent_directory:
try:
os.rmdir(parent_directory)
except OSError, err:
if err.errno != errno.ENOENT:
raise
parent_directory = os.path.dirname(parent_directory)
except OSError, err:
if err.errno not in (errno.EACCES, errno.ENOTEMPTY):
raise
def HomeDir(user=None):
"""Find the home directory of a user.
Args:
user: int, str, or None - the uid or login of the user to query for,
or None (the default) to query for the current process' effective user
Returns:
str - the user's home directory
Raises:
TypeError: if user is not int, str, or None.
"""
if user is None:
pw_struct = pwd.getpwuid(os.geteuid())
elif isinstance(user, int):
pw_struct = pwd.getpwuid(user)
elif isinstance(user, str):
pw_struct = pwd.getpwnam(user)
else:
raise TypeError('user must be None or an instance of int or str')
return pw_struct.pw_dir
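# Illustrative usage: HomeDir() resolves the current effective user, while
# HomeDir(0) and HomeDir('root') both resolve the superuser's entry.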
|
peterfpeterson/mantid | refs/heads/master | qt/python/mantidqt/widgets/test/test_fitpropertybrowserplotinteraction.py | 3 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2020 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import unittest
from unittest.mock import Mock, MagicMock, ANY
from matplotlib.lines import Line2D
from mantid.plots import MantidAxes
from mantid.simpleapi import CreateSampleWorkspace
from mantidqt.widgets.fitpropertybrowser import FitPropertyBrowser
from mantidqt.widgets.fitpropertybrowser.fitpropertybrowserplotinteraction import FitPropertyBrowserPlotInteraction
from mantid.api import AnalysisDataService, FunctionFactory, WorkspaceFactory
import matplotlib
matplotlib.use('AGG') # noqa
X_COLUMN_LABEL = 'x_column'
Y_COLUMN_LABEL = 'y_column'
FULL_FUNCTION = FunctionFactory.createInitialized("name=FlatBackground,A0=1;name=LinearBackground,A0=1,"
"A1=2;name=GausOsc,A=0.2,Sigma=0.2,Frequency=0.1,Phi=0")
FUNCTION_1 = FunctionFactory.createInitialized("name=FlatBackground,A0=1")
FUNCTION_2 = FunctionFactory.createInitialized("name=LinearBackground,A0=1,A1=2")
FUNCTION_3 = FunctionFactory.createInitialized("name=GausOsc,A=0.2,Sigma=0.2,Frequency=0.1,Phi=0")
class FitPropertyBrowserPlotInteractionTest(unittest.TestCase):
def setup_mock_fit_browser(self, workspace_creator, workspace_name, function, function_prefix):
workspace_creator(workspace_name)
self.fit_browser.workspaceName = Mock(return_value=workspace_name)
self.fit_browser.currentHandler.return_value = self.create_mock_handler(function, function_prefix)
def create_table_workspace(self, table_name):
table = WorkspaceFactory.createTable()
table.addColumn('double', X_COLUMN_LABEL, 1)
table.addColumn('double', Y_COLUMN_LABEL, 2)
for i in range(1, 10):
table.addRow([0.1 * i, 5])
AnalysisDataService.Instance().addOrReplace(table_name, table)
self.fit_browser.getXColumnName.return_value = X_COLUMN_LABEL
self.fit_browser.getYColumnName.return_value = Y_COLUMN_LABEL
self.fit_browser.getErrColumnName.return_value = None
self.fit_browser.startX.return_value = 0.15
self.fit_browser.endX.return_value = 0.95
def create_workspace2D(self, workspace_name):
CreateSampleWorkspace(OutputWorkspace=workspace_name)
self.fit_browser.workspaceIndex.return_value = 1
self.fit_browser.startX.return_value = 0
self.fit_browser.endX.return_value = 20000
def create_mock_handler(self, function, function_prefix):
mock_handler = MagicMock()
mock_handler.ifun = MagicMock(return_value=function)
mock_handler.functionPrefix = MagicMock(return_value=function_prefix)
return mock_handler
def create_mock_guess_lines(self):
line_1, line_2, line_3 = MagicMock(spec=Line2D), MagicMock(spec=Line2D), MagicMock(spec=Line2D)
mock_lines = [("f0." + FUNCTION_1.name(), line_1), ("f1." + FUNCTION_2.name(), line_2),
("f2." + FUNCTION_3.name(), line_3)]
self.browser_plot_interaction.guess_lines = dict(mock_lines)
return line_1, line_2, line_3
def setUp(self):
self.fit_browser = MagicMock(spec=FitPropertyBrowser)
self.fit_browser.getFittingFunction = Mock(return_value=FULL_FUNCTION)
# Mock figure
self.canvas = MagicMock()
self.figure = MagicMock()
self.axes = MagicMock(spec=MantidAxes)
self.figure.get_axes.return_value = [self.axes]
self.canvas.figure = self.figure
self.browser_plot_interaction = FitPropertyBrowserPlotInteraction(self.fit_browser, self.canvas)
def tearDown(self):
AnalysisDataService.clear()
def test_plot_guess_all_evaluates_correct_function(self):
workspace_name = "test_workspace"
self.setup_mock_fit_browser(self.create_workspace2D, workspace_name, FULL_FUNCTION, "")
self.browser_plot_interaction.evaluate_function = Mock()
self.browser_plot_interaction.plot_guess_all()
self.browser_plot_interaction.evaluate_function.assert_called_once_with(workspace_name, FULL_FUNCTION,
workspace_name + '_guess')
def test_plot_guess_all_correctly_calls_plot(self):
workspace_name = "test_workspace"
self.setup_mock_fit_browser(self.create_workspace2D, workspace_name, FULL_FUNCTION, "")
self.browser_plot_interaction.plot_guess_all()
self.figure.get_axes.assert_called_once()
self.axes.plot.assert_called_once_with(ANY, wkspIndex=1, label=workspace_name + '_guess',
distribution=True,
update_axes_labels=False, autoscale_on_update=False)
def test_plot_current_guess_evaluates_correct_function(self):
workspace_name = "test_workspace"
prefix = 'f1'
self.setup_mock_fit_browser(self.create_workspace2D, workspace_name, FUNCTION_2, prefix)
self.browser_plot_interaction.evaluate_function = Mock()
self.browser_plot_interaction.plot_current_guess()
self.browser_plot_interaction.evaluate_function.assert_called_once_with(workspace_name, FUNCTION_2,
prefix + '.' + FUNCTION_2.name())
def test_plot_current_guess_correctly_calls_plot(self):
workspace_name = "test_workspace"
prefix = 'f1'
self.setup_mock_fit_browser(self.create_workspace2D, workspace_name, FUNCTION_2, prefix)
self.browser_plot_interaction.plot_current_guess()
self.figure.get_axes.assert_called_once()
self.axes.plot.assert_called_once_with(ANY, wkspIndex=1, label=prefix + '.' + FUNCTION_2.name(),
distribution=True,
update_axes_labels=False, autoscale_on_update=False)
def test_plot_guess_all_plots_for_table_workspaces(self):
table_name = "table_name"
function = FUNCTION_2
self.setup_mock_fit_browser(self.create_table_workspace, table_name, function, "")
self.browser_plot_interaction.plot_guess_all()
self.figure.get_axes.assert_called_once()
self.axes.plot.assert_called_once_with(ANY, wkspIndex=1, label=table_name + '_guess',
distribution=True,
update_axes_labels=False, autoscale_on_update=False)
def test_remove_function_correctly_updates_stored_prefixed_functions(self):
workspace_name = "test_workspace"
prefix = 'f1'
self.create_mock_guess_lines()
self.setup_mock_fit_browser(self.create_workspace2D, workspace_name, FUNCTION_2, prefix)
self.browser_plot_interaction.slot_for_function_removed()
self.assertEqual(list(self.browser_plot_interaction.guess_lines.keys()), ['f0.FlatBackground', 'f1.GausOsc'])
def test_remove_function_correctly_removes_line(self):
workspace_name = "test_workspace"
prefix = 'f1'
line_1, line_2, line_3 = self.create_mock_guess_lines()
self.setup_mock_fit_browser(self.create_workspace2D, workspace_name, FUNCTION_2, prefix)
self.browser_plot_interaction.slot_for_function_removed()
line_2.remove.assert_called_once()
def test_remove_function_correctly_updates_legend(self):
workspace_name = "test_workspace"
prefix = 'f1'
line_1, line_2, line_3 = self.create_mock_guess_lines()
self.setup_mock_fit_browser(self.create_workspace2D, workspace_name, FUNCTION_2, prefix)
self.browser_plot_interaction.slot_for_function_removed()
# Make legend will be called twice, once when removing the line and the second time to update the legend
# based on the new prefixes
self.assertEqual(self.axes.make_legend.call_count, 2)
line_3.set_label.assert_called_once_with('f1.GausOsc')
def test_remove_function_updates_guess_all(self):
workspace_name = "test_workspace"
prefix = 'f1'
old_line = MagicMock(spec=Line2D)
self.browser_plot_interaction.guess_all_line = old_line
self.setup_mock_fit_browser(self.create_workspace2D, workspace_name, FUNCTION_2, prefix)
self.browser_plot_interaction.slot_for_function_removed()
old_line.remove.assert_called_once()
self.axes.plot.assert_called_once_with(ANY, wkspIndex=1, label=workspace_name + '_guess',
distribution=True,
update_axes_labels=False, autoscale_on_update=False,
color=old_line.get_color())
def test_changing_parameters_refreshes_guess_all(self):
workspace_name = "test_workspace"
prefix = 'f1'
old_line = MagicMock(spec=Line2D)
self.browser_plot_interaction.guess_all_line = old_line
self.setup_mock_fit_browser(self.create_workspace2D, workspace_name, FUNCTION_2, prefix)
self.browser_plot_interaction.parameters_changed_slot('f1')
old_line.remove.assert_called_once()
self.axes.plot.assert_called_once_with(ANY, wkspIndex=1, label=workspace_name + '_guess',
distribution=True,
update_axes_labels=False, autoscale_on_update=False,
color=old_line.get_color())
def test_changing_parameters_refreshes_current_guess(self):
workspace_name = "test_workspace"
prefix = 'f1'
line_1, line_2, line_3 = self.create_mock_guess_lines()
self.setup_mock_fit_browser(self.create_workspace2D, workspace_name, FUNCTION_2, prefix)
self.browser_plot_interaction.parameters_changed_slot('f1')
line_2.remove.assert_called_once()
self.axes.plot.assert_called_once_with(ANY, wkspIndex=1, label=prefix + '.' + FUNCTION_2.name(),
distribution=True,
update_axes_labels=False, autoscale_on_update=False,
color=line_2.get_color())
if __name__ == '__main__':
unittest.main()
|
vrovachev/kaspersky-framework | refs/heads/master | test_kasp/network/test_network.py | 1 | import pytest
class TestNetwork:
@pytest.mark.network
def test_network(self):
print("network test")
|
SteveMcGrath/Concord | refs/heads/master | site/app/views/cfp.py | 1 | from flask import render_template, flash, redirect, session, url_for, abort, g
from flask.ext.login import login_user, logout_user, current_user, login_required
from app import app, db, login_manager, forms
from app.models import User, Ticket
from sqlalchemy import desc
from datetime import datetime
@app.route('/cfp')
def cfp():
return render_template('/cfp/home.html', title='Call for Papers') |
rlugojr/django | refs/heads/master | django/contrib/gis/gdal/raster/band.py | 24 | from ctypes import byref, c_double, c_int, c_void_p
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import GDALException
from django.contrib.gis.gdal.prototypes import raster as capi
from django.contrib.gis.shortcuts import numpy
from django.utils.encoding import force_text
from .const import GDAL_INTEGER_TYPES, GDAL_PIXEL_TYPES, GDAL_TO_CTYPES
class GDALBand(GDALBase):
"""
Wrap a GDAL raster band, needs to be obtained from a GDALRaster object.
"""
def __init__(self, source, index):
self.source = source
self._ptr = capi.get_ds_raster_band(source._ptr, index)
def _flush(self):
"""
Call the flush method on the Band's parent raster and force a refresh
of the statistics attribute when requested the next time.
"""
self.source._flush()
self._stats_refresh = True
@property
def description(self):
"""
Return the description string of the band.
"""
return force_text(capi.get_band_description(self._ptr))
@property
def width(self):
"""
Width (X axis) in pixels of the band.
"""
return capi.get_band_xsize(self._ptr)
@property
def height(self):
"""
Height (Y axis) in pixels of the band.
"""
return capi.get_band_ysize(self._ptr)
@property
def pixel_count(self):
"""
Return the total number of pixels in this band.
"""
return self.width * self.height
_stats_refresh = False
def statistics(self, refresh=False, approximate=False):
"""
Compute statistics on the pixel values of this band.
The return value is a tuple with the following structure:
(minimum, maximum, mean, standard deviation).
If approximate=True, the statistics may be computed based on overviews
or a subset of image tiles.
If refresh=True, the statistics will be computed from the data directly,
and the cache will be updated where applicable.
For empty bands (where all pixel values are nodata), all statistics
values are returned as None.
For raster formats using Persistent Auxiliary Metadata (PAM) services,
the statistics might be cached in an auxiliary file.
"""
# Prepare array with arguments for capi function
smin, smax, smean, sstd = c_double(), c_double(), c_double(), c_double()
stats_args = [
self._ptr, c_int(approximate), byref(smin), byref(smax),
byref(smean), byref(sstd), c_void_p(), c_void_p(),
]
if refresh or self._stats_refresh:
func = capi.compute_band_statistics
else:
# Add additional argument to force computation if there is no
# existing PAM file to take the values from.
force = True
stats_args.insert(2, c_int(force))
func = capi.get_band_statistics
# Computation of statistics fails for empty bands.
try:
func(*stats_args)
result = smin.value, smax.value, smean.value, sstd.value
except GDALException:
result = (None, None, None, None)
self._stats_refresh = False
return result
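    # Illustrative usage (hypothetical band): unpack the statistics tuple.
    #
    #   smin, smax, smean, sstd = band.statistics(approximate=True)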
@property
def min(self):
"""
Return the minimum pixel value for this band.
"""
return self.statistics()[0]
@property
def max(self):
"""
Return the maximum pixel value for this band.
"""
return self.statistics()[1]
@property
def mean(self):
"""
Return the mean of all pixel values of this band.
"""
return self.statistics()[2]
@property
def std(self):
"""
Return the standard deviation of all pixel values of this band.
"""
return self.statistics()[3]
@property
def nodata_value(self):
"""
Return the nodata value for this band, or None if it isn't set.
"""
# Get value and nodata exists flag
nodata_exists = c_int()
value = capi.get_band_nodata_value(self._ptr, nodata_exists)
if not nodata_exists:
value = None
# If the pixeltype is an integer, convert to int
elif self.datatype() in GDAL_INTEGER_TYPES:
value = int(value)
return value
@nodata_value.setter
def nodata_value(self, value):
"""
Set the nodata value for this band.
"""
if value is None:
if not capi.delete_band_nodata_value:
raise ValueError('GDAL >= 2.1 required to delete nodata values.')
capi.delete_band_nodata_value(self._ptr)
elif not isinstance(value, (int, float)):
raise ValueError('Nodata value must be numeric or None.')
else:
capi.set_band_nodata_value(self._ptr, value)
self._flush()
def datatype(self, as_string=False):
"""
Return the GDAL Pixel Datatype for this band.
"""
dtype = capi.get_band_datatype(self._ptr)
if as_string:
dtype = GDAL_PIXEL_TYPES[dtype]
return dtype
def data(self, data=None, offset=None, size=None, shape=None, as_memoryview=False):
"""
        Read or write pixel values for this band. Blocks of data can
be accessed by specifying the width, height and offset of the
desired block. The same specification can be used to update
parts of a raster by providing an array of values.
Allowed input data types are bytes, memoryview, list, tuple, and array.
"""
if not offset:
offset = (0, 0)
if not size:
size = (self.width - offset[0], self.height - offset[1])
if not shape:
shape = size
if any(x <= 0 for x in size):
raise ValueError('Offset too big for this raster.')
if size[0] > self.width or size[1] > self.height:
raise ValueError('Size is larger than raster.')
# Create ctypes type array generator
ctypes_array = GDAL_TO_CTYPES[self.datatype()] * (shape[0] * shape[1])
if data is None:
# Set read mode
access_flag = 0
# Prepare empty ctypes array
data_array = ctypes_array()
else:
# Set write mode
access_flag = 1
# Instantiate ctypes array holding the input data
if isinstance(data, (bytes, memoryview)) or (numpy and isinstance(data, numpy.ndarray)):
data_array = ctypes_array.from_buffer_copy(data)
else:
data_array = ctypes_array(*data)
# Access band
capi.band_io(self._ptr, access_flag, offset[0], offset[1],
size[0], size[1], byref(data_array), shape[0],
shape[1], self.datatype(), 0, 0)
# Return data as numpy array if possible, otherwise as list
if data is None:
if as_memoryview:
return memoryview(data_array)
elif numpy:
# reshape() needs a reshape parameter with the height first.
return numpy.frombuffer(
data_array, dtype=numpy.dtype(data_array)
).reshape(tuple(reversed(size)))
else:
return list(data_array)
else:
self._flush()
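# Illustrative usage (hypothetical band): read a 10x10 block at the origin
# as a numpy array (or list), then write constant values back to it.
#
#   block = band.data(offset=(0, 0), size=(10, 10))
#   band.data(data=[0] * 100, offset=(0, 0), size=(10, 10))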
class BandList(list):
def __init__(self, source):
self.source = source
list.__init__(self)
def __iter__(self):
for idx in range(1, len(self) + 1):
yield GDALBand(self.source, idx)
def __len__(self):
return capi.get_ds_raster_count(self.source._ptr)
def __getitem__(self, index):
try:
return GDALBand(self.source, index + 1)
except GDALException:
raise GDALException('Unable to get band index %d' % index)
|
Innovahn/cybex | refs/heads/master | addons/account/wizard/account_journal_select.py | 385 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
class account_journal_select(osv.osv_memory):
"""
Account Journal Select
"""
_name = "account.journal.select"
_description = "Account Journal Select"
def action_open_window(self, cr, uid, ids, context=None):
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
if context is None:
context = {}
result = mod_obj.get_object_reference(cr, uid, 'account', 'action_move_line_select')
id = result and result[1] or False
result = act_obj.read(cr, uid, [id])[0]
cr.execute('select journal_id, period_id from account_journal_period where id=%s', (context['active_id'],))
res = cr.fetchone()
if res:
journal_id, period_id = res
result['domain'] = str([('journal_id', '=', journal_id), ('period_id', '=', period_id)])
result['context'] = str({'journal_id': journal_id, 'period_id': period_id})
return result
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Alexander-M-Waldman/local_currency_site | refs/heads/master | lib/python2.7/site-packages/django/__init__.py | 3 | from django.utils.version import get_version
VERSION = (1, 9, 6, 'final', 0)
__version__ = get_version(VERSION)
def setup():
"""
Configure the settings (this happens as a side effect of accessing the
first setting), configure logging and populate the app registry.
"""
from django.apps import apps
from django.conf import settings
from django.utils.log import configure_logging
configure_logging(settings.LOGGING_CONFIG, settings.LOGGING)
apps.populate(settings.INSTALLED_APPS)
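# Illustrative standalone usage (hypothetical settings module):
#
#   import os, django
#   os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mysite.settings')
#   django.setup()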
|
HofiOne/xbmc | refs/heads/master | lib/gtest/test/gtest_env_var_test.py | 343 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test correctly parses environment variables."""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import gtest_test_utils
IS_WINDOWS = os.name == 'nt'
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_env_var_test_')
environ = os.environ.copy()
def AssertEq(expected, actual):
if expected != actual:
print('Expected: %s' % (expected,))
print(' Actual: %s' % (actual,))
raise AssertionError
def SetEnvVar(env_var, value):
"""Sets the env variable to 'value'; unsets it when 'value' is None."""
if value is not None:
environ[env_var] = value
elif env_var in environ:
del environ[env_var]
def GetFlag(flag):
"""Runs gtest_env_var_test_ and returns its output."""
args = [COMMAND]
if flag is not None:
args += [flag]
return gtest_test_utils.Subprocess(args, env=environ).output
def TestFlag(flag, test_val, default_val):
"""Verifies that the given flag is affected by the corresponding env var."""
env_var = 'GTEST_' + flag.upper()
SetEnvVar(env_var, test_val)
AssertEq(test_val, GetFlag(flag))
SetEnvVar(env_var, None)
AssertEq(default_val, GetFlag(flag))
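# Illustrative example: with GTEST_COLOR=yes exported, GetFlag('color')
# reports 'yes'; once the variable is unset it falls back to 'auto', which
# is exactly what TestFlag('color', 'yes', 'auto') asserts.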
class GTestEnvVarTest(gtest_test_utils.TestCase):
def testEnvVarAffectsFlag(self):
"""Tests that environment variable should affect the corresponding flag."""
TestFlag('break_on_failure', '1', '0')
TestFlag('color', 'yes', 'auto')
TestFlag('filter', 'FooTest.Bar', '*')
SetEnvVar('XML_OUTPUT_FILE', None) # For 'output' test
TestFlag('output', 'xml:tmp/foo.xml', '')
TestFlag('print_time', '0', '1')
TestFlag('repeat', '999', '1')
TestFlag('throw_on_failure', '1', '0')
TestFlag('death_test_style', 'threadsafe', 'fast')
TestFlag('catch_exceptions', '0', '1')
if IS_LINUX:
TestFlag('death_test_use_fork', '1', '0')
TestFlag('stack_trace_depth', '0', '100')
def testXmlOutputFile(self):
"""Tests that $XML_OUTPUT_FILE affects the output flag."""
SetEnvVar('GTEST_OUTPUT', None)
SetEnvVar('XML_OUTPUT_FILE', 'tmp/bar.xml')
AssertEq('xml:tmp/bar.xml', GetFlag('output'))
def testXmlOutputFileOverride(self):
"""Tests that $XML_OUTPUT_FILE is overridden by $GTEST_OUTPUT"""
SetEnvVar('GTEST_OUTPUT', 'xml:tmp/foo.xml')
SetEnvVar('XML_OUTPUT_FILE', 'tmp/bar.xml')
AssertEq('xml:tmp/foo.xml', GetFlag('output'))
if __name__ == '__main__':
gtest_test_utils.Main()
|
meteozond/django-rq | refs/heads/master | django_rq/urls.py | 1 | from django.conf.urls import patterns, url
urlpatterns = patterns('django_rq.views',
url(r'^$', 'stats', name='rq_home'),
url(r'^(?P<queue_connection>[^/]+)/(?P<queue_name>[^/]+)/$', 'jobs', name='rq_jobs'),
url(r'^(?P<queue_connection>[^/]+)/(?P<queue_name>[^/]+)/(?P<job_id>[-\w]+)/$', 'job_detail',
name='rq_job_detail'),
url(r'^(?P<queue_connection>[^/]+)/(?P<queue_name>[^/]+)/(?P<job_id>[-\w]+)/delete/$',
'delete_job', name='rq_delete_job'),
url(r'^(?P<queue_connection>[^/]+)/(?P<queue_name>[^/]+)/(?P<job_id>[-\w]+)/requeue/$',
'requeue_job_view', name='rq_requeue_job'),
)
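# Illustrative reverse lookup (hypothetical queue names):
#
#   from django.core.urlresolvers import reverse
#   reverse('rq_jobs', args=['default', 'default'])
#   # -> '.../default/default/' under wherever these URLs are included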
|
Vifon/ranger | refs/heads/master | ranger/ext/vcs/svn.py | 3 | # This file is part of ranger, the console file manager.
# License: GNU GPL version 3, see the file "AUTHORS" for details.
"""Subversion module"""
from __future__ import (absolute_import, division, print_function)
from datetime import datetime
import os
from xml.etree import ElementTree as etree
from .vcs import Vcs, VcsError
class SVN(Vcs):
"""VCS implementation for Subversion"""
# Generic
_status_translations = (
('ADR', 'staged'),
('C', 'conflict'),
('I', 'ignored'),
('M~', 'changed'),
('X', 'none'),
('?', 'untracked'),
('!', 'deleted'),
)
def _log(self, refspec=None, maxres=None, filelist=None):
"""Retrieves log message and parses it"""
args = ['log', '--xml']
if refspec:
args += ['--limit', '1', '--revision', refspec]
elif maxres:
args += ['--limit', str(maxres)]
if filelist:
args += ['--'] + filelist
try:
output = self._run(args)
except VcsError:
return None
if not output:
return None
log = []
for entry in etree.fromstring(output).findall('./logentry'):
new = {}
new['short'] = entry.get('revision')
new['revid'] = entry.get('revision')
new['author'] = entry.find('./author').text
new['date'] = datetime.strptime(
entry.find('./date').text,
'%Y-%m-%dT%H:%M:%S.%fZ',
)
new['summary'] = entry.find('./msg').text.split('\n')[0]
log.append(new)
return log
def _status_translate(self, code):
"""Translate status code"""
for code_x, status in self._status_translations:
if code in code_x:
return status
return 'unknown'
def _remote_url(self):
"""Remote url"""
try:
output = self._run(['info', '--xml'])
except VcsError:
return None
if not output:
return None
return etree.fromstring(output).find('./entry/url').text or None
# Action Interface
def action_add(self, filelist=None):
args = ['add']
if filelist:
args += ['--'] + filelist
self._run(args, catchout=False)
def action_reset(self, filelist=None):
args = ['revert', '--']
if filelist:
args += filelist
else:
args += self.rootvcs.status_subpaths.keys() # pylint: disable=no-member
self._run(args, catchout=False)
# Data Interface
def data_status_root(self):
statuses = set()
# Paths with status
lines = self._run(['status']).split('\n')
if not lines:
return 'sync'
for line in lines:
code = line[0]
if code == ' ':
continue
statuses.add(self._status_translate(code))
for status in self.DIRSTATUSES:
if status in statuses:
return status
return 'sync'
def data_status_subpaths(self):
statuses = {}
# Paths with status
lines = self._run(['status']).split('\n')
for line in lines:
code, path = line[0], line[8:]
if code == ' ':
continue
statuses[os.path.normpath(path)] = self._status_translate(code)
return statuses
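    # Illustrative example: a 'svn status' line such as 'M       foo.py'
    # yields {'foo.py': 'changed'} because _status_translate('M') finds 'M'
    # in the 'M~' entry of _status_translations.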
def data_status_remote(self):
remote = self._remote_url()
if remote is None or remote.startswith('file://'):
return 'none'
return 'unknown'
def data_branch(self):
return None
def data_info(self, rev=None):
if rev is None:
rev = self.HEAD
log = self._log(refspec=rev)
if not log:
if rev == self.HEAD:
return None
else:
raise VcsError('Revision {0:s} does not exist'.format(rev))
elif len(log) == 1:
return log[0]
else:
raise VcsError('More than one instance of revision {0:s}'.format(rev))
|
berecik/seastone | refs/heads/master | utils/forms.py | 236 | from django import forms
# place form definition here |
prune998/ansible | refs/heads/devel | lib/ansible/modules/network/nxos/nxos_vrf_af.py | 21 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: nxos_vrf_af
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages VRF AF.
description:
- Manages VRF AF
author: Gabriele Gerbino (@GGabriele)
notes:
- Default, where supported, restores params default value.
options:
vrf:
description:
- Name of the VRF.
required: true
afi:
description:
- Address-Family Identifier (AFI).
required: true
choices: ['ipv4', 'ipv6']
default: null
safi:
description:
- Sub Address-Family Identifier (SAFI).
required: true
choices: ['unicast', 'multicast']
default: null
route_target_both_auto_evpn:
description:
- Enable/Disable the EVPN route-target 'auto' setting for both
import and export target communities.
required: false
choices: ['true', 'false']
default: null
state:
description:
- Determines whether the config should be present or
not on the device.
required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- nxos_vrf_af:
interface: nve1
vni: 6000
ingress_replication: true
username: "{{ un }}"
password: "{{ pwd }}"
host: "{{ inventory_hostname }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: verbose mode
type: dict
sample: {"afi": "ipv4", "route_target_both_auto_evpn": true,
"safi": "unicast", "vrf": "test"}
existing:
description: k/v pairs of existing configuration
returned: verbose mode
type: dict
sample: {"afi": "ipv4", "route_target_both_auto_evpn": false,
"safi": "unicast", "vrf": "test"}
end_state:
description: k/v pairs of configuration after module execution
returned: verbose mode
type: dict
sample: {"afi": "ipv4", "route_target_both_auto_evpn": true,
"safi": "unicast", "vrf": "test"}
updates:
description: commands sent to the device
returned: always
type: list
sample: ["vrf context test", "address-family ipv4 unicast",
"route-target both auto evpn"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
import re
from ansible.module_utils.nxos import get_config, load_config, run_commands
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netcfg import CustomNetworkConfig
BOOL_PARAMS = ['route_target_both_auto_evpn']
PARAM_TO_COMMAND_KEYMAP = {
'route_target_both_auto_evpn': 'route-target both auto evpn',
}
PARAM_TO_DEFAULT_KEYMAP = {}
WARNINGS = []
def invoke(name, *args, **kwargs):
func = globals().get(name)
if func:
return func(*args, **kwargs)
def get_value(arg, config, module):
if arg in BOOL_PARAMS:
REGEX = re.compile(r'\s+{0}\s*$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
value = False
try:
if REGEX.search(config):
value = True
except TypeError:
value = False
else:
REGEX = re.compile(r'(?:{0}\s)(?P<value>.*)$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
value = ''
if PARAM_TO_COMMAND_KEYMAP[arg] in config:
value = REGEX.search(config).group('value')
return value
def get_existing(module, args):
existing = {}
netcfg = get_config(module)
parents = ['vrf context {0}'.format(module.params['vrf'])]
parents.append('address-family {0} {1}'.format(module.params['afi'],
module.params['safi']))
config = netcfg.get_section(parents)
if config:
splitted_config = config.splitlines()
vrf_index = False
for index in range(0, len(splitted_config) - 1):
if 'vrf' in splitted_config[index].strip():
vrf_index = index
break
if vrf_index:
config = '\n'.join(splitted_config[0:vrf_index])
for arg in args:
if arg not in ['afi', 'safi', 'vrf']:
existing[arg] = get_value(arg, config, module)
existing['afi'] = module.params['afi']
existing['safi'] = module.params['safi']
existing['vrf'] = module.params['vrf']
return existing
def apply_key_map(key_map, table):
new_dict = {}
for key, value in table.items():
new_key = key_map.get(key)
if new_key:
value = table.get(key)
if value:
new_dict[new_key] = value
else:
new_dict[new_key] = value
return new_dict
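# Illustrative example: renaming module parameters to their CLI commands.
#
#   apply_key_map(PARAM_TO_COMMAND_KEYMAP,
#                 {'route_target_both_auto_evpn': True})
#   # -> {'route-target both auto evpn': True}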
def state_present(module, existing, proposed, candidate):
commands = list()
proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed)
existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing)
for key, value in proposed_commands.items():
if value is True:
commands.append(key)
elif value is False:
commands.append('no {0}'.format(key))
elif value == 'default':
if existing_commands.get(key):
existing_value = existing_commands.get(key)
commands.append('no {0} {1}'.format(key, existing_value))
else:
command = '{0} {1}'.format(key, value.lower())
commands.append(command)
if commands:
parents = ['vrf context {0}'.format(module.params['vrf'])]
parents.append('address-family {0} {1}'.format(module.params['afi'],
module.params['safi']))
candidate.add(commands, parents=parents)
def state_absent(module, existing, proposed, candidate):
commands = []
parents = ['vrf context {0}'.format(module.params['vrf'])]
commands.append('no address-family {0} {1}'.format(module.params['afi'],
module.params['safi']))
candidate.add(commands, parents=parents)
def main():
argument_spec = dict(
vrf=dict(required=True, type='str'),
safi=dict(required=True, type='str', choices=['unicast','multicast']),
afi=dict(required=True, type='str', choices=['ipv4','ipv6']),
route_target_both_auto_evpn=dict(required=False, type='bool'),
m_facts=dict(required=False, default=False, type='bool'),
state=dict(choices=['present', 'absent'], default='present',
required=False),
include_defaults=dict(default=False),
config=dict(),
save=dict(type='bool', default=False)
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
state = module.params['state']
args = [
'vrf',
'safi',
'afi',
'route_target_both_auto_evpn'
]
existing = invoke('get_existing', module, args)
end_state = existing
proposed_args = dict((k, v) for k, v in module.params.items()
if v is not None and k in args)
proposed = {}
for key, value in proposed_args.items():
if key != 'interface':
if str(value).lower() == 'default':
value = PARAM_TO_DEFAULT_KEYMAP.get(key)
if value is None:
value = 'default'
if existing.get(key) or (not existing.get(key) and value):
proposed[key] = value
result = {}
if state == 'present' or (state == 'absent' and existing):
candidate = CustomNetworkConfig(indent=3)
invoke('state_%s' % state, module, existing, proposed, candidate)
try:
response = load_config(module, candidate)
result.update(response)
        except Exception as exc:
            module.fail_json(msg=str(exc))
else:
result['updates'] = []
if module._verbosity > 0:
end_state = invoke('get_existing', module, args)
result['end_state'] = end_state
result['existing'] = existing
result['proposed'] = proposed_args
if WARNINGS:
result['warnings'] = WARNINGS
module.exit_json(**result)
if __name__ == '__main__':
main()
|
xin3liang/platform_external_chromium_org | refs/heads/master | third_party/libxml/src/gentest.py | 298 | #!/usr/bin/python -u
#
# generate a tester program for the API
#
import sys
import os
import string
try:
import libxml2
except:
print "libxml2 python bindings not available, skipping testapi.c generation"
sys.exit(0)
if len(sys.argv) > 1:
srcPref = sys.argv[1] + '/'
else:
srcPref = ''
#
# Modules we want to skip in API test
#
skipped_modules = [ "SAX", "xlink", "threads", "globals",
"xmlmemory", "xmlversion", "xmlexports",
#deprecated
"DOCBparser",
]
#
# defines for each module
#
modules_defines = {
"HTMLparser": "LIBXML_HTML_ENABLED",
"catalog": "LIBXML_CATALOG_ENABLED",
"xmlreader": "LIBXML_READER_ENABLED",
"relaxng": "LIBXML_SCHEMAS_ENABLED",
"schemasInternals": "LIBXML_SCHEMAS_ENABLED",
"xmlschemas": "LIBXML_SCHEMAS_ENABLED",
"xmlschemastypes": "LIBXML_SCHEMAS_ENABLED",
"xpath": "LIBXML_XPATH_ENABLED",
"xpathInternals": "LIBXML_XPATH_ENABLED",
"xinclude": "LIBXML_XINCLUDE_ENABLED",
"xpointer": "LIBXML_XPTR_ENABLED",
"xmlregexp" : "LIBXML_REGEXP_ENABLED",
"xmlautomata" : "LIBXML_AUTOMATA_ENABLED",
"xmlsave" : "LIBXML_OUTPUT_ENABLED",
"DOCBparser" : "LIBXML_DOCB_ENABLED",
"xmlmodule" : "LIBXML_MODULES_ENABLED",
"pattern" : "LIBXML_PATTERN_ENABLED",
"schematron" : "LIBXML_SCHEMATRON_ENABLED",
}
#
# defines for specific functions
#
function_defines = {
"htmlDefaultSAXHandlerInit": "LIBXML_HTML_ENABLED",
"xmlSAX2EndElement" : "LIBXML_SAX1_ENABLED",
"xmlSAX2StartElement" : "LIBXML_SAX1_ENABLED",
"xmlSAXDefaultVersion" : "LIBXML_SAX1_ENABLED",
"UTF8Toisolat1" : "LIBXML_OUTPUT_ENABLED",
"xmlCleanupPredefinedEntities": "LIBXML_LEGACY_ENABLED",
"xmlInitializePredefinedEntities": "LIBXML_LEGACY_ENABLED",
"xmlSetFeature": "LIBXML_LEGACY_ENABLED",
"xmlGetFeature": "LIBXML_LEGACY_ENABLED",
"xmlGetFeaturesList": "LIBXML_LEGACY_ENABLED",
"xmlIOParseDTD": "LIBXML_VALID_ENABLED",
"xmlParseDTD": "LIBXML_VALID_ENABLED",
"xmlParseDoc": "LIBXML_SAX1_ENABLED",
"xmlParseMemory": "LIBXML_SAX1_ENABLED",
"xmlRecoverDoc": "LIBXML_SAX1_ENABLED",
"xmlParseFile": "LIBXML_SAX1_ENABLED",
"xmlRecoverFile": "LIBXML_SAX1_ENABLED",
"xmlRecoverMemory": "LIBXML_SAX1_ENABLED",
"xmlSAXParseFileWithData": "LIBXML_SAX1_ENABLED",
"xmlSAXParseMemory": "LIBXML_SAX1_ENABLED",
"xmlSAXUserParseMemory": "LIBXML_SAX1_ENABLED",
"xmlSAXParseDoc": "LIBXML_SAX1_ENABLED",
"xmlSAXParseDTD": "LIBXML_SAX1_ENABLED",
"xmlSAXUserParseFile": "LIBXML_SAX1_ENABLED",
"xmlParseEntity": "LIBXML_SAX1_ENABLED",
"xmlParseExternalEntity": "LIBXML_SAX1_ENABLED",
"xmlSAXParseMemoryWithData": "LIBXML_SAX1_ENABLED",
"xmlParseBalancedChunkMemory": "LIBXML_SAX1_ENABLED",
"xmlParseBalancedChunkMemoryRecover": "LIBXML_SAX1_ENABLED",
"xmlSetupParserForBuffer": "LIBXML_SAX1_ENABLED",
"xmlStopParser": "LIBXML_PUSH_ENABLED",
"xmlAttrSerializeTxtContent": "LIBXML_OUTPUT_ENABLED",
"xmlSAXParseFile": "LIBXML_SAX1_ENABLED",
"xmlSAXParseEntity": "LIBXML_SAX1_ENABLED",
"xmlNewTextChild": "LIBXML_TREE_ENABLED",
"xmlNewDocRawNode": "LIBXML_TREE_ENABLED",
"xmlNewProp": "LIBXML_TREE_ENABLED",
"xmlReconciliateNs": "LIBXML_TREE_ENABLED",
"xmlValidateNCName": "LIBXML_TREE_ENABLED",
"xmlValidateNMToken": "LIBXML_TREE_ENABLED",
"xmlValidateName": "LIBXML_TREE_ENABLED",
"xmlNewChild": "LIBXML_TREE_ENABLED",
"xmlValidateQName": "LIBXML_TREE_ENABLED",
"xmlSprintfElementContent": "LIBXML_OUTPUT_ENABLED",
"xmlValidGetPotentialChildren" : "LIBXML_VALID_ENABLED",
"xmlValidGetValidElements" : "LIBXML_VALID_ENABLED",
"docbDefaultSAXHandlerInit" : "LIBXML_DOCB_ENABLED",
"xmlTextReaderPreservePattern" : "LIBXML_PATTERN_ENABLED",
}
#
# Some functions really need to be skipped for the tests.
#
skipped_functions = [
# block on I/O
"xmlFdRead", "xmlReadFd", "xmlCtxtReadFd",
"htmlFdRead", "htmlReadFd", "htmlCtxtReadFd",
"xmlReaderNewFd", "xmlReaderForFd",
"xmlIORead", "xmlReadIO", "xmlCtxtReadIO",
"htmlIORead", "htmlReadIO", "htmlCtxtReadIO",
"xmlReaderNewIO", "xmlBufferDump", "xmlNanoFTPConnect",
"xmlNanoFTPConnectTo", "xmlNanoHTTPMethod", "xmlNanoHTTPMethodRedir",
# Complex I/O APIs
"xmlCreateIOParserCtxt", "xmlParserInputBufferCreateIO",
"xmlRegisterInputCallbacks", "xmlReaderForIO",
"xmlOutputBufferCreateIO", "xmlRegisterOutputCallbacks",
"xmlSaveToIO", "xmlIOHTTPOpenW",
# library state cleanup, generates false leak information and other
# troubles, heavily tested otherwise.
"xmlCleanupParser", "xmlRelaxNGCleanupTypes", "xmlSetListDoc",
"xmlSetTreeDoc", "xmlUnlinkNode",
# hard to avoid leaks in the tests
"xmlStrcat", "xmlStrncat", "xmlCatalogAddLocal", "xmlNewTextWriterDoc",
"xmlXPathNewValueTree", "xmlXPathWrapString",
# unimplemented
"xmlTextReaderReadInnerXml", "xmlTextReaderReadOuterXml",
"xmlTextReaderReadString",
# destructor
"xmlListDelete", "xmlOutputBufferClose", "xmlNanoFTPClose", "xmlNanoHTTPClose",
# deprecated
"xmlCatalogGetPublic", "xmlCatalogGetSystem", "xmlEncodeEntities",
"xmlNewGlobalNs", "xmlHandleEntity", "xmlNamespaceParseNCName",
"xmlNamespaceParseNSDef", "xmlNamespaceParseQName",
"xmlParseNamespace", "xmlParseQuotedString", "xmlParserHandleReference",
"xmlScanName",
"xmlDecodeEntities",
# allocators
"xmlMemFree",
# verbosity
"xmlCatalogSetDebug", "xmlShellPrintXPathError", "xmlShellPrintNode",
# Internal functions, no user space should really call them
"xmlParseAttribute", "xmlParseAttributeListDecl", "xmlParseName",
"xmlParseNmtoken", "xmlParseEntityValue", "xmlParseAttValue",
"xmlParseSystemLiteral", "xmlParsePubidLiteral", "xmlParseCharData",
"xmlParseExternalID", "xmlParseComment", "xmlParsePITarget", "xmlParsePI",
"xmlParseNotationDecl", "xmlParseEntityDecl", "xmlParseDefaultDecl",
"xmlParseNotationType", "xmlParseEnumerationType", "xmlParseEnumeratedType",
"xmlParseAttributeType", "xmlParseAttributeListDecl",
"xmlParseElementMixedContentDecl", "xmlParseElementChildrenContentDecl",
"xmlParseElementContentDecl", "xmlParseElementDecl", "xmlParseMarkupDecl",
"xmlParseCharRef", "xmlParseEntityRef", "xmlParseReference",
"xmlParsePEReference", "xmlParseDocTypeDecl", "xmlParseAttribute",
"xmlParseStartTag", "xmlParseEndTag", "xmlParseCDSect", "xmlParseContent",
"xmlParseElement", "xmlParseVersionNum", "xmlParseVersionInfo",
"xmlParseEncName", "xmlParseEncodingDecl", "xmlParseSDDecl",
"xmlParseXMLDecl", "xmlParseTextDecl", "xmlParseMisc",
"xmlParseExternalSubset", "xmlParserHandlePEReference",
"xmlSkipBlankChars",
]
#
# These functions have side effects on the global state
# and hence generate errors on memory allocation tests
#
skipped_memcheck = [ "xmlLoadCatalog", "xmlAddEncodingAlias",
"xmlSchemaInitTypes", "xmlNanoFTPProxy", "xmlNanoFTPScanProxy",
"xmlNanoHTTPScanProxy", "xmlResetLastError", "xmlCatalogConvert",
"xmlCatalogRemove", "xmlLoadCatalogs", "xmlCleanupCharEncodingHandlers",
"xmlInitCharEncodingHandlers", "xmlCatalogCleanup",
"xmlSchemaGetBuiltInType",
"htmlParseFile", "htmlCtxtReadFile", # loads the catalogs
"xmlTextReaderSchemaValidate", "xmlSchemaCleanupTypes", # initialize the schemas type system
"xmlCatalogResolve", "xmlIOParseDTD" # loads the catalogs
]
#
# Extra code needed for some test cases
#
extra_pre_call = {
"xmlSAXUserParseFile": """
#ifdef LIBXML_SAX1_ENABLED
if (sax == (xmlSAXHandlerPtr)&xmlDefaultSAXHandler) user_data = NULL;
#endif
""",
"xmlSAXUserParseMemory": """
#ifdef LIBXML_SAX1_ENABLED
if (sax == (xmlSAXHandlerPtr)&xmlDefaultSAXHandler) user_data = NULL;
#endif
""",
"xmlParseBalancedChunkMemory": """
#ifdef LIBXML_SAX1_ENABLED
if (sax == (xmlSAXHandlerPtr)&xmlDefaultSAXHandler) user_data = NULL;
#endif
""",
"xmlParseBalancedChunkMemoryRecover": """
#ifdef LIBXML_SAX1_ENABLED
if (sax == (xmlSAXHandlerPtr)&xmlDefaultSAXHandler) user_data = NULL;
#endif
""",
"xmlParserInputBufferCreateFd":
"if (fd >= 0) fd = -1;",
}
extra_post_call = {
"xmlAddChild":
"if (ret_val == NULL) { xmlFreeNode(cur) ; cur = NULL ; }",
"xmlAddEntity":
"if (ret_val != NULL) { xmlFreeNode(ret_val) ; ret_val = NULL; }",
"xmlAddChildList":
"if (ret_val == NULL) { xmlFreeNodeList(cur) ; cur = NULL ; }",
"xmlAddSibling":
"if (ret_val == NULL) { xmlFreeNode(elem) ; elem = NULL ; }",
"xmlAddNextSibling":
"if (ret_val == NULL) { xmlFreeNode(elem) ; elem = NULL ; }",
"xmlAddPrevSibling":
"if (ret_val == NULL) { xmlFreeNode(elem) ; elem = NULL ; }",
"xmlDocSetRootElement":
"if (doc == NULL) { xmlFreeNode(root) ; root = NULL ; }",
"xmlReplaceNode":
"""if (cur != NULL) {
xmlUnlinkNode(cur);
xmlFreeNode(cur) ; cur = NULL ; }
if (old != NULL) {
xmlUnlinkNode(old);
xmlFreeNode(old) ; old = NULL ; }
ret_val = NULL;""",
"xmlTextMerge":
"""if ((first != NULL) && (first->type != XML_TEXT_NODE)) {
xmlUnlinkNode(second);
xmlFreeNode(second) ; second = NULL ; }""",
"xmlBuildQName":
"""if ((ret_val != NULL) && (ret_val != ncname) &&
(ret_val != prefix) && (ret_val != memory))
xmlFree(ret_val);
ret_val = NULL;""",
"xmlNewDocElementContent":
"""xmlFreeDocElementContent(doc, ret_val); ret_val = NULL;""",
"xmlDictReference": "xmlDictFree(dict);",
# Functions which deallocates one of their parameters
"xmlXPathConvertBoolean": """val = NULL;""",
"xmlXPathConvertNumber": """val = NULL;""",
"xmlXPathConvertString": """val = NULL;""",
"xmlSaveFileTo": """buf = NULL;""",
"xmlSaveFormatFileTo": """buf = NULL;""",
"xmlIOParseDTD": "input = NULL;",
"xmlRemoveProp": "cur = NULL;",
"xmlNewNs": "if ((node == NULL) && (ret_val != NULL)) xmlFreeNs(ret_val);",
"xmlCopyNamespace": "if (ret_val != NULL) xmlFreeNs(ret_val);",
"xmlCopyNamespaceList": "if (ret_val != NULL) xmlFreeNsList(ret_val);",
"xmlNewTextWriter": "if (ret_val != NULL) out = NULL;",
"xmlNewTextWriterPushParser": "if (ctxt != NULL) {xmlFreeDoc(ctxt->myDoc); ctxt->myDoc = NULL;} if (ret_val != NULL) ctxt = NULL;",
"xmlNewIOInputStream": "if (ret_val != NULL) input = NULL;",
"htmlParseChunk": "if (ctxt != NULL) {xmlFreeDoc(ctxt->myDoc); ctxt->myDoc = NULL;}",
"htmlParseDocument": "if (ctxt != NULL) {xmlFreeDoc(ctxt->myDoc); ctxt->myDoc = NULL;}",
"xmlParseDocument": "if (ctxt != NULL) {xmlFreeDoc(ctxt->myDoc); ctxt->myDoc = NULL;}",
"xmlParseChunk": "if (ctxt != NULL) {xmlFreeDoc(ctxt->myDoc); ctxt->myDoc = NULL;}",
"xmlParseExtParsedEnt": "if (ctxt != NULL) {xmlFreeDoc(ctxt->myDoc); ctxt->myDoc = NULL;}",
"xmlDOMWrapAdoptNode": "if ((node != NULL) && (node->parent == NULL)) {xmlUnlinkNode(node);xmlFreeNode(node);node = NULL;}",
"xmlBufferSetAllocationScheme": "if ((buf != NULL) && (scheme == XML_BUFFER_ALLOC_IMMUTABLE) && (buf->content != NULL) && (buf->content != static_buf_content)) { xmlFree(buf->content); buf->content = NULL;}"
}
modules = []
def is_skipped_module(name):
for mod in skipped_modules:
if mod == name:
return 1
return 0
def is_skipped_function(name):
for fun in skipped_functions:
if fun == name:
return 1
# Do not test destructors
if string.find(name, 'Free') != -1:
return 1
return 0
def is_skipped_memcheck(name):
for fun in skipped_memcheck:
if fun == name:
return 1
return 0
missing_types = {}
def add_missing_type(name, func):
try:
list = missing_types[name]
list.append(func)
except:
missing_types[name] = [func]
generated_param_types = []
def add_generated_param_type(name):
generated_param_types.append(name)
generated_return_types = []
def add_generated_return_type(name):
generated_return_types.append(name)
missing_functions = {}
missing_functions_nr = 0
def add_missing_functions(name, module):
global missing_functions_nr
missing_functions_nr = missing_functions_nr + 1
try:
list = missing_functions[module]
list.append(name)
except:
missing_functions[module] = [name]
#
# Provide the type generators and destructors for the parameters
#
def type_convert(str, name, info, module, function, pos):
# res = string.replace(str, " ", " ")
# res = string.replace(str, " ", " ")
# res = string.replace(str, " ", " ")
res = string.replace(str, " *", "_ptr")
# res = string.replace(str, "*", "_ptr")
res = string.replace(res, " ", "_")
if res == 'const_char_ptr':
if string.find(name, "file") != -1 or \
string.find(name, "uri") != -1 or \
string.find(name, "URI") != -1 or \
string.find(info, "filename") != -1 or \
string.find(info, "URI") != -1 or \
string.find(info, "URL") != -1:
if string.find(function, "Save") != -1 or \
string.find(function, "Create") != -1 or \
string.find(function, "Write") != -1 or \
string.find(function, "Fetch") != -1:
return('fileoutput')
return('filepath')
if res == 'void_ptr':
if module == 'nanoftp' and name == 'ctx':
return('xmlNanoFTPCtxtPtr')
if function == 'xmlNanoFTPNewCtxt' or \
function == 'xmlNanoFTPConnectTo' or \
function == 'xmlNanoFTPOpen':
return('xmlNanoFTPCtxtPtr')
if module == 'nanohttp' and name == 'ctx':
return('xmlNanoHTTPCtxtPtr')
if function == 'xmlNanoHTTPMethod' or \
function == 'xmlNanoHTTPMethodRedir' or \
function == 'xmlNanoHTTPOpen' or \
function == 'xmlNanoHTTPOpenRedir':
return('xmlNanoHTTPCtxtPtr');
if function == 'xmlIOHTTPOpen':
return('xmlNanoHTTPCtxtPtr')
if string.find(name, "data") != -1:
return('userdata')
if string.find(name, "user") != -1:
return('userdata')
if res == 'xmlDoc_ptr':
res = 'xmlDocPtr'
if res == 'xmlNode_ptr':
res = 'xmlNodePtr'
if res == 'xmlDict_ptr':
res = 'xmlDictPtr'
if res == 'xmlNodePtr' and pos != 0:
if (function == 'xmlAddChild' and pos == 2) or \
(function == 'xmlAddChildList' and pos == 2) or \
(function == 'xmlAddNextSibling' and pos == 2) or \
(function == 'xmlAddSibling' and pos == 2) or \
(function == 'xmlDocSetRootElement' and pos == 2) or \
(function == 'xmlReplaceNode' and pos == 2) or \
(function == 'xmlTextMerge') or \
(function == 'xmlAddPrevSibling' and pos == 2):
return('xmlNodePtr_in');
if res == 'const xmlBufferPtr':
res = 'xmlBufferPtr'
if res == 'xmlChar_ptr' and name == 'name' and \
string.find(function, "EatName") != -1:
return('eaten_name')
if res == 'void_ptr*':
res = 'void_ptr_ptr'
if res == 'char_ptr*':
res = 'char_ptr_ptr'
if res == 'xmlChar_ptr*':
res = 'xmlChar_ptr_ptr'
if res == 'const_xmlChar_ptr*':
res = 'const_xmlChar_ptr_ptr'
if res == 'const_char_ptr*':
res = 'const_char_ptr_ptr'
if res == 'FILE_ptr' and module == 'debugXML':
res = 'debug_FILE_ptr';
if res == 'int' and name == 'options':
if module == 'parser' or module == 'xmlreader':
res = 'parseroptions'
return res
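# Illustrative sanity checks for the mapping above (inputs assumed, not
# taken from the real libxml2-api.xml); hypothetical helper, defined but
# never called.
def _example_type_convert_traces():
    # a read-style function taking a file name maps to 'filepath'
    # (only Save/Create/Write/Fetch functions get 'fileoutput') ...
    assert type_convert("const char *", "filename", "", "parser",
                        "xmlParseFile", 1) == 'filepath'
    # ... and the second node argument of an insertion API gets the
    # special 'xmlNodePtr_in' generator.
    assert type_convert("xmlNodePtr", "cur", "", "tree",
                        "xmlAddChild", 2) == 'xmlNodePtr_in'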
known_param_types = []
def is_known_param_type(name, rtype):
global test
for type in known_param_types:
if type == name:
return 1
for type in generated_param_types:
if type == name:
return 1
if name[-3:] == 'Ptr' or name[-4:] == '_ptr':
if rtype[0:6] == 'const ':
crtype = rtype[6:]
else:
crtype = rtype
define = 0
if modules_defines.has_key(module):
test.write("#ifdef %s\n" % (modules_defines[module]))
define = 1
test.write("""
#define gen_nb_%s 1
static %s gen_%s(int no ATTRIBUTE_UNUSED, int nr ATTRIBUTE_UNUSED) {
return(NULL);
}
static void des_%s(int no ATTRIBUTE_UNUSED, %s val ATTRIBUTE_UNUSED, int nr ATTRIBUTE_UNUSED) {
}
""" % (name, crtype, name, name, rtype))
if define == 1:
test.write("#endif\n\n")
add_generated_param_type(name)
return 1
return 0
#
# Provide the type destructors for the return values
#
known_return_types = []
def is_known_return_type(name):
for type in known_return_types:
if type == name:
return 1
return 0
#
# Copy the beginning of the C test program result
#
try:
input = open("testapi.c", "r")
except:
input = open(srcPref + "testapi.c", "r")
test = open('testapi.c.new', 'w')
def compare_and_save():
global test
test.close()
try:
input = open("testapi.c", "r").read()
except:
input = ''
test = open('testapi.c.new', "r").read()
if input != test:
try:
os.system("rm testapi.c; mv testapi.c.new testapi.c")
except:
os.system("mv testapi.c.new testapi.c")
print("Updated testapi.c")
else:
print("Generated testapi.c is identical")
line = input.readline()
while line != "":
if line == "/* CUT HERE: everything below that line is generated */\n":
break;
if line[0:15] == "#define gen_nb_":
type = string.split(line[15:])[0]
known_param_types.append(type)
if line[0:19] == "static void desret_":
type = string.split(line[19:], '(')[0]
known_return_types.append(type)
test.write(line)
line = input.readline()
input.close()
if line == "":
print "Could not find the CUT marker in testapi.c skipping generation"
test.close()
sys.exit(0)
print("Scanned testapi.c: found %d parameters types and %d return types\n" % (
len(known_param_types), len(known_return_types)))
test.write("/* CUT HERE: everything below that line is generated */\n")
#
# Open the input API description
#
doc = libxml2.readFile(srcPref + 'doc/libxml2-api.xml', None, 0)
if doc == None:
print "Failed to load doc/libxml2-api.xml"
sys.exit(1)
ctxt = doc.xpathNewContext()
#
# Generate a list of all function parameters and select only
# those used in the api tests
#
argtypes = {}
args = ctxt.xpathEval("/api/symbols/function/arg")
for arg in args:
mod = arg.xpathEval('string(../@file)')
func = arg.xpathEval('string(../@name)')
if (mod not in skipped_modules) and (func not in skipped_functions):
type = arg.xpathEval('string(@type)')
if not argtypes.has_key(type):
argtypes[type] = func
# similarly for return types
rettypes = {}
rets = ctxt.xpathEval("/api/symbols/function/return")
for ret in rets:
mod = ret.xpathEval('string(../@file)')
func = ret.xpathEval('string(../@name)')
if (mod not in skipped_modules) and (func not in skipped_functions):
type = ret.xpathEval('string(@type)')
if not rettypes.has_key(type):
rettypes[type] = func
#
# Generate constructors and return type handling for all enums
# which are used as function parameters
#
enums = ctxt.xpathEval("/api/symbols/typedef[@type='enum']")
for enum in enums:
module = enum.xpathEval('string(@file)')
name = enum.xpathEval('string(@name)')
#
# Skip any enums which are not in our filtered lists
#
if (name == None) or ((name not in argtypes) and (name not in rettypes)):
continue;
define = 0
if argtypes.has_key(name) and is_known_param_type(name, name) == 0:
values = ctxt.xpathEval("/api/symbols/enum[@type='%s']" % name)
i = 0
vals = []
for value in values:
vname = value.xpathEval('string(@name)')
if vname == None:
continue;
i = i + 1
if i >= 5:
break;
vals.append(vname)
if vals == []:
print "Didn't find any value for enum %s" % (name)
continue
if modules_defines.has_key(module):
test.write("#ifdef %s\n" % (modules_defines[module]))
define = 1
test.write("#define gen_nb_%s %d\n" % (name, len(vals)))
test.write("""static %s gen_%s(int no, int nr ATTRIBUTE_UNUSED) {\n""" %
(name, name))
i = 1
for value in vals:
test.write(" if (no == %d) return(%s);\n" % (i, value))
i = i + 1
test.write(""" return(0);
}
static void des_%s(int no ATTRIBUTE_UNUSED, %s val ATTRIBUTE_UNUSED, int nr ATTRIBUTE_UNUSED) {
}
""" % (name, name));
known_param_types.append(name)
if (is_known_return_type(name) == 0) and (name in rettypes):
if define == 0 and modules_defines.has_key(module):
test.write("#ifdef %s\n" % (modules_defines[module]))
define = 1
test.write("""static void desret_%s(%s val ATTRIBUTE_UNUSED) {
}
""" % (name, name))
known_return_types.append(name)
if define == 1:
test.write("#endif\n\n")
#
# Load the interfaces
#
headers = ctxt.xpathEval("/api/files/file")
for file in headers:
name = file.xpathEval('string(@name)')
if (name == None) or (name == ''):
continue
#
    # Some modules may be skipped because they don't really consist
    # of user-callable APIs
#
if is_skipped_module(name):
continue
#
# do not test deprecated APIs
#
desc = file.xpathEval('string(description)')
if string.find(desc, 'DEPRECATED') != -1:
print "Skipping deprecated interface %s" % name
continue;
test.write("#include <libxml/%s.h>\n" % name)
modules.append(name)
#
# Generate the callers signatures
#
for module in modules:
test.write("static int test_%s(void);\n" % module);
#
# Generate the top caller
#
test.write("""
/**
* testlibxml2:
*
* Main entry point of the tester for the full libxml2 module,
* it calls all the tester entry point for each module.
*
* Returns the number of error found
*/
static int
testlibxml2(void)
{
int test_ret = 0;
""")
for module in modules:
test.write(" test_ret += test_%s();\n" % module)
test.write("""
printf("Total: %d functions, %d tests, %d errors\\n",
function_tests, call_tests, test_ret);
return(test_ret);
}
""")
#
# How to handle a function
#
nb_tests = 0
def generate_test(module, node):
global test
global nb_tests
nb_cond = 0
no_gen = 0
name = node.xpathEval('string(@name)')
if is_skipped_function(name):
return
#
# check we know how to handle the args and return values
    # and store the information for the generation
#
try:
args = node.xpathEval("arg")
except:
args = []
t_args = []
n = 0
for arg in args:
n = n + 1
rtype = arg.xpathEval("string(@type)")
if rtype == 'void':
break;
info = arg.xpathEval("string(@info)")
nam = arg.xpathEval("string(@name)")
type = type_convert(rtype, nam, info, module, name, n)
if is_known_param_type(type, rtype) == 0:
add_missing_type(type, name);
no_gen = 1
if (type[-3:] == 'Ptr' or type[-4:] == '_ptr') and \
rtype[0:6] == 'const ':
crtype = rtype[6:]
else:
crtype = rtype
t_args.append((nam, type, rtype, crtype, info))
try:
rets = node.xpathEval("return")
except:
rets = []
t_ret = None
for ret in rets:
rtype = ret.xpathEval("string(@type)")
info = ret.xpathEval("string(@info)")
type = type_convert(rtype, 'return', info, module, name, 0)
if rtype == 'void':
break
if is_known_return_type(type) == 0:
add_missing_type(type, name);
no_gen = 1
t_ret = (type, rtype, info)
break
test.write("""
static int
test_%s(void) {
int test_ret = 0;
""" % (name))
if no_gen == 1:
add_missing_functions(name, module)
test.write("""
/* missing type support */
return(test_ret);
}
""")
return
try:
conds = node.xpathEval("cond")
for cond in conds:
test.write("#if %s\n" % (cond.get_content()))
nb_cond = nb_cond + 1
except:
pass
define = 0
if function_defines.has_key(name):
test.write("#ifdef %s\n" % (function_defines[name]))
define = 1
# Declare the memory usage counter
no_mem = is_skipped_memcheck(name)
if no_mem == 0:
test.write(" int mem_base;\n");
# Declare the return value
if t_ret != None:
test.write(" %s ret_val;\n" % (t_ret[1]))
# Declare the arguments
for arg in t_args:
(nam, type, rtype, crtype, info) = arg;
# add declaration
test.write(" %s %s; /* %s */\n" % (crtype, nam, info))
test.write(" int n_%s;\n" % (nam))
test.write("\n")
    # Cascade loop over each argument's list of values
for arg in t_args:
(nam, type, rtype, crtype, info) = arg;
#
test.write(" for (n_%s = 0;n_%s < gen_nb_%s;n_%s++) {\n" % (
nam, nam, type, nam))
# log the memory usage
if no_mem == 0:
test.write(" mem_base = xmlMemBlocks();\n");
# prepare the call
i = 0;
for arg in t_args:
(nam, type, rtype, crtype, info) = arg;
#
test.write(" %s = gen_%s(n_%s, %d);\n" % (nam, type, nam, i))
i = i + 1;
    # do the call, and clean up the result
if extra_pre_call.has_key(name):
test.write(" %s\n"% (extra_pre_call[name]))
if t_ret != None:
test.write("\n ret_val = %s(" % (name))
need = 0
for arg in t_args:
(nam, type, rtype, crtype, info) = arg
if need:
test.write(", ")
else:
need = 1
if rtype != crtype:
test.write("(%s)" % rtype)
test.write("%s" % nam);
test.write(");\n")
if extra_post_call.has_key(name):
test.write(" %s\n"% (extra_post_call[name]))
test.write(" desret_%s(ret_val);\n" % t_ret[0])
else:
test.write("\n %s(" % (name));
need = 0;
for arg in t_args:
(nam, type, rtype, crtype, info) = arg;
if need:
test.write(", ")
else:
need = 1
if rtype != crtype:
test.write("(%s)" % rtype)
test.write("%s" % nam)
test.write(");\n")
if extra_post_call.has_key(name):
test.write(" %s\n"% (extra_post_call[name]))
test.write(" call_tests++;\n");
# Free the arguments
i = 0;
for arg in t_args:
(nam, type, rtype, crtype, info) = arg;
# This is a hack to prevent generating a destructor for the
# 'input' argument in xmlTextReaderSetup. There should be
# a better, more generic way to do this!
if string.find(info, 'destroy') == -1:
test.write(" des_%s(n_%s, " % (type, nam))
if rtype != crtype:
test.write("(%s)" % rtype)
test.write("%s, %d);\n" % (nam, i))
i = i + 1;
test.write(" xmlResetLastError();\n");
# Check the memory usage
if no_mem == 0:
test.write(""" if (mem_base != xmlMemBlocks()) {
printf("Leak of %%d blocks found in %s",
xmlMemBlocks() - mem_base);
test_ret++;
""" % (name));
for arg in t_args:
(nam, type, rtype, crtype, info) = arg;
test.write(""" printf(" %%d", n_%s);\n""" % (nam))
test.write(""" printf("\\n");\n""")
test.write(" }\n")
for arg in t_args:
test.write(" }\n")
test.write(" function_tests++;\n")
#
# end of conditional
#
while nb_cond > 0:
test.write("#endif\n")
nb_cond = nb_cond -1
if define == 1:
test.write("#endif\n")
nb_tests = nb_tests + 1;
test.write("""
return(test_ret);
}
""")
#
# Generate all module callers
#
for module in modules:
# gather all the functions exported by that module
try:
functions = ctxt.xpathEval("/api/symbols/function[@file='%s']" % (module))
except:
print "Failed to gather functions from module %s" % (module)
continue;
# iterate over all functions in the module generating the test
i = 0
nb_tests_old = nb_tests
for function in functions:
i = i + 1
generate_test(module, function);
# header
test.write("""static int
test_%s(void) {
int test_ret = 0;
if (quiet == 0) printf("Testing %s : %d of %d functions ...\\n");
""" % (module, module, nb_tests - nb_tests_old, i))
# iterate over all functions in the module generating the call
for function in functions:
name = function.xpathEval('string(@name)')
if is_skipped_function(name):
continue
test.write(" test_ret += test_%s();\n" % (name))
# footer
test.write("""
if (test_ret != 0)
printf("Module %s: %%d errors\\n", test_ret);
return(test_ret);
}
""" % (module))
#
# Generate direct module caller
#
test.write("""static int
test_module(const char *module) {
""");
for module in modules:
test.write(""" if (!strcmp(module, "%s")) return(test_%s());\n""" % (
module, module))
test.write(""" return(0);
}
""");
print "Generated test for %d modules and %d functions" %(len(modules), nb_tests)
compare_and_save()
missing_list = []
for missing in missing_types.keys():
if missing == 'va_list' or missing == '...':
continue;
n = len(missing_types[missing])
missing_list.append((n, missing))
def compare_missing(a, b):
return b[0] - a[0]
missing_list.sort(compare_missing)
print "Missing support for %d functions and %d types see missing.lst" % (missing_functions_nr, len(missing_list))
lst = open("missing.lst", "w")
lst.write("Missing support for %d types" % (len(missing_list)))
lst.write("\n")
for miss in missing_list:
lst.write("%s: %d :" % (miss[1], miss[0]))
i = 0
for n in missing_types[miss[1]]:
i = i + 1
if i > 5:
lst.write(" ...")
break
lst.write(" %s" % (n))
lst.write("\n")
lst.write("\n")
lst.write("\n")
lst.write("Missing support per module");
for module in missing_functions.keys():
lst.write("module %s:\n %s\n" % (module, missing_functions[module]))
lst.close()
|
sammcveety/incubator-beam | refs/heads/master | sdks/python/apache_beam/transforms/core.py | 5 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Core PTransform subclasses, such as FlatMap, GroupByKey, and Map."""
from __future__ import absolute_import
import copy
import inspect
import types
from apache_beam import pvalue
from apache_beam import typehints
from apache_beam.coders import typecoders
from apache_beam.internal import util
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.transforms import ptransform
from apache_beam.transforms.display import DisplayDataItem
from apache_beam.transforms.display import HasDisplayData
from apache_beam.transforms.ptransform import PTransform
from apache_beam.transforms.ptransform import PTransformWithSideInputs
from apache_beam.transforms.window import MIN_TIMESTAMP
from apache_beam.transforms.window import TimestampCombiner
from apache_beam.transforms.window import WindowedValue
from apache_beam.transforms.window import TimestampedValue
from apache_beam.transforms.window import GlobalWindows
from apache_beam.transforms.window import WindowFn
from apache_beam.typehints import Any
from apache_beam.typehints import Iterable
from apache_beam.typehints import KV
from apache_beam.typehints import trivial_inference
from apache_beam.typehints import Union
from apache_beam.typehints.decorators import get_type_hints
from apache_beam.typehints.decorators import TypeCheckError
from apache_beam.typehints.decorators import WithTypeHints
from apache_beam.typehints.trivial_inference import element_type
from apache_beam.typehints.typehints import is_consistent_with
from apache_beam.utils import urns
from apache_beam.options.pipeline_options import TypeOptions
__all__ = [
'DoFn',
'CombineFn',
'PartitionFn',
'ParDo',
'FlatMap',
'Map',
'Filter',
'CombineGlobally',
'CombinePerKey',
'CombineValues',
'GroupByKey',
'Partition',
'Windowing',
'WindowInto',
'Flatten',
'Create',
]
# Type variables
T = typehints.TypeVariable('T')
K = typehints.TypeVariable('K')
V = typehints.TypeVariable('V')
class DoFnContext(object):
"""A context available to all methods of DoFn instance."""
pass
class DoFnProcessContext(DoFnContext):
"""A processing context passed to DoFn process() during execution.
Most importantly, a DoFn.process method will access context.element
to get the element it is supposed to process.
Attributes:
label: label of the ParDo whose element is being processed.
element: element being processed
(in process method only; always None in start_bundle and finish_bundle)
timestamp: timestamp of the element
(in process method only; always None in start_bundle and finish_bundle)
windows: windows of the element
(in process method only; always None in start_bundle and finish_bundle)
state: a DoFnState object, which holds the runner's internal state
for this element.
Not used by the pipeline code.
"""
def __init__(self, label, element=None, state=None):
"""Initialize a processing context object with an element and state.
The element represents one value from a PCollection that will be accessed
by a DoFn object during pipeline execution, and state is an arbitrary object
where counters and other pipeline state information can be passed in.
DoFnProcessContext objects are also used as inputs to PartitionFn instances.
Args:
label: label of the PCollection whose element is being processed.
element: element of a PCollection being processed using this context.
state: a DoFnState object with state to be passed in to the DoFn object.
"""
self.label = label
self.state = state
if element is not None:
self.set_element(element)
def set_element(self, windowed_value):
if windowed_value is None:
# Not currently processing an element.
if hasattr(self, 'element'):
del self.element
del self.timestamp
del self.windows
else:
self.element = windowed_value.value
self.timestamp = windowed_value.timestamp
self.windows = windowed_value.windows
class DoFn(WithTypeHints, HasDisplayData):
"""A function object used by a transform with custom processing.
The ParDo transform is such a transform. The ParDo.apply
method will take an object of type DoFn and apply it to all elements of a
PCollection object.
In order to have concrete DoFn objects one has to subclass from DoFn and
define the desired behavior (start_bundle/finish_bundle and process) or wrap a
callable object using the CallableWrapperDoFn class.
"""
ElementParam = 'ElementParam'
SideInputParam = 'SideInputParam'
TimestampParam = 'TimestampParam'
WindowParam = 'WindowParam'
DoFnParams = [ElementParam, SideInputParam, TimestampParam, WindowParam]
@staticmethod
def from_callable(fn):
return CallableWrapperDoFn(fn)
def default_label(self):
return self.__class__.__name__
def process(self, element, *args, **kwargs):
"""Called for each element of a pipeline. The default arguments are needed
for the DoFnRunner to be able to pass the parameters correctly.
Args:
element: The element to be processed
*args: side inputs
**kwargs: keyword side inputs
"""
raise NotImplementedError
def start_bundle(self):
"""Called before a bundle of elements is processed on a worker.
Elements to be processed are split into bundles and distributed
to workers. Before a worker calls process() on the first element
of its bundle, it calls this method.
"""
pass
def finish_bundle(self):
"""Called after a bundle of elements is processed on a worker.
"""
pass
def get_function_arguments(self, func):
"""Return the function arguments based on the name provided. If they have
a _inspect_function attached to the class then use that otherwise default
to the python inspect library.
"""
func_name = '_inspect_%s' % func
if hasattr(self, func_name):
f = getattr(self, func_name)
return f()
f = getattr(self, func)
return inspect.getargspec(f)
  # TODO(sourabhbajaj): Do we want to remove the responsibility of these from
  # the DoFn or maybe the runner
def infer_output_type(self, input_type):
# TODO(robertwb): Side inputs types.
# TODO(robertwb): Assert compatibility with input type hint?
return self._strip_output_annotations(
trivial_inference.infer_return_type(self.process, [input_type]))
def _strip_output_annotations(self, type_hint):
annotations = (TimestampedValue, WindowedValue, pvalue.TaggedOutput)
# TODO(robertwb): These should be parameterized types that the
# type inferencer understands.
if (type_hint in annotations
or trivial_inference.element_type(type_hint) in annotations):
return Any
return type_hint
def _process_argspec_fn(self):
"""Returns the Python callable that will eventually be invoked.
This should ideally be the user-level function that is called with
the main and (if any) side inputs, and is used to relate the type
hint parameters with the input parameters (e.g., by argument name).
"""
return self.process
def is_process_bounded(self):
"""Checks if an object is a bound method on an instance."""
if not isinstance(self.process, types.MethodType):
return False # Not a method
if self.process.im_self is None:
return False # Method is not bound
if issubclass(self.process.im_class, type) or \
self.process.im_class is types.ClassType:
return False # Method is a classmethod
return True
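# A minimal sketch of a concrete DoFn (illustrative only; the class name
# and splitting behavior are assumptions, not part of this module's API).
# process() must return an iterable per element, typically via yield, so
# that a ParDo can flatten the results into the output PCollection.
class _ExampleBreakIntoWordsDoFn(DoFn):
  def process(self, element, *args, **kwargs):
    for word in element.split():
      yield word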
def _fn_takes_side_inputs(fn):
try:
argspec = inspect.getargspec(fn)
except TypeError:
# We can't tell; maybe it does.
return True
is_bound = isinstance(fn, types.MethodType) and fn.im_self is not None
return len(argspec.args) > 1 + is_bound or argspec.varargs or argspec.keywords
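def _example_fn_takes_side_inputs():
  # Illustrative check (hypothetical helper, defined but never called):
  # a single-argument callable implies no side inputs, while extra
  # positional arguments imply side inputs, per the test above.
  assert not _fn_takes_side_inputs(lambda element: element)
  assert _fn_takes_side_inputs(lambda element, side: (element, side))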
class CallableWrapperDoFn(DoFn):
"""For internal use only; no backwards-compatibility guarantees.
A DoFn (function) object wrapping a callable object.
The purpose of this class is to conveniently wrap simple functions and use
them in transforms.
"""
def __init__(self, fn):
"""Initializes a CallableWrapperDoFn object wrapping a callable.
Args:
fn: A callable object.
Raises:
TypeError: if fn parameter is not a callable type.
"""
if not callable(fn):
raise TypeError('Expected a callable object instead of: %r' % fn)
self._fn = fn
if isinstance(fn, (
types.BuiltinFunctionType, types.MethodType, types.FunctionType)):
self.process = fn
else:
# For cases such as set / list where fn is callable but not a function
self.process = lambda element: fn(element)
super(CallableWrapperDoFn, self).__init__()
def display_data(self):
# If the callable has a name, then it's likely a function, and
# we show its name.
# Otherwise, it might be an instance of a callable class. We
# show its class.
display_data_value = (self._fn.__name__ if hasattr(self._fn, '__name__')
else self._fn.__class__)
return {'fn': DisplayDataItem(display_data_value,
label='Transform Function')}
def __repr__(self):
return 'CallableWrapperDoFn(%s)' % self._fn
def default_type_hints(self):
type_hints = get_type_hints(self._fn)
# If the fn was a DoFn annotated with a type-hint that hinted a return
# type compatible with Iterable[Any], then we strip off the outer
# container type due to the 'flatten' portion of FlatMap.
# TODO(robertwb): Should we require an iterable specification for FlatMap?
if type_hints.output_types:
args, kwargs = type_hints.output_types
if len(args) == 1 and is_consistent_with(args[0], Iterable[Any]):
type_hints = type_hints.copy()
type_hints.set_output_types(element_type(args[0]), **kwargs)
return type_hints
def infer_output_type(self, input_type):
return self._strip_output_annotations(
trivial_inference.infer_return_type(self._fn, [input_type]))
def _process_argspec_fn(self):
return getattr(self._fn, '_argspec_fn', self._fn)
class CombineFn(WithTypeHints, HasDisplayData):
"""A function object used by a Combine transform with custom processing.
A CombineFn specifies how multiple values in all or part of a PCollection can
be merged into a single value---essentially providing the same kind of
information as the arguments to the Python "reduce" builtin (except for the
input argument, which is an instance of CombineFnProcessContext). The
combining process proceeds as follows:
1. Input values are partitioned into one or more batches.
2. For each batch, the create_accumulator method is invoked to create a fresh
initial "accumulator" value representing the combination of zero values.
3. For each input value in the batch, the add_input method is invoked to
combine more values with the accumulator for that batch.
4. The merge_accumulators method is invoked to combine accumulators from
separate batches into a single combined output accumulator value, once all
  of the accumulators have had all the input values in their batches added to
them. This operation is invoked repeatedly, until there is only one
accumulator value left.
5. The extract_output operation is invoked on the final accumulator to get
the output value.
"""
def default_label(self):
return self.__class__.__name__
def create_accumulator(self, *args, **kwargs):
"""Return a fresh, empty accumulator for the combine operation.
Args:
*args: Additional arguments and side inputs.
**kwargs: Additional arguments and side inputs.
"""
raise NotImplementedError(str(self))
def add_input(self, accumulator, element, *args, **kwargs):
"""Return result of folding element into accumulator.
CombineFn implementors must override add_input.
Args:
accumulator: the current accumulator
element: the element to add
*args: Additional arguments and side inputs.
**kwargs: Additional arguments and side inputs.
"""
raise NotImplementedError(str(self))
def add_inputs(self, accumulator, elements, *args, **kwargs):
"""Returns the result of folding each element in elements into accumulator.
This is provided in case the implementation affords more efficient
bulk addition of elements. The default implementation simply loops
over the inputs invoking add_input for each one.
Args:
accumulator: the current accumulator
elements: the elements to add
*args: Additional arguments and side inputs.
**kwargs: Additional arguments and side inputs.
"""
for element in elements:
accumulator = self.add_input(accumulator, element, *args, **kwargs)
return accumulator
def merge_accumulators(self, accumulators, *args, **kwargs):
"""Returns the result of merging several accumulators
to a single accumulator value.
Args:
accumulators: the accumulators to merge
*args: Additional arguments and side inputs.
**kwargs: Additional arguments and side inputs.
"""
raise NotImplementedError(str(self))
def extract_output(self, accumulator, *args, **kwargs):
"""Return result of converting accumulator into the output value.
Args:
accumulator: the final accumulator value computed by this CombineFn
for the entire input key or PCollection.
*args: Additional arguments and side inputs.
**kwargs: Additional arguments and side inputs.
"""
raise NotImplementedError(str(self))
def apply(self, elements, *args, **kwargs):
"""Returns result of applying this CombineFn to the input values.
Args:
elements: the set of values to combine.
*args: Additional arguments and side inputs.
**kwargs: Additional arguments and side inputs.
"""
return self.extract_output(
self.add_inputs(
self.create_accumulator(*args, **kwargs), elements,
*args, **kwargs),
*args, **kwargs)
def for_input_type(self, input_type):
"""Returns a specialized implementation of self, if it exists.
Otherwise, returns self.
Args:
input_type: the type of input elements.
"""
return self
@staticmethod
def from_callable(fn):
return CallableWrapperCombineFn(fn)
@staticmethod
def maybe_from_callable(fn):
return fn if isinstance(fn, CombineFn) else CallableWrapperCombineFn(fn)
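# A minimal sketch of a concrete CombineFn (illustrative only; the class
# name is an assumption). It follows the steps in the class docstring: a
# (sum, count) accumulator is created empty, folded over inputs, merged
# across batches, and reduced to a mean on extraction, e.g.
# _ExampleMeanCombineFn().apply([1, 2, 3, 4]) == 2.5.
class _ExampleMeanCombineFn(CombineFn):
  def create_accumulator(self, *args, **kwargs):
    return (0, 0)  # (sum, count) representing zero input values
  def add_input(self, accumulator, element, *args, **kwargs):
    (total, count) = accumulator
    return (total + element, count + 1)
  def merge_accumulators(self, accumulators, *args, **kwargs):
    total, count = 0, 0
    for (t, c) in accumulators:
      total += t
      count += c
    return (total, count)
  def extract_output(self, accumulator, *args, **kwargs):
    (total, count) = accumulator
    return float(total) / count if count else float('NaN')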
class CallableWrapperCombineFn(CombineFn):
"""For internal use only; no backwards-compatibility guarantees.
A CombineFn (function) object wrapping a callable object.
The purpose of this class is to conveniently wrap simple functions and use
them in Combine transforms.
"""
_EMPTY = object()
def __init__(self, fn):
"""Initializes a CallableFn object wrapping a callable.
Args:
fn: A callable object that reduces elements of an iterable to a single
value (like the builtins sum and max). This callable must be capable of
receiving the kind of values it generates as output in its input, and
for best results, its operation must be commutative and associative.
Raises:
TypeError: if fn parameter is not a callable type.
"""
if not callable(fn):
raise TypeError('Expected a callable object instead of: %r' % fn)
super(CallableWrapperCombineFn, self).__init__()
self._fn = fn
def display_data(self):
return {'fn_dd': self._fn}
def __repr__(self):
return "CallableWrapperCombineFn(%s)" % self._fn
def create_accumulator(self, *args, **kwargs):
return self._EMPTY
def add_input(self, accumulator, element, *args, **kwargs):
if accumulator is self._EMPTY:
return element
return self._fn([accumulator, element], *args, **kwargs)
def add_inputs(self, accumulator, elements, *args, **kwargs):
if accumulator is self._EMPTY:
return self._fn(elements, *args, **kwargs)
elif isinstance(elements, (list, tuple)):
return self._fn([accumulator] + list(elements), *args, **kwargs)
def union():
yield accumulator
for e in elements:
yield e
return self._fn(union(), *args, **kwargs)
def merge_accumulators(self, accumulators, *args, **kwargs):
# It's (weakly) assumed that self._fn is associative.
return self._fn(accumulators, *args, **kwargs)
def extract_output(self, accumulator, *args, **kwargs):
return self._fn(()) if accumulator is self._EMPTY else accumulator
def default_type_hints(self):
fn_hints = get_type_hints(self._fn)
if fn_hints.input_types is None:
return fn_hints
else:
# fn(Iterable[V]) -> V becomes CombineFn(V) -> V
input_args, input_kwargs = fn_hints.input_types
if not input_args:
if len(input_kwargs) == 1:
input_args, input_kwargs = tuple(input_kwargs.values()), {}
else:
raise TypeError('Combiner input type must be specified positionally.')
if not is_consistent_with(input_args[0], Iterable[Any]):
raise TypeCheckError(
'All functions for a Combine PTransform must accept a '
'single argument compatible with: Iterable[Any]. '
'Instead a function with input type: %s was received.'
% input_args[0])
input_args = (element_type(input_args[0]),) + input_args[1:]
# TODO(robertwb): Assert output type is consistent with input type?
hints = fn_hints.copy()
hints.set_input_types(*input_args, **input_kwargs)
return hints
def for_input_type(self, input_type):
# Avoid circular imports.
from apache_beam.transforms import cy_combiners
if self._fn is any:
return cy_combiners.AnyCombineFn()
elif self._fn is all:
return cy_combiners.AllCombineFn()
else:
known_types = {
(sum, int): cy_combiners.SumInt64Fn(),
(min, int): cy_combiners.MinInt64Fn(),
(max, int): cy_combiners.MaxInt64Fn(),
(sum, float): cy_combiners.SumFloatFn(),
(min, float): cy_combiners.MinFloatFn(),
(max, float): cy_combiners.MaxFloatFn(),
}
return known_types.get((self._fn, input_type), self)
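def _example_callable_wrapper_combine():
  # Illustrative only (hypothetical helper, defined but never called):
  # wrapping a builtin reducer behaves like calling it over the whole
  # input, per the _EMPTY-accumulator logic above.
  assert CombineFn.from_callable(sum).apply([1, 2, 3]) == 6
  assert CombineFn.from_callable(max).apply([4, 1, 7]) == 7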
class PartitionFn(WithTypeHints):
"""A function object used by a Partition transform.
A PartitionFn specifies how individual values in a PCollection will be placed
into separate partitions, indexed by an integer.
"""
def default_label(self):
return self.__class__.__name__
def partition_for(self, element, num_partitions, *args, **kwargs):
"""Specify which partition will receive this element.
Args:
element: An element of the input PCollection.
num_partitions: Number of partitions, i.e., output PCollections.
*args: optional parameters and side inputs.
**kwargs: optional parameters and side inputs.
Returns:
An integer in [0, num_partitions).
"""
pass
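# A minimal sketch of a partitioning callable (name hypothetical); it
# satisfies the PartitionFn contract above by always returning an
# integer in [0, num_partitions).
def _example_partition_fn(element, num_partitions):
  return hash(element) % num_partitions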
class CallableWrapperPartitionFn(PartitionFn):
"""For internal use only; no backwards-compatibility guarantees.
A PartitionFn object wrapping a callable object.
Instances of this class wrap simple functions for use in Partition operations.
"""
def __init__(self, fn):
"""Initializes a PartitionFn object wrapping a callable.
Args:
fn: A callable object, which should accept the following arguments:
element - element to assign to a partition.
num_partitions - number of output partitions.
and may accept additional arguments and side inputs.
Raises:
TypeError: if fn is not a callable type.
"""
if not callable(fn):
raise TypeError('Expected a callable object instead of: %r' % fn)
self._fn = fn
def partition_for(self, element, num_partitions, *args, **kwargs):
return self._fn(element, num_partitions, *args, **kwargs)
class ParDo(PTransformWithSideInputs):
"""A ParDo transform.
Processes an input PCollection by applying a DoFn to each element and
returning the accumulated results into an output PCollection. The type of the
elements is not fixed as long as the DoFn can deal with it. In reality
  the type is constrained to some extent because the elements sometimes must be
persisted to external storage. See the expand() method comments for a detailed
description of all possible arguments.
Note that the DoFn must return an iterable for each element of the input
PCollection. An easy way to do this is to use the yield keyword in the
process method.
Args:
pcoll: a PCollection to be processed.
fn: a DoFn object to be applied to each element of pcoll argument.
*args: positional arguments passed to the dofn object.
**kwargs: keyword arguments passed to the dofn object.
Note that the positional and keyword arguments will be processed in order
to detect PCollections that will be computed as side inputs to the
transform. During pipeline execution whenever the DoFn object gets executed
(its apply() method gets called) the PCollection arguments will be replaced
by values from the PCollection in the exact positions where they appear in
the argument lists.
"""
def __init__(self, fn, *args, **kwargs):
super(ParDo, self).__init__(fn, *args, **kwargs)
# TODO(robertwb): Change all uses of the dofn attribute to use fn instead.
self.dofn = self.fn
self.output_tags = set()
if not isinstance(self.fn, DoFn):
raise TypeError('ParDo must be called with a DoFn instance.')
# Validate the DoFn by creating a DoFnSignature
from apache_beam.runners.common import DoFnSignature
DoFnSignature(self.fn)
def default_type_hints(self):
return self.fn.get_type_hints()
def infer_output_type(self, input_type):
return trivial_inference.element_type(
self.fn.infer_output_type(input_type))
def make_fn(self, fn):
if isinstance(fn, DoFn):
return fn
return CallableWrapperDoFn(fn)
def _process_argspec_fn(self):
return self.fn._process_argspec_fn()
def display_data(self):
return {'fn': DisplayDataItem(self.fn.__class__,
label='Transform Function'),
'fn_dd': self.fn}
def expand(self, pcoll):
return pvalue.PCollection(pcoll.pipeline)
def with_outputs(self, *tags, **main_kw):
"""Returns a tagged tuple allowing access to the outputs of a ParDo.
The resulting object supports access to the
PCollection associated with a tag (e.g., o.tag, o[tag]) and iterating over
the available tags (e.g., for tag in o: ...).
Args:
*tags: if non-empty, list of valid tags. If a list of valid tags is given,
it will be an error to use an undeclared tag later in the pipeline.
**main_kw: dictionary empty or with one key 'main' defining the tag to be
used for the main output (which will not have a tag associated with it).
Returns:
An object of type DoOutputsTuple that bundles together all the outputs
of a ParDo transform and allows accessing the individual
PCollections for each output using an object.tag syntax.
Raises:
TypeError: if the self object is not a PCollection that is the result of
a ParDo transform.
ValueError: if main_kw contains any key other than 'main'.
"""
main_tag = main_kw.pop('main', None)
if main_kw:
raise ValueError('Unexpected keyword arguments: %s' % main_kw.keys())
return _MultiParDo(self, tags, main_tag)
class _MultiParDo(PTransform):
def __init__(self, do_transform, tags, main_tag):
super(_MultiParDo, self).__init__(do_transform.label)
self._do_transform = do_transform
self._tags = tags
self._main_tag = main_tag
def expand(self, pcoll):
_ = pcoll | self._do_transform
return pvalue.DoOutputsTuple(
pcoll.pipeline, self._do_transform, self._tags, self._main_tag)
def FlatMap(fn, *args, **kwargs): # pylint: disable=invalid-name
"""FlatMap is like ParDo except it takes a callable to specify the
transformation.
The callable must return an iterable for each element of the input
PCollection. The elements of these iterables will be flattened into
the output PCollection.
Args:
fn: a callable object.
*args: positional arguments passed to the transform callable.
**kwargs: keyword arguments passed to the transform callable.
Returns:
    A PCollection containing the FlatMap outputs.
Raises:
TypeError: If the fn passed as argument is not a callable. Typical error
is to pass a DoFn instance which is supported only for ParDo.
"""
label = 'FlatMap(%s)' % ptransform.label_from_callable(fn)
if not callable(fn):
raise TypeError(
'FlatMap can be used only with callable objects. '
'Received %r instead.' % (fn))
pardo = ParDo(CallableWrapperDoFn(fn), *args, **kwargs)
pardo.label = label
return pardo
def Map(fn, *args, **kwargs): # pylint: disable=invalid-name
"""Map is like FlatMap except its callable returns only a single element.
Args:
fn: a callable object.
*args: positional arguments passed to the transform callable.
**kwargs: keyword arguments passed to the transform callable.
Returns:
A PCollection containing the Map outputs.
Raises:
TypeError: If the fn passed as argument is not a callable. Typical error
is to pass a DoFn instance which is supported only for ParDo.
"""
if not callable(fn):
raise TypeError(
'Map can be used only with callable objects. '
'Received %r instead.' % (fn))
if _fn_takes_side_inputs(fn):
wrapper = lambda x, *args, **kwargs: [fn(x, *args, **kwargs)]
else:
wrapper = lambda x: [fn(x)]
label = 'Map(%s)' % ptransform.label_from_callable(fn)
  # TODO: What about callable classes?
if hasattr(fn, '__name__'):
wrapper.__name__ = fn.__name__
# Proxy the type-hint information from the original function to this new
# wrapped function.
get_type_hints(wrapper).input_types = get_type_hints(fn).input_types
output_hint = get_type_hints(fn).simple_output_type(label)
if output_hint:
get_type_hints(wrapper).set_output_types(typehints.Iterable[output_hint])
# pylint: disable=protected-access
wrapper._argspec_fn = fn
# pylint: enable=protected-access
pardo = FlatMap(wrapper, *args, **kwargs)
pardo.label = label
return pardo
def Filter(fn, *args, **kwargs): # pylint: disable=invalid-name
"""Filter is a FlatMap with its callable filtering out elements.
Args:
fn: a callable object.
*args: positional arguments passed to the transform callable.
**kwargs: keyword arguments passed to the transform callable.
Returns:
A PCollection containing the Filter outputs.
Raises:
TypeError: If the fn passed as argument is not a callable. Typical error
    is to pass a DoFn instance which is supported only for ParDo.
"""
if not callable(fn):
raise TypeError(
'Filter can be used only with callable objects. '
'Received %r instead.' % (fn))
wrapper = lambda x, *args, **kwargs: [x] if fn(x, *args, **kwargs) else []
label = 'Filter(%s)' % ptransform.label_from_callable(fn)
# TODO: What about callable classes?
if hasattr(fn, '__name__'):
wrapper.__name__ = fn.__name__
# Proxy the type-hint information from the function being wrapped, setting the
# output type to be the same as the input type.
get_type_hints(wrapper).input_types = get_type_hints(fn).input_types
output_hint = get_type_hints(fn).simple_output_type(label)
if (output_hint is None
and get_type_hints(wrapper).input_types
and get_type_hints(wrapper).input_types[0]):
output_hint = get_type_hints(wrapper).input_types[0]
if output_hint:
get_type_hints(wrapper).set_output_types(typehints.Iterable[output_hint])
# pylint: disable=protected-access
wrapper._argspec_fn = fn
# pylint: enable=protected-access
pardo = FlatMap(wrapper, *args, **kwargs)
pardo.label = label
return pardo
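def _example_element_wrapper_semantics():
  # Illustrative only (hypothetical helper, defined but never called):
  # mirrors the wrappers built by Map and Filter above, which recast
  # both transforms as FlatMaps over singleton or empty lists.
  is_even = lambda x: x % 2 == 0
  map_wrapper = lambda x: [x * 10]  # as built by Map
  filter_wrapper = lambda x: [x] if is_even(x) else []  # as in Filter
  assert map_wrapper(3) == [30]
  assert filter_wrapper(2) == [2] and filter_wrapper(3) == []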
class CombineGlobally(PTransform):
"""A CombineGlobally transform.
Reduces a PCollection to a single value by progressively applying a CombineFn
to portions of the PCollection (and to intermediate values created thereby).
See documentation in CombineFn for details on the specifics on how CombineFns
are applied.
Args:
pcoll: a PCollection to be reduced into a single value.
fn: a CombineFn object that will be called to progressively reduce the
PCollection into single values, or a callable suitable for wrapping
by CallableWrapperCombineFn.
*args: positional arguments passed to the CombineFn object.
**kwargs: keyword arguments passed to the CombineFn object.
Raises:
TypeError: If the output type of the input PCollection is not compatible
with Iterable[A].
Returns:
A single-element PCollection containing the main output of the Combine
transform.
Note that the positional and keyword arguments will be processed in order
to detect PObjects that will be computed as side inputs to the transform.
During pipeline execution whenever the CombineFn object gets executed (i.e.,
any of the CombineFn methods get called), the PObject arguments will be
replaced by their actual value in the exact position where they appear in
the argument lists.
"""
has_defaults = True
as_view = False
def __init__(self, fn, *args, **kwargs):
if not (isinstance(fn, CombineFn) or callable(fn)):
raise TypeError(
'CombineGlobally can be used only with combineFn objects. '
'Received %r instead.' % (fn))
super(CombineGlobally, self).__init__()
self.fn = fn
self.args = args
self.kwargs = kwargs
def display_data(self):
return {'combine_fn':
DisplayDataItem(self.fn.__class__, label='Combine Function'),
'combine_fn_dd':
self.fn}
def default_label(self):
return 'CombineGlobally(%s)' % ptransform.label_from_callable(self.fn)
def _clone(self, **extra_attributes):
clone = copy.copy(self)
clone.__dict__.update(extra_attributes)
return clone
def with_defaults(self, has_defaults=True):
return self._clone(has_defaults=has_defaults)
def without_defaults(self):
return self.with_defaults(False)
def as_singleton_view(self):
return self._clone(as_view=True)
def expand(self, pcoll):
def add_input_types(transform):
type_hints = self.get_type_hints()
if type_hints.input_types:
return transform.with_input_types(type_hints.input_types[0][0])
return transform
combined = (pcoll
| 'KeyWithVoid' >> add_input_types(
Map(lambda v: (None, v)).with_output_types(
KV[None, pcoll.element_type]))
| 'CombinePerKey' >> CombinePerKey(
self.fn, *self.args, **self.kwargs)
| 'UnKey' >> Map(lambda (k, v): v))
if not self.has_defaults and not self.as_view:
return combined
if self.has_defaults:
combine_fn = (
self.fn if isinstance(self.fn, CombineFn)
else CombineFn.from_callable(self.fn))
default_value = combine_fn.apply([], *self.args, **self.kwargs)
else:
default_value = pvalue.AsSingleton._NO_DEFAULT # pylint: disable=protected-access
view = pvalue.AsSingleton(combined, default_value=default_value)
if self.as_view:
return view
else:
if pcoll.windowing.windowfn != GlobalWindows():
raise ValueError(
"Default values are not yet supported in CombineGlobally() if the "
"output PCollection is not windowed by GlobalWindows. "
"Instead, use CombineGlobally().without_defaults() to output "
"an empty PCollection if the input PCollection is empty, "
"or CombineGlobally().as_singleton_view() to get the default "
"output of the CombineFn if the input PCollection is empty.")
def typed(transform):
# TODO(robertwb): We should infer this.
if combined.element_type:
return transform.with_output_types(combined.element_type)
return transform
return (pcoll.pipeline
| 'DoOnce' >> Create([None])
| 'InjectDefault' >> typed(Map(lambda _, s: s, view)))
class CombinePerKey(PTransformWithSideInputs):
"""A per-key Combine transform.
Identifies sets of values associated with the same key in the input
PCollection, then applies a CombineFn to condense those sets to single
values. See documentation in CombineFn for details on the specifics on how
CombineFns are applied.
Args:
pcoll: input pcollection.
fn: instance of CombineFn to apply to all values under the same key in
pcoll, or a callable whose signature is ``f(iterable, *args, **kwargs)``
(e.g., sum, max).
*args: arguments and side inputs, passed directly to the CombineFn.
**kwargs: arguments and side inputs, passed directly to the CombineFn.
Returns:
A PObject holding the result of the combine operation.
"""
def display_data(self):
return {'combine_fn':
DisplayDataItem(self.fn.__class__, label='Combine Function'),
'combine_fn_dd':
self.fn}
def make_fn(self, fn):
self._fn_label = ptransform.label_from_callable(fn)
return fn if isinstance(fn, CombineFn) else CombineFn.from_callable(fn)
def default_label(self):
return '%s(%s)' % (self.__class__.__name__, self._fn_label)
def _process_argspec_fn(self):
return self.fn._fn # pylint: disable=protected-access
def expand(self, pcoll):
args, kwargs = util.insert_values_in_args(
self.args, self.kwargs, self.side_inputs)
return pcoll | GroupByKey() | 'Combine' >> CombineValues(
self.fn, *args, **kwargs)
# TODO(robertwb): Rename to CombineGroupedValues?
class CombineValues(PTransformWithSideInputs):
def make_fn(self, fn):
return fn if isinstance(fn, CombineFn) else CombineFn.from_callable(fn)
def expand(self, pcoll):
args, kwargs = util.insert_values_in_args(
self.args, self.kwargs, self.side_inputs)
input_type = pcoll.element_type
key_type = None
if input_type is not None:
key_type, _ = input_type.tuple_types
runtime_type_check = (
pcoll.pipeline._options.view_as(TypeOptions).runtime_type_check)
return pcoll | ParDo(
CombineValuesDoFn(key_type, self.fn, runtime_type_check),
*args, **kwargs)
class CombineValuesDoFn(DoFn):
"""DoFn for performing per-key Combine transforms."""
def __init__(self, input_pcoll_type, combinefn, runtime_type_check):
super(CombineValuesDoFn, self).__init__()
self.combinefn = combinefn
self.runtime_type_check = runtime_type_check
def process(self, element, *args, **kwargs):
# Expected elements input to this DoFn are 2-tuples of the form
# (key, iter), with iter an iterable of all the values associated with key
# in the input PCollection.
if self.runtime_type_check:
# Apply the combiner in a single operation rather than artificially
# breaking it up so that output type violations manifest as TypeCheck
# errors rather than type errors.
return [
(element[0],
self.combinefn.apply(element[1], *args, **kwargs))]
# Add the elements into three accumulators (for testing of merge).
elements = element[1]
accumulators = []
for k in range(3):
if len(elements) <= k:
break
accumulators.append(
self.combinefn.add_inputs(
self.combinefn.create_accumulator(*args, **kwargs),
elements[k::3],
*args, **kwargs))
# Merge the accumulators.
accumulator = self.combinefn.merge_accumulators(
accumulators, *args, **kwargs)
# Convert accumulator to the final result.
return [(element[0],
self.combinefn.extract_output(accumulator, *args, **kwargs))]
def default_type_hints(self):
hints = self.combinefn.get_type_hints().copy()
if hints.input_types:
K = typehints.TypeVariable('K')
args, kwargs = hints.input_types
args = (typehints.Tuple[K, typehints.Iterable[args[0]]],) + args[1:]
hints.set_input_types(*args, **kwargs)
else:
K = typehints.Any
if hints.output_types:
main_output_type = hints.simple_output_type('')
hints.set_output_types(typehints.Tuple[K, main_output_type])
return hints
@typehints.with_input_types(typehints.KV[K, V])
@typehints.with_output_types(typehints.KV[K, typehints.Iterable[V]])
class GroupByKey(PTransform):
"""A group by key transform.
Processes an input PCollection consisting of key/value pairs represented as a
tuple pair. The result is a PCollection where values having a common key are
grouped together. For example (a, 1), (b, 2), (a, 3) will result in
(a, [1, 3]), (b, [2]).
The implementation here is used only when run on the local direct runner.
"""
class ReifyWindows(DoFn):
def process(self, element, window=DoFn.WindowParam,
timestamp=DoFn.TimestampParam):
try:
k, v = element
except TypeError:
raise TypeCheckError('Input to GroupByKey must be a PCollection with '
'elements compatible with KV[A, B]')
return [(k, WindowedValue(v, timestamp, [window]))]
def infer_output_type(self, input_type):
key_type, value_type = trivial_inference.key_value_types(input_type)
return Iterable[KV[key_type, typehints.WindowedValue[value_type]]]
def expand(self, pcoll):
# This code path is only used in the local direct runner. For Dataflow
# runner execution, the GroupByKey transform is expanded on the service.
input_type = pcoll.element_type
if input_type is not None:
# Initialize type-hints used below to enforce type-checking and to pass
# downstream to further PTransforms.
key_type, value_type = trivial_inference.key_value_types(input_type)
typecoders.registry.verify_deterministic(
typecoders.registry.get_coder(key_type),
'GroupByKey operation "%s"' % self.label)
reify_output_type = KV[key_type, typehints.WindowedValue[value_type]]
gbk_input_type = (
KV[key_type, Iterable[typehints.WindowedValue[value_type]]])
gbk_output_type = KV[key_type, Iterable[value_type]]
# pylint: disable=bad-continuation
return (pcoll
| 'ReifyWindows' >> (ParDo(self.ReifyWindows())
.with_output_types(reify_output_type))
| 'GroupByKey' >> (_GroupByKeyOnly()
.with_input_types(reify_output_type)
.with_output_types(gbk_input_type))
| ('GroupByWindow' >> _GroupAlsoByWindow(pcoll.windowing)
.with_input_types(gbk_input_type)
.with_output_types(gbk_output_type)))
else:
# The input_type is None, so run the untyped default path.
return (pcoll
| 'ReifyWindows' >> ParDo(self.ReifyWindows())
| 'GroupByKey' >> _GroupByKeyOnly()
| 'GroupByWindow' >> _GroupAlsoByWindow(pcoll.windowing))
@typehints.with_input_types(typehints.KV[K, V])
@typehints.with_output_types(typehints.KV[K, typehints.Iterable[V]])
class _GroupByKeyOnly(PTransform):
"""A group by key transform, ignoring windows."""
def infer_output_type(self, input_type):
key_type, value_type = trivial_inference.key_value_types(input_type)
return KV[key_type, Iterable[value_type]]
def expand(self, pcoll):
self._check_pcollection(pcoll)
return pvalue.PCollection(pcoll.pipeline)
@typehints.with_input_types(typehints.KV[K, typehints.Iterable[V]])
@typehints.with_output_types(typehints.KV[K, typehints.Iterable[V]])
class _GroupAlsoByWindow(ParDo):
"""The GroupAlsoByWindow transform."""
def __init__(self, windowing):
super(_GroupAlsoByWindow, self).__init__(
_GroupAlsoByWindowDoFn(windowing))
self.windowing = windowing
def expand(self, pcoll):
self._check_pcollection(pcoll)
return pvalue.PCollection(pcoll.pipeline)
class _GroupAlsoByWindowDoFn(DoFn):
# TODO(robertwb): Support combiner lifting.
def __init__(self, windowing):
super(_GroupAlsoByWindowDoFn, self).__init__()
self.windowing = windowing
def infer_output_type(self, input_type):
key_type, windowed_value_iter_type = trivial_inference.key_value_types(
input_type)
value_type = windowed_value_iter_type.inner_type.inner_type
return Iterable[KV[key_type, Iterable[value_type]]]
def start_bundle(self):
# pylint: disable=wrong-import-order, wrong-import-position
from apache_beam.transforms.trigger import InMemoryUnmergedState
from apache_beam.transforms.trigger import create_trigger_driver
# pylint: enable=wrong-import-order, wrong-import-position
self.driver = create_trigger_driver(self.windowing, True)
self.state_type = InMemoryUnmergedState
def process(self, element):
k, vs = element
state = self.state_type()
# TODO(robertwb): Conditionally process in smaller chunks.
for wvalue in self.driver.process_elements(state, vs, MIN_TIMESTAMP):
yield wvalue.with_value((k, wvalue.value))
while state.timers:
fired = state.get_and_clear_timers()
for timer_window, (name, time_domain, fire_time) in fired:
for wvalue in self.driver.process_timer(
timer_window, name, time_domain, fire_time, state):
yield wvalue.with_value((k, wvalue.value))
class Partition(PTransformWithSideInputs):
"""Split a PCollection into several partitions.
Uses the specified PartitionFn to separate an input PCollection into the
specified number of sub-PCollections.
When applied, a Partition() PTransform requires the following:
Args:
partitionfn: a PartitionFn, or a callable with the signature described in
CallableWrapperPartitionFn.
n: number of output partitions.
The result of this PTransform is a simple list of the output PCollections
representing each of n partitions, in order.
"""
class ApplyPartitionFnFn(DoFn):
"""A DoFn that applies a PartitionFn."""
def process(self, element, partitionfn, n, *args, **kwargs):
partition = partitionfn.partition_for(element, n, *args, **kwargs)
if not 0 <= partition < n:
raise ValueError(
'PartitionFn specified out-of-bounds partition index: '
'%d not in [0, %d)' % (partition, n))
# Each input is directed into the output that corresponds to the
# selected partition.
yield pvalue.TaggedOutput(str(partition), element)
def make_fn(self, fn):
return fn if isinstance(fn, PartitionFn) else CallableWrapperPartitionFn(fn)
def expand(self, pcoll):
n = int(self.args[0])
return pcoll | ParDo(
self.ApplyPartitionFnFn(), self.fn, *self.args,
**self.kwargs).with_outputs(*[str(t) for t in range(n)])
class Windowing(object):
def __init__(self, windowfn, triggerfn=None, accumulation_mode=None,
timestamp_combiner=None):
global AccumulationMode, DefaultTrigger # pylint: disable=global-variable-not-assigned
# pylint: disable=wrong-import-order, wrong-import-position
from apache_beam.transforms.trigger import AccumulationMode, DefaultTrigger
# pylint: enable=wrong-import-order, wrong-import-position
if triggerfn is None:
triggerfn = DefaultTrigger()
if accumulation_mode is None:
if triggerfn == DefaultTrigger():
accumulation_mode = AccumulationMode.DISCARDING
else:
raise ValueError(
'accumulation_mode must be provided for non-trivial triggers')
if not windowfn.get_window_coder().is_deterministic():
raise ValueError(
'window fn (%s) does not have a deterministic coder (%s)' % (
windowfn, windowfn.get_window_coder()))
self.windowfn = windowfn
self.triggerfn = triggerfn
self.accumulation_mode = accumulation_mode
self.timestamp_combiner = (
timestamp_combiner or TimestampCombiner.OUTPUT_AT_EOW)
self._is_default = (
self.windowfn == GlobalWindows() and
self.triggerfn == DefaultTrigger() and
self.accumulation_mode == AccumulationMode.DISCARDING and
self.timestamp_combiner == TimestampCombiner.OUTPUT_AT_EOW)
def __repr__(self):
return "Windowing(%s, %s, %s, %s)" % (self.windowfn, self.triggerfn,
self.accumulation_mode,
self.timestamp_combiner)
def __eq__(self, other):
if type(self) == type(other):
if self._is_default and other._is_default:
return True
return (
self.windowfn == other.windowfn
and self.triggerfn == other.triggerfn
and self.accumulation_mode == other.accumulation_mode
and self.timestamp_combiner == other.timestamp_combiner)
return False
def is_default(self):
return self._is_default
def to_runner_api(self, context):
return beam_runner_api_pb2.WindowingStrategy(
window_fn=self.windowfn.to_runner_api(context),
# TODO(robertwb): Prohibit implicit multi-level merging.
merge_status=(beam_runner_api_pb2.NEEDS_MERGE
if self.windowfn.is_merging()
else beam_runner_api_pb2.NON_MERGING),
window_coder_id=context.coders.get_id(
self.windowfn.get_window_coder()),
trigger=self.triggerfn.to_runner_api(context),
accumulation_mode=self.accumulation_mode,
output_time=self.timestamp_combiner,
# TODO(robertwb): Support EMIT_IF_NONEMPTY
closing_behavior=beam_runner_api_pb2.EMIT_ALWAYS,
allowed_lateness=0)
@staticmethod
def from_runner_api(proto, context):
# pylint: disable=wrong-import-order, wrong-import-position
from apache_beam.transforms.trigger import TriggerFn
return Windowing(
windowfn=WindowFn.from_runner_api(proto.window_fn, context),
triggerfn=TriggerFn.from_runner_api(proto.trigger, context),
accumulation_mode=proto.accumulation_mode,
timestamp_combiner=proto.output_time)
@typehints.with_input_types(T)
@typehints.with_output_types(T)
class WindowInto(ParDo):
"""A window transform assigning windows to each element of a PCollection.
Transforms an input PCollection by applying a windowing function to each
element. Each transformed element in the result will be a WindowedValue
element with the same input value and timestamp, with its new set of windows
determined by the windowing function.
"""
class WindowIntoFn(DoFn):
"""A DoFn that applies a WindowInto operation."""
def __init__(self, windowing):
self.windowing = windowing
def process(self, element, timestamp=DoFn.TimestampParam):
context = WindowFn.AssignContext(timestamp, element=element)
new_windows = self.windowing.windowfn.assign(context)
yield WindowedValue(element, context.timestamp, new_windows)
def __init__(self, windowfn, **kwargs):
"""Initializes a WindowInto transform.
Args:
windowfn: Function to be used for windowing
"""
triggerfn = kwargs.pop('trigger', None)
accumulation_mode = kwargs.pop('accumulation_mode', None)
timestamp_combiner = kwargs.pop('timestamp_combiner', None)
self.windowing = Windowing(windowfn, triggerfn, accumulation_mode,
timestamp_combiner)
super(WindowInto, self).__init__(self.WindowIntoFn(self.windowing))
def get_windowing(self, unused_inputs):
return self.windowing
def infer_output_type(self, input_type):
return input_type
def expand(self, pcoll):
input_type = pcoll.element_type
if input_type is not None:
output_type = input_type
self.with_input_types(input_type)
self.with_output_types(output_type)
return super(WindowInto, self).expand(pcoll)
def to_runner_api_parameter(self, context):
return (
urns.WINDOW_INTO_TRANSFORM,
self.windowing.to_runner_api(context))
@staticmethod
def from_runner_api_parameter(proto, context):
windowing = Windowing.from_runner_api(proto, context)
return WindowInto(
windowing.windowfn,
trigger=windowing.triggerfn,
accumulation_mode=windowing.accumulation_mode,
timestamp_combiner=windowing.timestamp_combiner)
PTransform.register_urn(
urns.WINDOW_INTO_TRANSFORM,
# TODO(robertwb): Update WindowIntoPayload to include the full strategy.
# (Right now only WindowFn is used, but we need this to reconstitute the
# WindowInto transform, and in the future will need it at runtime to
# support meta-data driven triggers.)
beam_runner_api_pb2.WindowingStrategy,
WindowInto.from_runner_api_parameter)
# Python's pickling is broken for nested classes.
WindowIntoFn = WindowInto.WindowIntoFn
class Flatten(PTransform):
"""Merges several PCollections into a single PCollection.
Copies all elements in 0 or more PCollections into a single output
PCollection. If there are no input PCollections, the resulting PCollection
will be empty (but see also kwargs below).
Args:
**kwargs: Accepts a single named argument "pipeline", which specifies the
pipeline that "owns" this PTransform. Ordinarily Flatten can obtain this
information from one of the input PCollections, but if there are none (or
if there's a chance there may be none), this argument is the only way to
provide pipeline information and should be considered mandatory.
"""
def __init__(self, **kwargs):
super(Flatten, self).__init__()
self.pipeline = kwargs.pop('pipeline', None)
if kwargs:
raise ValueError('Unexpected keyword arguments: %s' % kwargs.keys())
def _extract_input_pvalues(self, pvalueish):
try:
pvalueish = tuple(pvalueish)
except TypeError:
raise ValueError('Input to Flatten must be an iterable.')
return pvalueish, pvalueish
def expand(self, pcolls):
for pcoll in pcolls:
self._check_pcollection(pcoll)
return pvalue.PCollection(self.pipeline)
def get_windowing(self, inputs):
if not inputs:
# TODO(robertwb): Return something compatible with every windowing?
return Windowing(GlobalWindows())
return super(Flatten, self).get_windowing(inputs)
def to_runner_api_parameter(self, context):
return urns.FLATTEN_TRANSFORM, None
@staticmethod
def from_runner_api_parameter(unused_parameter, unused_context):
return Flatten()
PTransform.register_urn(
urns.FLATTEN_TRANSFORM, None, Flatten.from_runner_api_parameter)
class Create(PTransform):
"""A transform that creates a PCollection from an iterable."""
def __init__(self, value):
"""Initializes a Create transform.
Args:
value: An iterable of values for the PCollection.
"""
super(Create, self).__init__()
if isinstance(value, basestring):
raise TypeError('PTransform Create: Refusing to treat string as '
'an iterable. (string=%r)' % value)
elif isinstance(value, dict):
value = value.items()
self.value = tuple(value)
def infer_output_type(self, unused_input_type):
if not self.value:
return Any
return Union[[trivial_inference.instance_to_type(v) for v in self.value]]
def get_output_type(self):
return (self.get_type_hints().simple_output_type(self.label) or
self.infer_output_type(None))
def expand(self, pbegin):
from apache_beam.io import iobase
assert isinstance(pbegin, pvalue.PBegin)
self.pipeline = pbegin.pipeline
coder = typecoders.registry.get_coder(self.get_output_type())
source = self._create_source_from_iterable(self.value, coder)
return (pbegin.pipeline
| iobase.Read(source).with_output_types(self.get_output_type()))
def get_windowing(self, unused_inputs):
return Windowing(GlobalWindows())
@staticmethod
def _create_source_from_iterable(values, coder):
return Create._create_source(map(coder.encode, values), coder)
@staticmethod
def _create_source(serialized_values, coder):
from apache_beam.io import iobase
class _CreateSource(iobase.BoundedSource):
def __init__(self, serialized_values, coder):
self._coder = coder
self._serialized_values = serialized_values
self._total_size = sum(map(len, self._serialized_values))
def read(self, range_tracker):
start_position = range_tracker.start_position()
current_position = start_position
def split_points_unclaimed(stop_position):
if current_position >= stop_position:
return 0
return stop_position - current_position - 1
range_tracker.set_split_points_unclaimed_callback(
split_points_unclaimed)
element_iter = iter(self._serialized_values[start_position:])
for i in range(start_position, range_tracker.stop_position()):
if not range_tracker.try_claim(i):
return
current_position = i
yield self._coder.decode(next(element_iter))
def split(self, desired_bundle_size, start_position=None,
stop_position=None):
from apache_beam.io import iobase
if len(self._serialized_values) < 2:
yield iobase.SourceBundle(
weight=0, source=self, start_position=0,
stop_position=len(self._serialized_values))
else:
if start_position is None:
start_position = 0
if stop_position is None:
stop_position = len(self._serialized_values)
avg_size_per_value = self._total_size / len(self._serialized_values)
num_values_per_split = max(
int(desired_bundle_size / avg_size_per_value), 1)
start = start_position
while start < stop_position:
end = min(start + num_values_per_split, stop_position)
remaining = stop_position - end
# Avoid having a too small bundle at the end.
if remaining < (num_values_per_split / 4):
end = stop_position
sub_source = Create._create_source(
self._serialized_values[start:end], self._coder)
yield iobase.SourceBundle(weight=(end - start),
source=sub_source,
start_position=0,
stop_position=(end - start))
start = end
def get_range_tracker(self, start_position, stop_position):
if start_position is None:
start_position = 0
if stop_position is None:
stop_position = len(self._serialized_values)
from apache_beam import io
return io.OffsetRangeTracker(start_position, stop_position)
def estimate_size(self):
return self._total_size
return _CreateSource(serialized_values, coder)
|
ankit01ojha/coala-bears | refs/heads/master | bears/hypertext/HTMLHintBear.py | 5 | import json
from coalib.bearlib.abstractions.Linter import linter
from dependency_management.requirements.NpmRequirement import NpmRequirement
@linter(executable='htmlhint',
output_format='regex',
output_regex=r'(?P<filename>.+):(?P<line>\d+):(?P<column>\d+):\s*'
r'(?P<message>.+) \[(?P<severity>error|warning).+\]')
class HTMLHintBear:
"""
Checks HTML code with ``htmlhint`` for possible problems. Attempts to catch
little mistakes and enforces a code style guide on HTML files.
"""
LANGUAGES = {'HTML'}
REQUIREMENTS = {NpmRequirement('htmlhint', '0.9.13')}
AUTHORS = {'The coala developers'}
AUTHORS_EMAILS = {'[email protected]'}
LICENSE = 'AGPL-3.0'
CAN_DETECT = {'Syntax', 'Formatting', 'Duplication', 'Code Simplification'}
SEE_MORE = 'https://github.com/yaniswang/HTMLHint'
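# A minimal sketch of enabling this bear from a ``.coafile`` (the section
# name and file glob below are hypothetical):
#
#   [html]
#   bears = HTMLHintBear
#   files = **/*.html
#   enforce_lowercase_tagname = True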
@staticmethod
def generate_config(filename, file,
enforce_lowercase_tagname: bool=True,
enforce_lowercase_attribute: bool=True,
require_attribute_value_in_double_quotes: bool=False,
prohibit_empty_value_for_attribute: bool=False,
prohibit_attribute_duplication: bool=True,
require_doctype_at_beginning: bool=True,
enforce_tag_pair: bool=True,
enforce_self_close_empty_tag: bool=True,
require_escaped_special_characters: bool=False,
require_unique_attribute_id: bool=True,
require_title_tag: bool=True,
prohibit_script_in_head: bool=False,
require_alt_attribute: bool=True,
enforce_id_class_naming_convention: str=None,
prohibit_inline_style: bool=True,
require_relative_links_in_href: bool=None,
prohibit_unsafe_characters: bool=True,
prohibit_inline_script: bool=False,
prohibit_style_tag: bool=False,
htmlhint_config: str=''):
"""
:param enforce_lowercase_tagname:
Enforce the tagnames to be written in lowercase.
For example: If set to ``True``, prefer ``<span><div>`` over
``<SPAN><BR>``.
:param enforce_lowercase_attribute:
Enforce the attribute names to be written in lowercase.
For example: If set to ``True``, prefer
``<img src="test.png" alt="test">`` over
``<img SRC="test.png" ALT="test">``.
:param require_attribute_value_in_double_quotes:
Require attribute values to be enclosed in double quotes.
For example: If set to ``True``, prefer ``<a href="" title="abc">``
over ``<a href='' title=abc>``.
:param prohibit_empty_value_for_attribute:
Disallow empty values for attributes.
For example: If set to ``True``, prefer
``<input type="button" disabled="disabled">`` over
``<input type="button" disabled>``.
:param prohibit_attribute_duplication:
Disallow defining the same attribute more than once in
a tag. For example: If set to ``True``, prefer
``<img src="a.png" />`` over ``<img src="a.png" src="b.png" />``.
:param require_doctype_at_beginning:
Enforce the ``<!DOCTYPE>`` declaration at the beginning.
For example: If set to ``True``, prefer ``<!DOCTYPE HTML><html>``
over ``<!--comment--><!DOCTYPE HTML><html>``.
:param enforce_tag_pair:
Enforce the tags to be paired.
:param enforce_self_close_empty_tag:
Enforce that empty tags are self-closed.
For example: If set to ``True``, prefer ``<br />`` over ``<br>``.
:param require_escaped_special_characters:
Require the special characters to be escaped.
For example: If set to ``True``, prefer
``<span>aaa&gt;bbb&lt;ccc</span>`` over
``<span>aaa>bbb<ccc</span>``.
:param require_unique_attribute_id:
Require the ID attributes to be unique in the document.
For example: If set to ``True``, prefer
``<div id="id1"></div><div id="id2"></div>`` over
``<div id="id1"></div><div id="id1"></div>``.
:param require_title_tag:
Require the ``<title>`` to be present in the ``<head>`` tag.
:param prohibit_script_in_head:
Prohibit the use of the ``<script>`` tag in the ``<head>`` tag.
:param require_alt_attribute:
Require ``alt`` attribute when using images (``img`` tag) and links
(``href`` tag).
For example: If set to ``True``, prefer this::
<img src="test.png" alt="test">
<input type="image" alt="test">
over this::
<img src="test.png">
<input type="image">
:param enforce_id_class_naming_convention:
Possible values are ``underline``, ``dash`` and ``hump``.
Require the ``id`` and ``class`` values to be set according to
the given rules.
For example: If set to ``underline``, prefer
``<div id="aaa_bbb">``.
For example: If set to ``dash``, prefer ``<div id="aaa-bbb">``.
:param prohibit_inline_style:
Disallow the use of inline ``style`` attribute.
For example: If set to ``True``, ``<div style="color:red"></div>``
will raise a warning.
:param require_relative_links_in_href:
If ``True``, enforce relative links in the ``href`` attribute and
if ``False``, enforce absolute links.
:param prohibit_unsafe_characters:
Prohibit the use of unsafe characters in attribute values.
For example: If set to ``True``,
``<li><a href="https://vimeo.com//56931059\u0009">2012</a></li>``
will raise a warning.
:param prohibit_inline_script:
Disallow the use of inline scripts.
For example: If set to ``True``, this will raise a warning::
<img src="test.gif" onclick="alert(1);">
<img src="javascript:alert(1)">
<a href="javascript:alert(1)">test1</a>
:param prohibit_style_tag:
Prohibit the use of ``style`` tag.
For example: If set to ``True``,
``<body><style type="text/css"></style></body>``
will raise a warning.
"""
if htmlhint_config:
return None
else:
options = {
'tagname-lowercase': enforce_lowercase_tagname,
'attr-lowercase': enforce_lowercase_attribute,
'attr-value-double-quotes':
require_attribute_value_in_double_quotes,
'attr-value-not-empty': prohibit_empty_value_for_attribute,
'attr-no-duplication': prohibit_attribute_duplication,
'doctype-first': require_doctype_at_beginning,
'tag-pair': enforce_tag_pair,
'tag-self-close': enforce_self_close_empty_tag,
'spec-char-escape': require_escaped_special_characters,
'id-unique': require_unique_attribute_id,
'title-require': require_title_tag,
'head-script-disabled': prohibit_script_in_head,
'alt-require': require_alt_attribute,
'id-class-value': enforce_id_class_naming_convention,
'inline-style-disabled': prohibit_inline_style,
'attr-unsafe-chars': prohibit_unsafe_characters,
'inline-script-disabled': prohibit_inline_script,
'style-disabled': prohibit_style_tag,
'href-abs-or-rel':
'false' if require_relative_links_in_href is None
else 'rel' if require_relative_links_in_href else 'abs'
}
return json.dumps(options)
@staticmethod
def create_arguments(filename, file, config_file, htmlhint_config: str=''):
"""
:param htmlhint_config:
The path to a custom ``.htmlhintrc`` config file.
"""
return (filename, '--config',
htmlhint_config if htmlhint_config else config_file,
'--format', 'unix')
|
wlamond/scikit-learn | refs/heads/master | sklearn/cross_decomposition/tests/test_pls.py | 42 | import numpy as np
from sklearn.utils.testing import (assert_equal, assert_array_almost_equal,
assert_array_equal, assert_true,
assert_raise_message)
from sklearn.datasets import load_linnerud
from sklearn.cross_decomposition import pls_, CCA
def test_pls():
d = load_linnerud()
X = d.data
Y = d.target
# 1) Canonical (symmetric) PLS (PLS 2 blocks canonical mode A)
# ===========================================================
# Compare 2 algo.: nipals vs. svd
# ------------------------------
pls_bynipals = pls_.PLSCanonical(n_components=X.shape[1])
pls_bynipals.fit(X, Y)
pls_bysvd = pls_.PLSCanonical(algorithm="svd", n_components=X.shape[1])
pls_bysvd.fit(X, Y)
# check equalities of loading (up to the sign of the second column)
assert_array_almost_equal(
pls_bynipals.x_loadings_,
pls_bysvd.x_loadings_, decimal=5,
err_msg="nipals and svd implementations lead to different x loadings")
assert_array_almost_equal(
pls_bynipals.y_loadings_,
pls_bysvd.y_loadings_, decimal=5,
err_msg="nipals and svd implementations lead to different y loadings")
# Check PLS properties (with n_components=X.shape[1])
# ---------------------------------------------------
plsca = pls_.PLSCanonical(n_components=X.shape[1])
plsca.fit(X, Y)
T = plsca.x_scores_
P = plsca.x_loadings_
Wx = plsca.x_weights_
U = plsca.y_scores_
Q = plsca.y_loadings_
Wy = plsca.y_weights_
def check_ortho(M, err_msg):
K = np.dot(M.T, M)
assert_array_almost_equal(K, np.diag(np.diag(K)), err_msg=err_msg)
# Orthogonality of weights
# ~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(Wx, "x weights are not orthogonal")
check_ortho(Wy, "y weights are not orthogonal")
# Orthogonality of latent scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(T, "x scores are not orthogonal")
check_ortho(U, "y scores are not orthogonal")
# Check X = TP' and Y = UQ' (with (p == q) components)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# center scale X, Y
Xc, Yc, x_mean, y_mean, x_std, y_std =\
pls_._center_scale_xy(X.copy(), Y.copy(), scale=True)
assert_array_almost_equal(Xc, np.dot(T, P.T), err_msg="X != TP'")
assert_array_almost_equal(Yc, np.dot(U, Q.T), err_msg="Y != UQ'")
# Check that rotations on training data lead to scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Xr = plsca.transform(X)
assert_array_almost_equal(Xr, plsca.x_scores_,
err_msg="rotation on X failed")
Xr, Yr = plsca.transform(X, Y)
assert_array_almost_equal(Xr, plsca.x_scores_,
err_msg="rotation on X failed")
assert_array_almost_equal(Yr, plsca.y_scores_,
err_msg="rotation on Y failed")
# "Non regression test" on canonical PLS
# --------------------------------------
# The results were checked against the R-package plspm
pls_ca = pls_.PLSCanonical(n_components=X.shape[1])
pls_ca.fit(X, Y)
x_weights = np.array(
[[-0.61330704, 0.25616119, -0.74715187],
[-0.74697144, 0.11930791, 0.65406368],
[-0.25668686, -0.95924297, -0.11817271]])
# x_weights_sign_flip holds columns of 1 or -1, depending on sign flip
# between R and python
x_weights_sign_flip = pls_ca.x_weights_ / x_weights
x_rotations = np.array(
[[-0.61330704, 0.41591889, -0.62297525],
[-0.74697144, 0.31388326, 0.77368233],
[-0.25668686, -0.89237972, -0.24121788]])
x_rotations_sign_flip = pls_ca.x_rotations_ / x_rotations
y_weights = np.array(
[[+0.58989127, 0.7890047, 0.1717553],
[+0.77134053, -0.61351791, 0.16920272],
[-0.23887670, -0.03267062, 0.97050016]])
y_weights_sign_flip = pls_ca.y_weights_ / y_weights
y_rotations = np.array(
[[+0.58989127, 0.7168115, 0.30665872],
[+0.77134053, -0.70791757, 0.19786539],
[-0.23887670, -0.00343595, 0.94162826]])
y_rotations_sign_flip = pls_ca.y_rotations_ / y_rotations
# x_weights = X.dot(x_rotation)
# Hence R/python sign flip should be the same in x_weight and x_rotation
assert_array_almost_equal(x_rotations_sign_flip, x_weights_sign_flip)
# This test that R / python give the same result up to column
# sign indeterminacy
assert_array_almost_equal(np.abs(x_rotations_sign_flip), 1, 4)
assert_array_almost_equal(np.abs(x_weights_sign_flip), 1, 4)
assert_array_almost_equal(y_rotations_sign_flip, y_weights_sign_flip)
assert_array_almost_equal(np.abs(y_rotations_sign_flip), 1, 4)
assert_array_almost_equal(np.abs(y_weights_sign_flip), 1, 4)
# 2) Regression PLS (PLS2): "Non regression test"
# ===============================================
# The results were checked against the R-packages plspm, mixOmics and pls
pls_2 = pls_.PLSRegression(n_components=X.shape[1])
pls_2.fit(X, Y)
x_weights = np.array(
[[-0.61330704, -0.00443647, 0.78983213],
[-0.74697144, -0.32172099, -0.58183269],
[-0.25668686, 0.94682413, -0.19399983]])
x_weights_sign_flip = pls_2.x_weights_ / x_weights
x_loadings = np.array(
[[-0.61470416, -0.24574278, 0.78983213],
[-0.65625755, -0.14396183, -0.58183269],
[-0.51733059, 1.00609417, -0.19399983]])
x_loadings_sign_flip = pls_2.x_loadings_ / x_loadings
y_weights = np.array(
[[+0.32456184, 0.29892183, 0.20316322],
[+0.42439636, 0.61970543, 0.19320542],
[-0.13143144, -0.26348971, -0.17092916]])
y_weights_sign_flip = pls_2.y_weights_ / y_weights
y_loadings = np.array(
[[+0.32456184, 0.29892183, 0.20316322],
[+0.42439636, 0.61970543, 0.19320542],
[-0.13143144, -0.26348971, -0.17092916]])
y_loadings_sign_flip = pls_2.y_loadings_ / y_loadings
# x_loadings[:, i] = Xi.dot(x_weights[:, i]) \forall i
assert_array_almost_equal(x_loadings_sign_flip, x_weights_sign_flip, 4)
assert_array_almost_equal(np.abs(x_loadings_sign_flip), 1, 4)
assert_array_almost_equal(np.abs(x_weights_sign_flip), 1, 4)
assert_array_almost_equal(y_loadings_sign_flip, y_weights_sign_flip, 4)
assert_array_almost_equal(np.abs(y_loadings_sign_flip), 1, 4)
assert_array_almost_equal(np.abs(y_weights_sign_flip), 1, 4)
# 3) Another non-regression test of Canonical PLS on random dataset
# =================================================================
# The results were checked against the R-package plspm
n = 500
p_noise = 10
q_noise = 5
# 2 latents vars:
np.random.seed(11)
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X = np.concatenate(
(X, np.random.normal(size=p_noise * n).reshape(n, p_noise)), axis=1)
Y = np.concatenate(
(Y, np.random.normal(size=q_noise * n).reshape(n, q_noise)), axis=1)
np.random.seed(None)
pls_ca = pls_.PLSCanonical(n_components=3)
pls_ca.fit(X, Y)
x_weights = np.array(
[[0.65803719, 0.19197924, 0.21769083],
[0.7009113, 0.13303969, -0.15376699],
[0.13528197, -0.68636408, 0.13856546],
[0.16854574, -0.66788088, -0.12485304],
[-0.03232333, -0.04189855, 0.40690153],
[0.1148816, -0.09643158, 0.1613305],
[0.04792138, -0.02384992, 0.17175319],
[-0.06781, -0.01666137, -0.18556747],
[-0.00266945, -0.00160224, 0.11893098],
[-0.00849528, -0.07706095, 0.1570547],
[-0.00949471, -0.02964127, 0.34657036],
[-0.03572177, 0.0945091, 0.3414855],
[0.05584937, -0.02028961, -0.57682568],
[0.05744254, -0.01482333, -0.17431274]])
x_weights_sign_flip = pls_ca.x_weights_ / x_weights
x_loadings = np.array(
[[0.65649254, 0.1847647, 0.15270699],
[0.67554234, 0.15237508, -0.09182247],
[0.19219925, -0.67750975, 0.08673128],
[0.2133631, -0.67034809, -0.08835483],
[-0.03178912, -0.06668336, 0.43395268],
[0.15684588, -0.13350241, 0.20578984],
[0.03337736, -0.03807306, 0.09871553],
[-0.06199844, 0.01559854, -0.1881785],
[0.00406146, -0.00587025, 0.16413253],
[-0.00374239, -0.05848466, 0.19140336],
[0.00139214, -0.01033161, 0.32239136],
[-0.05292828, 0.0953533, 0.31916881],
[0.04031924, -0.01961045, -0.65174036],
[0.06172484, -0.06597366, -0.1244497]])
x_loadings_sign_flip = pls_ca.x_loadings_ / x_loadings
y_weights = np.array(
[[0.66101097, 0.18672553, 0.22826092],
[0.69347861, 0.18463471, -0.23995597],
[0.14462724, -0.66504085, 0.17082434],
[0.22247955, -0.6932605, -0.09832993],
[0.07035859, 0.00714283, 0.67810124],
[0.07765351, -0.0105204, -0.44108074],
[-0.00917056, 0.04322147, 0.10062478],
[-0.01909512, 0.06182718, 0.28830475],
[0.01756709, 0.04797666, 0.32225745]])
y_weights_sign_flip = pls_ca.y_weights_ / y_weights
y_loadings = np.array(
[[0.68568625, 0.1674376, 0.0969508],
[0.68782064, 0.20375837, -0.1164448],
[0.11712173, -0.68046903, 0.12001505],
[0.17860457, -0.6798319, -0.05089681],
[0.06265739, -0.0277703, 0.74729584],
[0.0914178, 0.00403751, -0.5135078],
[-0.02196918, -0.01377169, 0.09564505],
[-0.03288952, 0.09039729, 0.31858973],
[0.04287624, 0.05254676, 0.27836841]])
y_loadings_sign_flip = pls_ca.y_loadings_ / y_loadings
assert_array_almost_equal(x_loadings_sign_flip, x_weights_sign_flip, 4)
assert_array_almost_equal(np.abs(x_weights_sign_flip), 1, 4)
assert_array_almost_equal(np.abs(x_loadings_sign_flip), 1, 4)
assert_array_almost_equal(y_loadings_sign_flip, y_weights_sign_flip, 4)
assert_array_almost_equal(np.abs(y_weights_sign_flip), 1, 4)
assert_array_almost_equal(np.abs(y_loadings_sign_flip), 1, 4)
# Orthogonality of weights
# ~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(pls_ca.x_weights_, "x weights are not orthogonal")
check_ortho(pls_ca.y_weights_, "y weights are not orthogonal")
# Orthogonality of latent scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(pls_ca.x_scores_, "x scores are not orthogonal")
check_ortho(pls_ca.y_scores_, "y scores are not orthogonal")
def test_PLSSVD():
# Let's check that PLSSVD doesn't return all possible components, but just
# the specified number
d = load_linnerud()
X = d.data
Y = d.target
n_components = 2
for clf in [pls_.PLSSVD, pls_.PLSRegression, pls_.PLSCanonical]:
pls = clf(n_components=n_components)
pls.fit(X, Y)
assert_equal(n_components, pls.y_scores_.shape[1])
def test_univariate_pls_regression():
# Ensure 1d Y is correctly interpreted
d = load_linnerud()
X = d.data
Y = d.target
clf = pls_.PLSRegression()
# Compare 1d to column vector
model1 = clf.fit(X, Y[:, 0]).coef_
model2 = clf.fit(X, Y[:, :1]).coef_
assert_array_almost_equal(model1, model2)
def test_predict_transform_copy():
# check that the "copy" keyword works
d = load_linnerud()
X = d.data
Y = d.target
clf = pls_.PLSCanonical()
X_copy = X.copy()
Y_copy = Y.copy()
clf.fit(X, Y)
# check that results are identical with copy
assert_array_almost_equal(clf.predict(X), clf.predict(X.copy(), copy=False))
assert_array_almost_equal(clf.transform(X), clf.transform(X.copy(), copy=False))
# check also if passing Y
assert_array_almost_equal(clf.transform(X, Y),
clf.transform(X.copy(), Y.copy(), copy=False))
# check that copy doesn't destroy
# we do want to check exact equality here
assert_array_equal(X_copy, X)
assert_array_equal(Y_copy, Y)
# also check that mean wasn't zero before (to make sure we didn't touch it)
assert_true(np.all(X.mean(axis=0) != 0))
def test_scale_and_stability():
# We test the scale=True parameter
# This also allows checking numerical stability across platforms
d = load_linnerud()
X1 = d.data
Y1 = d.target
# causes X[:, -1].std() to be zero
X1[:, -1] = 1.0
# From bug #2821
# Test with X2, T2 s.t. clf.x_score[:, 1] == 0, clf.y_score[:, 1] == 0
# This test robustness of algorithm when dealing with value close to 0
X2 = np.array([[0., 0., 1.],
[1., 0., 0.],
[2., 2., 2.],
[3., 5., 4.]])
Y2 = np.array([[0.1, -0.2],
[0.9, 1.1],
[6.2, 5.9],
[11.9, 12.3]])
for (X, Y) in [(X1, Y1), (X2, Y2)]:
X_std = X.std(axis=0, ddof=1)
X_std[X_std == 0] = 1
Y_std = Y.std(axis=0, ddof=1)
Y_std[Y_std == 0] = 1
X_s = (X - X.mean(axis=0)) / X_std
Y_s = (Y - Y.mean(axis=0)) / Y_std
for clf in [CCA(), pls_.PLSCanonical(), pls_.PLSRegression(),
pls_.PLSSVD()]:
clf.set_params(scale=True)
X_score, Y_score = clf.fit_transform(X, Y)
clf.set_params(scale=False)
X_s_score, Y_s_score = clf.fit_transform(X_s, Y_s)
assert_array_almost_equal(X_s_score, X_score)
assert_array_almost_equal(Y_s_score, Y_score)
# Scaling should be idempotent
clf.set_params(scale=True)
X_score, Y_score = clf.fit_transform(X_s, Y_s)
assert_array_almost_equal(X_s_score, X_score)
assert_array_almost_equal(Y_s_score, Y_score)
def test_pls_errors():
d = load_linnerud()
X = d.data
Y = d.target
for clf in [pls_.PLSCanonical(), pls_.PLSRegression(),
pls_.PLSSVD()]:
clf.n_components = 4
assert_raise_message(ValueError, "Invalid number of components", clf.fit, X, Y)
|
glennyonemitsu/MarkupHiveSDK | refs/heads/master | requests/packages/charade/sjisprober.py | 167 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import SJISDistributionAnalysis
from .jpcntx import SJISContextAnalysis
from .mbcssm import SJISSMModel
from . import constants
class SJISProber(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(SJISSMModel)
self._mDistributionAnalyzer = SJISDistributionAnalysis()
self._mContextAnalyzer = SJISContextAnalysis()
self.reset()
def reset(self):
MultiByteCharSetProber.reset(self)
self._mContextAnalyzer.reset()
def get_charset_name(self):
return "SHIFT_JIS"
def feed(self, aBuf):
aLen = len(aBuf)
for i in range(0, aLen):
codingState = self._mCodingSM.next_state(aBuf[i])
if codingState == constants.eError:
if constants._debug:
sys.stderr.write(self.get_charset_name()
+ ' prober hit error at byte ' + str(i)
+ '\n')
self._mState = constants.eNotMe
break
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == constants.eStart:
charLen = self._mCodingSM.get_current_charlen()
if i == 0:
self._mLastChar[1] = aBuf[0]
self._mContextAnalyzer.feed(self._mLastChar[2 - charLen:],
charLen)
self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
else:
self._mContextAnalyzer.feed(aBuf[i + 1 - charLen:i + 3
- charLen], charLen)
self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
charLen)
self._mLastChar[0] = aBuf[aLen - 1]
if self.get_state() == constants.eDetecting:
if (self._mContextAnalyzer.got_enough_data() and
(self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
contxtCf = self._mContextAnalyzer.get_confidence()
distribCf = self._mDistributionAnalyzer.get_confidence()
return max(contxtCf, distribCf)
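# Illustrative usage sketch (not part of the original module; the byte string
# is a hypothetical Shift-JIS sample):
#
#   prober = SJISProber()
#   prober.feed(b'\x82\xa0\x82\xa2')
#   if prober.get_confidence() > constants.SHORTCUT_THRESHOLD:
#       print(prober.get_charset_name())  # -> "SHIFT_JIS"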
|
rainslytherin/ansible | refs/heads/master | lib/ansible/inventory/group.py | 1 | # -*- coding: utf-8 -*-
# (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class Group(object):
''' a group of ansible hosts '''
''' base class for an inventory Group '''
# use __slots__ to restrict which attributes instances may carry
__slots__ = [ 'name', 'hosts', 'vars', 'child_groups', 'parent_groups', 'depth', '_hosts_cache' ]
def __init__(self, name=None):
'''
Base Group class, with the following attributes:
depth: group depth, probed and updated whenever a child group is added
name: the group name
hosts: the list of host objects under this group
vars: the group variables
child_groups: child groups; groups may be nested and child groups may have
children of their own. When a child group is added, the child is recorded
on the parent and the parent is recorded on the child.
parent_groups: parent groups, i.e. which groups this group is a child of;
kept in sync with child_groups whenever a child group is added.
_hosts_cache: used to cache the host data
'''
self.depth = 0
self.name = name
self.hosts = []
self.vars = {}
self.child_groups = []
self.parent_groups = []
self._hosts_cache = None
#self.clear_hosts_cache()
if self.name is None:
raise Exception("group name is required")
def add_child_group(self, group):
'''
Add a child group to this group, probing and updating the child's depth,
and register this group as a parent on the child.
'''
if self == group:
raise Exception("can't add group to itself")
# don't add if it's already there
if not group in self.child_groups:
self.child_groups.append(group)
# update the depth of the child
group.depth = max([self.depth+1, group.depth])
# update the depth of the grandchildren
group._check_children_depth()
# now add self to child's parent_groups list, but only if there
# isn't already a group with the same name
# after adding the child group here, register this group as the child's parent.
if not self.name in [g.name for g in group.parent_groups]:
group.parent_groups.append(self)
self.clear_hosts_cache() # clear the cache
def _check_children_depth(self):
for group in self.child_groups:
group.depth = max([self.depth+1, group.depth])
group._check_children_depth()
def add_host(self, host):
''' Add a new host object to this group, and record on the host object which group it belongs to; a host may belong to several groups via child groups. '''
self.hosts.append(host)
host.add_group(self)
self.clear_hosts_cache()
def set_variable(self, key, value):
# set a group variable
self.vars[key] = value
def clear_hosts_cache(self):
# clear the host cache, and the parent groups' caches as well, so that a host change in a child group is also reflected in its parents.
self._hosts_cache = None
for g in self.parent_groups:
g.clear_hosts_cache()
def get_hosts(self):
# get the host objects, caching the result
if self._hosts_cache is None:
self._hosts_cache = self._get_hosts()
return self._hosts_cache
def _get_hosts(self):
# return the deduplicated list of all host objects in this group
hosts = [] # hosts is the final list of host objects to return
seen = {} # seen tracks hosts that have already been processed
for kid in self.child_groups:
kid_hosts = kid.get_hosts()
for kk in kid_hosts:
if kk not in seen:
seen[kk] = 1
hosts.append(kk)
for mine in self.hosts:
if mine not in seen:
seen[mine] = 1
hosts.append(mine)
return hosts
def get_variables(self):
# return a copy of the variables
return self.vars.copy()
def _get_ancestors(self):
# recursively collect all ancestor group objects
results = {}
for g in self.parent_groups:
results[g.name] = g
results.update(g._get_ancestors())
return results
def get_ancestors(self):
# get all ancestor group objects
return self._get_ancestors().values()
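# Illustrative usage sketch (the `host` object below is hypothetical; not
# part of the original module):
#
#   web = Group(name='web')
#   east = Group(name='web-east')
#   web.add_child_group(east)   # east.depth becomes 1
#   east.add_host(host)         # host now also appears in web.get_hosts()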
|
igemsoftware/SYSU-Software2013 | refs/heads/master | project/Python27/Lib/site-packages/pythonwin/pywin/framework/editor/color/coloreditor.py | 17 | # Color Editor originally by Neil Hodgson, but restructured by mh to integrate
# even tighter into Pythonwin.
import win32ui
import win32con
import win32api
import sys
import pywin.scintilla.keycodes
from pywin.scintilla import bindings
from pywin.framework.editor import GetEditorOption, SetEditorOption, GetEditorFontOption, SetEditorFontOption, defaultCharacterFormat
#from pywin.framework.editor import EditorPropertyPage
MSG_CHECK_EXTERNAL_FILE = win32con.WM_USER+1999 ## WARNING: Duplicated in document.py and editor.py
# Define a few common markers
MARKER_BOOKMARK = 0
MARKER_BREAKPOINT = 1
MARKER_CURRENT = 2
from pywin.debugger import dbgcon
from pywin.scintilla.document import CScintillaDocument
from pywin.framework.editor.document import EditorDocumentBase
from pywin.scintilla import scintillacon # For the marker definitions
import pywin.scintilla.view
class SyntEditDocument(EditorDocumentBase):
"A SyntEdit document. "
def OnDebuggerStateChange(self, state):
self._ApplyOptionalToViews("OnDebuggerStateChange", state)
def HookViewNotifications(self, view):
EditorDocumentBase.HookViewNotifications(self, view)
view.SCISetUndoCollection(1)
def FinalizeViewCreation(self, view):
EditorDocumentBase.FinalizeViewCreation(self, view)
if view==self.GetFirstView():
self.GetDocTemplate().CheckIDLEMenus(view.idle)
SyntEditViewParent=pywin.scintilla.view.CScintillaView
class SyntEditView(SyntEditViewParent):
"A view of a SyntEdit. Obtains data from document."
def __init__(self, doc):
SyntEditViewParent.__init__(self, doc)
self.bCheckingFile = 0
def OnInitialUpdate(self):
SyntEditViewParent.OnInitialUpdate(self)
self.HookMessage(self.OnRClick,win32con.WM_RBUTTONDOWN)
for id in [win32ui.ID_VIEW_FOLD_COLLAPSE, win32ui.ID_VIEW_FOLD_COLLAPSE_ALL,
win32ui.ID_VIEW_FOLD_EXPAND, win32ui.ID_VIEW_FOLD_EXPAND_ALL]:
self.HookCommand(self.OnCmdViewFold, id)
self.HookCommandUpdate(self.OnUpdateViewFold, id)
self.HookCommand(self.OnCmdViewFoldTopLevel, win32ui.ID_VIEW_FOLD_TOPLEVEL)
# Define the markers
# self.SCIMarkerDeleteAll()
self.SCIMarkerDefineAll(MARKER_BOOKMARK, scintillacon.SC_MARK_ROUNDRECT, win32api.RGB(0x0, 0x0, 0x0), win32api.RGB(0, 0xff, 0xff))
self.SCIMarkerDefine(MARKER_CURRENT, scintillacon.SC_MARK_ARROW)
self.SCIMarkerSetBack(MARKER_CURRENT, win32api.RGB(0xff, 0xff, 0x00))
# Define the folding markers
if 1: #traditional markers
self.SCIMarkerDefineAll(scintillacon.SC_MARKNUM_FOLDEROPEN, scintillacon.SC_MARK_MINUS, win32api.RGB(0xff, 0xff, 0xff), win32api.RGB(0, 0, 0))
self.SCIMarkerDefineAll(scintillacon.SC_MARKNUM_FOLDER, scintillacon.SC_MARK_PLUS, win32api.RGB(0xff, 0xff, 0xff), win32api.RGB(0, 0, 0))
self.SCIMarkerDefineAll(scintillacon.SC_MARKNUM_FOLDERSUB, scintillacon.SC_MARK_EMPTY, win32api.RGB(0xff, 0xff, 0xff), win32api.RGB(0, 0, 0))
self.SCIMarkerDefineAll(scintillacon.SC_MARKNUM_FOLDERTAIL, scintillacon.SC_MARK_EMPTY, win32api.RGB(0xff, 0xff, 0xff), win32api.RGB(0, 0, 0))
self.SCIMarkerDefineAll(scintillacon.SC_MARKNUM_FOLDEREND, scintillacon.SC_MARK_EMPTY, win32api.RGB(0xff, 0xff, 0xff), win32api.RGB(0, 0, 0))
self.SCIMarkerDefineAll(scintillacon.SC_MARKNUM_FOLDEROPENMID, scintillacon.SC_MARK_EMPTY, win32api.RGB(0xff, 0xff, 0xff), win32api.RGB(0, 0, 0))
self.SCIMarkerDefineAll(scintillacon.SC_MARKNUM_FOLDERMIDTAIL, scintillacon.SC_MARK_EMPTY, win32api.RGB(0xff, 0xff, 0xff), win32api.RGB(0, 0, 0))
else: # curved markers
self.SCIMarkerDefineAll(scintillacon.SC_MARKNUM_FOLDEROPEN, scintillacon.SC_MARK_CIRCLEMINUS, win32api.RGB(0xff, 0xff, 0xff), win32api.RGB(0, 0, 0))
self.SCIMarkerDefineAll(scintillacon.SC_MARKNUM_FOLDER, scintillacon.SC_MARK_CIRCLEPLUS, win32api.RGB(0xff, 0xff, 0xff), win32api.RGB(0, 0, 0))
self.SCIMarkerDefineAll(scintillacon.SC_MARKNUM_FOLDERSUB, scintillacon.SC_MARK_VLINE, win32api.RGB(0xff, 0xff, 0xff), win32api.RGB(0, 0, 0))
self.SCIMarkerDefineAll(scintillacon.SC_MARKNUM_FOLDERTAIL, scintillacon.SC_MARK_LCORNERCURVE, win32api.RGB(0xff, 0xff, 0xff), win32api.RGB(0, 0, 0))
self.SCIMarkerDefineAll(scintillacon.SC_MARKNUM_FOLDEREND, scintillacon.SC_MARK_CIRCLEPLUSCONNECTED, win32api.RGB(0xff, 0xff, 0xff), win32api.RGB(0, 0, 0))
self.SCIMarkerDefineAll(scintillacon.SC_MARKNUM_FOLDEROPENMID, scintillacon.SC_MARK_CIRCLEMINUSCONNECTED, win32api.RGB(0xff, 0xff, 0xff), win32api.RGB(0, 0, 0))
self.SCIMarkerDefineAll(scintillacon.SC_MARKNUM_FOLDERMIDTAIL, scintillacon.SC_MARK_TCORNERCURVE, win32api.RGB(0xff, 0xff, 0xff), win32api.RGB(0, 0, 0))
self.SCIMarkerDefine(MARKER_BREAKPOINT, scintillacon.SC_MARK_CIRCLE)
# Marker background depends on debugger state
self.SCIMarkerSetFore(MARKER_BREAKPOINT, win32api.RGB(0x0, 0, 0))
# Get the current debugger state.
try:
import pywin.debugger
if pywin.debugger.currentDebugger is None:
state = dbgcon.DBGSTATE_NOT_DEBUGGING
else:
state = pywin.debugger.currentDebugger.debuggerState
except ImportError:
state = dbgcon.DBGSTATE_NOT_DEBUGGING
self.OnDebuggerStateChange(state)
def _GetSubConfigNames(self):
return ["editor"] # Allow [Keys:Editor] sections to be specific to us
def DoConfigChange(self):
SyntEditViewParent.DoConfigChange(self)
tabSize = GetEditorOption("Tab Size", 4, 2)
indentSize = GetEditorOption("Indent Size", 4, 2)
bUseTabs = GetEditorOption("Use Tabs", 0)
bSmartTabs = GetEditorOption("Smart Tabs", 1)
ext = self.idle.IDLEExtension("AutoIndent") # Required extension.
self.SCISetViewWS( GetEditorOption("View Whitespace", 0) )
self.SCISetViewEOL( GetEditorOption("View EOL", 0) )
self.SCISetIndentationGuides( GetEditorOption("View Indentation Guides", 0) )
if GetEditorOption("Right Edge Enabled", 0):
mode = scintillacon.EDGE_BACKGROUND
else:
mode = scintillacon.EDGE_NONE
self.SCISetEdgeMode(mode)
self.SCISetEdgeColumn( GetEditorOption("Right Edge Column", 75) )
self.SCISetEdgeColor( GetEditorOption("Right Edge Color", win32api.RGB(0xef, 0xef, 0xef)))
width = GetEditorOption("Marker Margin Width", 16)
self.SCISetMarginWidthN(1, width)
width = GetEditorOption("Fold Margin Width", 12)
self.SCISetMarginWidthN(2, width)
width = GetEditorOption("Line Number Margin Width", 0)
self.SCISetMarginWidthN(0, width)
self.bFolding = GetEditorOption("Enable Folding", 1)
fold_flags = 0
self.SendScintilla(scintillacon.SCI_SETMODEVENTMASK, scintillacon.SC_MOD_CHANGEFOLD);
if self.bFolding:
if GetEditorOption("Fold Lines", 1):
fold_flags = 16
self.SCISetProperty("fold", self.bFolding)
self.SCISetFoldFlags(fold_flags)
tt_color = GetEditorOption("Tab Timmy Color", win32api.RGB(0xff, 0, 0))
self.SendScintilla(scintillacon.SCI_INDICSETFORE, 1, tt_color)
tt_use = GetEditorOption("Use Tab Timmy", 1)
if tt_use:
self.SCISetProperty("tab.timmy.whinge.level", "1")
# Auto-indent has very complicated behaviour. In a nutshell, the only
# way to get sensible behaviour from it is to ensure tabwidth != indentsize.
# Further, usetabs will only ever go from 1->0, never 0->1.
# This is _not_ the behaviour Pythonwin wants:
# * Tab width is arbitrary, so should have no impact on smarts.
# * bUseTabs setting should reflect how new files are created, and
# if Smart Tabs disabled, existing files are edited
# * If "Smart Tabs" is enabled, bUseTabs should have no bearing
# for existing files (unless of course no context can be determined)
#
# So for smart tabs we configure the widget with completely dummy
# values (ensuring tabwidth != indentwidth), ask it to guess, then
# look at the values it has guessed, and re-configure
if bSmartTabs:
ext.config(usetabs=1, tabwidth=5, indentwidth=4)
ext.set_indentation_params(1)
if ext.indentwidth==5:
# Either 5 literal spaces, or a single tab character. Assume a tab
usetabs = 1
indentwidth = tabSize
else:
# Either Indented with spaces, and indent size has been guessed or
# an empty file (or no context found - tough!)
if self.GetTextLength()==0: # empty
usetabs = bUseTabs
indentwidth = indentSize
else: # guessed.
indentwidth = ext.indentwidth
usetabs = 0
# Tab size can never be guessed - set at user preference.
ext.config(usetabs=usetabs, indentwidth=indentwidth, tabwidth=tabSize)
else:
# Don't want smart tabs - just set the options!
ext.config(usetabs=bUseTabs, tabwidth=tabSize, indentwidth=indentSize)
self.SCISetIndent(indentSize)
self.SCISetTabWidth(tabSize)
def OnDebuggerStateChange(self, state):
if state == dbgcon.DBGSTATE_NOT_DEBUGGING:
# Indicate breakpoints arent really usable.
# Not quite white - useful when no marker margin, so set as background color.
self.SCIMarkerSetBack(MARKER_BREAKPOINT, win32api.RGB(0xef, 0xef, 0xef))
else:
# A light-red, so still readable when no marker margin.
self.SCIMarkerSetBack(MARKER_BREAKPOINT, win32api.RGB(0xff, 0x80, 0x80))
def HookDocumentHandlers(self):
SyntEditViewParent.HookDocumentHandlers(self)
self.HookMessage(self.OnCheckExternalDocumentUpdated,MSG_CHECK_EXTERNAL_FILE)
def HookHandlers(self):
SyntEditViewParent.HookHandlers(self)
self.HookMessage(self.OnSetFocus, win32con.WM_SETFOCUS)
def _PrepareUserStateChange(self):
return self.GetSel(), self.GetFirstVisibleLine()
def _EndUserStateChange(self, info):
scrollOff = info[1] - self.GetFirstVisibleLine()
if scrollOff:
self.LineScroll(scrollOff)
# Make sure we don't reset the cursor beyond the buffer.
max = self.GetTextLength()
newPos = min(info[0][0], max), min(info[0][1], max)
self.SetSel(newPos)
#######################################
# The Windows Message or Notify handlers.
#######################################
def OnMarginClick(self, std, extra):
notify = self.SCIUnpackNotifyMessage(extra)
if notify.margin==2: # Our fold margin
line_click = self.LineFromChar(notify.position)
# max_line = self.GetLineCount()
if self.SCIGetFoldLevel(line_click) & scintillacon.SC_FOLDLEVELHEADERFLAG:
# If a fold point.
self.SCIToggleFold(line_click)
return 1
def OnSetFocus(self,msg):
# Even though we use file change notifications, we should be very sure about it here.
self.OnCheckExternalDocumentUpdated(msg)
return 1
def OnCheckExternalDocumentUpdated(self, msg):
if self.bCheckingFile: return
self.bCheckingFile = 1
self.GetDocument().CheckExternalDocumentUpdated()
self.bCheckingFile = 0
def OnRClick(self,params):
menu = win32ui.CreatePopupMenu()
self.AppendMenu(menu, "&Locate module", "LocateModule")
self.AppendMenu(menu, flags=win32con.MF_SEPARATOR)
self.AppendMenu(menu, "&Undo", "EditUndo")
self.AppendMenu(menu, '&Redo', 'EditRedo')
self.AppendMenu(menu, flags=win32con.MF_SEPARATOR)
self.AppendMenu(menu, 'Cu&t', 'EditCut')
self.AppendMenu(menu, '&Copy', 'EditCopy')
self.AppendMenu(menu, '&Paste', 'EditPaste')
self.AppendMenu(menu, flags=win32con.MF_SEPARATOR)
self.AppendMenu(menu, '&Select all', 'EditSelectAll')
self.AppendMenu(menu, 'View &Whitespace', 'ViewWhitespace', checked=self.SCIGetViewWS())
self.AppendMenu(menu, "&Fixed Font", "ViewFixedFont", checked = self._GetColorizer().bUseFixed)
self.AppendMenu(menu, flags=win32con.MF_SEPARATOR)
self.AppendMenu(menu, "&Goto line...", "GotoLine")
submenu = win32ui.CreatePopupMenu()
newitems = self.idle.GetMenuItems("edit")
for text, event in newitems:
self.AppendMenu(submenu, text, event)
flags=win32con.MF_STRING|win32con.MF_ENABLED|win32con.MF_POPUP
menu.AppendMenu(flags, submenu.GetHandle(), "&Source code")
flags = win32con.TPM_LEFTALIGN|win32con.TPM_LEFTBUTTON|win32con.TPM_RIGHTBUTTON
menu.TrackPopupMenu(params[5], flags, self)
return 0
def OnCmdViewFold(self, cid, code): # Handle the menu command
if cid == win32ui.ID_VIEW_FOLD_EXPAND_ALL:
self.FoldExpandAllEvent(None)
elif cid == win32ui.ID_VIEW_FOLD_EXPAND:
self.FoldExpandEvent(None)
elif cid == win32ui.ID_VIEW_FOLD_COLLAPSE_ALL:
self.FoldCollapseAllEvent(None)
elif cid == win32ui.ID_VIEW_FOLD_COLLAPSE:
self.FoldCollapseEvent(None)
else:
print "Unknown collapse/expand ID"
def OnUpdateViewFold(self, cmdui): # Update the tick on the UI.
if not self.bFolding:
cmdui.Enable(0)
return
id = cmdui.m_nID
if id in [win32ui.ID_VIEW_FOLD_EXPAND_ALL, win32ui.ID_VIEW_FOLD_COLLAPSE_ALL]:
cmdui.Enable()
else:
enable = 0
lineno = self.LineFromChar(self.GetSel()[0])
foldable = self.SCIGetFoldLevel(lineno) & scintillacon.SC_FOLDLEVELHEADERFLAG
is_expanded = self.SCIGetFoldExpanded(lineno)
if id == win32ui.ID_VIEW_FOLD_EXPAND:
if foldable and not is_expanded:
enable = 1
elif id == win32ui.ID_VIEW_FOLD_COLLAPSE:
if foldable and is_expanded:
enable = 1
cmdui.Enable(enable)
def OnCmdViewFoldTopLevel(self, cid, code): # Handle the menu command
self.FoldTopLevelEvent(None)
#######################################
# The Events
#######################################
def ToggleBookmarkEvent(self, event, pos = -1):
"""Toggle a bookmark at the specified or current position
"""
if pos==-1:
pos, end = self.GetSel()
startLine = self.LineFromChar(pos)
self.GetDocument().MarkerToggle(startLine+1, MARKER_BOOKMARK)
return 0
	def GotoNextBookmarkEvent(self, event, fromPos=-1):
		""" Move to the next bookmark
		"""
		if fromPos==-1:
			fromPos, end = self.GetSel()
		# The document's marker API is 1-based; start searching on the line
		# after the cursor, then convert back to a 0-based line afterwards.
		startLine = self.LineFromChar(fromPos)+1
		nextLine = self.GetDocument().MarkerGetNext(startLine+1, MARKER_BOOKMARK)-1
		if nextLine<0:
			# Nothing below the cursor - wrap the search to the top of the file.
			nextLine = self.GetDocument().MarkerGetNext(0, MARKER_BOOKMARK)-1
		if nextLine<0 or nextLine == startLine-1:
			win32api.MessageBeep()
		else:
			self.SCIEnsureVisible(nextLine)
			self.SCIGotoLine(nextLine)
		return 0
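	# The tab and enter handlers below defer to IDLE's virtual events
	# ("<<smart-indent>>", "<<newline-and-indent>>") once any active
	# auto-complete popup has been dealt with.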
def TabKeyEvent(self, event):
"""Insert an indent. If no selection, a single indent, otherwise a block indent
"""
# Handle auto-complete first.
if self.SCIAutoCActive():
self.SCIAutoCComplete()
return 0
# Call the IDLE event.
return self.bindings.fire("<<smart-indent>>", event)
	def EnterKeyEvent(self, event):
		"""Insert a newline and indent, completing any active auto-complete first
		"""
		# Accept the auto-complete selection, then dismiss the popup.
		if self.SCIAutoCActive():
			self.SCIAutoCComplete()
			self.SCIAutoCCancel()
		# Call the IDLE event.
		return self.bindings.fire("<<newline-and-indent>>", event)
def ShowInteractiveWindowEvent(self, event):
import pywin.framework.interact
pywin.framework.interact.ShowInteractiveWindow()
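	# Scintilla packs per-line fold state into one integer: the low bits
	# (SC_FOLDLEVELNUMBERMASK) hold the nesting depth biased by
	# SC_FOLDLEVELBASE, and SC_FOLDLEVELHEADERFLAG marks lines that start
	# a foldable block.  An illustrative decode (not used directly):
	#   is_header = bool(level & scintillacon.SC_FOLDLEVELHEADERFLAG)
	#   depth = (level & scintillacon.SC_FOLDLEVELNUMBERMASK) - scintillacon.SC_FOLDLEVELBASE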
def FoldTopLevelEvent(self, event = None):
if not self.bFolding:
return 1
win32ui.DoWaitCursor(1)
try:
self.Colorize()
maxLine = self.GetLineCount()
# Find the first line, and check out its state.
for lineSeek in xrange(maxLine):
if self.SCIGetFoldLevel(lineSeek) & scintillacon.SC_FOLDLEVELHEADERFLAG:
expanding = not self.SCIGetFoldExpanded(lineSeek)
break
else:
# no folds here!
return
for lineSeek in xrange(lineSeek, maxLine):
level = self.SCIGetFoldLevel(lineSeek)
level_no = level & scintillacon.SC_FOLDLEVELNUMBERMASK - scintillacon.SC_FOLDLEVELBASE
is_header = level & scintillacon.SC_FOLDLEVELHEADERFLAG
# print lineSeek, level_no, is_header
if level_no == 0 and is_header:
if (expanding and not self.SCIGetFoldExpanded(lineSeek)) or \
(not expanding and self.SCIGetFoldExpanded(lineSeek)):
self.SCIToggleFold(lineSeek)
finally:
win32ui.DoWaitCursor(-1)
	def FoldExpandSecondLevelEvent(self, event):
		if not self.bFolding:
			return 1
		win32ui.DoWaitCursor(1)
		try:
			## Colorize first, as Scintilla may not yet have computed fold
			## levels for parts of the file outside the visible window.
			self.Colorize()
			levels = [scintillacon.SC_FOLDLEVELBASE]
			## Scintilla's level number is based on the amount of whitespace
			## indentation, so track the stack of levels seen so far.
			for lineno in xrange(self.GetLineCount()):
				level = self.SCIGetFoldLevel(lineno)
				if not level & scintillacon.SC_FOLDLEVELHEADERFLAG:
					continue
				curr_level = level & scintillacon.SC_FOLDLEVELNUMBERMASK
				if curr_level > levels[-1]:
					levels.append(curr_level)
				try:
					level_ind = levels.index(curr_level)
				except ValueError:
					## Probably a syntax error in the source file - bail.
					break
				levels = levels[:level_ind+1]
				if level_ind == 1 and not self.SCIGetFoldExpanded(lineno):
					self.SCIToggleFold(lineno)
		finally:
			win32ui.DoWaitCursor(-1)
	def FoldCollapseSecondLevelEvent(self, event):
		if not self.bFolding:
			return 1
		win32ui.DoWaitCursor(1)
		try:
			## Colorize first, as Scintilla may not yet have computed fold
			## levels for parts of the file outside the visible window.
			self.Colorize()
			levels = [scintillacon.SC_FOLDLEVELBASE]
			## Scintilla's level number is based on the amount of whitespace
			## indentation, so track the stack of levels seen so far.
			for lineno in xrange(self.GetLineCount()):
				level = self.SCIGetFoldLevel(lineno)
				if not level & scintillacon.SC_FOLDLEVELHEADERFLAG:
					continue
				curr_level = level & scintillacon.SC_FOLDLEVELNUMBERMASK
				if curr_level > levels[-1]:
					levels.append(curr_level)
				try:
					level_ind = levels.index(curr_level)
				except ValueError:
					## Probably a syntax error in the source file - bail.
					break
				levels = levels[:level_ind+1]
				if level_ind == 1 and self.SCIGetFoldExpanded(lineno):
					self.SCIToggleFold(lineno)
		finally:
			win32ui.DoWaitCursor(-1)
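	# The two second-level handlers above differ only in whether they
	# toggle lines that are currently collapsed or currently expanded.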
	def FoldExpandEvent(self, event):
		if not self.bFolding:
			return 1
		win32ui.DoWaitCursor(1)
		try:
			lineno = self.LineFromChar(self.GetSel()[0])
			if self.SCIGetFoldLevel(lineno) & scintillacon.SC_FOLDLEVELHEADERFLAG and \
			   not self.SCIGetFoldExpanded(lineno):
				self.SCIToggleFold(lineno)
		finally:
			win32ui.DoWaitCursor(-1)
	def FoldExpandAllEvent(self, event):
		if not self.bFolding:
			return 1
		win32ui.DoWaitCursor(1)
		try:
			for lineno in xrange(0, self.GetLineCount()):
				if self.SCIGetFoldLevel(lineno) & scintillacon.SC_FOLDLEVELHEADERFLAG and \
				   not self.SCIGetFoldExpanded(lineno):
					self.SCIToggleFold(lineno)
		finally:
			win32ui.DoWaitCursor(-1)
	def FoldCollapseEvent(self, event):
		if not self.bFolding:
			return 1
		win32ui.DoWaitCursor(1)
		try:
			lineno = self.LineFromChar(self.GetSel()[0])
			if self.SCIGetFoldLevel(lineno) & scintillacon.SC_FOLDLEVELHEADERFLAG and \
			   self.SCIGetFoldExpanded(lineno):
				self.SCIToggleFold(lineno)
		finally:
			win32ui.DoWaitCursor(-1)
	def FoldCollapseAllEvent(self, event):
		if not self.bFolding:
			return 1
		win32ui.DoWaitCursor(1)
		try:
			self.Colorize()
			for lineno in xrange(0, self.GetLineCount()):
				if self.SCIGetFoldLevel(lineno) & scintillacon.SC_FOLDLEVELHEADERFLAG and \
				   self.SCIGetFoldExpanded(lineno):
					self.SCIToggleFold(lineno)
		finally:
			win32ui.DoWaitCursor(-1)
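# The MFC frame and document-template plumbing that hooks this view into
# the Pythonwin framework follows.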
from pywin.framework.editor.frame import EditorFrame
class SplitterFrame(EditorFrame):
	def OnCreate(self, cs):
		self.HookCommand(self.OnWindowSplit, win32ui.ID_WINDOW_SPLIT)
		return 1
	def OnWindowSplit(self, id, code):
		# Ask the splitter (the first pane) to start an interactive split.
		self.GetDlgItem(win32ui.AFX_IDW_PANE_FIRST).DoKeyboardSplit()
		return 1
from pywin.framework.editor.template import EditorTemplateBase
class SyntEditTemplate(EditorTemplateBase):
def __init__(self, res=win32ui.IDR_TEXTTYPE, makeDoc=None, makeFrame=None, makeView=None):
if makeDoc is None: makeDoc = SyntEditDocument
if makeView is None: makeView = SyntEditView
if makeFrame is None: makeFrame = SplitterFrame
self.bSetMenus = 0
EditorTemplateBase.__init__(self, res, makeDoc, makeFrame, makeView)
	def CheckIDLEMenus(self, idle):
		if self.bSetMenus: return
		self.bSetMenus = 1
		submenu = win32ui.CreatePopupMenu()
		newitems = idle.GetMenuItems("edit")
		flags = win32con.MF_STRING | win32con.MF_ENABLED
		for text, event in newitems:
			cmdid = bindings.event_to_commands.get(event)
			if cmdid is not None:
				# Show the configured keyboard accelerator, if any.
				keyname = pywin.scintilla.view.configManager.get_key_binding(event, ["editor"])
				if keyname is not None:
					text = text + "\t" + keyname
				submenu.AppendMenu(flags, cmdid, text)
		mainMenu = self.GetSharedMenu()
		editMenu = mainMenu.GetSubMenu(1)
		editMenu.AppendMenu(win32con.MF_SEPARATOR, 0, "")
		editMenu.AppendMenu(win32con.MF_STRING | win32con.MF_POPUP | win32con.MF_ENABLED, submenu.GetHandle(), "&Source Code")
def _CreateDocTemplate(self, resourceId):
return win32ui.CreateDocTemplate(resourceId)
def CreateWin32uiDocument(self):
return self.DoCreateDoc()
def GetPythonPropertyPages(self):
"""Returns a list of property pages
"""
from pywin.scintilla import configui
return EditorTemplateBase.GetPythonPropertyPages(self) + [configui.ScintillaFormatPropertyPage()]
# This module may be reloaded many times during debugging - remove any
# previously registered template first.
try:
win32ui.GetApp().RemoveDocTemplate(editorTemplate)
except NameError:
pass
editorTemplate = SyntEditTemplate()
win32ui.GetApp().AddDocTemplate(editorTemplate)
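# A variant editor could be registered the same way - e.g. (hypothetical
# resource id, shown purely as a sketch):
#   myTemplate = SyntEditTemplate(res=win32ui.IDR_MYTYPE)
#   win32ui.GetApp().AddDocTemplate(myTemplate)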