repo_name (string, 5-100 chars) | path (string, 4-299 chars) | copies (string, 990 classes) | size (string, 4-7 chars) | content (string, 666-1.03M chars) | license (string, 15 classes) | hash (int64, -9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean (float64, 3.17-100) | line_max (int64, 7-1k) | alpha_frac (float64, 0.25-0.98) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
sensidev/drf-requests-jwt | drf_requests_jwt/services.py | 1 | 4894 | """
Services.
"""
import json
import logging
import requests
from urllib.parse import urlparse, parse_qs
from drf_requests_jwt import settings
from drf_requests_jwt.backends.utils import build_url
logger = logging.getLogger(__name__)
class HttpRequestService(object):
obtain_jwt_allowed_fail_attempts = settings.DEFAULTS.get('OBTAIN_JWT_ALLOWED_FAIL_ATTEMPTS')
cache_backend_class = settings.DEFAULTS.get('CACHE_BACKEND_CLASS')
def __init__(self, params=None):
super().__init__()
self.cache_backend = self._get_cache_backend()
self.params = params or {}
self.params.update(self._get_params())
self.headers = self._get_headers()
self.url = self._get_url()
self.session = requests.Session()
self.obtain_jwt_fail_attempts = 0
def _get_cache_backend(self):
resolved_backend_class = settings.import_from_string(self.cache_backend_class)
return resolved_backend_class(self._get_jwt_cache_key())
def _get_base_url(self):
raise NotImplementedError
def _get_url_path(self):
raise NotImplementedError
def _get_url(self):
return build_url(base_url=self._get_base_url(), path=self._get_url_path())
def _get_jwt_login_url_path(self):
raise NotImplementedError
def _get_jwt_login_url(self):
return build_url(base_url=self._get_base_url(), path=self._get_jwt_login_url_path())
def _get_username(self):
raise NotImplementedError
def _get_password(self):
raise NotImplementedError
def _get_params(self):
return {}
def _get_headers(self):
return {
'Accept': 'application/json',
'Content-Type': 'application/json',
'Authorization': 'Bearer {token}'.format(token=self._get_jwt_token_from_cache())
}
def get_results_from_all_pages(self):
next_url = self.url
result_list = []
while True:
url_parse = urlparse(next_url)
self.params.update(parse_qs(url_parse.query))
next_url = '{scheme}://{netloc}{path}'.format(
scheme=url_parse.scheme, netloc=url_parse.netloc, path=url_parse.path
)
response = self.session.get(next_url, headers=self.headers, params=self.params)
logger.debug('Request url: {} with params {}'.format(next_url, self.params))
if response.status_code == 200:
response_json = response.json()
next_url = response_json.get('next')
result_list.extend(response_json.get('results', []))
elif response.status_code == 401:
if self._should_update_authorization_header():
self.update_authorization_header()
else:
break
else:
raise Exception('Wrong response status code: {code}, content: {content}'.format(
code=response.status_code,
content=response.content
))
if not bool(next_url):
break
return result_list
def write_results_from_all_pages_to_file(self, filename):
results = self.get_results_from_all_pages()
with open(filename, 'w') as output:
json.dump(results, output)
def update_authorization_header(self):
token = self._get_jwt_token()
self.headers['Authorization'] = 'Bearer {token}'.format(token=token)
def get_deserialized_data(self):
raise NotImplementedError
def _get_jwt_token(self):
payload = {
'username': self._get_username(),
'password': self._get_password()
}
url = self._get_jwt_login_url()
logger.debug('Request url: {}'.format(url))
response = self.session.post(url, data=payload)
if response.status_code == 200:
response_dict = response.json()
token = response_dict.get('access')
self._set_jwt_token_to_cache(token)
logger.debug('Received a fresh JWT token')
return token
else:
self.obtain_jwt_fail_attempts += 1
logger.warning('Attempt to get a JWT token failed')
raise Exception('Wrong response status code: {code}, content: {content}'.format(
code=response.status_code,
content=response.content
))
def _should_update_authorization_header(self):
return self.obtain_jwt_fail_attempts <= self.obtain_jwt_allowed_fail_attempts
def _set_jwt_token_to_cache(self, token):
self.cache_backend.set_jwt(token)
def _get_jwt_token_from_cache(self):
return self.cache_backend.get_jwt()
def _get_jwt_cache_key(self):
return 'jwt-{url}-{username}'.format(url=self._get_base_url(), username=self._get_username())
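# Usage sketch (not part of the original module; the endpoint paths and
# credentials below are hypothetical). HttpRequestService is abstract, so a
# concrete subclass supplies the base URL, list/login paths and credentials:
#
#   class DeviceRequestService(HttpRequestService):
#       def _get_base_url(self):
#           return 'https://api.example.com'
#       def _get_url_path(self):
#           return '/api/v1/devices/'
#       def _get_jwt_login_url_path(self):
#           return '/api/v1/token/'
#       def _get_username(self):
#           return 'demo-user'
#       def _get_password(self):
#           return 'demo-pass'
#
#   devices = DeviceRequestService().get_results_from_all_pages()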
| mit | -6,559,862,687,840,582,000 | 31.410596 | 101 | 0.596036 | false |
UrusTeam/android_ndk_toolchain_cross | lib/python2.7/lib2to3/fixes/fix_sys_exc.py | 327 | 1039 | """Fixer for sys.exc_{type, value, traceback}
sys.exc_type -> sys.exc_info()[0]
sys.exc_value -> sys.exc_info()[1]
sys.exc_traceback -> sys.exc_info()[2]
"""
# By Jeff Balogh and Benjamin Peterson
# Local imports
from .. import fixer_base
from ..fixer_util import Attr, Call, Name, Number, Subscript, Node, syms
class FixSysExc(fixer_base.BaseFix):
# This order matches the ordering of sys.exc_info().
exc_info = [u"exc_type", u"exc_value", u"exc_traceback"]
BM_compatible = True
PATTERN = """
power< 'sys' trailer< dot='.' attribute=(%s) > >
""" % '|'.join("'%s'" % e for e in exc_info)
def transform(self, node, results):
sys_attr = results["attribute"][0]
index = Number(self.exc_info.index(sys_attr.value))
call = Call(Name(u"exc_info"), prefix=sys_attr.prefix)
attr = Attr(Name(u"sys"), call)
attr[1].children[0].prefix = results["dot"].prefix
attr.append(Subscript(index))
return Node(syms.power, attr, prefix=node.prefix)
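# Illustrative before/after for this fixer (comment only):
#   before:  t, v, tb = sys.exc_type, sys.exc_value, sys.exc_traceback
#   after:   t, v, tb = sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2]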
| gpl-2.0 | 8,030,890,179,667,635,000 | 33.633333 | 72 | 0.616939 | false |
MarishaYasko/interactive-stories-stands | InteractiveStands/Lib/copyreg.py | 165 | 6833 | """Helper to provide extensibility for pickle.
This is only useful to add pickle support for extension types defined in
C, not for instances of user-defined classes.
"""
__all__ = ["pickle", "constructor",
"add_extension", "remove_extension", "clear_extension_cache"]
dispatch_table = {}
def pickle(ob_type, pickle_function, constructor_ob=None):
if not callable(pickle_function):
raise TypeError("reduction functions must be callable")
dispatch_table[ob_type] = pickle_function
# The constructor_ob function is a vestige of safe for unpickling.
# There is no reason for the caller to pass it anymore.
if constructor_ob is not None:
constructor(constructor_ob)
def constructor(object):
if not callable(object):
raise TypeError("constructors must be callable")
# Example: provide pickling support for complex numbers.
try:
complex
except NameError:
pass
else:
def pickle_complex(c):
return complex, (c.real, c.imag)
pickle(complex, pickle_complex, complex)
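# Sketch of registering a reduction function for another type, following the
# same pattern as pickle_complex above (Point is a hypothetical extension
# type; comment only):
#   def pickle_point(p):
#       return Point, (p.x, p.y)
#   pickle(Point, pickle_point)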
# Support for pickling new-style objects
def _reconstructor(cls, base, state):
if base is object:
obj = object.__new__(cls)
else:
obj = base.__new__(cls, state)
if base.__init__ != object.__init__:
base.__init__(obj, state)
return obj
_HEAPTYPE = 1<<9
# Python code for object.__reduce_ex__ for protocols 0 and 1
def _reduce_ex(self, proto):
assert proto < 2
for base in self.__class__.__mro__:
if hasattr(base, '__flags__') and not base.__flags__ & _HEAPTYPE:
break
else:
base = object # not really reachable
if base is object:
state = None
else:
if base is self.__class__:
raise TypeError("can't pickle %s objects" % base.__name__)
state = base(self)
args = (self.__class__, base, state)
try:
getstate = self.__getstate__
except AttributeError:
if getattr(self, "__slots__", None):
raise TypeError("a class that defines __slots__ without "
"defining __getstate__ cannot be pickled")
try:
dict = self.__dict__
except AttributeError:
dict = None
else:
dict = getstate()
if dict:
return _reconstructor, args, dict
else:
return _reconstructor, args
# Helper for __reduce_ex__ protocol 2
def __newobj__(cls, *args):
return cls.__new__(cls, *args)
def __newobj_ex__(cls, args, kwargs):
"""Used by pickle protocol 4, instead of __newobj__ to allow classes with
keyword-only arguments to be pickled correctly.
"""
return cls.__new__(cls, *args, **kwargs)
def _slotnames(cls):
"""Return a list of slot names for a given class.
This needs to find slots defined by the class and its bases, so we
can't simply return the __slots__ attribute. We must walk down
the Method Resolution Order and concatenate the __slots__ of each
class found there. (This assumes classes don't modify their
__slots__ attribute to misrepresent their slots after the class is
defined.)
"""
# Get the value from a cache in the class if possible
names = cls.__dict__.get("__slotnames__")
if names is not None:
return names
# Not cached -- calculate the value
names = []
if not hasattr(cls, "__slots__"):
# This class has no slots
pass
else:
# Slots found -- gather slot names from all base classes
for c in cls.__mro__:
if "__slots__" in c.__dict__:
slots = c.__dict__['__slots__']
# if class has a single slot, it can be given as a string
if isinstance(slots, str):
slots = (slots,)
for name in slots:
# special descriptors
if name in ("__dict__", "__weakref__"):
continue
# mangled names
elif name.startswith('__') and not name.endswith('__'):
names.append('_%s%s' % (c.__name__, name))
else:
names.append(name)
# Cache the outcome in the class if at all possible
try:
cls.__slotnames__ = names
except:
pass # But don't die if we can't
return names
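# Example of the private-name mangling handled above (comment only): for
#   class C:
#       __slots__ = ('x', '__y')
# _slotnames(C) returns ['x', '_C__y'], because '__y' is stored under its
# mangled name while 'x' is kept as-is.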
# A registry of extension codes. This is an ad-hoc compression
# mechanism. Whenever a global reference to <module>, <name> is about
# to be pickled, the (<module>, <name>) tuple is looked up here to see
# if it is a registered extension code for it. Extension codes are
# universal, so that the meaning of a pickle does not depend on
# context. (There are also some codes reserved for local use that
# don't have this restriction.) Codes are positive ints; 0 is
# reserved.
_extension_registry = {} # key -> code
_inverted_registry = {} # code -> key
_extension_cache = {} # code -> object
# Don't ever rebind those names: pickling grabs a reference to them when
# it's initialized, and won't see a rebinding.
def add_extension(module, name, code):
"""Register an extension code."""
code = int(code)
if not 1 <= code <= 0x7fffffff:
raise ValueError("code out of range")
key = (module, name)
if (_extension_registry.get(key) == code and
_inverted_registry.get(code) == key):
return # Redundant registrations are benign
if key in _extension_registry:
raise ValueError("key %s is already registered with code %s" %
(key, _extension_registry[key]))
if code in _inverted_registry:
raise ValueError("code %s is already in use for key %s" %
(code, _inverted_registry[code]))
_extension_registry[key] = code
_inverted_registry[code] = key
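# Usage sketch (hypothetical key and code; comment only):
#   add_extension('mypkg.mymod', 'MyType', 240)   # 240-255 is the private-use range
# Re-registering the same (module, name) -> code pair is a no-op, while a
# conflicting registration raises ValueError.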
def remove_extension(module, name, code):
"""Unregister an extension code. For testing only."""
key = (module, name)
if (_extension_registry.get(key) != code or
_inverted_registry.get(code) != key):
raise ValueError("key %s is not registered with code %s" %
(key, code))
del _extension_registry[key]
del _inverted_registry[code]
if code in _extension_cache:
del _extension_cache[code]
def clear_extension_cache():
_extension_cache.clear()
# Standard extension code assignments
# Reserved ranges
# First Last Count Purpose
# 1 127 127 Reserved for Python standard library
# 128 191 64 Reserved for Zope
# 192 239 48 Reserved for 3rd parties
# 240 255 16 Reserved for private use (will never be assigned)
# 256 Inf Inf Reserved for future assignment
# Extension codes are assigned by the Python Software Foundation.
| mit | 8,624,719,848,230,675,000 | 32.826733 | 77 | 0.604127 | false |
catapult-project/catapult-csm | trace_processor/third_party/cloudstorage/common.py | 129 | 12326 | # Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Helpers shared by cloudstorage_stub and cloudstorage_api."""
__all__ = ['CS_XML_NS',
'CSFileStat',
'dt_str_to_posix',
'local_api_url',
'LOCAL_GCS_ENDPOINT',
'local_run',
'get_access_token',
'get_stored_content_length',
'get_metadata',
'GCSFileStat',
'http_time_to_posix',
'memory_usage',
'posix_time_to_http',
'posix_to_dt_str',
'set_access_token',
'validate_options',
'validate_bucket_name',
'validate_bucket_path',
'validate_file_path',
]
import calendar
import datetime
from email import utils as email_utils
import logging
import os
import re
try:
from google.appengine.api import runtime
except ImportError:
from google.appengine.api import runtime
_GCS_BUCKET_REGEX_BASE = r'[a-z0-9\.\-_]{3,63}'
_GCS_BUCKET_REGEX = re.compile(_GCS_BUCKET_REGEX_BASE + r'$')
_GCS_BUCKET_PATH_REGEX = re.compile(r'/' + _GCS_BUCKET_REGEX_BASE + r'$')
_GCS_PATH_PREFIX_REGEX = re.compile(r'/' + _GCS_BUCKET_REGEX_BASE + r'.*')
_GCS_FULLPATH_REGEX = re.compile(r'/' + _GCS_BUCKET_REGEX_BASE + r'/.*')
_GCS_METADATA = ['x-goog-meta-',
'content-disposition',
'cache-control',
'content-encoding']
_GCS_OPTIONS = _GCS_METADATA + ['x-goog-acl']
CS_XML_NS = 'http://doc.s3.amazonaws.com/2006-03-01'
LOCAL_GCS_ENDPOINT = '/_ah/gcs'
_access_token = ''
_MAX_GET_BUCKET_RESULT = 1000
def set_access_token(access_token):
"""Set the shared access token to authenticate with Google Cloud Storage.
When set, the library will always attempt to communicate with the
real Google Cloud Storage with this token even when running on dev appserver.
Note the token could expire so it's up to you to renew it.
When absent, the library will automatically request and refresh a token
on appserver, or when on dev appserver, talk to a Google Cloud Storage
stub.
Args:
access_token: you can get one by run 'gsutil -d ls' and copy the
str after 'Bearer'.
"""
global _access_token
_access_token = access_token
def get_access_token():
"""Returns the shared access token."""
return _access_token
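# Usage sketch (comment only; assumes the package is importable as
# `cloudstorage`, and the token string is a placeholder):
#   import cloudstorage as gcs
#   gcs.common.set_access_token('ya29.XXXX')  # str copied after 'Bearer' in `gsutil -d ls`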
class GCSFileStat(object):
"""Container for GCS file stat."""
def __init__(self,
filename,
st_size,
etag,
st_ctime,
content_type=None,
metadata=None,
is_dir=False):
"""Initialize.
For files, the non optional arguments are always set.
For directories, only filename and is_dir is set.
Args:
filename: a Google Cloud Storage filename of form '/bucket/filename'.
st_size: file size in bytes. long compatible.
etag: hex digest of the md5 hash of the file's content. str.
st_ctime: posix file creation time. float compatible.
content_type: content type. str.
metadata: a str->str dict of user specified options when creating
the file. Possible keys are x-goog-meta-, content-disposition,
content-encoding, and cache-control.
is_dir: True if this represents a directory. False if this is a real file.
"""
self.filename = filename
self.is_dir = is_dir
self.st_size = None
self.st_ctime = None
self.etag = None
self.content_type = content_type
self.metadata = metadata
if not is_dir:
self.st_size = long(st_size)
self.st_ctime = float(st_ctime)
if etag[0] == '"' and etag[-1] == '"':
etag = etag[1:-1]
self.etag = etag
def __repr__(self):
if self.is_dir:
return '(directory: %s)' % self.filename
return (
'(filename: %(filename)s, st_size: %(st_size)s, '
'st_ctime: %(st_ctime)s, etag: %(etag)s, '
'content_type: %(content_type)s, '
'metadata: %(metadata)s)' %
dict(filename=self.filename,
st_size=self.st_size,
st_ctime=self.st_ctime,
etag=self.etag,
content_type=self.content_type,
metadata=self.metadata))
def __cmp__(self, other):
if not isinstance(other, self.__class__):
raise ValueError('Argument to cmp must have the same type. '
'Expect %s, got %s', self.__class__.__name__,
other.__class__.__name__)
if self.filename > other.filename:
return 1
elif self.filename < other.filename:
return -1
return 0
def __hash__(self):
if self.etag:
return hash(self.etag)
return hash(self.filename)
CSFileStat = GCSFileStat
def get_stored_content_length(headers):
"""Return the content length (in bytes) of the object as stored in GCS.
x-goog-stored-content-length should always be present except when called via
the local dev_appserver. Therefore if it is not present we default to the
standard content-length header.
Args:
headers: a dict of headers from the http response.
Returns:
the stored content length.
"""
length = headers.get('x-goog-stored-content-length')
if length is None:
length = headers.get('content-length')
return length
def get_metadata(headers):
"""Get user defined options from HTTP response headers."""
return dict((k, v) for k, v in headers.iteritems()
if any(k.lower().startswith(valid) for valid in _GCS_METADATA))
def validate_bucket_name(name):
"""Validate a Google Storage bucket name.
Args:
name: a Google Storage bucket name with no prefix or suffix.
Raises:
ValueError: if name is invalid.
"""
_validate_path(name)
if not _GCS_BUCKET_REGEX.match(name):
raise ValueError('Bucket should be 3-63 characters long using only a-z,'
'0-9, underscore, dash or dot but got %s' % name)
def validate_bucket_path(path):
"""Validate a Google Cloud Storage bucket path.
Args:
path: a Google Storage bucket path. It should have form '/bucket'.
Raises:
ValueError: if path is invalid.
"""
_validate_path(path)
if not _GCS_BUCKET_PATH_REGEX.match(path):
raise ValueError('Bucket should have format /bucket '
'but got %s' % path)
def validate_file_path(path):
"""Validate a Google Cloud Storage file path.
Args:
path: a Google Storage file path. It should have form '/bucket/filename'.
Raises:
ValueError: if path is invalid.
"""
_validate_path(path)
if not _GCS_FULLPATH_REGEX.match(path):
raise ValueError('Path should have format /bucket/filename '
'but got %s' % path)
def _process_path_prefix(path_prefix):
"""Validate and process a Google Cloud Stoarge path prefix.
Args:
path_prefix: a Google Cloud Storage path prefix of format '/bucket/prefix'
or '/bucket/' or '/bucket'.
Raises:
ValueError: if path is invalid.
Returns:
a tuple of /bucket and prefix. prefix can be None.
"""
_validate_path(path_prefix)
if not _GCS_PATH_PREFIX_REGEX.match(path_prefix):
raise ValueError('Path prefix should have format /bucket, /bucket/, '
'or /bucket/prefix but got %s.' % path_prefix)
bucket_name_end = path_prefix.find('/', 1)
bucket = path_prefix
prefix = None
if bucket_name_end != -1:
bucket = path_prefix[:bucket_name_end]
prefix = path_prefix[bucket_name_end + 1:] or None
return bucket, prefix
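# Expected splits (comment only):
#   '/bucket'      -> ('/bucket', None)
#   '/bucket/'     -> ('/bucket', None)
#   '/bucket/a/b'  -> ('/bucket', 'a/b')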
def _validate_path(path):
"""Basic validation of Google Storage paths.
Args:
path: a Google Storage path. It should have form '/bucket/filename'
or '/bucket'.
Raises:
ValueError: if path is invalid.
TypeError: if path is not of type basestring.
"""
if not path:
raise ValueError('Path is empty')
if not isinstance(path, basestring):
raise TypeError('Path should be a string but is %s (%s).' %
(path.__class__, path))
def validate_options(options):
"""Validate Google Cloud Storage options.
Args:
options: a str->basestring dict of options to pass to Google Cloud Storage.
Raises:
ValueError: if option is not supported.
TypeError: if option is not of type str or value of an option
is not of type basestring.
"""
if not options:
return
for k, v in options.iteritems():
if not isinstance(k, str):
raise TypeError('option %r should be a str.' % k)
if not any(k.lower().startswith(valid) for valid in _GCS_OPTIONS):
raise ValueError('option %s is not supported.' % k)
if not isinstance(v, basestring):
raise TypeError('value %r for option %s should be of type basestring.' %
(v, k))
def http_time_to_posix(http_time):
"""Convert HTTP time format to posix time.
See http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.3.1
for http time format.
Args:
http_time: time in RFC 2616 format. e.g.
"Mon, 20 Nov 1995 19:12:08 GMT".
Returns:
A float of secs from unix epoch.
"""
if http_time is not None:
return email_utils.mktime_tz(email_utils.parsedate_tz(http_time))
def posix_time_to_http(posix_time):
"""Convert posix time to HTML header time format.
Args:
posix_time: unix time.
Returns:
A datatime str in RFC 2616 format.
"""
if posix_time:
return email_utils.formatdate(posix_time, usegmt=True)
_DT_FORMAT = '%Y-%m-%dT%H:%M:%S'
def dt_str_to_posix(dt_str):
"""format str to posix.
datetime str is of format %Y-%m-%dT%H:%M:%S.%fZ,
e.g. 2013-04-12T00:22:27.978Z. According to ISO 8601, T is a separator
between date and time when they are on the same line.
Z indicates UTC (zero meridian).
A pointer: http://www.cl.cam.ac.uk/~mgk25/iso-time.html
This is used to parse LastModified node from GCS's GET bucket XML response.
Args:
dt_str: A datetime str.
Returns:
A float of secs from unix epoch. By posix definition, epoch is midnight
1970/1/1 UTC.
"""
parsable, _ = dt_str.split('.')
dt = datetime.datetime.strptime(parsable, _DT_FORMAT)
return calendar.timegm(dt.utctimetuple())
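# Example (comment only): '2013-04-12T00:22:27.978Z' -> 1365726147.0 seconds
# since the unix epoch (UTC); the fractional seconds are discarded by the
# split above.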
def posix_to_dt_str(posix):
"""Reverse of str_to_datetime.
This is used by GCS stub to generate GET bucket XML response.
Args:
posix: A float of secs from unix epoch.
Returns:
A datetime str.
"""
dt = datetime.datetime.utcfromtimestamp(posix)
dt_str = dt.strftime(_DT_FORMAT)
return dt_str + '.000Z'
def local_run():
"""Whether we should hit GCS dev appserver stub."""
server_software = os.environ.get('SERVER_SOFTWARE')
if server_software is None:
return True
if 'remote_api' in server_software:
return False
if server_software.startswith(('Development', 'testutil')):
return True
return False
def local_api_url():
"""Return URL for GCS emulation on dev appserver."""
return 'http://%s%s' % (os.environ.get('HTTP_HOST'), LOCAL_GCS_ENDPOINT)
def memory_usage(method):
"""Log memory usage before and after a method."""
def wrapper(*args, **kwargs):
logging.info('Memory before method %s is %s.',
method.__name__, runtime.memory_usage().current())
result = method(*args, **kwargs)
logging.info('Memory after method %s is %s',
method.__name__, runtime.memory_usage().current())
return result
return wrapper
def _add_ns(tagname):
return '{%(ns)s}%(tag)s' % {'ns': CS_XML_NS,
'tag': tagname}
_T_CONTENTS = _add_ns('Contents')
_T_LAST_MODIFIED = _add_ns('LastModified')
_T_ETAG = _add_ns('ETag')
_T_KEY = _add_ns('Key')
_T_SIZE = _add_ns('Size')
_T_PREFIX = _add_ns('Prefix')
_T_COMMON_PREFIXES = _add_ns('CommonPrefixes')
_T_NEXT_MARKER = _add_ns('NextMarker')
_T_IS_TRUNCATED = _add_ns('IsTruncated')
| bsd-3-clause | -1,954,761,002,360,891,000 | 27.731935 | 80 | 0.639624 | false |
dnidever/noaosourcecatalog | python/nsc_instcal_combine.py | 1 | 27777 | #!/usr/bin/env python
import os
import sys
import numpy as np
import warnings
from astropy.io import fits
from astropy.utils.exceptions import AstropyWarning
from astropy.table import Table, vstack, Column
from astropy.time import Time
import healpy as hp
from dlnpyutils import utils, coords
import subprocess
import time
from argparse import ArgumentParser
import socket
from dustmaps.sfd import SFDQuery
from astropy.coordinates import SkyCoord
def add_elements(cat,nnew=300000):
""" Add more elements to a catalog"""
ncat = len(cat)
old = cat.copy()
nnew = utils.gt(nnew,ncat)
cat = np.zeros(ncat+nnew,dtype=old.dtype)
cat[0:ncat] = old
del(old)
return cat
def add_cat(obj,totobj,idstr,idcnt,ind1,cat,meta):
""" Add object information from a new meas catalog of matched objects"""
ncat = len(cat)
f = meta['filter'].lower().strip()[0]
# Copy to final structure
obj['ra'][ind1] = cat['RA']
obj['dec'][ind1] = cat['DEC']
obj['raerr'][ind1] += 1.0/cat['RAERR']**2 # total(ra_wt)
obj['decerr'][ind1] += 1.0/cat['DECERR']**2 # total(dec_wt)
obj['pmra'][ind1] += (1.0/cat['RAERR']**2) * meta['mjd']*cat['RA'] # total(wt*mjd*ra)
obj['pmdec'][ind1] += (1.0/cat['DECERR']**2) * meta['mjd']*cat['DEC'] # total(wt*mjd*dec)
obj['mjd'][ind1] += meta['mjd'] # total(mjd)
obj['ndet'][ind1] += 1
# Detection and morphology parameters for this FILTER
obj['ndet'+f][ind1] += 1
obj[f+'asemi'][ind1] += cat['ASEMI']
obj[f+'bsemi'][ind1] += cat['BSEMI']
obj[f+'theta'][ind1] += cat['THETA']
# Good photometry for this FILTER
gdmag, = np.where(cat['MAG_AUTO']<50)
if len(gdmag)>0:
obj[f+'mag'][ind1[gdmag]] += 2.5118864**cat['MAG_AUTO'][gdmag] * (1.0/cat['MAGERR_AUTO'][gdmag]**2)
obj[f+'err'][ind1[gdmag]] += 1.0/cat['MAGERR_AUTO'][gdmag]**2
obj['nphot'+f][ind1[gdmag]] += 1
obj['asemi'][ind1] += cat['ASEMI']
obj['asemierr'][ind1] += cat['ASEMIERR']**2
obj['bsemi'][ind1] += cat['BSEMI']
obj['bsemierr'][ind1] += cat['BSEMIERR']**2
obj['theta'][ind1] += cat['THETA']
obj['thetaerr'][ind1] += cat['THETAERR']**2
obj['fwhm'][ind1] += cat['FWHM'] # in arcsec
obj['flags'][ind1] |= cat['FLAGS'] # OR combine
obj['class_star'][ind1] += cat['CLASS_STAR']
totobj['ra'][ind1] += cat['RA'] * (1.0/cat['RAERR']**2) # total(ra*wt)
totobj['dec'][ind1] += cat['DEC'] * (1.0/cat['DECERR']**2) # total(dec*wt)
totobj['ramjd'][ind1] += (1.0/cat['RAERR']**2) * meta['mjd'] # total(wt_ra*mjd)
totobj['decmjd'][ind1] += (1.0/cat['DECERR']**2) * meta['mjd'] # total(wt_dec*mjd)
totobj['ramjd2'][ind1] += (1.0/cat['RAERR']**2) * meta['mjd']**2 # total(wt_ra*mjd**2)
totobj['decmjd2'][ind1] += (1.0/cat['DECERR']**2) * meta['mjd']**2 # total(wt_dec*mjd**2)
totobj['minmjd'][ind1] = np.minimum( meta['mjd'][0], totobj['minmjd'][ind1] )
totobj['maxmjd'][ind1] = np.maximum( meta['mjd'][0], totobj['maxmjd'][ind1] )
if len(gdmag)>0:
totobj[f+'tot'][ind1[gdmag]] += cat['MAG_AUTO'][gdmag] # sum(mag)
totobj[f+'mag2'][ind1[gdmag]] += np.float64(cat['MAG_AUTO'][gdmag])**2 # sum(mag**2), need dbl to precent underflow
# Add new elements to IDSTR
if idcnt+ncat > len(idstr):
idstr = add_elements(idstr)
nidstr = len(idstr)
# Add to IDSTR
idstr['measid'][idcnt:idcnt+ncat] = cat['MEASID']
idstr['exposure'][idcnt:idcnt+ncat] = meta['base']
idstr['expnum'][idcnt:idcnt+ncat] = meta['expnum']
idstr['objectid'][idcnt:idcnt+ncat] = obj[ind1]['objectid']
idstr['objectindex'][idcnt:idcnt+ncat] = ind1
idcnt += ncat
return obj,totobj,idstr,idcnt
def loadmeas(metafile,buffdict=None,verbose=False):
if os.path.exists(metafile) is False:
print(metafile+' NOT FOUND')
return np.array([])
meta = fits.getdata(metafile,1)
chmeta = fits.getdata(metafile,2)
fdir = os.path.dirname(metafile)
fbase, ext = os.path.splitext(os.path.basename(metafile))
fbase = fbase[:-5] # remove _meta at end
# Loop over the chip files
cat = None
for j in range(len(chmeta)):
# Check that this chip was astrometrically calibrated
        # and falls into the HEALPix region
if chmeta[j]['ngaiamatch'] == 0:
            if verbose: print('This chip was not astrometrically calibrated')
# Check that this overlaps the healpix region
inside = True
if buffdict is not None:
vra = chmeta[j]['vra']
vdec = chmeta[j]['vdec']
if (np.max(vra)-np.min(vra)) > 100: # deal with RA=0 wrapround
bd, = np.where(vra>180)
if len(bd)>0: vra[bd] -= 360
if coords.doPolygonsOverlap(buffdict['ra'],buffdict['dec'],vra,vdec) is False:
if verbose: print('This chip does NOT overlap the HEALPix region+buffer')
inside = False
# Check if the chip-level file exists
chfile = fdir+'/'+fbase+'_'+str(chmeta[j]['ccdnum'])+'_meas.fits'
if os.path.exists(chfile) is False:
print(chfile+' NOT FOUND')
# Load this one
if (os.path.exists(chfile) is True) and (inside is True) and (chmeta[j]['ngaiamatch']>1):
# Load the chip-level catalog
cat1 = fits.getdata(chfile,1)
ncat1 = len(cat1)
print(' '+str(ncat1)+' sources')
# Make sure it's in the right format
if len(cat1.dtype.fields) != 32:
if verbose: print(' This catalog does not have the right format. Skipping')
del(cat1)
ncat1 = 0
# Only include sources inside Boundary+Buffer zone
# -use ROI_CUT
# -reproject to tangent plane first so we don't have to deal
            # with RA=0 wrapping or pole issues
if buffdict is not None:
lon, lat = coords.rotsphcen(cat1['ra'],cat1['dec'],buffdict['cenra'],buffdict['cendec'],gnomic=True)
ind0, ind1 = utils.roi_cut(buffdict['lon'],buffdict['lat'],lon,lat)
nmatch = len(ind1)
# Only want source inside this pixel
if nmatch>0:
cat1 = cat1[ind1]
ncat1 = len(cat1)
if verbose: print(' '+str(nmatch)+' sources are inside this pixel')
# Combine the catalogs
if ncat1 > 0:
if cat is None:
dtype_cat = cat1.dtype
cat = np.zeros(np.sum(chmeta['nsources']),dtype=dtype_cat)
catcount = 0
cat[catcount:catcount+ncat1] = cat1
catcount += ncat1
#BOMB1:
if cat is not None: cat=cat[0:catcount] # trim excess
if cat is None: cat=np.array([]) # empty cat
return cat
# Combine data for one NSC healpix region
if __name__ == "__main__":
parser = ArgumentParser(description='Combine NSC data for one healpix region.')
parser.add_argument('pix', type=str, nargs=1, help='HEALPix pixel number')
parser.add_argument('version', type=str, nargs=1, help='Version number')
parser.add_argument('--nside', type=int, default=128, help='HEALPix Nside')
parser.add_argument('-r','--redo', action='store_true', help='Redo this HEALPIX')
parser.add_argument('--outdir', type=str, default='', help='Output directory')
#parser.add_argument('--filesexist', type=float, default=0.2, help='Time to wait between checking the status of running jobs')
#parser.add_argument('--pixfiles', type=str, default=False, help='IDL program')
args = parser.parse_args()
t0 = time.time()
hostname = socket.gethostname()
host = hostname.split('.')[0]
radeg = np.float64(180.00) / np.pi
# Inputs
pix = int(args.pix[0])
    version = args.version[0]
nside = args.nside
redo = args.redo
outdir = args.outdir
# on thing/hulk use
if (host == "thing") or (host == "hulk"):
dir = "/dl1/users/dnidever/nsc/instcal/"+version+"/"
mssdir = "/mss1/"
localdir = "/d0/"
tmproot = localdir+"dnidever/nsc/instcal/"+version+"/tmp/"
# on gp09 use
if (host == "gp09") or (host == "gp08") or (host == "gp07") or (host == "gp06") or (host == "gp05"):
dir = "/net/dl1/users/dnidever/nsc/instcal/"+version+"/"
mssdir = "/net/mss1/"
localdir = "/data0/"
tmproot = localdir+"dnidever/nsc/instcal/"+version+"/tmp/"
t0 = time.time()
# Check if output file already exists
if outdir == '': outdir=dir+'combine/'
subdir = str(int(pix)//1000) # use the thousands to create subdirectory grouping
outfile = outdir+'/'+subdir+'/'+str(pix)+'.fits'
    if (os.path.exists(outfile) or os.path.exists(outfile+'.gz')) and not redo:
print(outfile+' EXISTS already and REDO not set')
sys.exit()
print("Combining InstCal SExtractor catalogs for Healpix pixel = "+str(pix))
# Load the list
listfile = localdir+'dnidever/nsc/instcal/'+version+'/nsc_instcal_combine_healpix_list.fits.gz'
if os.path.exists(listfile) is False:
print(listfile+" NOT FOUND")
sys.exit()
healstr = Table(fits.getdata(listfile,1))
index = Table(fits.getdata(listfile,2))
# Find our pixel
ind, = np.where(index['PIX'] == pix)
nind = len(ind)
if nind == 0:
print("No entries for Healpix pixel '"+str(pix)+"' in the list")
sys.exit()
ind = ind[0]
hlist = healstr[index[ind]['LO']:index[ind]['HI']+1]
nlist = len(hlist)
# GET EXPOSURES FOR NEIGHBORING PIXELS AS WELL
# so we can deal with the edge cases
neipix = hp.get_all_neighbours(nside,pix)
for neip in neipix:
ind1, = np.where(index['PIX'] == neip)
nind1 = len(ind1)
if nind1>0:
ind1 = ind1[0]
hlist1 = healstr[index[ind1]['LO']:index[ind1]['HI']+1]
hlist = vstack([hlist,hlist1])
# Use entire exposure files
# Get unique values
u, ui = np.unique(hlist['FILE'],return_index=True)
hlist = hlist[ui]
nhlist = len(hlist)
print(str(nhlist)+' exposures that overlap this pixel and neighbors')
# Get the boundary coordinates
# healpy.boundaries but not sure how to do it in IDL
# pix2vec_ring/nest can optionally return vertices but only 4
# maybe subsample myself between the vectors
# Expand the boundary to include a "buffer" zone
# to deal with edge cases
vecbound = hp.boundaries(nside,pix,step=100)
rabound, decbound = hp.vec2ang(np.transpose(vecbound),lonlat=True)
# Expand the boundary by the buffer size
cenra, cendec = hp.pix2ang(nside,pix,lonlat=True)
# reproject onto tangent plane
lonbound, latbound = coords.rotsphcen(rabound,decbound,cenra,cendec,gnomic=True)
# expand by a fraction, it's not an extact boundary but good enough
buffsize = 10.0/3600. # in deg
radbound = np.sqrt(lonbound**2+latbound**2)
frac = 1.0 + 1.5*np.max(buffsize/radbound)
lonbuff = lonbound*frac
latbuff = latbound*frac
rabuff, decbuff = coords.rotsphcen(lonbuff,latbuff,cenra,cendec,gnomic=True,reverse=True)
if (np.max(rabuff)-np.min(rabuff))>100: # deal with RA=0 wraparound
bd, = np.where(rabuff>180)
if len(bd)>0:rabuff[bd] -=360.0
buffdict = {'cenra':cenra,'cendec':cendec,'rar':utils.minmax(rabuff),'decr':utils.minmax(decbuff),'ra':rabuff,'dec':decbuff,\
'lon':lonbuff,'lat':latbuff,'lr':utils.minmax(lonbuff),'br':utils.minmax(latbuff)}
# Initialize the ID structure
# this will contain the MeasID, Exposure name, ObjectID
dtype_idstr = np.dtype([('measid',np.str,200),('exposure',np.str,200),('expnum',np.str,200),('objectid',np.str,200),('objectindex',int)])
idstr = np.zeros(1000000,dtype=dtype_idstr)
nidstr = len(idstr)
idcnt = 0
# Initialize the object structure
dtype_obj = np.dtype([('objectid',np.str,100),('pix',int),('ra',np.float64),('dec',np.float64),('raerr',float),('decerr',float),
('pmra',float),('pmdec',float),('pmraerr',float),('pmdecerr',float),('mjd',np.float64),
('deltamjd',float),('ndet',int),('nphot',int),
('ndetu',int),('nphotu',int),('umag',float),('urms',float),('uerr',float),('uasemi',float),('ubsemi',float),('utheta',float),
('ndetg',int),('nphotg',int),('gmag',float),('grms',float),('gerr',float),('gasemi',float),('gbsemi',float),('gtheta',float),
('ndetr',int),('nphotr',int),('rmag',float),('rrms',float),('rerr',float),('rasemi',float),('rbsemi',float),('rtheta',float),
('ndeti',int),('nphoti',int),('imag',float),('irms',float),('ierr',float),('iasemi',float),('ibsemi',float),('itheta',float),
('ndetz',int),('nphotz',int),('zmag',float),('zrms',float),('zerr',float),('zasemi',float),('zbsemi',float),('ztheta',float),
('ndety',int),('nphoty',int),('ymag',float),('yrms',float),('yerr',float),('yasemi',float),('ybsemi',float),('ytheta',float),
('ndetvr',int),('nphotvr',int),('vrmag',float),('vrrms',float),('vrerr',float),('vrasemi',float),('vrbsemi',float),('vrtheta',float),
('asemi',float),('asemierr',float),('bsemi',float),('bsemierr',float),('theta',float),('thetaerr',float),
('fwhm',float),('flags',int),('class_star',float),('ebv',float)])
tags = dtype_obj.names
obj = np.zeros(500000,dtype=dtype_obj)
obj['pix'] = pix
nobj = len(obj)
dtype_totobj = np.dtype([('ra',np.float64),('dec',np.float64),('ramjd',np.float64),('decmjd',np.float64),('ramjd2',np.float64),
('decmjd2',np.float64),('minmjd',np.float64),('maxmjd',np.float64),('umag2',np.float64),('gmag2',np.float64),
('rmag2',np.float64),('imag2',np.float64),('zmag2',np.float64),('ymag2',np.float64),('vrmag2',np.float64),
('utot',np.float64),('gtot',np.float64),('rtot',np.float64),('itot',np.float64),('ztot',np.float64),
('ytot',np.float64),('vrtot',np.float64)])
totags = dtype_totobj.names
totobj = np.zeros(nobj,dtype=dtype_totobj)
totobj['minmjd'] = 999999.0
totobj['maxmjd'] = -999999.0
cnt = 0
# New meta-data format
dtype_meta = np.dtype([('file',np.str,500),('base',np.str,200),('expnum',int),('ra',np.float64),
('dec',np.float64),('dateobs',np.str,100),('mjd',np.float64),('filter',np.str,50),
('exptime',float),('airmass',float),('nsources',int),('fwhm',float),
('nchips',int),('badchip31',bool),('rarms',float),('decrms',float),
('ebv',float),('gaianmatch',int),('zpterm',float),('zptermerr',float),
('zptermsig',float),('refmatch',int)])
# Loop over the exposures
allmeta = None
for i in range(nhlist):
print(str(i+1)+' Loading '+hlist[i]['FILE'])
# Load meta data file first
metafile = hlist[i]['FILE'].replace('_cat','_meta').strip()
if os.path.exists(metafile) is False:
print(metafile+' NOT FOUND')
            continue
meta = fits.getdata(metafile,1)
t = Time(meta['dateobs'], format='isot', scale='utc')
meta['mjd'] = t.mjd # recompute because some MJD are bad
chmeta = fits.getdata(metafile,2) # chip-level meta-data structure
print(' FILTER='+meta['filter'][0]+' EXPTIME='+str(meta['exptime'][0])+' sec')
# Load the measurement catalog
cat = loadmeas(metafile,buffdict)
ncat = utils.size(cat)
if ncat==0:
print('This exposure does NOT cover the HEALPix')
continue # go to next exposure
# Add metadata to ALLMETA
# Make sure it's in the right format
newmeta = np.zeros(1,dtype=dtype_meta)
# Copy over the meta information
for n in newmeta.dtype.names:
if n.upper() in meta.dtype.names: newmeta[n]=meta[n]
if allmeta is None:
allmeta = newmeta
else:
allmeta = np.hstack((allmeta,newmeta))
# Combine the data
#-----------------
# First catalog
if cnt==0:
ind1 = np.arange(len(cat))
obj['objectid'][ind1] = utils.strjoin( str(pix)+'.', ((np.arange(ncat)+1).astype(np.str)) )
obj,totobj,idstr,idcnt = add_cat(obj,totobj,idstr,idcnt,ind1,cat,meta)
cnt += ncat
# Second and up
else:
# Match new sources to the objects
ind1,ind2,dist = coords.xmatch(obj[0:cnt]['ra'],obj[0:cnt]['dec'],cat['RA'],cat['DEC'],0.5)
nmatch = len(ind1)
print(' '+str(nmatch)+' matched sources')
# Some matches, add data to existing record for these sources
if nmatch>0:
obj,totobj,idstr,idcnt = add_cat(obj,totobj,idstr,idcnt,ind1,cat[ind2],meta)
if nmatch<ncat:
cat = np.delete(cat,ind2)
ncat = len(cat)
else:
cat = np.array([])
ncat = 0
# Some left, add records for these sources
if ncat>0:
print(' '+str(ncat)+' sources left to add')
# Add new elements
if (cnt+ncat)>nobj:
obj = add_elements(obj)
nobj = len(obj)
ind1 = np.arange(ncat)+cnt
obj['objectid'][ind1] = utils.strjoin( str(pix)+'.', ((np.arange(ncat)+1+cnt).astype(np.str)) )
obj,totobj,idstr,idcnt = add_cat(obj,totobj,idstr,idcnt,ind1,cat,meta)
cnt += ncat
# No sources
if cnt==0:
print('No sources in this pixel')
sys.exit()
# Trim off the excess elements
obj = obj[0:cnt]
totobj = totobj[0:cnt]
nobj = len(obj)
print(str(nobj)+' final objects')
idstr = idstr[0:idcnt]
# Make NPHOT from NPHOTX
obj['nphot'] = obj['nphotu']+obj['nphotg']+obj['nphotr']+obj['nphoti']+obj['nphotz']+obj['nphoty']+obj['nphotvr']
# Convert total(mjd*ra) to true proper motion values
# the slope of RA vs. MJD is
# pmra=(total(wt*mjd*ra)/total(wt)-<mjd>*<ra>)/(total(wt*mjd^2)/total(wt)-<mjd>^2)
# we are creating the totals cumulatively as we go
totobj['ra'] /= obj['raerr'] # wt mean RA (totalrawt/totalwt)
totobj['dec'] /= obj['decerr'] # wt mean DEC (totaldecwt/totalwt)
obj['mjd'] /= obj['ndet'] # mean MJD
totobj['ramjd'] /= obj['raerr'] # wt_ra mean MJD
totobj['decmjd'] /= obj['decerr'] # wt_dec mean MJD
gdet, = np.where(obj['ndet']>1)
if len(gdet)>0:
pmra = (obj['pmra'][gdet]/obj['raerr'][gdet]-totobj['ramjd'][gdet]*totobj['ra'][gdet]) / (totobj['ramjd2'][gdet]/obj['raerr'][gdet]-totobj['ramjd'][gdet]**2) # deg[ra]/day
pmra *= (3600*1e3)*365.2425 # mas/year
pmra *= np.cos(obj['dec'][gdet]/radeg) # mas/year, true angle
pmdec = (obj['pmdec'][gdet]/obj['decerr'][gdet]-totobj['decmjd'][gdet]*totobj['dec'][gdet])/(totobj['decmjd2'][gdet]/obj['decerr'][gdet]-totobj['decmjd'][gdet]**2) # deg/day
pmdec *= (3600*1e3)*365.2425 # mas/year
# Proper motion errors
# pmerr = 1/sqrt( sum(wt*mjd^2) - <mjd>^2 * sum(wt) )
# if wt=1/err^2 with err in degrees, but we are using arcsec
# Need to divide by 3600 for PMDECERR and 3600*cos(dec) for PMRAERR
pmraerr = 1.0/np.sqrt( totobj['ramjd2'][gdet] - totobj['ramjd'][gdet]**2 * obj['raerr'][gdet] )
pmraerr /= (3600*np.cos(totobj['dec'][gdet]/radeg)) # correction for raerr in arcsec
pmraerr *= (3600*1e3)*365.2425 # mas/year
pmraerr *= np.cos(obj['dec'][gdet]/radeg) # mas/year, true angle
pmdecerr = 1.0/np.sqrt( totobj['decmjd2'][gdet] - totobj['decmjd'][gdet]**2 * obj['decerr'][gdet] )
pmdecerr /= 3600 # correction for decerr in arcsec
pmdecerr *= (3600*1e3)*365.2425 # mas/year
obj['pmra'][gdet] = pmra
obj['pmdec'][gdet] = pmdec
obj['pmraerr'][gdet] = pmraerr
obj['pmdecerr'][gdet] = pmdecerr
# sometimes it happens that the denominator is 0.0
# when there are few closely spaced points
# nothing we can do, just mark as bad
bdet, = np.where((obj['ndet']<2) | ~np.isfinite(obj['pmra']))
if len(bdet)>0:
obj['pmra'][bdet] = 999999.0
obj['pmdec'][bdet] = 999999.0
obj['pmraerr'][bdet] = 999999.0
obj['pmdecerr'][bdet] = 999999.0
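    # Sanity check of the slope formula above (assumed numbers, comment only):
    # two equal-weight detections one year apart (mjd 0 and 365.2425 days) with
    # RA larger by 1e-6 deg in the second epoch give a slope of 1e-6/365.2425
    # deg/day, i.e. 1e-6 * 3600e3 = 3.6 mas/yr before the cos(dec) projection.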
obj['deltamjd'] = totobj['maxmjd']-totobj['minmjd']
# Average coordinates
obj['ra'] = totobj['ra'] # now stuff in the average coordinates
obj['dec'] = totobj['dec']
obj['raerr'] = np.sqrt(1.0/obj['raerr']) # err in wt mean RA, arcsec
obj['decerr'] = np.sqrt(1.0/obj['decerr']) # err in wt mean DEC, arcsec
# Convert totalwt and totalfluxwt to MAG and ERR
# and average the morphology parameters PER FILTER
filters = ['u','g','r','i','z','y','vr']
for f in filters:
# Get average photometry for objects with photometry in this band
gph, = np.where(obj['nphot'+f]>0)
if len(gph)>0:
newflux = obj[f+'mag'][gph] / obj[f+'err'][gph]
newmag = 2.50*np.log10(newflux)
newerr = np.sqrt(1.0/obj[f+'err'][gph])
obj[f+'mag'][gph] = newmag
obj[f+'err'][gph] = newerr
bdmag, = np.where((obj['nphot'+f]==0) | ~np.isfinite(obj[f+'mag']))
if len(bdmag)>0:
obj[f+'mag'][bdmag] = 99.99
obj[f+'err'][bdmag] = 9.99
# Calculate RMS scatter
# RMS^2 * N = sum(mag^2) - 2*<mag>*sum(mag) + N*<mag>^2
# where <mag> is a weighted average
# RMS = sqrt( sum(mag^2)/N - 2*<mag>*sum(mag)/N + <mag>^2 )
# sum(mag^2) is in the MAG2 column and sum(mag) is in TOT
rms = np.zeros(nobj,float)
gdrms, = np.where(obj['nphot'+f]>1)
ngdrms = len(gdrms)
bdrms, = np.where(obj['nphot'+f]<=1)
nbdrms = len(bdrms)
if ngdrms>0:
rms[gdrms] = np.sqrt( totobj[f+'mag2'][gdrms]/obj['nphot'+f][gdrms] -
2*obj[f+'mag'][gdrms]*totobj[f+'tot'][gdrms]/obj['nphot'+f][gdrms] + np.float64(obj[f+'mag'][gdrms])**2 )
if nbdrms>0: rms[bdrms] = 999999.
obj[f+'rms'] = rms
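        # Worked check of the identity above (assumed numbers, comment only):
        # two equal-weight mags [10.0, 10.2] give <mag>=10.1, sum(mag)=20.2 and
        # sum(mag^2)=204.04, so RMS = sqrt(102.02 - 204.02 + 102.01) = 0.1 mag.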
# Average the morphology parameters PER FILTER
gdet, = np.where(obj['ndet'+f]>0)
ngdet = len(gdet)
bdet, = np.where(obj['ndet'+f]==0)
nbdet = len(bdet)
if ngdet>0:
obj[f+'asemi'][gdet] /= obj['ndet'+f][gdet]
obj[f+'bsemi'][gdet] /= obj['ndet'+f][gdet]
obj[f+'theta'][gdet] /= obj['ndet'+f][gdet]
if nbdet>0:
obj[f+'asemi'][bdet] = 999999.
obj[f+'bsemi'][bdet] = 999999.
obj[f+'theta'][bdet] = 999999.
# Average the morphology parameters, Need a separate counter for that maybe?
mtags = ['asemi','bsemi','theta','fwhm','class_star']
gdet, = np.where(obj['ndet']>0)
ngdet = len(gdet)
bdet, = np.where(obj['ndet']==0)
nbdet = len(bdet)
for m in mtags:
# Divide by the number of detections
if ngdet>0: obj[m][gdet] /= obj['ndet'][gdet]
if nbdet>0: obj[m][bdet] = 999999. # no good detections
# Get the average error
metags = ['asemierr','bsemierr','thetaerr']
for m in metags:
# Just take the sqrt to complete the addition in quadrature
if ngdet>0: obj[m][gdet] = np.sqrt(obj[m][gdet]) / obj['ndet'][gdet]
if nbdet>0: obj[m][bdet] = 999999. # no good detections
# Add E(B-V)
print('Getting E(B-V)')
sfd = SFDQuery()
c = SkyCoord(obj['ra'],obj['dec'],frame='icrs',unit='deg')
#c = SkyCoord('05h00m00.00000s','+30d00m00.0000s', frame='icrs')
ebv = sfd(c)
obj['ebv'] = ebv
# ONLY INCLUDE OBJECTS WITH AVERAGE RA/DEC
# WITHIN THE BOUNDARY OF THE HEALPIX PIXEL!!!
ipring = hp.pixelfunc.ang2pix(nside,obj['ra'],obj['dec'],lonlat=True)
ind1, = np.where(ipring == pix)
nmatch = len(ind1)
if nmatch==0:
print('None of the final objects fall inside the pixel')
sys.exit()
# Get trimmed objects and indices
objtokeep = np.zeros(nobj,bool) # boolean to keep or trim objects
objtokeep[ind1] = True
if nmatch<nobj:
trimind = np.arange(nobj)
trimind = np.delete(trimind,ind1)
#trimind = utils.remove_indices(trimind,ind1)
trimobj = obj[trimind] # trimmed objects
newobjindex = np.zeros(nobj,int)-1 # new indices
newobjindex[ind1] = np.arange(nmatch)
# Keep the objects inside the Healpix
obj = obj[ind1]
print(str(nmatch)+' final objects fall inside the pixel')
# Remove trimmed objects from IDSTR
totrim, = np.where(objtokeep[idstr['objectindex']]==0) #using old index
if len(totrim)>0:
# Trim objects
idstr = np.delete(idstr,totrim)
#idstr = utils.remove_indices(idstr,totrim)
# Update IDSTR.objectindex
old_idstr_objectindex = idstr['objectindex']
idstr['objectindex'] = newobjindex[old_idstr_objectindex]
# Create final summary structure from ALLMETA
# get exposures that are in IDSTR
# sometimes EXPNUM numbers have the leading 0s removed
# and sometimes not, so turn to LONG to match
dum, uiexpnum = np.unique(idstr['expnum'].astype(int),return_index=True)
uexpnum = idstr[uiexpnum]['expnum'].astype(int)
nuexpnum = len(uexpnum)
ind1,ind2 = utils.match(allmeta['expnum'].astype(int),uexpnum)
nmatch = len(ind1)
sumstr = Table(allmeta[ind1])
col_nobj = Column(name='nobjects', dtype=np.int, length=len(sumstr))
col_healpix = Column(name='healpix', dtype=np.int, length=len(sumstr))
sumstr.add_columns([col_nobj, col_healpix])
sumstr['nobjects'] = 0
sumstr['healpix'] = pix
# get number of objects per exposure
expnum = idstr['expnum'].astype(int)
siexp = np.argsort(expnum)
expnum = expnum[siexp]
if nuexpnum>1:
brklo, = np.where(expnum != np.roll(expnum,1))
nbrk = len(brklo)
brkhi = np.hstack((brklo[1:nbrk],len(expnum)))
numobjexp = brkhi-brklo+1
else:
numobjexp=len(expnum)
ind1,ind2 = utils.match(sumstr['expnum'].astype(int),uexpnum)
nmatch = len(ind1)
sumstr['nobjects'][ind1] = numobjexp
# Write the output file
print('Writing combined catalog to '+outfile)
if os.path.exists(outdir) is False: os.mkdir(outdir)
if os.path.exists(outdir+'/'+subdir) is False: os.mkdir(outdir+'/'+subdir)
if os.path.exists(outfile): os.remove(outfile)
sumstr.write(outfile) # first, summary table
# append other fits binary tables
hdulist = fits.open(outfile)
hdu = fits.table_to_hdu(Table(obj)) # second, catalog
hdulist.append(hdu)
hdu = fits.table_to_hdu(Table(idstr)) # third, ID table
hdulist.append(hdu)
hdulist.writeto(outfile,overwrite=True)
hdulist.close()
if os.path.exists(outfile+'.gz'): os.remove(outfile+'.gz')
ret = subprocess.call(['gzip',outfile]) # compress final catalog
dt = time.time()-t0
print('dt = '+str(dt)+' sec.')
| mit | 329,439,110,446,213,800 | 44.387255 | 182 | 0.572596 | false |
sbt9uc/osf.io | framework/mongo/utils.py | 11 | 1888 | # -*- coding: utf-8 -*-
import re
import httplib as http
import pymongo
from modularodm.exceptions import ValidationValueError
from framework.exceptions import HTTPError
# MongoDB forbids field names that begin with "$" or contain ".". These
# utilities map to and from Mongo field names.
mongo_map = {
'.': '__!dot!__',
'$': '__!dollar!__',
}
def to_mongo(item):
for key, value in mongo_map.items():
item = item.replace(key, value)
return item
def to_mongo_key(item):
return to_mongo(item).strip().lower()
def from_mongo(item):
for key, value in mongo_map.items():
item = item.replace(value, key)
return item
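# Example of the round trip (comment only):
#   to_mongo('price.usd$')  ->  'price__!dot!__usd__!dollar!__'
#   from_mongo(to_mongo('price.usd$'))  ->  'price.usd$'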
sanitize_pattern = re.compile(r'<\/?[^>]+>')
def sanitized(value):
if value != sanitize_pattern.sub('', value):
raise ValidationValueError('Unsanitary string')
def unique_on(*groups):
"""Decorator for subclasses of `StoredObject`. Add a unique index on each
group of keys provided.
:param *groups: List of lists of keys to be indexed
"""
def wrapper(cls):
cls.__indices__ = getattr(cls, '__indices__', [])
cls.__indices__.extend([
{
'key_or_list': [
(key, pymongo.ASCENDING)
for key in group
],
'unique': True,
}
for group in groups
])
return cls
return wrapper
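# Usage sketch (hypothetical model; comment only):
#   @unique_on(['node', 'user'], ['guid'])
#   class Contributor(StoredObject):
#       ...
# adds two unique ascending indices to __indices__: one on (node, user) and
# one on (guid).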
def get_or_http_error(Model, pk, allow_deleted=False):
instance = Model.load(pk)
if not allow_deleted and getattr(instance, 'is_deleted', False):
raise HTTPError(http.GONE, data=dict(
message_long="This resource has been deleted"
))
if not instance:
raise HTTPError(http.NOT_FOUND, data=dict(
message_long="No resource with that primary key could be found"
))
else:
return instance
| apache-2.0 | 5,962,020,813,483,322,000 | 24.173333 | 77 | 0.591102 | false |
holmes/intellij-community | python/helpers/epydoc/apidoc.py | 90 | 92479 | # epydoc -- API Documentation Classes
#
# Copyright (C) 2005 Edward Loper
# Author: Edward Loper <[email protected]>
# URL: <http://epydoc.sf.net>
#
# $Id: apidoc.py 1675 2008-01-29 17:12:56Z edloper $
"""
Classes for encoding API documentation about Python programs.
These classes are used as a common representation for combining
information derived from introspection and from parsing.
The API documentation for a Python program is encoded using a graph of
L{APIDoc} objects, each of which encodes information about a single
Python variable or value. C{APIDoc} has two direct subclasses:
L{VariableDoc}, for documenting variables; and L{ValueDoc}, for
documenting values. The C{ValueDoc} class is subclassed further, to
define the different pieces of information that should be recorded
about each value type:
G{classtree: APIDoc}
The distinction between variables and values is intentionally made
explicit. This allows us to distinguish information about a variable
itself (such as whether it should be considered 'public' in its
containing namespace) from information about the value it contains
(such as what type the value has). This distinction is also important
because several variables can contain the same value: each variable
should be described by a separate C{VariableDoc}; but we only need one
C{ValueDoc}, since they share a single value.
@todo: Add a cache to canonical name lookup?
"""
__docformat__ = 'epytext en'
######################################################################
## Imports
######################################################################
import types, re, os.path, pickle
from epydoc import log
import epydoc
import __builtin__
from epydoc.compat import * # Backwards compatibility
from epydoc.util import decode_with_backslashreplace, py_src_filename
import epydoc.markup.pyval_repr
######################################################################
# Dotted Names
######################################################################
class DottedName:
"""
A sequence of identifiers, separated by periods, used to name a
Python variable, value, or argument. The identifiers that make up
a dotted name can be accessed using the indexing operator:
>>> name = DottedName('epydoc', 'api_doc', 'DottedName')
>>> print name
        epydoc.api_doc.DottedName
>>> name[1]
'api_doc'
"""
UNREACHABLE = "??"
_IDENTIFIER_RE = re.compile("""(?x)
(%s | # UNREACHABLE marker, or..
(script-)? # Prefix: script (not a module)
\w+ # Identifier (yes, identifiers starting with a
# digit are allowed. See SF bug #1649347)
'?) # Suffix: submodule that is shadowed by a var
(-\d+)? # Suffix: unreachable vals with the same name
$"""
% re.escape(UNREACHABLE))
class InvalidDottedName(ValueError):
"""
An exception raised by the DottedName constructor when one of
its arguments is not a valid dotted name.
"""
_ok_identifiers = set()
"""A cache of identifier strings that have been checked against
_IDENTIFIER_RE and found to be acceptable."""
def __init__(self, *pieces, **options):
"""
Construct a new dotted name from the given sequence of pieces,
each of which can be either a C{string} or a C{DottedName}.
Each piece is divided into a sequence of identifiers, and
these sequences are combined together (in order) to form the
identifier sequence for the new C{DottedName}. If a piece
contains a string, then it is divided into substrings by
splitting on periods, and each substring is checked to see if
it is a valid identifier.
As an optimization, C{pieces} may also contain a single tuple
of values. In that case, that tuple will be used as the
C{DottedName}'s identifiers; it will I{not} be checked to
see if it's valid.
@kwparam strict: if true, then raise an L{InvalidDottedName}
if the given name is invalid.
"""
if len(pieces) == 1 and isinstance(pieces[0], tuple):
self._identifiers = pieces[0] # Optimization
return
if len(pieces) == 0:
raise DottedName.InvalidDottedName('Empty DottedName')
self._identifiers = []
for piece in pieces:
if isinstance(piece, DottedName):
self._identifiers += piece._identifiers
elif isinstance(piece, basestring):
for subpiece in piece.split('.'):
if piece not in self._ok_identifiers:
if not self._IDENTIFIER_RE.match(subpiece):
if options.get('strict'):
raise DottedName.InvalidDottedName(
'Bad identifier %r' % (piece,))
else:
log.warning("Identifier %r looks suspicious; "
"using it anyway." % piece)
self._ok_identifiers.add(piece)
self._identifiers.append(subpiece)
else:
raise TypeError('Bad identifier %r: expected '
'DottedName or str' % (piece,))
self._identifiers = tuple(self._identifiers)
def __repr__(self):
idents = [`ident` for ident in self._identifiers]
return 'DottedName(' + ', '.join(idents) + ')'
def __str__(self):
"""
Return the dotted name as a string formed by joining its
identifiers with periods:
            >>> print DottedName('epydoc', 'api_doc', 'DottedName')
            epydoc.api_doc.DottedName
"""
return '.'.join(self._identifiers)
def __add__(self, other):
"""
Return a new C{DottedName} whose identifier sequence is formed
by adding C{other}'s identifier sequence to C{self}'s.
"""
if isinstance(other, (basestring, DottedName)):
return DottedName(self, other)
else:
return DottedName(self, *other)
def __radd__(self, other):
"""
Return a new C{DottedName} whose identifier sequence is formed
by adding C{self}'s identifier sequence to C{other}'s.
"""
if isinstance(other, (basestring, DottedName)):
return DottedName(other, self)
else:
return DottedName(*(list(other)+[self]))
def __getitem__(self, i):
"""
Return the C{i}th identifier in this C{DottedName}. If C{i} is
a non-empty slice, then return a C{DottedName} built from the
identifiers selected by the slice. If C{i} is an empty slice,
return an empty list (since empty C{DottedName}s are not valid).
"""
if isinstance(i, types.SliceType):
pieces = self._identifiers[i.start:i.stop]
if pieces: return DottedName(pieces)
else: return []
else:
return self._identifiers[i]
def __hash__(self):
return hash(self._identifiers)
def __cmp__(self, other):
"""
Compare this dotted name to C{other}. Two dotted names are
considered equal if their identifier subsequences are equal.
Ordering between dotted names is lexicographic, in order of
identifier from left to right.
"""
if not isinstance(other, DottedName):
return -1
return cmp(self._identifiers, other._identifiers)
def __len__(self):
"""
Return the number of identifiers in this dotted name.
"""
return len(self._identifiers)
def container(self):
"""
Return the DottedName formed by removing the last identifier
from this dotted name's identifier sequence. If this dotted
name only has one name in its identifier sequence, return
C{None} instead.
"""
if len(self._identifiers) == 1:
return None
else:
return DottedName(*self._identifiers[:-1])
def dominates(self, name, strict=False):
"""
Return true if this dotted name is equal to a prefix of
C{name}. If C{strict} is true, then also require that
C{self!=name}.
>>> DottedName('a.b').dominates(DottedName('a.b.c.d'))
True
"""
len_self = len(self._identifiers)
len_name = len(name._identifiers)
if (len_self > len_name) or (strict and len_self == len_name):
return False
# The following is redundant (the first clause is implied by
# the second), but is done as an optimization.
return ((self._identifiers[0] == name._identifiers[0]) and
self._identifiers == name._identifiers[:len_self])
def contextualize(self, context):
"""
If C{self} and C{context} share a common ancestor, then return
a name for C{self}, relative to that ancestor. If they do not
share a common ancestor (or if C{context} is C{UNKNOWN}), then
simply return C{self}.
This is used to generate shorter versions of dotted names in
cases where users can infer the intended target from the
context.
@type context: L{DottedName}
@rtype: L{DottedName}
"""
if context is UNKNOWN or not context or len(self) <= 1:
return self
if self[0] == context[0]:
return self[1:].contextualize(context[1:])
else:
return self
# Find the first index where self & context differ.
for i in range(min(len(context), len(self))):
if self._identifiers[i] != context._identifiers[i]:
first_difference = i
break
else:
first_difference = i+1
# Strip off anything before that index.
if first_difference == 0:
return self
elif first_difference == len(self):
return self[-1:]
else:
return self[first_difference:]
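    # Example of the intended behaviour (comment only):
    #   DottedName('epydoc', 'apidoc', 'DottedName').contextualize(
    #       DottedName('epydoc', 'docwriter'))
    # returns DottedName('apidoc', 'DottedName'), since only the leading
    # 'epydoc' is shared with the context.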
######################################################################
# UNKNOWN Value
######################################################################
class _Sentinel:
"""
A unique value that won't compare equal to any other value. This
class is used to create L{UNKNOWN}.
"""
def __init__(self, name):
self.name = name
def __repr__(self):
return '<%s>' % self.name
def __nonzero__(self):
raise ValueError('Sentinel value <%s> can not be used as a boolean' %
self.name)
UNKNOWN = _Sentinel('UNKNOWN')
"""A special value used to indicate that a given piece of
information about an object is unknown. This is used as the
default value for all instance variables."""
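# Typical test against the sentinel (comment only; `val_doc` is a hypothetical
# APIDoc instance): attributes default to UNKNOWN rather than None, so callers
# compare by identity, e.g. `if val_doc.docstring is not UNKNOWN: ...`;
# evaluating UNKNOWN as a boolean raises ValueError by design.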
######################################################################
# API Documentation Objects: Abstract Base Classes
######################################################################
class APIDoc(object):
"""
API documentation information for a single element of a Python
program. C{APIDoc} itself is an abstract base class; subclasses
are used to specify what information should be recorded about each
type of program element. In particular, C{APIDoc} has two direct
subclasses, C{VariableDoc} for documenting variables and
C{ValueDoc} for documenting values; and the C{ValueDoc} class is
subclassed further for different value types.
Each C{APIDoc} subclass specifies the set of attributes that
should be used to record information about the corresponding
program element type. The default value for each attribute is
stored in the class; these default values can then be overridden
with instance variables. Most attributes use the special value
L{UNKNOWN} as their default value, to indicate that the correct
value for that attribute has not yet been determined. This makes
it easier to merge two C{APIDoc} objects that are documenting the
same element (in particular, to merge information about an element
that was derived from parsing with information that was derived
from introspection).
For all attributes with boolean values, use only the constants
C{True} and C{False} to designate true and false. In particular,
do I{not} use other values that evaluate as true or false, such as
C{2} or C{()}. This restriction makes it easier to handle
C{UNKNOWN} values. For example, to test if a boolean attribute is
C{True} or C{UNKNOWN}, use 'C{attrib in (True, UNKNOWN)}' or
'C{attrib is not False}'.
Two C{APIDoc} objects describing the same object can be X{merged},
using the method L{merge_and_overwrite(other)}. After two
C{APIDoc}s are merged, any changes to one will be reflected in the
other. This is accomplished by setting the two C{APIDoc} objects
to use a shared instance dictionary. See the documentation for
L{merge_and_overwrite} for more information, and some important
caveats about hashing.
"""
#{ Docstrings
docstring = UNKNOWN
"""@ivar: The documented item's docstring.
@type: C{string} or C{None}"""
docstring_lineno = UNKNOWN
"""@ivar: The line number on which the documented item's docstring
begins.
@type: C{int}"""
#} end of "docstrings" group
#{ Information Extracted from Docstrings
descr = UNKNOWN
"""@ivar: A description of the documented item, extracted from its
docstring.
@type: L{ParsedDocstring<epydoc.markup.ParsedDocstring>}"""
summary = UNKNOWN
"""@ivar: A summary description of the documented item, extracted from
its docstring.
@type: L{ParsedDocstring<epydoc.markup.ParsedDocstring>}"""
other_docs = UNKNOWN
"""@ivar: A flag indicating if the entire L{docstring} body (except tags
if any) is entirely included in the L{summary}.
@type: C{bool}"""
metadata = UNKNOWN
"""@ivar: Metadata about the documented item, extracted from fields in
its docstring. I{Currently} this is encoded as a list of tuples
C{(field, arg, descr)}. But that may change.
@type: C{(str, str, L{ParsedDocstring<markup.ParsedDocstring>})}"""
extra_docstring_fields = UNKNOWN
"""@ivar: A list of new docstring fields tags that are defined by the
documented item's docstring. These new field tags can be used by
this item or by any item it contains.
@type: C{list} of L{DocstringField <epydoc.docstringparser.DocstringField>}"""
#} end of "information extracted from docstrings" group
#{ Source Information
docs_extracted_by = UNKNOWN # 'parser' or 'introspecter' or 'both'
"""@ivar: Information about where the information contained by this
C{APIDoc} came from. Can be one of C{'parser'},
C{'introspector'}, or C{'both'}.
@type: C{str}"""
#} end of "source information" group
def __init__(self, **kwargs):
"""
Construct a new C{APIDoc} object. Keyword arguments may be
used to initialize the new C{APIDoc}'s attributes.
@raise TypeError: If a keyword argument is specified that does
not correspond to a valid attribute for this (sub)class of
C{APIDoc}.
"""
if epydoc.DEBUG:
for key in kwargs:
if key[0] != '_' and not hasattr(self.__class__, key):
raise TypeError('%s got unexpected arg %r' %
(self.__class__.__name__, key))
self.__dict__.update(kwargs)
def _debug_setattr(self, attr, val):
"""
Modify an C{APIDoc}'s attribute. This is used when
L{epydoc.DEBUG} is true, to make sure we don't accidentally
set any inappropriate attributes on C{APIDoc} objects.
@raise AttributeError: If C{attr} is not a valid attribute for
this (sub)class of C{APIDoc}. (C{attr} is considered a
valid attribute iff C{self.__class__} defines an attribute
with that name.)
"""
# Don't intercept special assignments like __class__, or
# assignments to private variables.
if attr.startswith('_'):
return object.__setattr__(self, attr, val)
if not hasattr(self, attr):
raise AttributeError('%s does not define attribute %r' %
(self.__class__.__name__, attr))
self.__dict__[attr] = val
if epydoc.DEBUG:
__setattr__ = _debug_setattr
def __repr__(self):
return '<%s>' % self.__class__.__name__
def pp(self, doublespace=0, depth=5, exclude=(), include=()):
"""
Return a pretty-printed string representation for the
information contained in this C{APIDoc}.
"""
return pp_apidoc(self, doublespace, depth, exclude, include)
__str__ = pp
def specialize_to(self, cls):
"""
Change C{self}'s class to C{cls}. C{cls} must be a subclass
of C{self}'s current class. For example, if a generic
C{ValueDoc} was created for a value, and it is determined that
the value is a routine, you can update its class with:
>>> valdoc.specialize_to(RoutineDoc)
"""
if not issubclass(cls, self.__class__):
raise ValueError('Can not specialize to %r' % cls)
# Update the class.
self.__class__ = cls
# Update the class of any other apidoc's in the mergeset.
if self.__mergeset is not None:
for apidoc in self.__mergeset:
apidoc.__class__ = cls
# Re-initialize self, in case the subclass constructor does
# any special processing on its arguments.
self.__init__(**self.__dict__)
__has_been_hashed = False
"""True iff L{self.__hash__()} has ever been called."""
def __hash__(self):
self.__has_been_hashed = True
return id(self.__dict__)
def __cmp__(self, other):
if not isinstance(other, APIDoc): return -1
if self.__dict__ is other.__dict__: return 0
name_cmp = cmp(self.canonical_name, other.canonical_name)
if name_cmp == 0: return -1
else: return name_cmp
def is_detailed(self):
"""
Does this object deserve a box with extra details?
@return: True if the object needs extra details, else False.
@rtype: C{bool}
"""
if self.other_docs is True:
return True
if self.metadata is not UNKNOWN:
return bool(self.metadata)
__mergeset = None
"""The set of all C{APIDoc} objects that have been merged with
this C{APIDoc} (using L{merge_and_overwrite()}). Each C{APIDoc}
in this set shares a common instance dictionary (C{__dict__})."""
def merge_and_overwrite(self, other, ignore_hash_conflict=False):
"""
Combine C{self} and C{other} into a X{merged object}, such
that any changes made to one will affect the other. Any
attributes that C{other} had before merging will be discarded.
This is accomplished by copying C{self.__dict__} over
C{other.__dict__} and C{self.__class__} over C{other.__class__}.
Care must be taken with this method, since it modifies the
hash value of C{other}. To help avoid the problems that this
can cause, C{merge_and_overwrite} will raise an exception if
C{other} has ever been hashed, unless C{ignore_hash_conflict}
is True. Note that adding C{other} to a dictionary, set, or
similar data structure will implicitly cause it to be hashed.
If you do set C{ignore_hash_conflict} to True, then any
existing data structures that rely on C{other}'s hash staying
constant may become corrupted.
@return: C{self}
@raise ValueError: If C{other} has ever been hashed.
"""
# If we're already merged, then there's nothing to do.
if (self.__dict__ is other.__dict__ and
self.__class__ is other.__class__): return self
if other.__has_been_hashed and not ignore_hash_conflict:
raise ValueError("%r has already been hashed! Merging it "
"would cause its has value to change." % other)
# If other was itself already merged with anything,
# then we need to merge those too.
mergeset = (self.__mergeset or [self]) + (other.__mergeset or [other])
other.__dict__.clear()
for apidoc in mergeset:
#if apidoc is self: pass
apidoc.__class__ = self.__class__
apidoc.__dict__ = self.__dict__
self.__mergeset = mergeset
# Sanity checks.
assert self in mergeset and other in mergeset
for apidoc in mergeset:
assert apidoc.__dict__ is self.__dict__
# Return self.
return self
def apidoc_links(self, **filters):
"""
Return a list of all C{APIDoc}s that are directly linked from
this C{APIDoc} (i.e., are contained or pointed to by one or
more of this C{APIDoc}'s attributes.)
Keyword argument C{filters} can be used to selectively exclude
certain categories of attribute value. For example, using
C{includes=False} will exclude variables that were imported
from other modules; and C{subclasses=False} will exclude
subclasses. The filter categories currently supported by
epydoc are:
- C{imports}: Imported variables.
- C{packages}: Containing packages for modules.
- C{submodules}: Contained submodules for packages.
- C{bases}: Bases for classes.
- C{subclasses}: Subclasses for classes.
- C{variables}: All variables.
- C{private}: Private variables.
- C{overrides}: Points from class variables to the variables
they override. This filter is False by default.
"""
return []
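# A minimal sketch of APIDoc merging.  The attribute values are
# hypothetical; after merge_and_overwrite() both objects share one
# instance dictionary, so updates through either are seen by both:
#
#     >>> a = APIDoc(docstring='from the parser')
#     >>> b = APIDoc(docstring='from introspection')
#     >>> a.merge_and_overwrite(b) is a
#     True
#     >>> b.docstring                # b now shares a's attributes
#     'from the parser'
#     >>> a.summary = None; b.summary is None
#     True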
def reachable_valdocs(root, **filters):
"""
Return a list of all C{ValueDoc}s that can be reached, directly or
indirectly from the given root list of C{ValueDoc}s.
@param filters: A set of filters that can be used to prevent
C{reachable_valdocs} from following specific link types when
looking for C{ValueDoc}s that can be reached from the root
set. See C{APIDoc.apidoc_links} for a more complete
description.
"""
apidoc_queue = list(root)
val_set = set()
var_set = set()
while apidoc_queue:
api_doc = apidoc_queue.pop()
if isinstance(api_doc, ValueDoc):
val_set.add(api_doc)
else:
var_set.add(api_doc)
apidoc_queue.extend([v for v in api_doc.apidoc_links(**filters)
if v not in val_set and v not in var_set])
return val_set
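# Sketch of reachable_valdocs() on a trivial root set (ModuleDoc is
# defined further below; an empty module links to nothing, so only
# the root itself is reachable):
#
#     >>> mod = ModuleDoc(variables={})
#     >>> reachable_valdocs([mod]) == set([mod])
#     True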
######################################################################
# Variable Documentation Objects
######################################################################
class VariableDoc(APIDoc):
"""
API documentation information about a single Python variable.
@note: The only time a C{VariableDoc} will have its own docstring
is if that variable was created using an assignment statement, and
that assignment statement had a docstring-comment or was followed
by a pseudo-docstring.
"""
#{ Basic Variable Information
name = UNKNOWN
"""@ivar: The name of this variable in its containing namespace.
@type: C{str}"""
container = UNKNOWN
"""@ivar: API documentation for the namespace that contains this
variable.
@type: L{ValueDoc}"""
canonical_name = UNKNOWN
"""@ivar: A dotted name that serves as a unique identifier for
this C{VariableDoc}. It should be formed by concatenating
the C{VariableDoc}'s C{container} with its C{name}.
@type: L{DottedName}"""
value = UNKNOWN
"""@ivar: The API documentation for this variable's value.
@type: L{ValueDoc}"""
#}
#{ Information Extracted from Docstrings
type_descr = UNKNOWN
"""@ivar: A description of the variable's expected type, extracted from
its docstring.
@type: L{ParsedDocstring<epydoc.markup.ParsedDocstring>}"""
#} end of "information extracted from docstrings" group
#{ Information about Imported Variables
imported_from = UNKNOWN
"""@ivar: The fully qualified dotted name of the variable that this
variable's value was imported from. This attribute should only
be defined if C{is_imported} is true.
@type: L{DottedName}"""
is_imported = UNKNOWN
"""@ivar: Was this variable's value imported from another module?
(Exception: variables that are explicitly included in __all__ have
C{is_imported} set to C{False}, even if they are in fact
imported.)
@type: C{bool}"""
#} end of "information about imported variables" group
#{ Information about Variables in Classes
is_instvar = UNKNOWN
"""@ivar: If true, then this variable is an instance variable; if false,
then this variable is a class variable. This attribute should
only be defined if the containing namespace is a class
@type: C{bool}"""
overrides = UNKNOWN # [XXX] rename -- don't use a verb.
"""@ivar: The API documentation for the variable that is overridden by
this variable. This attribute should only be defined if the
containing namespace is a class.
@type: L{VariableDoc}"""
#} end of "information about variables in classes" group
#{ Flags
is_alias = UNKNOWN
"""@ivar: Is this variable an alias for another variable with the same
value? If so, then this variable will be dispreferred when
assigning canonical names.
@type: C{bool}"""
is_public = UNKNOWN
"""@ivar: Is this variable part of its container's public API?
@type: C{bool}"""
#} end of "flags" group
def __init__(self, **kwargs):
APIDoc.__init__(self, **kwargs)
if self.is_public is UNKNOWN and self.name is not UNKNOWN:
self.is_public = (not self.name.startswith('_') or
self.name.endswith('_'))
def __repr__(self):
if self.canonical_name is not UNKNOWN:
return '<%s %s>' % (self.__class__.__name__, self.canonical_name)
if self.name is not UNKNOWN:
return '<%s %s>' % (self.__class__.__name__, self.name)
else:
return '<%s>' % self.__class__.__name__
def _get_defining_module(self):
if self.container is UNKNOWN:
return UNKNOWN
return self.container.defining_module
defining_module = property(_get_defining_module, doc="""
A read-only property that can be used to get the variable's
defining module. This is defined as the defining module
of the variable's container.""")
def apidoc_links(self, **filters):
# nb: overrides filter is *False* by default.
if (filters.get('overrides', False) and
(self.overrides not in (None, UNKNOWN))):
overrides = [self.overrides]
else:
overrides = []
if self.value in (None, UNKNOWN):
return []+overrides
else:
return [self.value]+overrides
def is_detailed(self):
pval = super(VariableDoc, self).is_detailed()
if pval or self.value in (None, UNKNOWN):
return pval
if (self.overrides not in (None, UNKNOWN) and
isinstance(self.value, RoutineDoc)):
return True
if isinstance(self.value, GenericValueDoc):
# [XX] This is a little hackish -- we assume that the
# summary lines will have SUMMARY_REPR_LINELEN chars,
# that len(name) of those will be taken up by the name,
# and that 3 of those will be taken up by " = " between
# the name & val. Note that if any docwriter uses a
# different formula for maxlen for this, then it will
# not get the right value for is_detailed().
maxlen = self.value.SUMMARY_REPR_LINELEN-3-len(self.name)
return (not self.value.summary_pyval_repr(maxlen).is_complete)
else:
return self.value.is_detailed()
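# Sketch of the is_public default computed in VariableDoc.__init__:
# names with a leading underscore are private, but "dunder" names such
# as __init__ still count as public because they end with an underscore:
#
#     >>> VariableDoc(name='helper').is_public
#     True
#     >>> VariableDoc(name='_helper').is_public
#     False
#     >>> VariableDoc(name='__init__').is_public
#     True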
######################################################################
# Value Documentation Objects
######################################################################
class ValueDoc(APIDoc):
"""
API documentation information about a single Python value.
"""
canonical_name = UNKNOWN
"""@ivar: A dotted name that serves as a unique identifier for
this C{ValueDoc}'s value. If the value can be reached using a
single sequence of identifiers (given the appropriate imports),
then that sequence of identifiers is used as its canonical name.
If the value can be reached by multiple sequences of identifiers
(i.e., if it has multiple aliases), then one of those sequences of
identifiers is used. If the value cannot be reached by any
sequence of identifiers (e.g., if it was used as a base class but
then its variable was deleted), then its canonical name will start
with C{'??'}. If necessary, a dash followed by a number will be
appended to the end of a non-reachable identifier to make its
canonical name unique.
When possible, canonical names are chosen when new C{ValueDoc}s
are created. However, this is sometimes not possible. If a
canonical name can not be chosen when the C{ValueDoc} is created,
then one will be assigned by L{assign_canonical_names()
<docbuilder.assign_canonical_names>}.
@type: L{DottedName}"""
#{ Value Representation
pyval = UNKNOWN
"""@ivar: A pointer to the actual Python object described by this
C{ValueDoc}. This is used to display the value (e.g., when
describing a variable.) Use L{pyval_repr()} to generate a
plaintext string representation of this value.
@type: Python object"""
parse_repr = UNKNOWN
"""@ivar: A text representation of this value, extracted from
parsing its source code. This representation may not accurately
reflect the actual value (e.g., if the value was modified after
the initial assignment).
@type: C{unicode}"""
REPR_MAXLINES = 5
"""@cvar: The maximum number of lines of text that should be
generated by L{pyval_repr()}. If the string representation does
not fit in this number of lines, an ellipsis marker (...) will
be placed at the end of the formatted representation."""
REPR_LINELEN = 75
"""@cvar: The maximum number of characters for lines of text that
should be generated by L{pyval_repr()}. Any lines that exceed
this number of characters will be line-wrapped; the S{crarr}
symbol will be used to indicate that the line was wrapped."""
SUMMARY_REPR_LINELEN = 75
"""@cvar: The maximum number of characters for the single-line
text representation generated by L{summary_pyval_repr()}. If
the value's representation does not fit in this number of
characters, an ellipsis marker (...) will be placed at the end
of the formatted representation."""
REPR_MIN_SCORE = 0
"""@cvar: The minimum score that a value representation based on
L{pyval} should have in order to be used instead of L{parse_repr}
as the canonical representation for this C{ValueDoc}'s value.
@see: L{epydoc.markup.pyval_repr}"""
#} end of "value representation" group
#{ Context
defining_module = UNKNOWN
"""@ivar: The documentation for the module that defines this
value. This is used, e.g., to lookup the appropriate markup
language for docstrings. For a C{ModuleDoc},
C{defining_module} should be C{self}.
@type: L{ModuleDoc}"""
#} end of "context group"
#{ Information about Imported Variables
proxy_for = None # [xx] in progress.
"""@ivar: If C{proxy_for} is not None, then this value was
imported from another file. C{proxy_for} is the dotted name of
the variable that this value was imported from. If that
variable is documented, then its C{value} may contain more
complete API documentation about this value. The C{proxy_for}
attribute is used by the source code parser to link imported
values to their source values (in particular, for base
classes). When possible, these proxy C{ValueDoc}s are replaced
by the imported value's C{ValueDoc} by
L{link_imports()<docbuilder.link_imports>}.
@type: L{DottedName}"""
#} end of "information about imported variables" group
#: @ivar:
#: This is currently used to extract values from __all__, etc, in
#: the docparser module; maybe I should specialize
#: process_assignment and extract it there? Although, for __all__,
#: it's not clear where I'd put the value, since I just use it to
#: set private/public/imported attribs on other vars (that might not
#: exist yet at the time.)
toktree = UNKNOWN
def __repr__(self):
if self.canonical_name is not UNKNOWN:
return '<%s %s>' % (self.__class__.__name__, self.canonical_name)
else:
return '<%s %s>' % (self.__class__.__name__,
self.summary_pyval_repr().to_plaintext(None))
def __setstate__(self, state):
self.__dict__ = state
def __getstate__(self):
"""
State serializer for the pickle module. This is necessary
because sometimes the C{pyval} attribute contains an
un-pickleable value.
"""
# Construct our pickled dictionary. Maintain this dictionary
# as a private attribute, so we can reuse it later, since
# merged objects need to share a single dictionary.
if not hasattr(self, '_ValueDoc__pickle_state'):
# Make sure __pyval_repr & __summary_pyval_repr are cached:
self.pyval_repr(), self.summary_pyval_repr()
# Construct the dictionary; leave out 'pyval'.
self.__pickle_state = self.__dict__.copy()
self.__pickle_state['pyval'] = UNKNOWN
if not isinstance(self, GenericValueDoc):
assert self.__pickle_state != {}
# Return the pickle state.
return self.__pickle_state
#{ Value Representation
def pyval_repr(self):
"""
Return a formatted representation of the Python object
described by this C{ValueDoc}. This representation may
include data from introspection or parsing, and is authoritative
as 'the best way to represent a Python value.' Any lines that
go beyond L{REPR_LINELEN} characters will be wrapped; and if
the representation as a whole takes more than L{REPR_MAXLINES}
lines, then it will be truncated (with an ellipsis marker).
This function will never return L{UNKNOWN} or C{None}.
@rtype: L{ColorizedPyvalRepr}
"""
# Use self.__pyval_repr to cache the result.
if not hasattr(self, '_ValueDoc__pyval_repr'):
self.__pyval_repr = epydoc.markup.pyval_repr.colorize_pyval(
self.pyval, self.parse_repr, self.REPR_MIN_SCORE,
self.REPR_LINELEN, self.REPR_MAXLINES, linebreakok=True)
return self.__pyval_repr
def summary_pyval_repr(self, max_len=None):
"""
Return a single-line formatted representation of the Python
object described by this C{ValueDoc}. This representation may
include data from introspection or parsing, and is authoritative
as 'the best way to summarize a Python value.' If the
representation takes more than L{SUMMARY_REPR_LINELEN}
characters, then it will be truncated (with an ellipsis
marker). This function will never return L{UNKNOWN} or
C{None}.
@rtype: L{ColorizedPyvalRepr}
"""
# If max_len is specified, then do *not* cache the result.
if max_len is not None:
return epydoc.markup.pyval_repr.colorize_pyval(
self.pyval, self.parse_repr, self.REPR_MIN_SCORE,
max_len, maxlines=1, linebreakok=False)
# Use self.__summary_pyval_repr to cache the result.
if not hasattr(self, '_ValueDoc__summary_pyval_repr'):
self.__summary_pyval_repr = epydoc.markup.pyval_repr.colorize_pyval(
self.pyval, self.parse_repr, self.REPR_MIN_SCORE,
self.SUMMARY_REPR_LINELEN, maxlines=1, linebreakok=False)
return self.__summary_pyval_repr
#} end of "value representation" group
def apidoc_links(self, **filters):
return []
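# Sketch of the value-representation helpers above.  The calls mirror
# what ValueDoc.__repr__ does; both return ColorizedPyvalRepr objects
# from epydoc.markup.pyval_repr, so only the call pattern is shown:
#
#     >>> vd = ValueDoc(pyval=[1, 2, 3])
#     >>> summary = vd.summary_pyval_repr().to_plaintext(None)
#     >>> full = vd.pyval_repr().to_plaintext(None)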
class GenericValueDoc(ValueDoc):
"""
API documentation about a 'generic' value, i.e., one that does not
have its own docstring or any information other than its value and
parse representation. C{GenericValueDoc}s do not get assigned
canonical names.
"""
canonical_name = None
def is_detailed(self):
return (not self.summary_pyval_repr().is_complete)
class NamespaceDoc(ValueDoc):
"""
API documentation information about a single Python namespace
value. (I.e., a module or a class).
"""
#{ Information about Variables
variables = UNKNOWN
"""@ivar: The contents of the namespace, encoded as a
dictionary mapping from identifiers to C{VariableDoc}s. This
dictionary contains all names defined by the namespace,
including imported variables, aliased variables, and variables
inherited from base classes (once L{inherit_docs()
<epydoc.docbuilder.inherit_docs>} has added them).
@type: C{dict} from C{string} to L{VariableDoc}"""
sorted_variables = UNKNOWN
"""@ivar: A list of all variables defined by this
namespace, in sorted order. The elements of this list should
exactly match the values of L{variables}. The sort order for
this list is defined as follows:
- Any variables listed in a C{@sort} docstring field are
listed in the order given by that field.
- These are followed by any variables that were found while
parsing the source code, in the order in which they were
defined in the source file.
- Finally, any remaining variables are listed in
alphabetical order.
@type: C{list} of L{VariableDoc}"""
sort_spec = UNKNOWN
"""@ivar: The order in which variables should be listed,
encoded as a list of names. Any variables whose names are not
included in this list should be listed alphabetically,
following the variables that are included.
@type: C{list} of C{str}"""
group_specs = UNKNOWN
"""@ivar: The groups that are defined by this namespace's
docstrings. C{group_specs} is encoded as an ordered list of
tuples C{(group_name, elt_names)}, where C{group_name} is the
name of a group and C{elt_names} is a list of element names in
that group. (An element can be a variable or a submodule.) A
'*' in an element name will match any string of characters.
@type: C{list} of C{(str,list)}"""
variable_groups = UNKNOWN
"""@ivar: A dictionary specifying what group each
variable belongs to. The keys of the dictionary are group
names, and the values are lists of C{VariableDoc}s. The order
that groups should be listed in should be taken from
L{group_specs}.
@type: C{dict} from C{str} to C{list} of L{VariableDoc}"""
#} end of group "information about variables"
def __init__(self, **kwargs):
kwargs.setdefault('variables', {})
APIDoc.__init__(self, **kwargs)
assert self.variables is not UNKNOWN
def is_detailed(self):
return True
def apidoc_links(self, **filters):
variables = filters.get('variables', True)
imports = filters.get('imports', True)
private = filters.get('private', True)
if variables and imports and private:
return self.variables.values() # list the common case first.
elif not variables:
return []
elif not imports and not private:
return [v for v in self.variables.values() if
v.is_imported != True and v.is_public != False]
elif not private:
return [v for v in self.variables.values() if
v.is_public != False]
elif not imports:
return [v for v in self.variables.values() if
v.is_imported != True]
assert 0, 'this line should be unreachable'
def init_sorted_variables(self):
"""
Initialize the L{sorted_variables} attribute, based on the
L{variables} and L{sort_spec} attributes. This should usually
be called after all variables have been added to C{variables}
(including any inherited variables for classes).
"""
unsorted = self.variables.copy()
self.sorted_variables = []
# Add any variables that are listed in sort_spec
if self.sort_spec is not UNKNOWN:
unused_idents = set(self.sort_spec)
for ident in self.sort_spec:
if ident in unsorted:
self.sorted_variables.append(unsorted.pop(ident))
unused_idents.discard(ident)
elif '*' in ident:
regexp = re.compile('^%s$' % ident.replace('*', '(.*)'))
# sort within matching group?
for name, var_doc in unsorted.items():
if regexp.match(name):
self.sorted_variables.append(unsorted.pop(name))
unused_idents.discard(ident)
for ident in unused_idents:
if ident not in ['__all__', '__docformat__', '__path__']:
log.warning("@sort: %s.%s not found" %
(self.canonical_name, ident))
# Add any remaining variables in alphabetical order.
var_docs = unsorted.items()
var_docs.sort()
for name, var_doc in var_docs:
self.sorted_variables.append(var_doc)
def init_variable_groups(self):
"""
Initialize the L{variable_groups} attribute, based on the
L{sorted_variables} and L{group_specs} attributes.
"""
if self.sorted_variables is UNKNOWN:
self.init_sorted_variables()
assert len(self.sorted_variables) == len(self.variables)
elts = [(v.name, v) for v in self.sorted_variables]
self._unused_groups = dict([(n,set(i)) for (n,i) in self.group_specs])
self.variable_groups = self._init_grouping(elts)
def group_names(self):
"""
Return a list of the group names defined by this namespace, in
the order in which they should be listed, with no duplicates.
"""
name_list = ['']
name_set = set()
for name, spec in self.group_specs:
if name not in name_set:
name_set.add(name)
name_list.append(name)
return name_list
def _init_grouping(self, elts):
"""
Divide a given list of APIDoc objects into groups, as
specified by L{self.group_specs}.
@param elts: A list of tuples C{(name, apidoc)}.
@return: A list of tuples C{(groupname, elts)}, where
C{groupname} is the name of a group and C{elts} is a list of
C{APIDoc}s in that group. The first tuple has name C{''}, and
is used for ungrouped elements. The remaining tuples are
listed in the order that they appear in C{self.group_specs}.
Within each tuple, the elements are listed in the order that
they appear in C{elts}.
"""
# Make the common case fast.
if len(self.group_specs) == 0:
return {'': [elt[1] for elt in elts]}
ungrouped = dict(elts)
groups = {}
for elt_name, elt_doc in elts:
for (group_name, idents) in self.group_specs:
group = groups.setdefault(group_name, [])
unused_groups = self._unused_groups[group_name]
for ident in idents:
if re.match('^%s$' % ident.replace('*', '(.*)'), elt_name):
unused_groups.discard(ident)
if elt_name in ungrouped:
group.append(ungrouped.pop(elt_name))
else:
log.warning("%s.%s in multiple groups" %
(self.canonical_name, elt_name))
# Convert ungrouped from an unordered set to an ordered list.
groups[''] = [elt_doc for (elt_name, elt_doc) in elts
if elt_name in ungrouped]
return groups
def report_unused_groups(self):
"""
Issue a warning for any @group items that were not used by
L{_init_grouping()}.
"""
for (group, unused_idents) in self._unused_groups.items():
for ident in unused_idents:
log.warning("@group %s: %s.%s not found" %
(group, self.canonical_name, ident))
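# Sketch of init_sorted_variables() with a hypothetical sort_spec:
# names listed in sort_spec come first, in the given order, and any
# remaining variables follow in alphabetical order:
#
#     >>> ns = NamespaceDoc(variables={'b': VariableDoc(name='b'),
#     ...                              'a': VariableDoc(name='a'),
#     ...                              'z': VariableDoc(name='z')})
#     >>> ns.sort_spec = ['z']
#     >>> ns.init_sorted_variables()
#     >>> [v.name for v in ns.sorted_variables]
#     ['z', 'a', 'b']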
class ModuleDoc(NamespaceDoc):
"""
API documentation information about a single module.
"""
#{ Information about the Module
filename = UNKNOWN
"""@ivar: The name of the file that defines the module.
@type: C{string}"""
docformat = UNKNOWN
"""@ivar: The markup language used by docstrings in this module.
@type: C{string}"""
#{ Information about Submodules
submodules = UNKNOWN
"""@ivar: Modules contained by this module (if this module
is a package). (Note: on rare occasions, a module may have a
submodule that is shadowed by a variable with the same name.)
@type: C{list} of L{ModuleDoc}"""
submodule_groups = UNKNOWN
"""@ivar: A dictionary specifying what group each
submodule belongs to. The keys of the dictionary are group
names, and the values are lists of C{ModuleDoc}s. The order
that groups should be listed in should be taken from
L{group_specs}.
@type: C{dict} from C{str} to C{list} of L{ModuleDoc}"""
#{ Information about Packages
package = UNKNOWN
"""@ivar: API documentation for the module's containing package.
@type: L{ModuleDoc}"""
is_package = UNKNOWN
"""@ivar: True if this C{ModuleDoc} describes a package.
@type: C{bool}"""
path = UNKNOWN
"""@ivar: If this C{ModuleDoc} describes a package, then C{path}
contains a list of directories that constitute its path (i.e.,
the value of its C{__path__} variable).
@type: C{list} of C{str}"""
#{ Information about Imported Variables
imports = UNKNOWN
"""@ivar: A list of the source names of variables imported into
this module. This is used to construct import graphs.
@type: C{list} of L{DottedName}"""
#}
def apidoc_links(self, **filters):
val_docs = NamespaceDoc.apidoc_links(self, **filters)
if (filters.get('packages', True) and
self.package not in (None, UNKNOWN)):
val_docs.append(self.package)
if (filters.get('submodules', True) and
self.submodules not in (None, UNKNOWN)):
val_docs += self.submodules
return val_docs
def init_submodule_groups(self):
"""
Initialize the L{submodule_groups} attribute, based on the
L{submodules} and L{group_specs} attributes.
"""
if self.submodules in (None, UNKNOWN):
return
self.submodules = sorted(self.submodules,
key=lambda m:m.canonical_name)
elts = [(m.canonical_name[-1], m) for m in self.submodules]
self.submodule_groups = self._init_grouping(elts)
def select_variables(self, group=None, value_type=None, public=None,
imported=None, detailed=None):
"""
Return a specified subset of this module's L{sorted_variables}
list. If C{value_type} is given, then only return variables
whose values have the specified type. If C{group} is given,
then only return variables that belong to the specified group.
@require: The L{sorted_variables}, L{variable_groups}, and
L{submodule_groups} attributes must be initialized before
this method can be used. See L{init_sorted_variables()},
L{init_variable_groups()}, and L{init_submodule_groups()}.
@param value_type: A string specifying the value type for
which variables should be returned. Valid values are:
- 'class' - variables whose values are classes or types.
- 'function' - variables whose values are functions.
- 'other' - variables whose values are not classes,
exceptions, types, or functions.
@type value_type: C{string}
@param group: The name of the group for which variables should
be returned. A complete list of the groups defined by
this C{ModuleDoc} is available in the L{group_names}
instance variable. The first element of this list is
always the special group name C{''}, which is used for
variables that do not belong to any group.
@type group: C{string}
@param detailed: If C{True}, return only the variables that
deserve a detailed informative box; if C{False}, return only
those that do not. If C{None}, do not filter on this.
@type detailed: C{bool}
"""
if (self.sorted_variables is UNKNOWN or
self.variable_groups is UNKNOWN):
raise ValueError('sorted_variables and variable_groups '
'must be initialized first.')
if group is None: var_list = self.sorted_variables
else:
var_list = self.variable_groups.get(group, self.sorted_variables)
# Public/private filter (Count UNKNOWN as public)
if public is True:
var_list = [v for v in var_list if v.is_public is not False]
elif public is False:
var_list = [v for v in var_list if v.is_public is False]
# Imported filter (Count UNKNOWN as non-imported)
if imported is True:
var_list = [v for v in var_list if v.is_imported is True]
elif imported is False:
var_list = [v for v in var_list if v.is_imported is not True]
# Detailed filter
if detailed is True:
var_list = [v for v in var_list if v.is_detailed() is True]
elif detailed is False:
var_list = [v for v in var_list if v.is_detailed() is not True]
# [xx] Modules are not currently included in any of these
# value types.
if value_type is None:
return var_list
elif value_type == 'class':
return [var_doc for var_doc in var_list
if (isinstance(var_doc.value, ClassDoc))]
elif value_type == 'function':
return [var_doc for var_doc in var_list
if isinstance(var_doc.value, RoutineDoc)]
elif value_type == 'other':
return [var_doc for var_doc in var_list
if not isinstance(var_doc.value,
(ClassDoc, RoutineDoc, ModuleDoc))]
else:
raise ValueError('Bad value type %r' % value_type)
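# Sketch of ModuleDoc.select_variables() on a hypothetical module with
# a single function.  group_specs must be set (the docbuilder normally
# does this) before the grouping tables can be initialized:
#
#     >>> mod = ModuleDoc(variables={
#     ...     'f': VariableDoc(name='f', value=RoutineDoc())})
#     >>> mod.group_specs = []
#     >>> mod.init_sorted_variables(); mod.init_variable_groups()
#     >>> [v.name for v in mod.select_variables(value_type='function')]
#     ['f']
#     >>> mod.select_variables(value_type='class')
#     []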
class ClassDoc(NamespaceDoc):
"""
API documentation information about a single class.
"""
#{ Information about Base Classes
bases = UNKNOWN
"""@ivar: API documentation for the class's base classes.
@type: C{list} of L{ClassDoc}"""
#{ Information about Subclasses
subclasses = UNKNOWN
"""@ivar: API documentation for the class's known subclasses.
@type: C{list} of L{ClassDoc}"""
#}
def apidoc_links(self, **filters):
val_docs = NamespaceDoc.apidoc_links(self, **filters)
if (filters.get('bases', True) and
self.bases not in (None, UNKNOWN)):
val_docs += self.bases
if (filters.get('subclasses', True) and
self.subclasses not in (None, UNKNOWN)):
val_docs += self.subclasses
return val_docs
def is_type(self):
if self.canonical_name == DottedName('type'): return True
if self.bases is UNKNOWN: return False
for base in self.bases:
if isinstance(base, ClassDoc) and base.is_type():
return True
return False
def is_exception(self):
if self.canonical_name == DottedName('Exception'): return True
if self.bases is UNKNOWN: return False
for base in self.bases:
if isinstance(base, ClassDoc) and base.is_exception():
return True
return False
def is_newstyle_class(self):
if self.canonical_name == DottedName('object'): return True
if self.bases is UNKNOWN: return False
for base in self.bases:
if isinstance(base, ClassDoc) and base.is_newstyle_class():
return True
return False
def mro(self, warn_about_bad_bases=False):
if self.is_newstyle_class():
return self._c3_mro(warn_about_bad_bases)
else:
return self._dfs_bases([], set(), warn_about_bad_bases)
def _dfs_bases(self, mro, seen, warn_about_bad_bases):
if self in seen: return mro
mro.append(self)
seen.add(self)
if self.bases is not UNKNOWN:
for base in self.bases:
if isinstance(base, ClassDoc) and base.proxy_for is None:
base._dfs_bases(mro, seen, warn_about_bad_bases)
elif warn_about_bad_bases:
self._report_bad_base(base)
return mro
def _c3_mro(self, warn_about_bad_bases):
"""
Compute the class precedence list (mro) according to C3.
@seealso: U{http://www.python.org/2.3/mro.html}
"""
bases = [base for base in self.bases if isinstance(base, ClassDoc)]
if len(bases) != len(self.bases) and warn_about_bad_bases:
for base in self.bases:
if (not isinstance(base, ClassDoc) or
base.proxy_for is not None):
self._report_bad_base(base)
w = [warn_about_bad_bases]*len(bases)
return self._c3_merge([[self]] + map(ClassDoc._c3_mro, bases, w) +
[list(bases)])
def _report_bad_base(self, base):
if not isinstance(base, ClassDoc):
if not isinstance(base, GenericValueDoc):
base_name = base.canonical_name
elif base.parse_repr is not UNKNOWN:
base_name = base.parse_repr
else:
base_name = '%r' % base
log.warning("%s's base %s is not a class" %
(self.canonical_name, base_name))
elif base.proxy_for is not None:
log.warning("No information available for %s's base %s" %
(self.canonical_name, base.proxy_for))
def _c3_merge(self, seqs):
"""
Helper function for L{_c3_mro}.
"""
res = []
while 1:
nonemptyseqs=[seq for seq in seqs if seq]
if not nonemptyseqs: return res
for seq in nonemptyseqs: # find merge candidates among seq heads
cand = seq[0]
nothead=[s for s in nonemptyseqs if cand in s[1:]]
if nothead: cand=None #reject candidate
else: break
if not cand: raise "Inconsistent hierarchy"
res.append(cand)
for seq in nonemptyseqs: # remove cand
if seq[0] == cand: del seq[0]
def select_variables(self, group=None, value_type=None, inherited=None,
public=None, imported=None, detailed=None):
"""
Return a specified subset of this class's L{sorted_variables}
list. If C{value_type} is given, then only return variables
whose values have the specified type. If C{group} is given,
then only return variables that belong to the specified group.
If C{inherited} is True, then only return inherited variables;
if C{inherited} is False, then only return local variables.
@require: The L{sorted_variables} and L{variable_groups}
attributes must be initialized before this method can be
used. See L{init_sorted_variables()} and
L{init_variable_groups()}.
@param value_type: A string specifying the value type for
which variables should be returned. Valid values are:
- 'instancemethod' - variables whose values are
instance methods.
- 'classmethod' - variables whose values are class
methods.
- 'staticmethod' - variables whose values are static
methods.
- 'properties' - variables whose values are properties.
- 'class' - variables whose values are nested classes
(including exceptions and types).
- 'instancevariable' - instance variables. This includes
any variables that are explicitly marked as instance
variables with docstring fields; and variables with
docstrings that are initialized in the constructor.
- 'classvariable' - class variables. This includes any
variables that are not included in any of the above
categories.
@type value_type: C{string}
@param group: The name of the group for which variables should
be returned. A complete list of the groups defined by
this C{ClassDoc} is available in the L{group_names}
instance variable. The first element of this list is
always the special group name C{''}, which is used for
variables that do not belong to any group.
@type group: C{string}
@param inherited: If C{None}, then return both inherited and
local variables; if C{True}, then return only inherited
variables; if C{False}, then return only local variables.
@param detailed: If C{True}, return only the variables that
deserve a detailed informative box; if C{False}, return only
those that do not. If C{None}, do not filter on this.
@type detailed: C{bool}
"""
if (self.sorted_variables is UNKNOWN or
self.variable_groups is UNKNOWN):
raise ValueError('sorted_variables and variable_groups '
'must be initialized first.')
if group is None: var_list = self.sorted_variables
else: var_list = self.variable_groups[group]
# Public/private filter (Count UNKNOWN as public)
if public is True:
var_list = [v for v in var_list if v.is_public is not False]
elif public is False:
var_list = [v for v in var_list if v.is_public is False]
# Inherited filter (Count UNKNOWN as non-inherited)
if inherited is None: pass
elif inherited:
var_list = [v for v in var_list if v.container != self]
else:
var_list = [v for v in var_list if v.container == self ]
# Imported filter (Count UNKNOWN as non-imported)
if imported is True:
var_list = [v for v in var_list if v.is_imported is True]
elif imported is False:
var_list = [v for v in var_list if v.is_imported is not True]
# Detailed filter
if detailed is True:
var_list = [v for v in var_list if v.is_detailed() is True]
elif detailed is False:
var_list = [v for v in var_list if v.is_detailed() is not True]
if value_type is None:
return var_list
elif value_type == 'method':
return [var_doc for var_doc in var_list
if (isinstance(var_doc.value, RoutineDoc) and
var_doc.is_instvar in (False, UNKNOWN))]
elif value_type == 'instancemethod':
return [var_doc for var_doc in var_list
if (isinstance(var_doc.value, RoutineDoc) and
not isinstance(var_doc.value, ClassMethodDoc) and
not isinstance(var_doc.value, StaticMethodDoc) and
var_doc.is_instvar in (False, UNKNOWN))]
elif value_type == 'classmethod':
return [var_doc for var_doc in var_list
if (isinstance(var_doc.value, ClassMethodDoc) and
var_doc.is_instvar in (False, UNKNOWN))]
elif value_type == 'staticmethod':
return [var_doc for var_doc in var_list
if (isinstance(var_doc.value, StaticMethodDoc) and
var_doc.is_instvar in (False, UNKNOWN))]
elif value_type == 'property':
return [var_doc for var_doc in var_list
if (isinstance(var_doc.value, PropertyDoc) and
var_doc.is_instvar in (False, UNKNOWN))]
elif value_type == 'class':
return [var_doc for var_doc in var_list
if (isinstance(var_doc.value, ClassDoc) and
var_doc.is_instvar in (False, UNKNOWN))]
elif value_type == 'instancevariable':
return [var_doc for var_doc in var_list
if var_doc.is_instvar is True]
elif value_type == 'classvariable':
return [var_doc for var_doc in var_list
if (var_doc.is_instvar in (False, UNKNOWN) and
not isinstance(var_doc.value,
(RoutineDoc, ClassDoc, PropertyDoc)))]
else:
raise ValueError('Bad value type %r' % value_type)
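# Sketch of the base-class predicates and mro() above, using two
# hypothetical ClassDocs.  Neither class derives from object, so the
# old-style depth-first search is used instead of C3:
#
#     >>> exc = ClassDoc(canonical_name=DottedName('Exception'), bases=[])
#     >>> err = ClassDoc(canonical_name=DottedName('my.Error'), bases=[exc])
#     >>> err.is_exception()
#     True
#     >>> err.is_newstyle_class()
#     False
#     >>> [c.canonical_name[-1] for c in err.mro()]
#     ['Error', 'Exception']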
class RoutineDoc(ValueDoc):
"""
API documentation information about a single routine.
"""
#{ Signature
posargs = UNKNOWN
"""@ivar: The names of the routine's positional arguments.
If an argument list contains \"unpacking\" arguments, then
their names will be specified using nested lists. E.g., if
a function's argument list is C{((x1,y1), (x2,y2))}, then
posargs will be C{[['x1','y1'], ['x2','y2']]}.
@type: C{list}"""
posarg_defaults = UNKNOWN
"""@ivar: API documentation for the positional arguments'
default values. This list has the same length as C{posargs}, and
each element of C{posarg_defaults} describes the corresponding
argument in C{posargs}. For positional arguments with no default,
C{posarg_defaults} will contain C{None}.
@type: C{list} of C{ValueDoc} or C{None}"""
vararg = UNKNOWN
"""@ivar: The name of the routine's vararg argument, or C{None} if
it has no vararg argument.
@type: C{string} or C{None}"""
kwarg = UNKNOWN
"""@ivar: The name of the routine's keyword argument, or C{None} if
it has no keyword argument.
@type: C{string} or C{None}"""
lineno = UNKNOWN # used to look up profiling info from pstats.
"""@ivar: The line number of the first line of the function's
signature. For Python functions, this is equal to
C{func.func_code.co_firstlineno}. The first line of a file
is considered line 1.
@type: C{int}"""
#} end of "signature" group
#{ Decorators
decorators = UNKNOWN
"""@ivar: A list of names of decorators that were applied to this
routine, in the order that they are listed in the source code.
(I.e., in the reverse of the order that they were applied in.)
@type: C{list} of C{string}"""
#} end of "decorators" group
#{ Information Extracted from Docstrings
arg_descrs = UNKNOWN
"""@ivar: A list of descriptions of the routine's
arguments. Each element of this list is a tuple C{(args,
descr)}, where C{args} is a list of argument names; and
C{descr} is a L{ParsedDocstring
<epydoc.markup.ParsedDocstring>} describing the argument(s)
specified by C{args}.
@type: C{list}"""
arg_types = UNKNOWN
"""@ivar: Descriptions of the expected types for the
routine's arguments, encoded as a dictionary mapping from
argument names to type descriptions.
@type: C{dict} from C{string} to L{ParsedDocstring
<epydoc.markup.ParsedDocstring>}"""
return_descr = UNKNOWN
"""@ivar: A description of the value returned by this routine.
@type: L{ParsedDocstring<epydoc.markup.ParsedDocstring>}"""
return_type = UNKNOWN
"""@ivar: A description of expected type for the value
returned by this routine.
@type: L{ParsedDocstring<epydoc.markup.ParsedDocstring>}"""
exception_descrs = UNKNOWN
"""@ivar: A list of descriptions of exceptions
that the routine might raise. Each element of this list is a
tuple C{(exc, descr)}, where C{exc} is a string containing the
exception name; and C{descr} is a L{ParsedDocstring
<epydoc.markup.ParsedDocstring>} describing the circumstances
under which the exception specified by C{exc} is raised.
@type: C{list}"""
#} end of "information extracted from docstrings" group
callgraph_uid = None
"""@ivar: L{DotGraph}.uid of the call graph for the function.
@type: C{str}"""
def is_detailed(self):
if super(RoutineDoc, self).is_detailed():
return True
if self.arg_descrs not in (None, UNKNOWN) and self.arg_descrs:
return True
if self.arg_types not in (None, UNKNOWN) and self.arg_types:
return True
if self.return_descr not in (None, UNKNOWN):
return True
if self.exception_descrs not in (None, UNKNOWN) and self.exception_descrs:
return True
if (self.decorators not in (None, UNKNOWN)
and [ d for d in self.decorators
if d not in ('classmethod', 'staticmethod') ]):
return True
return False
def all_args(self):
"""
@return: A list of the names of all arguments (positional,
vararg, and keyword), in order. If a positional argument
consists of a tuple of names, then that tuple will be
flattened.
"""
if self.posargs is UNKNOWN:
return UNKNOWN
all_args = _flatten(self.posargs)
if self.vararg not in (None, UNKNOWN):
all_args.append(self.vararg)
if self.kwarg not in (None, UNKNOWN):
all_args.append(self.kwarg)
return all_args
def _flatten(lst, out=None):
"""
Return a flattened version of C{lst}.
"""
if out is None: out = []
for elt in lst:
if isinstance(elt, (list,tuple)):
_flatten(elt, out)
else:
out.append(elt)
return out
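# Sketch of RoutineDoc.all_args() and the _flatten() helper above, for
# a hypothetical signature such as def f(self, (x, y), **opts):
#
#     >>> func = RoutineDoc(posargs=['self', ['x', 'y']],
#     ...                   vararg=None, kwarg='opts')
#     >>> func.all_args()
#     ['self', 'x', 'y', 'opts']
#     >>> _flatten([['a', ['b']], 'c'])
#     ['a', 'b', 'c']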
class ClassMethodDoc(RoutineDoc): pass
class StaticMethodDoc(RoutineDoc): pass
class PropertyDoc(ValueDoc):
"""
API documentation information about a single property.
"""
#{ Property Access Functions
fget = UNKNOWN
"""@ivar: API documentation for the property's get function.
@type: L{RoutineDoc}"""
fset = UNKNOWN
"""@ivar: API documentation for the property's set function.
@type: L{RoutineDoc}"""
fdel = UNKNOWN
"""@ivar: API documentation for the property's delete function.
@type: L{RoutineDoc}"""
#}
#{ Information Extracted from Docstrings
type_descr = UNKNOWN
"""@ivar: A description of the property's expected type, extracted
from its docstring.
@type: L{ParsedDocstring<epydoc.markup.ParsedDocstring>}"""
#} end of "information extracted from docstrings" group
def apidoc_links(self, **filters):
val_docs = []
if self.fget not in (None, UNKNOWN): val_docs.append(self.fget)
if self.fset not in (None, UNKNOWN): val_docs.append(self.fset)
if self.fdel not in (None, UNKNOWN): val_docs.append(self.fdel)
return val_docs
def is_detailed(self):
if super(PropertyDoc, self).is_detailed():
return True
if self.fget not in (None, UNKNOWN) and self.fget.pyval is not None:
return True
if self.fset not in (None, UNKNOWN) and self.fset.pyval is not None:
return True
if self.fdel not in (None, UNKNOWN) and self.fdel.pyval is not None:
return True
return False
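# Sketch of PropertyDoc.apidoc_links(): only accessor functions that
# are actually present (not None or UNKNOWN) are reported as links.
# The getter here is a bare, hypothetical RoutineDoc:
#
#     >>> getter = RoutineDoc()
#     >>> prop = PropertyDoc(fget=getter, fset=None, fdel=None)
#     >>> prop.apidoc_links() == [getter]
#     True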
######################################################################
## Index
######################################################################
class DocIndex:
"""
[xx] out of date.
An index that .. hmm... it *can't* be used to access some things,
cuz they're not at the root level. Do I want to add them or what?
And if so, then I have a sort of a new top level. hmm.. so
basically the question is what to do with a name that's not in the
root var's name space. 2 types:
- entirely outside (eg os.path)
- inside but not known (eg a submodule that we didn't look at?)
- container of current thing not examined?
An index of all the C{APIDoc} objects that can be reached from a
root set of C{ValueDoc}s.
The members of this index can be accessed by dotted name. In
particular, C{DocIndex} defines two mappings, accessed via the
L{get_vardoc()} and L{get_valdoc()} methods, which can be used to
access C{VariableDoc}s or C{ValueDoc}s respectively by name. (Two
separate mappings are necessary because a single name can be used
to refer to both a variable and to the value contained by that
variable.)
Additionally, the index defines two sets of C{ValueDoc}s:
\"reachable C{ValueDoc}s\" and \"contained C{ValueDoc}s\". The
X{reachable C{ValueDoc}s} are defined as the set of all
C{ValueDoc}s that can be reached from the root set by following
I{any} sequence of pointers to C{ValueDoc}s or C{VariableDoc}s.
The X{contained C{ValueDoc}s} are defined as the set of all
C{ValueDoc}s that can be reached from the root set by following
only the C{ValueDoc} pointers defined by non-imported
C{VariableDoc}s. For example, if the root set contains a module
C{m}, then the contained C{ValueDoc}s includes the C{ValueDoc}s
for any functions, variables, or classes defined in that module,
as well as methods and variables defined in classes defined in the
module. The reachable C{ValueDoc}s includes all of those
C{ValueDoc}s, as well as C{ValueDoc}s for any values imported into
the module, and base classes for classes defined in the module.
"""
def __init__(self, root):
"""
Create a new documentation index, based on the given root set
of C{ValueDoc}s. If any C{APIDoc} reachable from the root
set does not have a canonical name, then one will be assigned
to it.
@param root: A list of C{ValueDoc}s.
"""
for apidoc in root:
if apidoc.canonical_name in (None, UNKNOWN):
raise ValueError("All APIdocs passed to DocIndexer "
"must already have canonical names.")
# Initialize the root items list. We sort them by length in
# ascending order. (This ensures that variables will shadow
# submodules when appropriate.)
# When two names have the same length, fall back to alphabetical
# order: this puts duplicate entries next to each other, which the
# duplicate check below relies on.
self.root = sorted(root,
key=lambda d: (len(d.canonical_name), d.canonical_name))
"""The list of C{ValueDoc}s to document.
@type: C{list}"""
# Drop duplicated modules
# [xx] maybe what causes duplicates should be fixed instead.
# If fixed, adjust the sort here above: sorting by names will not
# be required anymore
i = 1
while i < len(self.root):
if self.root[i-1] is self.root[i]:
del self.root[i]
else:
i += 1
self.mlclasses = self._get_module_classes(self.root)
"""A mapping from class names to L{ClassDoc}. Contains
classes defined at module level for modules in L{root}
and which can be used as fallback by L{find()} if looking
in containing namespaces fails.
@type: C{dict} from C{str} to L{ClassDoc} or C{list}"""
self.callers = None
"""A dictionary mapping from C{RoutineDoc}s in this index
to lists of C{RoutineDoc}s for the routine's callers.
This dictionary is initialized by calling
L{read_profiling_info()}.
@type: C{dict} from L{RoutineDoc} to C{list} of L{RoutineDoc}"""
self.callees = None
"""A dictionary mapping from C{RoutineDoc}s in this index
to lists of C{RoutineDoc}s for the routine's callees.
This dictionary is initialized by calling
L{read_profiling_info()}.
@type: C{dict} from L{RoutineDoc} to C{list} of L{RoutineDoc}"""
self._funcid_to_doc = {}
"""A mapping from C{profile} function ids to corresponding
C{APIDoc} objects. A function id is a tuple of the form
C{(filename, lineno, funcname)}. This is used to update
the L{callers} and L{callees} variables."""
self._container_cache = {}
"""A cache for the L{container()} method, to increase speed."""
self._get_cache = {}
"""A cache for the L{get_vardoc()} and L{get_valdoc()} methods,
to increase speed."""
#////////////////////////////////////////////////////////////
# Lookup methods
#////////////////////////////////////////////////////////////
# [xx]
# Currently these only work for things reachable from the
# root... :-/ I might want to change this so that imported
# values can be accessed even if they're not contained.
# Also, I might want canonical names to not start with ??
# if the thing is a top-level imported module..?
def get_vardoc(self, name):
"""
Return the C{VariableDoc} with the given name, or C{None} if this
index does not contain a C{VariableDoc} with the given name.
"""
var, val = self._get(name)
return var
def get_valdoc(self, name):
"""
Return the C{ValueDoc} with the given name, or C{None} if this
index does not contain a C{ValueDoc} with the given name.
"""
var, val = self._get(name)
return val
def _get(self, name):
"""
A helper function that's used to implement L{get_vardoc()}
and L{get_valdoc()}.
"""
# Convert name to a DottedName, if necessary.
if not isinstance(name, DottedName):
name = DottedName(name)
# Check if the result is cached.
val = self._get_cache.get(name)
if val is not None: return val
# Look for an element in the root set whose name is a prefix
# of `name`. If we can't find one, then return None.
for root_valdoc in self.root:
if root_valdoc.canonical_name.dominates(name):
# Starting at the root valdoc, walk down the variable/
# submodule chain until we find the requested item.
var_doc = None
val_doc = root_valdoc
for identifier in name[len(root_valdoc.canonical_name):]:
if val_doc is None: break
var_doc, val_doc = self._get_from(val_doc, identifier)
else:
# If we found it, then return.
if var_doc is not None or val_doc is not None:
self._get_cache[name] = (var_doc, val_doc)
return var_doc, val_doc
# We didn't find it.
self._get_cache[name] = (None, None)
return None, None
def _get_from(self, val_doc, identifier):
if isinstance(val_doc, NamespaceDoc):
child_var = val_doc.variables.get(identifier)
if child_var is not None:
child_val = child_var.value
if child_val is UNKNOWN: child_val = None
return child_var, child_val
# If that fails, then see if it's a submodule.
if (isinstance(val_doc, ModuleDoc) and
val_doc.submodules is not UNKNOWN):
for submodule in val_doc.submodules:
if submodule.canonical_name[-1] == identifier:
var_doc = None
val_doc = submodule
if val_doc is UNKNOWN: val_doc = None
return var_doc, val_doc
return None, None
def find(self, name, context):
"""
Look for an C{APIDoc} named C{name}, relative to C{context}.
Return the C{APIDoc} if one is found; otherwise, return
C{None}. C{find} looks in the following places, in order:
- Function parameters (if one matches, return C{None})
- All enclosing namespaces, from closest to furthest.
- If C{name} starts with C{'self'}, then strip it off and
look for the remaining part of the name using C{find}
- Builtins
- Parameter attributes
- Classes at module level (if the name is not ambiguous)
@type name: C{str} or L{DottedName}
@type context: L{APIDoc}
"""
if isinstance(name, basestring):
name = re.sub(r'\(.*\)$', '', name.strip())
if re.match('^([a-zA-Z_]\w*)(\.[a-zA-Z_]\w*)*$', name):
name = DottedName(name)
else:
return None
elif not isinstance(name, DottedName):
raise TypeError("'name' should be a string or DottedName")
if context is None or context.canonical_name is None:
container_name = []
else:
container_name = context.canonical_name
# Check for the name in all containing namespaces, starting
# with the closest one.
for i in range(len(container_name), -1, -1):
relative_name = container_name[:i]+name
# Is `name` the absolute name of a documented value?
# (excepting GenericValueDoc values.)
val_doc = self.get_valdoc(relative_name)
if (val_doc is not None and
not isinstance(val_doc, GenericValueDoc)):
return val_doc
# Is `name` the absolute name of a documented variable?
var_doc = self.get_vardoc(relative_name)
if var_doc is not None: return var_doc
# If the name begins with 'self', then try stripping that off
# and see if we can find the variable.
if name[0] == 'self':
doc = self.find('.'.join(name[1:]), context)
if doc is not None: return doc
# Is it the name of a builtin?
if len(name)==1 and hasattr(__builtin__, name[0]):
return None
# Is it a parameter's name or an attribute of a parameter?
if isinstance(context, RoutineDoc):
all_args = context.all_args()
if all_args is not UNKNOWN and name[0] in all_args:
return None
# Is this an object directly contained by any module?
doc = self.mlclasses.get(name[-1])
if isinstance(doc, APIDoc):
return doc
elif isinstance(doc, list):
log.warning("%s is an ambiguous name: it may be %s" % (
name[-1],
", ".join([ "'%s'" % d.canonical_name for d in doc ])))
# Drop this item so that the warning is reported only once.
# fail() will fail anyway.
del self.mlclasses[name[-1]]
def _get_module_classes(self, docs):
"""
Gather all the classes defined in a list of modules.
People very often refer to classes by class name alone,
even if the class is not imported into the namespace. Linking
to such classes would fail if we only looked in enclosing
namespaces, so allow them to be looked up by name alone.
@param docs: containers of the objects to collect
@type docs: C{list} of C{APIDoc}
        @return: mapping from object name to the object(s) with that name
@rtype: C{dict} from C{str} to L{ClassDoc} or C{list}
"""
classes = {}
for doc in docs:
if not isinstance(doc, ModuleDoc):
continue
for var in doc.variables.values():
if not isinstance(var.value, ClassDoc):
continue
val = var.value
if val in (None, UNKNOWN) or val.defining_module is not doc:
continue
if val.canonical_name in (None, UNKNOWN):
continue
name = val.canonical_name[-1]
vals = classes.get(name)
if vals is None:
classes[name] = val
elif not isinstance(vals, list):
classes[name] = [ vals, val ]
else:
vals.append(val)
return classes
#////////////////////////////////////////////////////////////
# etc
#////////////////////////////////////////////////////////////
def reachable_valdocs(self, **filters):
"""
Return a list of all C{ValueDoc}s that can be reached,
directly or indirectly from this C{DocIndex}'s root set.
@param filters: A set of filters that can be used to prevent
C{reachable_valdocs} from following specific link types
when looking for C{ValueDoc}s that can be reached from the
root set. See C{APIDoc.apidoc_links} for a more complete
description.
"""
return reachable_valdocs(self.root, **filters)
def container(self, api_doc):
"""
Return the C{ValueDoc} that contains the given C{APIDoc}, or
C{None} if its container is not in the index.
"""
# Check if the result is cached.
val = self._container_cache.get(api_doc)
if val is not None: return val
if isinstance(api_doc, GenericValueDoc):
self._container_cache[api_doc] = None
return None # [xx] unknown.
if isinstance(api_doc, VariableDoc):
self._container_cache[api_doc] = api_doc.container
return api_doc.container
if len(api_doc.canonical_name) == 1:
self._container_cache[api_doc] = None
return None
elif isinstance(api_doc, ModuleDoc) and api_doc.package is not UNKNOWN:
self._container_cache[api_doc] = api_doc.package
return api_doc.package
else:
parent = self.get_valdoc(api_doc.canonical_name.container())
self._container_cache[api_doc] = parent
return parent
#////////////////////////////////////////////////////////////
# Profiling information
#////////////////////////////////////////////////////////////
def read_profiling_info(self, profile_stats):
"""
Initialize the L{callers} and L{callees} variables, given a
C{Stat} object from the C{pstats} module.
@warning: This method uses undocumented data structures inside
of C{profile_stats}.
"""
if self.callers is None: self.callers = {}
if self.callees is None: self.callees = {}
# The Stat object encodes functions using `funcid`s, or
# tuples of (filename, lineno, funcname). Create a mapping
# from these `funcid`s to `RoutineDoc`s.
self._update_funcid_to_doc(profile_stats)
for callee, (cc, nc, tt, ct, callers) in profile_stats.stats.items():
callee = self._funcid_to_doc.get(callee)
if callee is None: continue
for caller in callers:
caller = self._funcid_to_doc.get(caller)
if caller is None: continue
self.callers.setdefault(callee, []).append(caller)
self.callees.setdefault(caller, []).append(callee)
def _update_funcid_to_doc(self, profile_stats):
"""
        Update the dictionary mapping from C{pstat.Stat} function ids to
C{RoutineDoc}s. C{pstat.Stat} function ids are tuples of
C{(filename, lineno, funcname)}.
"""
# Maps (filename, lineno, funcname) -> RoutineDoc
for val_doc in self.reachable_valdocs():
# We only care about routines.
if not isinstance(val_doc, RoutineDoc): continue
# Get the filename from the defining module.
module = val_doc.defining_module
if module is UNKNOWN or module.filename is UNKNOWN: continue
# Normalize the filename.
filename = os.path.abspath(module.filename)
try: filename = py_src_filename(filename)
except: pass
# Look up the stat_func_id
funcid = (filename, val_doc.lineno, val_doc.canonical_name[-1])
if funcid in profile_stats.stats:
self._funcid_to_doc[funcid] = val_doc
######################################################################
## Pretty Printing
######################################################################
def pp_apidoc(api_doc, doublespace=0, depth=5, exclude=(), include=(),
backpointers=None):
"""
@return: A multiline pretty-printed string representation for the
given C{APIDoc}.
@param doublespace: If true, then extra lines will be
inserted to make the output more readable.
@param depth: The maximum depth that pp_apidoc will descend
into descendent VarDocs. To put no limit on
depth, use C{depth=-1}.
@param exclude: A list of names of attributes whose values should
not be shown.
@param backpointers: For internal use.
"""
pyid = id(api_doc.__dict__)
if backpointers is None: backpointers = {}
if (hasattr(api_doc, 'canonical_name') and
api_doc.canonical_name not in (None, UNKNOWN)):
name = '%s for %s' % (api_doc.__class__.__name__,
api_doc.canonical_name)
elif getattr(api_doc, 'name', None) not in (UNKNOWN, None):
if (getattr(api_doc, 'container', None) not in (UNKNOWN, None) and
getattr(api_doc.container, 'canonical_name', None)
not in (UNKNOWN, None)):
name ='%s for %s' % (api_doc.__class__.__name__,
api_doc.container.canonical_name+
api_doc.name)
else:
name = '%s for %s' % (api_doc.__class__.__name__, api_doc.name)
else:
name = api_doc.__class__.__name__
if pyid in backpointers:
return '%s [%s] (defined above)' % (name, backpointers[pyid])
if depth == 0:
if hasattr(api_doc, 'name') and api_doc.name is not None:
return '%s...' % api_doc.name
else:
return '...'
backpointers[pyid] = len(backpointers)
s = '%s [%s]' % (name, backpointers[pyid])
# Only print non-empty fields:
fields = [field for field in api_doc.__dict__.keys()
if (field in include or
(getattr(api_doc, field) is not UNKNOWN
and field not in exclude))]
if include:
fields = [field for field in dir(api_doc)
if field in include]
else:
fields = [field for field in api_doc.__dict__.keys()
if (getattr(api_doc, field) is not UNKNOWN
and field not in exclude)]
fields.sort()
for field in fields:
fieldval = getattr(api_doc, field)
if doublespace: s += '\n |'
s += '\n +- %s' % field
if (isinstance(fieldval, types.ListType) and
len(fieldval)>0 and
isinstance(fieldval[0], APIDoc)):
s += _pp_list(api_doc, fieldval, doublespace, depth,
exclude, include, backpointers,
(field is fields[-1]))
elif (isinstance(fieldval, types.DictType) and
len(fieldval)>0 and
isinstance(fieldval.values()[0], APIDoc)):
s += _pp_dict(api_doc, fieldval, doublespace,
depth, exclude, include, backpointers,
(field is fields[-1]))
elif isinstance(fieldval, APIDoc):
s += _pp_apidoc(api_doc, fieldval, doublespace, depth,
exclude, include, backpointers,
(field is fields[-1]))
else:
s += ' = ' + _pp_val(api_doc, fieldval, doublespace,
depth, exclude, include, backpointers)
return s
def _pp_list(api_doc, items, doublespace, depth, exclude, include,
backpointers, is_last):
line1 = (is_last and ' ') or '|'
s = ''
for item in items:
line2 = ((item is items[-1]) and ' ') or '|'
joiner = '\n %s %s ' % (line1, line2)
if doublespace: s += '\n %s |' % line1
s += '\n %s +- ' % line1
valstr = _pp_val(api_doc, item, doublespace, depth, exclude, include,
backpointers)
s += joiner.join(valstr.split('\n'))
return s
def _pp_dict(api_doc, dict, doublespace, depth, exclude, include,
backpointers, is_last):
items = dict.items()
items.sort()
line1 = (is_last and ' ') or '|'
s = ''
for item in items:
line2 = ((item is items[-1]) and ' ') or '|'
joiner = '\n %s %s ' % (line1, line2)
if doublespace: s += '\n %s |' % line1
s += '\n %s +- ' % line1
valstr = _pp_val(api_doc, item[1], doublespace, depth, exclude,
include, backpointers)
s += joiner.join(('%s => %s' % (item[0], valstr)).split('\n'))
return s
def _pp_apidoc(api_doc, val, doublespace, depth, exclude, include,
backpointers, is_last):
line1 = (is_last and ' ') or '|'
s = ''
if doublespace: s += '\n %s | ' % line1
s += '\n %s +- ' % line1
joiner = '\n %s ' % line1
childstr = pp_apidoc(val, doublespace, depth-1, exclude,
include, backpointers)
return s + joiner.join(childstr.split('\n'))
def _pp_val(api_doc, val, doublespace, depth, exclude, include, backpointers):
from epydoc import markup
if isinstance(val, APIDoc):
return pp_apidoc(val, doublespace, depth-1, exclude,
include, backpointers)
elif isinstance(val, markup.ParsedDocstring):
valrepr = `val.to_plaintext(None)`
if len(valrepr) < 40: return valrepr
else: return valrepr[:37]+'...'
else:
valrepr = repr(val)
if len(valrepr) < 40: return valrepr
else: return valrepr[:37]+'...'
| apache-2.0 | 8,770,389,486,572,857,000 | 40.978665 | 82 | 0.587928 | false |
kabrapratik28/Stanford_courses | cs224n/assignment1/q4_sentiment.py | 1 | 8150 | #!/usr/bin/env python
import argparse
import numpy as np
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import itertools
from utils.treebank import StanfordSentiment
import utils.glove as glove
from q3_sgd import load_saved_params, sgd
# We will use sklearn here because it will run faster than implementing
# ourselves. However, for other parts of this assignment you must implement
# the functions yourself!
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix
def getArguments():
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("--pretrained", dest="pretrained", action="store_true",
help="Use pretrained GloVe vectors.")
group.add_argument("--yourvectors", dest="yourvectors", action="store_true",
help="Use your vectors from q3.")
return parser.parse_args()
def getSentenceFeatures(tokens, wordVectors, sentence):
"""
Obtain the sentence feature for sentiment analysis by averaging its
word vectors
"""
# Implement computation for the sentence features given a sentence.
# Inputs:
# tokens -- a dictionary that maps words to their indices in
# the word vector list
# wordVectors -- word vectors (each row) for all tokens
# sentence -- a list of words in the sentence of interest
# Output:
# - sentVector: feature vector for the sentence
sentVector = np.zeros((wordVectors.shape[1],))
### YOUR CODE HERE
for word in sentence:
index = tokens[word]
sentVector += wordVectors[index]
sentVector /= len(sentence)
### END YOUR CODE
assert sentVector.shape == (wordVectors.shape[1],)
return sentVector
def getRegularizationValues():
"""Try different regularizations
Return a sorted list of values to try.
"""
values = None # Assign a list of floats in the block below
### YOUR CODE HERE
values = [100, 10, 1, 0, 1e-1, 5e-1, 1e-2, 5e-2,
1e-3, 5e-3, 1e-4, 5e-4, 1e-5, 5e-5, 1e-6]
### END YOUR CODE
return sorted(values)
def chooseBestModel(results):
"""Choose the best model based on parameter tuning on the dev set
Arguments:
results -- A list of python dictionaries of the following format:
{
"reg": regularization,
"clf": classifier,
"train": trainAccuracy,
"dev": devAccuracy,
"test": testAccuracy
}
Returns:
Your chosen result dictionary.
"""
bestResult = None
### YOUR CODE HERE
currBestValue = -1.0
for each_result in results:
if each_result["dev"] > currBestValue:
currBestValue = each_result["dev"]
bestResult = each_result
### END YOUR CODE
return bestResult
def accuracy(y, yhat):
""" Precision for classifier """
assert(y.shape == yhat.shape)
return np.sum(y == yhat) * 100.0 / y.size
def plotRegVsAccuracy(regValues, results, filename):
""" Make a plot of regularization vs accuracy """
plt.plot(regValues, [x["train"] for x in results])
plt.plot(regValues, [x["dev"] for x in results])
plt.xscale('log')
plt.xlabel("regularization")
plt.ylabel("accuracy")
plt.legend(['train', 'dev'], loc='upper left')
plt.savefig(filename)
def outputConfusionMatrix(features, labels, clf, filename):
""" Generate a confusion matrix """
pred = clf.predict(features)
cm = confusion_matrix(labels, pred, labels=range(5))
plt.figure()
plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Reds)
plt.colorbar()
classes = ["- -", "-", "neut", "+", "+ +"]
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes)
plt.yticks(tick_marks, classes)
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, cm[i, j],
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.savefig(filename)
def outputPredictions(dataset, features, labels, clf, filename):
""" Write the predictions to file """
pred = clf.predict(features)
with open(filename, "w") as f:
print >> f, "True\tPredicted\tText"
for i in xrange(len(dataset)):
print >> f, "%d\t%d\t%s" % (
labels[i], pred[i], " ".join(dataset[i][0]))
def main(args):
""" Train a model to do sentiment analyis"""
# Load the dataset
dataset = StanfordSentiment()
tokens = dataset.tokens()
nWords = len(tokens)
if args.yourvectors:
_, wordVectors, _ = load_saved_params()
wordVectors = np.concatenate(
(wordVectors[:nWords,:], wordVectors[nWords:,:]),
axis=1)
elif args.pretrained:
wordVectors = glove.loadWordVectors(tokens)
dimVectors = wordVectors.shape[1]
# Load the train set
trainset = dataset.getTrainSentences()
nTrain = len(trainset)
trainFeatures = np.zeros((nTrain, dimVectors))
trainLabels = np.zeros((nTrain,), dtype=np.int32)
for i in xrange(nTrain):
words, trainLabels[i] = trainset[i]
trainFeatures[i, :] = getSentenceFeatures(tokens, wordVectors, words)
# Prepare dev set features
devset = dataset.getDevSentences()
nDev = len(devset)
devFeatures = np.zeros((nDev, dimVectors))
devLabels = np.zeros((nDev,), dtype=np.int32)
for i in xrange(nDev):
words, devLabels[i] = devset[i]
devFeatures[i, :] = getSentenceFeatures(tokens, wordVectors, words)
# Prepare test set features
testset = dataset.getTestSentences()
nTest = len(testset)
testFeatures = np.zeros((nTest, dimVectors))
testLabels = np.zeros((nTest,), dtype=np.int32)
for i in xrange(nTest):
words, testLabels[i] = testset[i]
testFeatures[i, :] = getSentenceFeatures(tokens, wordVectors, words)
# We will save our results from each run
results = []
regValues = getRegularizationValues()
for reg in regValues:
print "Training for reg=%f" % reg
# Note: add a very small number to regularization to please the library
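        # (In scikit-learn, C is the inverse of the regularization strength,
        # so larger reg values here correspond to stronger regularization.)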
clf = LogisticRegression(C=1.0/(reg + 1e-12))
clf.fit(trainFeatures, trainLabels)
# Test on train set
pred = clf.predict(trainFeatures)
trainAccuracy = accuracy(trainLabels, pred)
print "Train accuracy (%%): %f" % trainAccuracy
# Test on dev set
pred = clf.predict(devFeatures)
devAccuracy = accuracy(devLabels, pred)
print "Dev accuracy (%%): %f" % devAccuracy
# Test on test set
# Note: always running on test is poor style. Typically, you should
# do this only after validation.
pred = clf.predict(testFeatures)
testAccuracy = accuracy(testLabels, pred)
print "Test accuracy (%%): %f" % testAccuracy
results.append({
"reg": reg,
"clf": clf,
"train": trainAccuracy,
"dev": devAccuracy,
"test": testAccuracy})
# Print the accuracies
print ""
print "=== Recap ==="
print "Reg\t\tTrain\tDev\tTest"
for result in results:
print "%.2E\t%.3f\t%.3f\t%.3f" % (
result["reg"],
result["train"],
result["dev"],
result["test"])
print ""
bestResult = chooseBestModel(results)
print "Best regularization value: %0.2E" % bestResult["reg"]
print "Test accuracy (%%): %f" % bestResult["test"]
# do some error analysis
if args.pretrained:
plotRegVsAccuracy(regValues, results, "q4_reg_v_acc.png")
outputConfusionMatrix(devFeatures, devLabels, bestResult["clf"],
"q4_dev_conf.png")
outputPredictions(devset, devFeatures, devLabels, bestResult["clf"],
"q4_dev_pred.txt")
if __name__ == "__main__":
main(getArguments())
| apache-2.0 | 4,027,457,613,679,352,000 | 30.835938 | 80 | 0.622577 | false |
xzturn/tensorflow | tensorflow/python/kernel_tests/proto/encode_proto_op_test_base.py | 26 | 8489 | # =============================================================================
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Table-driven test for encode_proto op.
It tests that encode_proto is a lossless inverse of decode_proto
(for the specified fields).
"""
# Python3 readiness boilerplate
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from google.protobuf import text_format
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.kernel_tests.proto import proto_op_test_base as test_base
from tensorflow.python.kernel_tests.proto import test_example_pb2
from tensorflow.python.ops import array_ops
class EncodeProtoOpTestBase(test_base.ProtoOpTestBase, parameterized.TestCase):
"""Base class for testing proto encoding ops."""
def __init__(self, decode_module, encode_module, methodName='runTest'): # pylint: disable=invalid-name
"""EncodeProtoOpTestBase initializer.
Args:
decode_module: a module containing the `decode_proto_op` method
encode_module: a module containing the `encode_proto_op` method
methodName: the name of the test method (same as for test.TestCase)
"""
super(EncodeProtoOpTestBase, self).__init__(methodName)
self._decode_module = decode_module
self._encode_module = encode_module
def testBadSizesShape(self):
if context.executing_eagerly():
expected_error = (errors.InvalidArgumentError,
r'Invalid shape for field double_value.')
else:
expected_error = (ValueError,
r'Shape must be at least rank 2 but is rank 0')
with self.assertRaisesRegexp(*expected_error):
self.evaluate(
self._encode_module.encode_proto(
sizes=1,
values=[np.double(1.0)],
message_type='tensorflow.contrib.proto.TestValue',
field_names=['double_value']))
def testBadInputs(self):
# Invalid field name
with self.assertRaisesOpError('Unknown field: non_existent_field'):
self.evaluate(
self._encode_module.encode_proto(
sizes=[[1]],
values=[np.array([[0.0]], dtype=np.int32)],
message_type='tensorflow.contrib.proto.TestValue',
field_names=['non_existent_field']))
# Incorrect types.
with self.assertRaisesOpError('Incompatible type for field double_value.'):
self.evaluate(
self._encode_module.encode_proto(
sizes=[[1]],
values=[np.array([[0.0]], dtype=np.int32)],
message_type='tensorflow.contrib.proto.TestValue',
field_names=['double_value']))
# Incorrect shapes of sizes.
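    # (Both a scalar and a rank-3 array are rejected; sizes must be rank 2,
    # i.e. batch shape + [len(field_names)].)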
for sizes_value in 1, np.array([[[0, 0]]]):
with self.assertRaisesOpError(
r'sizes should be batch_size \+ \[len\(field_names\)\]'):
if context.executing_eagerly():
self.evaluate(
self._encode_module.encode_proto(
sizes=sizes_value,
values=[np.array([[0.0]])],
message_type='tensorflow.contrib.proto.TestValue',
field_names=['double_value']))
else:
with self.cached_session():
sizes = array_ops.placeholder(dtypes.int32)
values = array_ops.placeholder(dtypes.float64)
self._encode_module.encode_proto(
sizes=sizes,
values=[values],
message_type='tensorflow.contrib.proto.TestValue',
field_names=['double_value']).eval(feed_dict={
sizes: sizes_value,
values: [[0.0]]
})
# Inconsistent shapes of values.
with self.assertRaisesOpError('Values must match up to the last dimension'):
if context.executing_eagerly():
self.evaluate(
self._encode_module.encode_proto(
sizes=[[1, 1]],
values=[np.array([[0.0]]),
np.array([[0], [0]])],
message_type='tensorflow.contrib.proto.TestValue',
field_names=['double_value', 'int32_value']))
else:
with self.cached_session():
values1 = array_ops.placeholder(dtypes.float64)
values2 = array_ops.placeholder(dtypes.int32)
(self._encode_module.encode_proto(
sizes=[[1, 1]],
values=[values1, values2],
message_type='tensorflow.contrib.proto.TestValue',
field_names=['double_value', 'int32_value']).eval(feed_dict={
values1: [[0.0]],
values2: [[0], [0]]
}))
def _testRoundtrip(self, in_bufs, message_type, fields):
field_names = [f.name for f in fields]
out_types = [f.dtype for f in fields]
with self.cached_session() as sess:
sizes, field_tensors = self._decode_module.decode_proto(
in_bufs,
message_type=message_type,
field_names=field_names,
output_types=out_types)
out_tensors = self._encode_module.encode_proto(
sizes,
field_tensors,
message_type=message_type,
field_names=field_names)
out_bufs, = sess.run([out_tensors])
# Check that the re-encoded tensor has the same shape.
self.assertEqual(in_bufs.shape, out_bufs.shape)
# Compare the input and output.
for in_buf, out_buf in zip(in_bufs.flat, out_bufs.flat):
in_obj = test_example_pb2.TestValue()
in_obj.ParseFromString(in_buf)
out_obj = test_example_pb2.TestValue()
out_obj.ParseFromString(out_buf)
# Check that the deserialized objects are identical.
self.assertEqual(in_obj, out_obj)
# Check that the input and output serialized messages are identical.
# If we fail here, there is a difference in the serialized
# representation but the new serialization still parses. This could
# be harmless (a change in map ordering?) or it could be bad (e.g.
# loss of packing in the encoding).
self.assertEqual(in_buf, out_buf)
@parameterized.named_parameters(
*test_base.ProtoOpTestBase.named_parameters(extension=False))
def testRoundtrip(self, case):
in_bufs = [value.SerializeToString() for value in case.values]
# np.array silently truncates strings if you don't specify dtype=object.
in_bufs = np.reshape(np.array(in_bufs, dtype=object), list(case.shapes))
return self._testRoundtrip(
in_bufs, 'tensorflow.contrib.proto.TestValue', case.fields)
@parameterized.named_parameters(
*test_base.ProtoOpTestBase.named_parameters(extension=False))
def testRoundtripPacked(self, case):
# Now try with the packed serialization.
# We test the packed representations by loading the same test cases using
# PackedTestValue instead of TestValue. To do this we rely on the text
# format being the same for packed and unpacked fields, and reparse the test
# message using the packed version of the proto.
in_bufs = [
# Note: float_format='.17g' is necessary to ensure preservation of
# doubles and floats in text format.
text_format.Parse(
text_format.MessageToString(
value, float_format='.17g'),
test_example_pb2.PackedTestValue()).SerializeToString()
for value in case.values
]
# np.array silently truncates strings if you don't specify dtype=object.
in_bufs = np.reshape(np.array(in_bufs, dtype=object), list(case.shapes))
return self._testRoundtrip(
in_bufs, 'tensorflow.contrib.proto.PackedTestValue', case.fields)
| apache-2.0 | -645,645,493,953,483,900 | 39.8125 | 105 | 0.629756 | false |
lepricon49/headphones | lib/bs4/builder/__init__.py | 73 | 11234 | from collections import defaultdict
import itertools
import sys
from bs4.element import (
CharsetMetaAttributeValue,
ContentMetaAttributeValue,
whitespace_re
)
__all__ = [
'HTMLTreeBuilder',
'SAXTreeBuilder',
'TreeBuilder',
'TreeBuilderRegistry',
]
# Some useful features for a TreeBuilder to have.
FAST = 'fast'
PERMISSIVE = 'permissive'
STRICT = 'strict'
XML = 'xml'
HTML = 'html'
HTML_5 = 'html5'
class TreeBuilderRegistry(object):
def __init__(self):
self.builders_for_feature = defaultdict(list)
self.builders = []
def register(self, treebuilder_class):
"""Register a treebuilder based on its advertised features."""
for feature in treebuilder_class.features:
self.builders_for_feature[feature].insert(0, treebuilder_class)
self.builders.insert(0, treebuilder_class)
def lookup(self, *features):
if len(self.builders) == 0:
# There are no builders at all.
return None
if len(features) == 0:
# They didn't ask for any features. Give them the most
# recently registered builder.
return self.builders[0]
# Go down the list of features in order, and eliminate any builders
# that don't match every feature.
features = list(features)
features.reverse()
candidates = None
candidate_set = None
while len(features) > 0:
feature = features.pop()
we_have_the_feature = self.builders_for_feature.get(feature, [])
if len(we_have_the_feature) > 0:
if candidates is None:
candidates = we_have_the_feature
candidate_set = set(candidates)
else:
# Eliminate any candidates that don't have this feature.
candidate_set = candidate_set.intersection(
set(we_have_the_feature))
# The only valid candidates are the ones in candidate_set.
# Go through the original list of candidates and pick the first one
# that's in candidate_set.
if candidate_set is None:
return None
for candidate in candidates:
if candidate in candidate_set:
return candidate
return None
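# For example, builder_registry.lookup('html', 'fast') returns the most
# recently registered builder that advertises both features (typically the
# lxml HTML builder, if lxml is installed), or None when nothing matches.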
# The BeautifulSoup class will take feature lists from developers and use them
# to look up builders in this registry.
builder_registry = TreeBuilderRegistry()
class TreeBuilder(object):
"""Turn a document into a Beautiful Soup object tree."""
NAME = "[Unknown tree builder]"
ALTERNATE_NAMES = []
features = []
is_xml = False
picklable = False
preserve_whitespace_tags = set()
empty_element_tags = None # A tag will be considered an empty-element
# tag when and only when it has no contents.
# A value for these tag/attribute combinations is a space- or
# comma-separated list of CDATA, rather than a single CDATA.
cdata_list_attributes = {}
def __init__(self):
self.soup = None
def reset(self):
pass
def can_be_empty_element(self, tag_name):
"""Might a tag with this name be an empty-element tag?
The final markup may or may not actually present this tag as
self-closing.
For instance: an HTMLBuilder does not consider a <p> tag to be
an empty-element tag (it's not in
HTMLBuilder.empty_element_tags). This means an empty <p> tag
will be presented as "<p></p>", not "<p />".
The default implementation has no opinion about which tags are
empty-element tags, so a tag will be presented as an
empty-element tag if and only if it has no contents.
"<foo></foo>" will become "<foo />", and "<foo>bar</foo>" will
be left alone.
"""
if self.empty_element_tags is None:
return True
return tag_name in self.empty_element_tags
def feed(self, markup):
raise NotImplementedError()
def prepare_markup(self, markup, user_specified_encoding=None,
document_declared_encoding=None):
return markup, None, None, False
def test_fragment_to_document(self, fragment):
"""Wrap an HTML fragment to make it look like a document.
Different parsers do this differently. For instance, lxml
introduces an empty <head> tag, and html5lib
doesn't. Abstracting this away lets us write simple tests
which run HTML fragments through the parser and compare the
results against other HTML fragments.
This method should not be used outside of tests.
"""
return fragment
def set_up_substitutions(self, tag):
return False
def _replace_cdata_list_attribute_values(self, tag_name, attrs):
"""Replaces class="foo bar" with class=["foo", "bar"]
Modifies its input in place.
"""
if not attrs:
return attrs
if self.cdata_list_attributes:
universal = self.cdata_list_attributes.get('*', [])
tag_specific = self.cdata_list_attributes.get(
tag_name.lower(), None)
for attr in attrs.keys():
if attr in universal or (tag_specific and attr in tag_specific):
# We have a "class"-type attribute whose string
# value is a whitespace-separated list of
# values. Split it into a list.
value = attrs[attr]
if isinstance(value, basestring):
values = whitespace_re.split(value)
else:
# html5lib sometimes calls setAttributes twice
# for the same tag when rearranging the parse
# tree. On the second call the attribute value
# here is already a list. If this happens,
# leave the value alone rather than trying to
# split it again.
values = value
attrs[attr] = values
return attrs
class SAXTreeBuilder(TreeBuilder):
"""A Beautiful Soup treebuilder that listens for SAX events."""
def feed(self, markup):
raise NotImplementedError()
def close(self):
pass
def startElement(self, name, attrs):
attrs = dict((key[1], value) for key, value in list(attrs.items()))
#print "Start %s, %r" % (name, attrs)
self.soup.handle_starttag(name, attrs)
def endElement(self, name):
#print "End %s" % name
self.soup.handle_endtag(name)
def startElementNS(self, nsTuple, nodeName, attrs):
# Throw away (ns, nodeName) for now.
self.startElement(nodeName, attrs)
def endElementNS(self, nsTuple, nodeName):
# Throw away (ns, nodeName) for now.
self.endElement(nodeName)
#handler.endElementNS((ns, node.nodeName), node.nodeName)
def startPrefixMapping(self, prefix, nodeValue):
# Ignore the prefix for now.
pass
def endPrefixMapping(self, prefix):
# Ignore the prefix for now.
# handler.endPrefixMapping(prefix)
pass
def characters(self, content):
self.soup.handle_data(content)
def startDocument(self):
pass
def endDocument(self):
pass
class HTMLTreeBuilder(TreeBuilder):
"""This TreeBuilder knows facts about HTML.
Such as which tags are empty-element tags.
"""
preserve_whitespace_tags = set(['pre', 'textarea'])
empty_element_tags = set(['br' , 'hr', 'input', 'img', 'meta',
'spacer', 'link', 'frame', 'base'])
# The HTML standard defines these attributes as containing a
# space-separated list of values, not a single value. That is,
# class="foo bar" means that the 'class' attribute has two values,
# 'foo' and 'bar', not the single value 'foo bar'. When we
# encounter one of these attributes, we will parse its value into
# a list of values if possible. Upon output, the list will be
# converted back into a string.
cdata_list_attributes = {
"*" : ['class', 'accesskey', 'dropzone'],
"a" : ['rel', 'rev'],
"link" : ['rel', 'rev'],
"td" : ["headers"],
"th" : ["headers"],
"td" : ["headers"],
"form" : ["accept-charset"],
"object" : ["archive"],
# These are HTML5 specific, as are *.accesskey and *.dropzone above.
"area" : ["rel"],
"icon" : ["sizes"],
"iframe" : ["sandbox"],
"output" : ["for"],
}
def set_up_substitutions(self, tag):
# We are only interested in <meta> tags
if tag.name != 'meta':
return False
http_equiv = tag.get('http-equiv')
content = tag.get('content')
charset = tag.get('charset')
# We are interested in <meta> tags that say what encoding the
# document was originally in. This means HTML 5-style <meta>
# tags that provide the "charset" attribute. It also means
# HTML 4-style <meta> tags that provide the "content"
# attribute and have "http-equiv" set to "content-type".
#
# In both cases we will replace the value of the appropriate
# attribute with a standin object that can take on any
# encoding.
meta_encoding = None
if charset is not None:
# HTML 5 style:
# <meta charset="utf8">
meta_encoding = charset
tag['charset'] = CharsetMetaAttributeValue(charset)
elif (content is not None and http_equiv is not None
and http_equiv.lower() == 'content-type'):
# HTML 4 style:
# <meta http-equiv="content-type" content="text/html; charset=utf8">
tag['content'] = ContentMetaAttributeValue(content)
return (meta_encoding is not None)
def register_treebuilders_from(module):
"""Copy TreeBuilders from the given module into this module."""
# I'm fairly sure this is not the best way to do this.
this_module = sys.modules['bs4.builder']
for name in module.__all__:
obj = getattr(module, name)
if issubclass(obj, TreeBuilder):
setattr(this_module, name, obj)
this_module.__all__.append(name)
# Register the builder while we're at it.
this_module.builder_registry.register(obj)
class ParserRejectedMarkup(Exception):
pass
# Builders are registered in reverse order of priority, so that custom
# builder registrations will take precedence. In general, we want lxml
# to take precedence over html5lib, because it's faster. And we only
# want to use HTMLParser as a last result.
from . import _htmlparser
register_treebuilders_from(_htmlparser)
try:
from . import _html5lib
register_treebuilders_from(_html5lib)
except ImportError:
# They don't have html5lib installed.
pass
try:
from . import _lxml
register_treebuilders_from(_lxml)
except ImportError:
# They don't have lxml installed.
pass
| gpl-3.0 | 1,191,731,705,983,103,200 | 33.67284 | 80 | 0.602368 | false |
fw1121/genomics | bcftbx/Pipeline.py | 1 | 28839 | #!/usr/bin/env python
#
# Pipeline.py: classes for running scripts iteratively
# Copyright (C) University of Manchester 2011 Peter Briggs
#
########################################################################
#
# Pipeline.py
#
#########################################################################
"""
Classes for running scripts iteratively over a collection of data files.
The essential classes are:
* Job: wrapper for setting up, submitting and monitoring running
scripts
* PipelineRunner: queue and run script multiple times on standard set
of inputs
* SolidPipelineRunner: subclass of PipelineRunner specifically for
running on SOLiD data (i.e. pairs of csfasta/qual files)
There are also some useful methods:
* GetSolidDataFiles: collect csfasta/qual file pairs from a specific
directory
* GetSolidPairedEndFiles: collect csfasta/qual file pairs for paired
end data
* GetFastqFiles: collect fastq files from a specific directory
* GetFastqGzFiles: collect gzipped fastq files
The PipelineRunners depend on the JobRunner instances (created from
classes in the JobRunner module) to interface with the job management
system. So typical usage might look like:
>>> import JobRunner
>>> import Pipeline
>>> runner = JobRunner.GEJobRunner() # to use Grid Engine
>>> pipeline = Pipeline.PipelineRunner(runner)
>>> pipeline.queueJob(...)
>>> pipeline.run()
"""
#######################################################################
# Module metadata
#######################################################################
__version__ = "0.1.3"
#######################################################################
# Import modules that this module depends on
#######################################################################
import sys
import os
import re
import time
import Queue
import logging
#######################################################################
# Class definitions
#######################################################################
# Job: container for a script run
class Job:
"""Wrapper class for setting up, submitting and monitoring running scripts
Set up a job by creating a Job instance specifying the name, working directory,
script file to execute, and arguments to be supplied to the script.
The job is started by invoking the 'start' method; its status can be checked
with the 'isRunning' method, and terminated and restarted using the 'terminate'
and 'restart' methods respectively.
Information about the job can also be accessed via its properties. The following
properties record the original parameters supplied on instantiation:
name
working_dir
script
args
label
group_label
Additional information is set once the job has started or stopped running:
job_id The id number for the running job returned by the JobRunner
log The log file for the job (relative to working_dir)
start_time The start time (seconds since the epoch)
end_time The end time (seconds since the epoch)
The Job class uses a JobRunner instance (which supplies the necessary methods for
starting, stopping and monitoring) for low-level job interactions.
"""
def __init__(self,runner,name,dirn,script,args,label=None,group=None):
"""Create an instance of Job.
Arguments:
runner: a JobRunner instance supplying job control methods
name: name to give the running job
dirn: directory to run the script in
script: script file to submit, either a full path, relative path to dirn, or
must be on the user's PATH in the environment where jobs are executed
args: Python list with the arguments to supply to the script when it is
submitted
label: (optional) arbitrary string to use as an identifier for the job
group: (optional) arbitrary string to use as a 'group' identifier;
assign the same 'group' label to multiple jobs to indicate they're
related
"""
self.name = name
self.working_dir = dirn
self.script = script
self.args = args
self.label = label
self.group_label = group
self.job_id = None
self.log = None
self.submitted = False
self.failed = False
self.terminated = False
self.start_time = None
self.end_time = None
self.home_dir = os.getcwd()
self.__finished = False
self.__runner = runner
# Time interval to use when checking for job start (seconds)
# Can be floating point number e.g. 0.1 (= 100ms)
self.__poll_interval = 1
# Time out period to use before giving up on job submission
# (seconds)
self.__timeout = 3600
def start(self):
"""Start the job running
Returns:
Id for job
"""
if not self.submitted and not self.__finished:
self.job_id = self.__runner.run(self.name,self.working_dir,self.script,self.args)
self.submitted = True
self.start_time = time.time()
if self.job_id is None:
# Failed to submit correctly
logging.warning("Job submission failed")
self.failed = True
self.__finished = True
self.end_time = self.start_time
return self.job_id
self.submitted = True
self.start_time = time.time()
self.log = self.__runner.logFile(self.job_id)
# Wait for evidence that the job has started
logging.debug("Waiting for job to start")
time_waiting = 0
while not self.__runner.isRunning(self.job_id) and not os.path.exists(self.log):
time.sleep(self.__poll_interval)
time_waiting += self.__poll_interval
if time_waiting > self.__timeout:
# Waited too long for job to start, give up
logging.error("Timed out waiting for job to start")
self.failed = True
self.__finished = True
self.end_time = self.start_time
return self.job_id
logging.debug("Job %s started (%s)" % (self.job_id,
time.asctime(time.localtime(self.start_time))))
# Also report queue (for GE jobs only)
try:
logging.debug("Queue: %s" % self.__runner.queue(self.job_id))
except AttributeError:
pass
return self.job_id
def terminate(self):
"""Terminate a running job
"""
if self.isRunning():
self.__runner.terminate(self.job_id)
self.terminated = True
self.end_time = time.time()
def restart(self):
"""Restart the job
Terminates the job (if still running) and restarts"""
# Terminate running job
if self.isRunning():
self.terminate()
while self.isRunning():
time.sleep(self.__poll_interval)
# Reset flags
self.__finished = False
self.submitted = False
self.terminated = False
self.start_time = None
self.end_time = None
# Resubmit
return self.start()
def isRunning(self):
"""Check if job is still running
"""
if not self.submitted:
return False
if not self.__finished:
if not self.__runner.isRunning(self.job_id):
self.end_time = time.time()
self.__finished = True
return not self.__finished
def errorState(self):
"""Check if the job is in an error state
"""
return self.__runner.errorState(self.job_id)
def status(self):
"""Return descriptive string indicating job status
"""
if self.__finished:
if self.terminated:
return "Terminated"
else:
return "Finished"
elif self.submitted:
if self.terminated:
return "Running pending termination"
else:
return "Running"
else:
return "Waiting"
def wait(self):
"""Wait for job to complete
Block calling process until the job has finished running.
"""
while self.isRunning():
time.sleep(1)
return
@property
def runner(self):
"""Return the JobRunner instance associated with the Job
"""
return self.__runner
# PipelineRunner: class to set up and run multiple jobs
class PipelineRunner:
"""Class to run and manage multiple concurrent jobs.
PipelineRunner enables multiple jobs to be queued via the 'queueJob' method. The
pipeline is then started using the 'run' method - this starts each job up to a
a specified maximum of concurrent jobs, and then monitors their progress. As jobs
finish, pending jobs are started until all jobs have completed.
Example usage:
>>> p = PipelineRunner()
>>> p.queueJob('/home/foo','foo.sh','bar.in')
... Queue more jobs ...
>>> p.run()
By default the pipeline runs in 'blocking' mode, i.e. 'run' doesn't return until all
jobs have been submitted and have completed; see the 'run' method for details of
how to operate the pipeline in non-blocking mode.
The invoking subprogram can also specify functions that will be called when a job
completes ('jobCompletionHandler'), and when a group completes
    ('groupCompletionHandler'). These can perform any actions that are required,
    such as sending notification emails or setting file ownership and permissions.
"""
def __init__(self,runner,max_concurrent_jobs=4,poll_interval=30,jobCompletionHandler=None,
groupCompletionHandler=None):
"""Create new PipelineRunner instance.
Arguments:
runner: a JobRunner instance
max_concurrent_jobs: maximum number of jobs that the script will allow to run
at one time (default = 4)
poll_interval: time interval (in seconds) between checks on the queue status
(only used when pipeline is run in 'blocking' mode)
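          jobCompletionHandler: optional callable invoked with the completed
            Job instance each time a job finishes
          groupCompletionHandler: optional callable invoked with the group
            label and the list of Jobs in that group once all of the jobs
            in the group have completed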
"""
# Parameters
self.__runner = runner
self.max_concurrent_jobs = max_concurrent_jobs
self.poll_interval = poll_interval
# Groups
self.groups = []
self.njobs_in_group = {}
# Queue of jobs to run
self.jobs = Queue.Queue()
# Subset that are currently running
self.running = []
# Subset that have completed
self.completed = []
# Callback functions
self.handle_job_completion = jobCompletionHandler
self.handle_group_completion = groupCompletionHandler
def queueJob(self,working_dir,script,script_args,label=None,group=None):
"""Add a job to the pipeline.
The job will be queued and executed once the pipeline's 'run' method has been
executed.
Arguments:
working_dir: directory to run the job in
script: script file to run
script_args: arguments to be supplied to the script at run time
label: (optional) arbitrary string to use as an identifier in the job name
group: (optional) arbitrary string to use as a 'group' identifier;
assign the same 'group' label to multiple jobs to indicate they're
related
"""
job_name = os.path.splitext(os.path.basename(script))[0]+'.'+str(label)
if group:
if group not in self.groups:
# New group label
self.groups.append(group)
self.njobs_in_group[group] = 1
else:
self.njobs_in_group[group] += 1
self.jobs.put(Job(self.__runner,job_name,working_dir,script,script_args,
label,group))
logging.debug("Added job: now %d jobs in pipeline" % self.jobs.qsize())
def nWaiting(self):
"""Return the number of jobs still waiting to be started
"""
return self.jobs.qsize()
def nRunning(self):
"""Return the number of jobs currently running
"""
return len(self.running)
def nCompleted(self):
"""Return the number of jobs that have completed
"""
return len(self.completed)
def isRunning(self):
"""Check whether the pipeline is still running
Returns True if the pipeline is still running (i.e. has either
running jobs, waiting jobs or both) and False otherwise.
"""
# First update the pipeline status
self.update()
# Return the status
return (self.nWaiting() > 0 or self.nRunning() > 0)
def run(self,blocking=True):
"""Execute the jobs in the pipeline
Each job previously added to the pipeline by 'queueJob' will be
started and checked periodically for termination.
By default 'run' operates in 'blocking' mode, so it doesn't return
until all jobs have been submitted and have finished executing.
To run in non-blocking mode, set the 'blocking' argument to False.
In this mode the pipeline starts and returns immediately; it is
the responsibility of the calling subprogram to then periodically
check the status of the pipeline, e.g.
>>> p = PipelineRunner()
>>> p.queueJob('/home/foo','foo.sh','bar.in')
>>> p.run()
>>> while p.isRunning():
>>> time.sleep(30)
"""
logging.debug("PipelineRunner: started")
logging.debug("Blocking mode : %s" % blocking)
# Report set up
print "Initially %d jobs waiting, %d running, %d finished" % \
(self.nWaiting(),self.nRunning(),self.nCompleted())
# Initial update sets the jobs running
self.update()
if blocking:
while self.isRunning():
# Pipeline is still executing so wait
time.sleep(self.poll_interval)
# Pipeline has finished
print "Pipeline completed"
def update(self):
"""Update the pipeline
The 'update' method checks and updates the status of running jobs,
and submits any waiting jobs if space is available.
"""
# Flag to report updated status
updated_status = False
# Look for running jobs that have completed
for job in self.running[::-1]:
if not job.isRunning():
# Job has completed
self.running.remove(job)
self.completed.append(job)
updated_status = True
print "Job has completed: %s: %s %s (%s)" % (
job.job_id,
job.name,
os.path.basename(job.working_dir),
time.asctime(time.localtime(job.end_time)))
# Invoke callback on job completion
if self.handle_job_completion:
self.handle_job_completion(job)
# Check for completed group
if job.group_label is not None:
jobs_in_group = []
for check_job in self.completed:
if check_job.group_label == job.group_label:
jobs_in_group.append(check_job)
if self.njobs_in_group[job.group_label] == len(jobs_in_group):
# All jobs in group have completed
print "Group '%s' has completed" % job.group_label
# Invoke callback on group completion
if self.handle_group_completion:
self.handle_group_completion(job.group_label,jobs_in_group)
else:
# Job is running, check it's not in an error state
if job.errorState():
# Terminate jobs in error state
logging.warning("Terminating job %s in error state" % job.job_id)
job.terminate()
# Submit new jobs to GE queue
while not self.jobs.empty() and self.nRunning() < self.max_concurrent_jobs:
next_job = self.jobs.get()
next_job.start()
self.running.append(next_job)
updated_status = True
print "Job has started: %s: %s %s (%s)" % (
next_job.job_id,
next_job.name,
os.path.basename(next_job.working_dir),
time.asctime(time.localtime(next_job.start_time)))
if self.jobs.empty():
logging.debug("PipelineRunner: all jobs now submitted")
# Report
if updated_status:
print "Currently %d jobs waiting, %d running, %d finished" % \
(self.nWaiting(),self.nRunning(),self.nCompleted())
def report(self):
"""Return a report of the pipeline status
"""
# Pipeline status
if self.nRunning() > 0:
status = "RUNNING"
elif self.nWaiting() > 0:
status = "WAITING"
else:
status = "COMPLETED"
report = "Pipeline status at %s: %s\n\n" % (time.asctime(),status)
# Report directories
dirs = []
for job in self.completed:
if job.working_dir not in dirs:
dirs.append(job.working_dir)
for dirn in dirs:
report += "\t%s\n" % dirn
# Report jobs waiting
if self.nWaiting() > 0:
report += "\n%d jobs waiting to run\n" % self.nWaiting()
# Report jobs running
if self.nRunning() > 0:
report += "\n%d jobs running:\n" % self.nRunning()
for job in self.running:
report += "\t%s\t%s\t%s\n" % (job.label,job.log,job.working_dir)
# Report completed jobs
if self.nCompleted() > 0:
report += "\n%d jobs completed:\n" % self.nCompleted()
for job in self.completed:
report += "\t%s\t%s\t%s\t%.1fs\t[%s]\n" % (job.label,
job.log,
job.working_dir,
(job.end_time - job.start_time),
job.status())
return report
def __del__(self):
"""Deal with deletion of the pipeline
If the pipeline object is deleted while still running
then terminate all running jobs.
"""
# Empty the queue
while not self.jobs.empty():
self.jobs.get()
# Terminate the running jobs
for job in self.running:
logging.debug("Terminating job %s" % job.job_id)
print "Terminating job %s" % job.job_id
try:
job.terminate()
except Exception, ex:
logging.error("Failed to terminate job %s: %s" % (job.job_id,ex))
class SolidPipelineRunner(PipelineRunner):
"""Class to run and manage multiple jobs for Solid data pipelines
Subclass of PipelineRunner specifically for dealing with scripts
that take Solid data (i.e. csfasta/qual file pairs).
Defines the addDir method in addition to all methods already defined
in the base class; use this method one or more times to specify
directories with data to run the script on. The SOLiD data file pairs
in each specified directory will be located automatically.
For example:
    solid_pipeline = SolidPipelineRunner(runner,'qc.sh')
solid_pipeline.addDir('/path/to/datadir')
solid_pipeline.run()
"""
def __init__(self,runner,script,max_concurrent_jobs=4,poll_interval=30):
PipelineRunner.__init__(self,runner)
self.script = script
def addDir(self,dirn):
logging.debug("Add dir: %s" % dirn)
run_data = GetSolidDataFiles(dirn)
for data in run_data:
self.queueJob(dirn,self.script,data)
#######################################################################
# Module Functions
#######################################################################
def GetSolidDataFiles(dirn,pattern=None,file_list=None):
"""Return list of csfasta/qual file pairs in target directory
Note that files with names ending in '_T_F3' will be rejected
as these are assumed to come from the preprocess filtering stage.
Optionally also specify a regular expression pattern that file
names must also match in order to be included.
Arguments:
dirn: name/path of directory to look for files in
pattern: optional, regular expression pattern to filter names with
file_list: optional, a list of file names to use instead of
fetching a list of files from the specified directory
Returns:
      List of (csfasta,qual) file pair tuples.
"""
data_files = []
if file_list is not None:
# Used supplied file list
logging.debug("Ignoring dirn argument and using supplied file list")
all_files = file_list
else:
# Check directory exists
if not os.path.isdir(dirn):
logging.error("'%s' not a directory: unable to collect SOLiD files" % dirn)
return []
# Gather data files
logging.debug("Collecting csfasta/qual file pairs in %s" % dirn)
all_files = os.listdir(dirn)
all_files.sort()
# Regular expression pattern
if pattern is not None:
regex = re.compile(pattern)
# Look for csfasta and matching qual files
for filen in all_files:
logging.debug("Examining file %s" % filen)
root = os.path.splitext(filen)[0]
ext = os.path.splitext(filen)[1]
if ext == ".qual":
qual = filen
# Reject names ending with "_T_F3"
try:
i = root.rindex('_T_F3')
logging.debug("Rejecting %s" % qual)
continue
except ValueError:
# Name is okay, ignore
pass
# Match csfasta names which don't have "_QV" in them
try:
i = root.rindex('_QV')
csfasta = root[:i]+root[i+3:]+".csfasta"
except ValueError:
# QV not in name, try to match whole name
csfasta = root+".csfasta"
if csfasta in all_files:
# If a regex pattern is specified then also filter on it
if pattern is None or regex.search(csfasta):
data_files.append((csfasta,qual))
else:
logging.critical("Unable to get csfasta for %s" % filen)
# Done - return file pairs
return data_files
def GetSolidPairedEndFiles(dirn,pattern=None,file_list=None):
"""Return list of csfasta/qual file pairs for paired end data
Optionally also specify a regular expression pattern that file
names must also match in order to be included.
Arguments:
dirn: name/path of directory to look for files in
pattern: optional, regular expression pattern to filter names with
file_list: optional, a list of file names to use instead of
fetching a list of files from the specified directory
Returns:
      List of (csfasta_F3,qual_F3,csfasta_F5,qual_F5) tuples, i.e. the F3
      csfasta-qual pair followed by the matching F5 pair.
"""
# Get list of pairs
file_pairs = GetSolidDataFiles(dirn,pattern=pattern,file_list=file_list)
if not file_pairs:
return []
# Now match pairs of pairs: files with the same name except for
# 'F3' and 'F5' or 'F5-BC'
logging.debug("Matching F3 csfasta/qual file pairs with F5 counterparts")
key_list = []
matched_files = dict()
for pair in file_pairs:
# Remove _F3, _F5 and _F5-BC components from csfasta to
# use as a key
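        # e.g. 'sample_F3.csfasta' and 'sample_F5-BC.csfasta' both reduce to
        # the key 'sample.csfasta', so matching F3 and F5 pairs share a key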
key = pair[0].replace('_F3','').replace('_F5-BC','').replace('_F5','')
logging.debug("Key: %s for %s" % (key,pair))
if key in key_list:
# Already has an entry
matched_files[key].append(pair)
else:
# New key
key_list.append(key)
matched_files[key] = [pair]
# Check pairs of pairs
data_files = []
for key in key_list:
if len(matched_files[key]) != 2:
logging.debug("discarding pairs: %s" % matched_files[key])
else:
# Look for F3 and F5s and put into order
try:
matched_files[key][0][0].index('F5')
# F5 pair are first set
f3_index = 1
f5_index = 0
except ValueError:
# F3 pair are first set
f3_index = 0
f5_index = 1
# Pull out files and append to list in the
# correct order (F3 then F5)
csfasta_f3 = matched_files[key][f3_index][0]
qual_f3 = matched_files[key][f3_index][1]
csfasta_f5 = matched_files[key][f5_index][0]
qual_f5 = matched_files[key][f5_index][1]
data_files.append((csfasta_f3,qual_f3,csfasta_f5,qual_f5))
# Done - return file list
return data_files
def GetFastqFiles(dirn,pattern=None,file_list=None):
"""Return list of fastq files in target directory
Optionally also specify a regular expression pattern that file
names must also match in order to be included.
Arguments:
dirn: name/path of directory to look for files in
pattern: optional, regular expression pattern to filter names with
file_list: optional, a list of file names to use instead of
fetching a list of files from the specified directory
Returns:
      List of single-element tuples, each containing a fastq file name.
"""
data_files = []
if file_list is not None:
# Used supplied file list
logging.debug("Ignoring dirn argument and using supplied file list")
all_files = file_list
else:
# Check directory exists
if not os.path.isdir(dirn):
logging.error("'%s' not a directory: unable to collect fastq files" % dirn)
return []
# Gather data files
logging.debug("Collecting fastq files in %s" % dirn)
all_files = os.listdir(dirn)
all_files.sort()
# Regular expression pattern
if pattern is not None:
regex = re.compile(pattern)
# Look for csfasta and matching qual files
for filen in all_files:
logging.debug("Examining file %s" % filen)
root = os.path.splitext(filen)[0]
ext = os.path.splitext(filen)[1]
if ext == ".fastq":
# If a regex pattern is specified then also filter on it
if pattern is None or regex.search(root):
data_files.append((filen,))
# Done - return file list
return data_files
def GetFastqGzFiles(dirn,pattern=None,file_list=None):
"""Return list of fastq.gz files in target directory
Optionally also specify a regular expression pattern that file
names must also match in order to be included.
Arguments:
dirn: name/path of directory to look for files in
pattern: optional, regular expression pattern to filter names with
file_list: optional, a list of file names to use instead of
fetching a list of files from the specified directory
Returns:
      List of single-element tuples, each containing a fastq.gz file name.
"""
data_files = []
if file_list is not None:
# Used supplied file list
logging.debug("Ignoring dirn argument and using supplied file list")
all_files = file_list
else:
# Check directory exists
if not os.path.isdir(dirn):
logging.error("'%s' not a directory: unable to collect fastq.gz files" % dirn)
return []
# Gather data files
logging.debug("Collecting fastq.gz files in %s" % dirn)
all_files = os.listdir(dirn)
all_files.sort()
# Regular expression pattern
if pattern is not None:
regex = re.compile(pattern)
# Look for .fastq.gz
for filen in all_files:
logging.debug("Examining file %s" % filen)
if filen.split('.')[-1] == "gz":
# Ends with gz
try:
if filen.split('.')[-2] == "fastq":
# If a regex pattern is specified then also filter on it
if pattern is None or regex.search(filen.split('.')[-3]):
data_files.append((filen,))
except IndexError:
# Ignore
pass
# Done - return file list
return data_files
| artistic-2.0 | -8,238,330,618,129,812,000 | 36.846457 | 94 | 0.576303 | false |
buntyke/Flask | microblog/flask/lib/python2.7/site-packages/wtforms/meta.py | 79 | 3822 | from wtforms.utils import WebobInputWrapper
from wtforms import i18n
class DefaultMeta(object):
"""
This is the default Meta class which defines all the default values and
therefore also the 'API' of the class Meta interface.
"""
# -- Basic form primitives
def bind_field(self, form, unbound_field, options):
"""
bind_field allows potential customization of how fields are bound.
The default implementation simply passes the options to
:meth:`UnboundField.bind`.
:param form: The form.
:param unbound_field: The unbound field.
:param options:
A dictionary of options which are typically passed to the field.
:return: A bound field
"""
return unbound_field.bind(form=form, **options)
def wrap_formdata(self, form, formdata):
"""
        wrap_formdata allows custom wrapping of the formdata WTForms receives.
        The default implementation detects webob-style multidicts and wraps
        them; otherwise it passes formdata back unchanged.
:param form: The form.
:param formdata: Form data.
:return: A form-input wrapper compatible with WTForms.
"""
if formdata is not None and not hasattr(formdata, 'getlist'):
if hasattr(formdata, 'getall'):
return WebobInputWrapper(formdata)
else:
raise TypeError("formdata should be a multidict-type wrapper that supports the 'getlist' method")
return formdata
def render_field(self, field, render_kw):
"""
render_field allows customization of how widget rendering is done.
The default implementation calls ``field.widget(field, **render_kw)``
"""
other_kw = getattr(field, 'render_kw', None)
if other_kw is not None:
render_kw = dict(other_kw, **render_kw)
return field.widget(field, **render_kw)
# -- CSRF
csrf = False
csrf_field_name = 'csrf_token'
csrf_secret = None
csrf_context = None
csrf_class = None
def build_csrf(self, form):
"""
Build a CSRF implementation. This is called once per form instance.
The default implementation builds the class referenced to by
:attr:`csrf_class` with zero arguments. If `csrf_class` is ``None``,
will instead use the default implementation
:class:`wtforms.csrf.session.SessionCSRF`.
:param form: The form.
:return: A CSRF implementation.
"""
if self.csrf_class is not None:
return self.csrf_class()
from wtforms.csrf.session import SessionCSRF
return SessionCSRF()
# -- i18n
locales = False
cache_translations = True
translations_cache = {}
def get_translations(self, form):
"""
Override in subclasses to provide an alternate translations factory.
See the i18n documentation for more.
:param form: The form.
:return: An object that provides gettext() and ngettext() methods.
"""
locales = self.locales
if locales is False:
return None
if self.cache_translations:
# Make locales be a hashable value
locales = tuple(locales) if locales else None
translations = self.translations_cache.get(locales)
if translations is None:
translations = self.translations_cache[locales] = i18n.get_translations(locales)
return translations
return i18n.get_translations(locales)
# -- General
def update_values(self, values):
"""
Given a dictionary of values, update values on this `Meta` instance.
"""
for key, value in values.items():
setattr(self, key, value)
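# Illustrative sketch (not part of wtforms itself): the hooks above are
# usually customized through a form's inner Meta class, which WTForms merges
# with DefaultMeta. The form, field and CSS class name below are invented
# examples, not anything defined in this module.
if __name__ == '__main__':
    from wtforms.form import Form
    from wtforms.fields import StringField

    class SignupForm(Form):
        class Meta:
            # Overrides DefaultMeta.render_field for every field render.
            def render_field(self, field, render_kw):
                render_kw.setdefault('class', 'form-control')
                return DefaultMeta.render_field(self, field, render_kw)

        name = StringField('Name')

    # e.g. renders an <input> carrying class="form-control"
    print(SignupForm().name())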
| mit | -1,531,494,448,635,667,500 | 30.586777 | 113 | 0.621664 | false |
abhishekgahlot/scikit-learn | examples/linear_model/plot_sgd_separating_hyperplane.py | 260 | 1219 | """
=========================================
SGD: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a linear Support Vector Machines classifier
trained using SGD.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import SGDClassifier
from sklearn.datasets.samples_generator import make_blobs
# we create 50 separable points
X, Y = make_blobs(n_samples=50, centers=2, random_state=0, cluster_std=0.60)
# fit the model
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=200, fit_intercept=True)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
xx = np.linspace(-1, 5, 10)
yy = np.linspace(-1, 5, 10)
X1, X2 = np.meshgrid(xx, yy)
Z = np.empty(X1.shape)
for (i, j), val in np.ndenumerate(X1):
x1 = val
x2 = X2[i, j]
p = clf.decision_function([x1, x2])
Z[i, j] = p[0]
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
plt.contour(X1, X2, Z, levels, colors=colors, linestyles=linestyles)
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
| bsd-3-clause | 5,239,517,398,036,559,000 | 28.02381 | 77 | 0.648072 | false |
2013Commons/HUE-SHARK | desktop/core/ext-py/Django-1.2.3/django/core/serializers/json.py | 107 | 2053 | """
Serialize data to/from JSON
"""
import datetime
import decimal
from StringIO import StringIO
from django.core.serializers.python import Serializer as PythonSerializer
from django.core.serializers.python import Deserializer as PythonDeserializer
from django.utils import datetime_safe
from django.utils import simplejson
class Serializer(PythonSerializer):
"""
Convert a queryset to JSON.
"""
internal_use_only = False
def end_serialization(self):
self.options.pop('stream', None)
self.options.pop('fields', None)
self.options.pop('use_natural_keys', None)
simplejson.dump(self.objects, self.stream, cls=DjangoJSONEncoder, **self.options)
def getvalue(self):
if callable(getattr(self.stream, 'getvalue', None)):
return self.stream.getvalue()
def Deserializer(stream_or_string, **options):
"""
Deserialize a stream or string of JSON data.
"""
if isinstance(stream_or_string, basestring):
stream = StringIO(stream_or_string)
else:
stream = stream_or_string
for obj in PythonDeserializer(simplejson.load(stream), **options):
yield obj
class DjangoJSONEncoder(simplejson.JSONEncoder):
"""
JSONEncoder subclass that knows how to encode date/time and decimal types.
"""
DATE_FORMAT = "%Y-%m-%d"
TIME_FORMAT = "%H:%M:%S"
def default(self, o):
if isinstance(o, datetime.datetime):
d = datetime_safe.new_datetime(o)
return d.strftime("%s %s" % (self.DATE_FORMAT, self.TIME_FORMAT))
elif isinstance(o, datetime.date):
d = datetime_safe.new_date(o)
return d.strftime(self.DATE_FORMAT)
elif isinstance(o, datetime.time):
return o.strftime(self.TIME_FORMAT)
elif isinstance(o, decimal.Decimal):
return str(o)
else:
return super(DjangoJSONEncoder, self).default(o)
# Older, deprecated class name (for backwards compatibility purposes).
DateTimeAwareJSONEncoder = DjangoJSONEncoder
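# Illustrative sketch (not part of Django): DjangoJSONEncoder can be handed to
# a plain (simple)json dump call to serialize dates, times and Decimals that
# the stock encoder rejects. The sample payload below is made up.
if __name__ == '__main__':
    import datetime
    import decimal
    payload = {'when': datetime.datetime(2001, 11, 9, 1, 8, 47),
               'price': decimal.Decimal('9.99')}
    # e.g. {"when": "2001-11-09 01:08:47", "price": "9.99"}
    print(simplejson.dumps(payload, cls=DjangoJSONEncoder))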
| apache-2.0 | 8,950,096,234,089,235,000 | 30.584615 | 89 | 0.669264 | false |
moyogo/tachyfont | run_time/src/gae_server/third_party/old-fonttools-master/Lib/fontTools/ttLib/tables/E_B_L_C_.py | 11 | 25091 | from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
from fontTools.misc import sstruct
from . import DefaultTable
from fontTools.misc.textTools import safeEval
from .BitmapGlyphMetrics import BigGlyphMetrics, bigGlyphMetricsFormat, SmallGlyphMetrics, smallGlyphMetricsFormat
import struct
import itertools
from collections import deque
eblcHeaderFormat = """
> # big endian
version: 16.16F
numSizes: I
"""
# The table format string is split to handle sbitLineMetrics simply.
bitmapSizeTableFormatPart1 = """
> # big endian
indexSubTableArrayOffset: I
indexTablesSize: I
numberOfIndexSubTables: I
colorRef: I
"""
# The compound type for hori and vert.
sbitLineMetricsFormat = """
> # big endian
ascender: b
descender: b
widthMax: B
caretSlopeNumerator: b
caretSlopeDenominator: b
caretOffset: b
minOriginSB: b
minAdvanceSB: b
maxBeforeBL: b
minAfterBL: b
pad1: b
pad2: b
"""
# hori and vert go between the two parts.
bitmapSizeTableFormatPart2 = """
> # big endian
startGlyphIndex: H
endGlyphIndex: H
ppemX: B
ppemY: B
bitDepth: B
flags: b
"""
indexSubTableArrayFormat = ">HHL"
indexSubTableArraySize = struct.calcsize(indexSubTableArrayFormat)
indexSubHeaderFormat = ">HHL"
indexSubHeaderSize = struct.calcsize(indexSubHeaderFormat)
codeOffsetPairFormat = ">HH"
codeOffsetPairSize = struct.calcsize(codeOffsetPairFormat)
class table_E_B_L_C_(DefaultTable.DefaultTable):
dependencies = ['EBDT']
# This method can be overridden in subclasses to support new formats
# without changing the other implementation. Also can be used as a
# convenience method for converting a font file to an alternative format.
def getIndexFormatClass(self, indexFormat):
return eblc_sub_table_classes[indexFormat]
def decompile(self, data, ttFont):
# Save the original data because offsets are from the start of the table.
origData = data
dummy, data = sstruct.unpack2(eblcHeaderFormat, data, self)
self.strikes = []
for curStrikeIndex in range(self.numSizes):
curStrike = Strike()
self.strikes.append(curStrike)
curTable = curStrike.bitmapSizeTable
dummy, data = sstruct.unpack2(bitmapSizeTableFormatPart1, data, curTable)
for metric in ('hori', 'vert'):
metricObj = SbitLineMetrics()
vars(curTable)[metric] = metricObj
dummy, data = sstruct.unpack2(sbitLineMetricsFormat, data, metricObj)
dummy, data = sstruct.unpack2(bitmapSizeTableFormatPart2, data, curTable)
for curStrike in self.strikes:
curTable = curStrike.bitmapSizeTable
for subtableIndex in range(curTable.numberOfIndexSubTables):
lowerBound = curTable.indexSubTableArrayOffset + subtableIndex * indexSubTableArraySize
upperBound = lowerBound + indexSubTableArraySize
data = origData[lowerBound:upperBound]
tup = struct.unpack(indexSubTableArrayFormat, data)
(firstGlyphIndex, lastGlyphIndex, additionalOffsetToIndexSubtable) = tup
offsetToIndexSubTable = curTable.indexSubTableArrayOffset + additionalOffsetToIndexSubtable
data = origData[offsetToIndexSubTable:]
tup = struct.unpack(indexSubHeaderFormat, data[:indexSubHeaderSize])
(indexFormat, imageFormat, imageDataOffset) = tup
indexFormatClass = self.getIndexFormatClass(indexFormat)
indexSubTable = indexFormatClass(data[indexSubHeaderSize:], ttFont)
indexSubTable.firstGlyphIndex = firstGlyphIndex
indexSubTable.lastGlyphIndex = lastGlyphIndex
indexSubTable.additionalOffsetToIndexSubtable = additionalOffsetToIndexSubtable
indexSubTable.indexFormat = indexFormat
indexSubTable.imageFormat = imageFormat
indexSubTable.imageDataOffset = imageDataOffset
curStrike.indexSubTables.append(indexSubTable)
def compile(self, ttFont):
dataList = []
self.numSizes = len(self.strikes)
dataList.append(sstruct.pack(eblcHeaderFormat, self))
# Data size of the header + bitmapSizeTable needs to be calculated
# in order to form offsets. This value will hold the size of the data
# in dataList after all the data is consolidated in dataList.
dataSize = len(dataList[0])
# The table will be structured in the following order:
# (0) header
# (1) Each bitmapSizeTable [1 ... self.numSizes]
# (2) Alternate between indexSubTableArray and indexSubTable
# for each bitmapSizeTable present.
#
# The issue is maintaining the proper offsets when table information
# gets moved around. All offsets and size information must be recalculated
# when building the table to allow editing within ttLib and also allow easy
# import/export to and from XML. All of this offset information is lost
# when exporting to XML so everything must be calculated fresh so importing
# from XML will work cleanly. Only byte offset and size information is
# calculated fresh. Count information like numberOfIndexSubTables is
# checked through assertions. If the information in this table was not
# touched or was changed properly then these types of values should match.
#
# The table will be rebuilt the following way:
# (0) Precompute the size of all the bitmapSizeTables. This is needed to
# compute the offsets properly.
# (1) For each bitmapSizeTable compute the indexSubTable and
# indexSubTableArray pair. The indexSubTable must be computed first
# so that the offset information in indexSubTableArray can be
# calculated. Update the data size after each pairing.
# (2) Build each bitmapSizeTable.
# (3) Consolidate all the data into the main dataList in the correct order.
for curStrike in self.strikes:
dataSize += sstruct.calcsize(bitmapSizeTableFormatPart1)
dataSize += len(('hori', 'vert')) * sstruct.calcsize(sbitLineMetricsFormat)
dataSize += sstruct.calcsize(bitmapSizeTableFormatPart2)
indexSubTablePairDataList = []
for curStrike in self.strikes:
curTable = curStrike.bitmapSizeTable
curTable.numberOfIndexSubTables = len(curStrike.indexSubTables)
curTable.indexSubTableArrayOffset = dataSize
# Precompute the size of the indexSubTableArray. This information
# is important for correctly calculating the new value for
# additionalOffsetToIndexSubtable.
sizeOfSubTableArray = curTable.numberOfIndexSubTables * indexSubTableArraySize
lowerBound = dataSize
dataSize += sizeOfSubTableArray
upperBound = dataSize
indexSubTableDataList = []
for indexSubTable in curStrike.indexSubTables:
indexSubTable.additionalOffsetToIndexSubtable = dataSize - curTable.indexSubTableArrayOffset
glyphIds = list(map(ttFont.getGlyphID, indexSubTable.names))
indexSubTable.firstGlyphIndex = min(glyphIds)
indexSubTable.lastGlyphIndex = max(glyphIds)
data = indexSubTable.compile(ttFont)
indexSubTableDataList.append(data)
dataSize += len(data)
curTable.startGlyphIndex = min(ist.firstGlyphIndex for ist in curStrike.indexSubTables)
curTable.endGlyphIndex = max(ist.lastGlyphIndex for ist in curStrike.indexSubTables)
for i in curStrike.indexSubTables:
data = struct.pack(indexSubHeaderFormat, i.firstGlyphIndex, i.lastGlyphIndex, i.additionalOffsetToIndexSubtable)
indexSubTablePairDataList.append(data)
indexSubTablePairDataList.extend(indexSubTableDataList)
curTable.indexTablesSize = dataSize - curTable.indexSubTableArrayOffset
for curStrike in self.strikes:
curTable = curStrike.bitmapSizeTable
data = sstruct.pack(bitmapSizeTableFormatPart1, curTable)
dataList.append(data)
for metric in ('hori', 'vert'):
metricObj = vars(curTable)[metric]
data = sstruct.pack(sbitLineMetricsFormat, metricObj)
dataList.append(data)
data = sstruct.pack(bitmapSizeTableFormatPart2, curTable)
dataList.append(data)
dataList.extend(indexSubTablePairDataList)
return bytesjoin(dataList)
def toXML(self, writer, ttFont):
writer.simpletag('header', [('version', self.version)])
writer.newline()
for curIndex, curStrike in enumerate(self.strikes):
curStrike.toXML(curIndex, writer, ttFont)
def fromXML(self, name, attrs, content, ttFont):
if name == 'header':
self.version = safeEval(attrs['version'])
elif name == 'strike':
if not hasattr(self, 'strikes'):
self.strikes = []
strikeIndex = safeEval(attrs['index'])
curStrike = Strike()
curStrike.fromXML(name, attrs, content, ttFont, self)
# Grow the strike array to the appropriate size. The XML format
# allows for the strike index value to be out of order.
if strikeIndex >= len(self.strikes):
self.strikes += [None] * (strikeIndex + 1 - len(self.strikes))
assert self.strikes[strikeIndex] is None, "Duplicate strike EBLC indices."
self.strikes[strikeIndex] = curStrike
class Strike(object):
def __init__(self):
self.bitmapSizeTable = BitmapSizeTable()
self.indexSubTables = []
def toXML(self, strikeIndex, writer, ttFont):
writer.begintag('strike', [('index', strikeIndex)])
writer.newline()
self.bitmapSizeTable.toXML(writer, ttFont)
writer.comment('GlyphIds are written but not read. The firstGlyphIndex and\nlastGlyphIndex values will be recalculated by the compiler.')
writer.newline()
for indexSubTable in self.indexSubTables:
indexSubTable.toXML(writer, ttFont)
writer.endtag('strike')
writer.newline()
def fromXML(self, name, attrs, content, ttFont, locator):
for element in content:
if not isinstance(element, tuple):
continue
name, attrs, content = element
if name == 'bitmapSizeTable':
self.bitmapSizeTable.fromXML(name, attrs, content, ttFont)
elif name.startswith(_indexSubTableSubclassPrefix):
indexFormat = safeEval(name[len(_indexSubTableSubclassPrefix):])
indexFormatClass = locator.getIndexFormatClass(indexFormat)
indexSubTable = indexFormatClass(None, None)
indexSubTable.indexFormat = indexFormat
indexSubTable.fromXML(name, attrs, content, ttFont)
self.indexSubTables.append(indexSubTable)
class BitmapSizeTable(object):
# Returns all the simple metric names that the bitmap size table
# cares about in terms of XML creation.
def _getXMLMetricNames(self):
dataNames = sstruct.getformat(bitmapSizeTableFormatPart1)[1]
dataNames = dataNames + sstruct.getformat(bitmapSizeTableFormatPart2)[1]
# Skip the first 3 data names because they are byte offsets and counts.
return dataNames[3:]
def toXML(self, writer, ttFont):
writer.begintag('bitmapSizeTable')
writer.newline()
for metric in ('hori', 'vert'):
getattr(self, metric).toXML(metric, writer, ttFont)
for metricName in self._getXMLMetricNames():
writer.simpletag(metricName, value=getattr(self, metricName))
writer.newline()
writer.endtag('bitmapSizeTable')
writer.newline()
def fromXML(self, name, attrs, content, ttFont):
# Create a lookup for all the simple names that make sense to
# bitmap size table. Only read the information from these names.
dataNames = set(self._getXMLMetricNames())
for element in content:
if not isinstance(element, tuple):
continue
name, attrs, content = element
if name == 'sbitLineMetrics':
direction = attrs['direction']
assert direction in ('hori', 'vert'), "SbitLineMetrics direction specified invalid."
metricObj = SbitLineMetrics()
metricObj.fromXML(name, attrs, content, ttFont)
vars(self)[direction] = metricObj
elif name in dataNames:
vars(self)[name] = safeEval(attrs['value'])
else:
print("Warning: unknown name '%s' being ignored in BitmapSizeTable." % name)
class SbitLineMetrics(object):
def toXML(self, name, writer, ttFont):
writer.begintag('sbitLineMetrics', [('direction', name)])
writer.newline()
for metricName in sstruct.getformat(sbitLineMetricsFormat)[1]:
writer.simpletag(metricName, value=getattr(self, metricName))
writer.newline()
writer.endtag('sbitLineMetrics')
writer.newline()
def fromXML(self, name, attrs, content, ttFont):
metricNames = set(sstruct.getformat(sbitLineMetricsFormat)[1])
for element in content:
if not isinstance(element, tuple):
continue
name, attrs, content = element
if name in metricNames:
vars(self)[name] = safeEval(attrs['value'])
# Important information about the naming scheme. Used for identifying subtables.
_indexSubTableSubclassPrefix = 'eblc_index_sub_table_'
class EblcIndexSubTable(object):
def __init__(self, data, ttFont):
self.data = data
self.ttFont = ttFont
# TODO Currently non-lazy decompiling doesn't work for this class...
#if not ttFont.lazy:
# self.decompile()
# del self.data, self.ttFont
def __getattr__(self, attr):
# Allow lazy decompile.
if attr[:2] == '__':
raise AttributeError(attr)
if not hasattr(self, "data"):
raise AttributeError(attr)
self.decompile()
del self.data, self.ttFont
return getattr(self, attr)
# This method just takes care of the indexSubHeader. Implementing subclasses
# should call it to compile the indexSubHeader and then continue compiling
# the remainder of their unique format.
def compile(self, ttFont):
return struct.pack(indexSubHeaderFormat, self.indexFormat, self.imageFormat, self.imageDataOffset)
# Creates the XML for bitmap glyphs. Each index sub table basically makes
# the same XML except for specific metric information that is written
# out via a method call that a subclass implements optionally.
def toXML(self, writer, ttFont):
writer.begintag(self.__class__.__name__, [
('imageFormat', self.imageFormat),
('firstGlyphIndex', self.firstGlyphIndex),
('lastGlyphIndex', self.lastGlyphIndex),
])
writer.newline()
self.writeMetrics(writer, ttFont)
# Write out the names as that's all that's needed to rebuild etc.
# For font debugging of consecutive formats the ids are also written.
# The ids are not read when moving from the XML format.
glyphIds = map(ttFont.getGlyphID, self.names)
for glyphName, glyphId in zip(self.names, glyphIds):
writer.simpletag('glyphLoc', name=glyphName, id=glyphId)
writer.newline()
writer.endtag(self.__class__.__name__)
writer.newline()
def fromXML(self, name, attrs, content, ttFont):
# Read all the attributes. Even though the glyph indices are
# recalculated, they are still read in case there needs to
# be an immediate export of the data.
self.imageFormat = safeEval(attrs['imageFormat'])
self.firstGlyphIndex = safeEval(attrs['firstGlyphIndex'])
self.lastGlyphIndex = safeEval(attrs['lastGlyphIndex'])
self.readMetrics(name, attrs, content, ttFont)
self.names = []
for element in content:
if not isinstance(element, tuple):
continue
name, attrs, content = element
if name == 'glyphLoc':
self.names.append(attrs['name'])
# A helper method that writes the metrics for the index sub table. It also
# is responsible for writing the image size for fixed size data since fixed
# size is not recalculated on compile. Default behavior is to do nothing.
def writeMetrics(self, writer, ttFont):
pass
# A helper method that is the inverse of writeMetrics.
def readMetrics(self, name, attrs, content, ttFont):
pass
# This method is for fixed glyph data sizes. There are formats where
# the glyph data size is fixed but the glyphs are actually composites. To handle
# this the font spec in indexSubTable makes the data the size of the
# fixed size by padding the component arrays. This function abstracts
# out this padding process. Input is data unpadded. Output is data
# padded only in fixed formats. Default behavior is to return the data.
def padBitmapData(self, data):
return data
# Remove any of the glyph locations and names that are flagged as skipped.
# This only occurs in formats {1,3}.
def removeSkipGlyphs(self):
# Determines if a name, location pair is a valid data location.
# Skip glyphs are marked when the size is equal to zero.
def isValidLocation(args):
(name, (startByte, endByte)) = args
return startByte < endByte
# Remove all skip glyphs.
dataPairs = list(filter(isValidLocation, zip(self.names, self.locations)))
self.names, self.locations = list(map(list, zip(*dataPairs)))
# A closure for creating a custom mixin. This is done because formats 1 and 3
# are very similar. The only difference between them is the size per offset
# value. Code put in here should handle both cases generally.
def _createOffsetArrayIndexSubTableMixin(formatStringForDataType):
# Prep the data size for the offset array data format.
dataFormat = '>'+formatStringForDataType
offsetDataSize = struct.calcsize(dataFormat)
class OffsetArrayIndexSubTableMixin(object):
def decompile(self):
numGlyphs = self.lastGlyphIndex - self.firstGlyphIndex + 1
indexingOffsets = [glyphIndex * offsetDataSize for glyphIndex in range(numGlyphs+2)]
indexingLocations = zip(indexingOffsets, indexingOffsets[1:])
offsetArray = [struct.unpack(dataFormat, self.data[slice(*loc)])[0] for loc in indexingLocations]
glyphIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex+1))
modifiedOffsets = [offset + self.imageDataOffset for offset in offsetArray]
self.locations = list(zip(modifiedOffsets, modifiedOffsets[1:]))
self.names = list(map(self.ttFont.getGlyphName, glyphIds))
self.removeSkipGlyphs()
def compile(self, ttFont):
# First make sure that all the data lines up properly. Formats 1 and 3
# must have all its data lined up consecutively. If not this will fail.
for curLoc, nxtLoc in zip(self.locations, self.locations[1:]):
assert curLoc[1] == nxtLoc[0], "Data must be consecutive in indexSubTable offset formats"
glyphIds = list(map(ttFont.getGlyphID, self.names))
# Make sure that all ids are sorted strictly increasing.
assert all(glyphIds[i] < glyphIds[i+1] for i in range(len(glyphIds)-1))
# Run a simple algorithm to add skip glyphs to the data locations at
# the places where an id is not present.
idQueue = deque(glyphIds)
locQueue = deque(self.locations)
allGlyphIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex+1))
allLocations = []
for curId in allGlyphIds:
if curId != idQueue[0]:
allLocations.append((locQueue[0][0], locQueue[0][0]))
else:
idQueue.popleft()
allLocations.append(locQueue.popleft())
# Now that all the locations are collected, pack them appropriately into
# offsets. This is the form where offset[i] is the location and
# offset[i+1]-offset[i] is the size of the data location.
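# For example (illustrative numbers only): allLocations of
# [(100, 140), (140, 150)] collapses to offsets [100, 140, 150],
# i.e. three boundaries describing two glyphs of 40 and 10 bytes.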
offsets = list(allLocations[0]) + [loc[1] for loc in allLocations[1:]]
# Image data offset must be less than or equal to the minimum of locations.
# This offset may change the value for round tripping but is safer and
# allows imageDataOffset to not be required to be in the XML version.
self.imageDataOffset = min(offsets)
offsetArray = [offset - self.imageDataOffset for offset in offsets]
dataList = [EblcIndexSubTable.compile(self, ttFont)]
dataList += [struct.pack(dataFormat, offsetValue) for offsetValue in offsetArray]
# Take care of any padding issues. Only occurs in format 3.
if offsetDataSize * len(dataList) % 4 != 0:
dataList.append(struct.pack(dataFormat, 0))
return bytesjoin(dataList)
return OffsetArrayIndexSubTableMixin
# A Mixin for functionality shared between the different kinds
# of fixed-size data handling. Both kinds have big metrics so
# that kind of special processing is also handled in this mixin.
class FixedSizeIndexSubTableMixin(object):
def writeMetrics(self, writer, ttFont):
writer.simpletag('imageSize', value=self.imageSize)
writer.newline()
self.metrics.toXML(writer, ttFont)
def readMetrics(self, name, attrs, content, ttFont):
for element in content:
if not isinstance(element, tuple):
continue
name, attrs, content = element
if name == 'imageSize':
self.imageSize = safeEval(attrs['value'])
elif name == BigGlyphMetrics.__name__:
self.metrics = BigGlyphMetrics()
self.metrics.fromXML(name, attrs, content, ttFont)
elif name == SmallGlyphMetrics.__name__:
print("Warning: SmallGlyphMetrics being ignored in format %d." % self.indexFormat)
def padBitmapData(self, data):
# Make sure that the data isn't bigger than the fixed size.
assert len(data) <= self.imageSize, "Data in indexSubTable format %d must be less than the fixed size." % self.indexFormat
# Pad the data so that it matches the fixed size.
pad = (self.imageSize - len(data)) * b'\0'
return data + pad
class eblc_index_sub_table_1(_createOffsetArrayIndexSubTableMixin('L'), EblcIndexSubTable):
pass
class eblc_index_sub_table_2(FixedSizeIndexSubTableMixin, EblcIndexSubTable):
def decompile(self):
(self.imageSize,) = struct.unpack(">L", self.data[:4])
self.metrics = BigGlyphMetrics()
sstruct.unpack2(bigGlyphMetricsFormat, self.data[4:], self.metrics)
glyphIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex+1))
offsets = [self.imageSize * i + self.imageDataOffset for i in range(len(glyphIds)+1)]
self.locations = list(zip(offsets, offsets[1:]))
self.names = list(map(self.ttFont.getGlyphName, glyphIds))
def compile(self, ttFont):
glyphIds = list(map(ttFont.getGlyphID, self.names))
# Make sure all the ids are consecutive. This is required by Format 2.
assert glyphIds == list(range(self.firstGlyphIndex, self.lastGlyphIndex+1)), "Format 2 ids must be consecutive."
self.imageDataOffset = min(loc[0] for loc in self.locations)  # min start offset (py2/py3 safe; zip() is lazy on py3)
dataList = [EblcIndexSubTable.compile(self, ttFont)]
dataList.append(struct.pack(">L", self.imageSize))
dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics))
return bytesjoin(dataList)
class eblc_index_sub_table_3(_createOffsetArrayIndexSubTableMixin('H'), EblcIndexSubTable):
pass
class eblc_index_sub_table_4(EblcIndexSubTable):
def decompile(self):
(numGlyphs,) = struct.unpack(">L", self.data[:4])
data = self.data[4:]
indexingOffsets = [glyphIndex * codeOffsetPairSize for glyphIndex in range(numGlyphs+2)]
indexingLocations = zip(indexingOffsets, indexingOffsets[1:])
glyphArray = [struct.unpack(codeOffsetPairFormat, data[slice(*loc)]) for loc in indexingLocations]
glyphIds, offsets = list(map(list, zip(*glyphArray)))
# There is one glyph id too many. Get rid of the last one.
glyphIds.pop()
offsets = [offset + self.imageDataOffset for offset in offsets]
self.locations = list(zip(offsets, offsets[1:]))
self.names = list(map(self.ttFont.getGlyphName, glyphIds))
def compile(self, ttFont):
# First make sure that all the data lines up properly. Format 4
# must have all its data lined up consecutively. If not this will fail.
for curLoc, nxtLoc in zip(self.locations, self.locations[1:]):
assert curLoc[1] == nxtLoc[0], "Data must be consecutive in indexSubTable format 4"
offsets = list(self.locations[0]) + [loc[1] for loc in self.locations[1:]]
# Image data offset must be less than or equal to the minimum of locations.
# Resetting this offset may change the value for round tripping but is safer
# and allows imageDataOffset to not be required to be in the XML version.
self.imageDataOffset = min(offsets)
offsets = [offset - self.imageDataOffset for offset in offsets]
glyphIds = list(map(ttFont.getGlyphID, self.names))
# Create an iterator over the ids plus a padding value.
idsPlusPad = list(itertools.chain(glyphIds, [0]))
dataList = [EblcIndexSubTable.compile(self, ttFont)]
dataList.append(struct.pack(">L", len(glyphIds)))
tmp = [struct.pack(codeOffsetPairFormat, *cop) for cop in zip(idsPlusPad, offsets)]
dataList += tmp
data = bytesjoin(dataList)
return data
class eblc_index_sub_table_5(FixedSizeIndexSubTableMixin, EblcIndexSubTable):
def decompile(self):
self.origDataLen = 0
(self.imageSize,) = struct.unpack(">L", self.data[:4])
data = self.data[4:]
self.metrics, data = sstruct.unpack2(bigGlyphMetricsFormat, data, BigGlyphMetrics())
(numGlyphs,) = struct.unpack(">L", data[:4])
data = data[4:]
glyphIds = [struct.unpack(">H", data[2*i:2*(i+1)])[0] for i in range(numGlyphs)]
offsets = [self.imageSize * i + self.imageDataOffset for i in range(len(glyphIds)+1)]
self.locations = list(zip(offsets, offsets[1:]))
self.names = list(map(self.ttFont.getGlyphName, glyphIds))
def compile(self, ttFont):
self.imageDataOffset = min(loc[0] for loc in self.locations)  # min start offset (py2/py3 safe; zip() is lazy on py3)
dataList = [EblcIndexSubTable.compile(self, ttFont)]
dataList.append(struct.pack(">L", self.imageSize))
dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics))
glyphIds = list(map(ttFont.getGlyphID, self.names))
dataList.append(struct.pack(">L", len(glyphIds)))
dataList += [struct.pack(">H", curId) for curId in glyphIds]
if len(glyphIds) % 2 == 1:
dataList.append(struct.pack(">H", 0))
return bytesjoin(dataList)
# Dictionary of indexFormat to the class representing that format.
eblc_sub_table_classes = {
1: eblc_index_sub_table_1,
2: eblc_index_sub_table_2,
3: eblc_index_sub_table_3,
4: eblc_index_sub_table_4,
5: eblc_index_sub_table_5,
}
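# Illustrative sketch (not part of fontTools): once a bitmap font is loaded
# with TTFont, the structures defined above are reachable as font['EBLC'].
# 'bitmap_font.ttf' is a placeholder path, not a file shipped with this code.
if __name__ == '__main__':
    from fontTools.ttLib import TTFont
    font = TTFont('bitmap_font.ttf')  # any font containing EBLC/EBDT tables
    for strike in font['EBLC'].strikes:
        size = strike.bitmapSizeTable
        print('%dx%d ppem, %d bpp, index formats %r' % (
            size.ppemX, size.ppemY, size.bitDepth,
            [ist.indexFormat for ist in strike.indexSubTables]))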
| apache-2.0 | 2,088,799,548,908,137,500 | 39.666126 | 139 | 0.740544 | false |
tjanez/ansible | lib/ansible/plugins/terminal/iosxr.py | 19 | 1850 | #
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import json
from ansible.plugins.terminal import TerminalBase
from ansible.errors import AnsibleConnectionFailure
class TerminalModule(TerminalBase):
terminal_prompts_re = [
re.compile(r"[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:>|#) ?$"),
re.compile(r"\[\w+\@[\w\-\.]+(?: [^\]])\] ?[>#\$] ?$"),
re.compile(r']]>]]>[\r\n]?')
]
terminal_errors_re = [
re.compile(r"% ?Error"),
re.compile(r"% ?Bad secret"),
re.compile(r"invalid input", re.I),
re.compile(r"(?:incomplete|ambiguous) command", re.I),
re.compile(r"connection timed out", re.I),
re.compile(r"[^\r\n]+ not found", re.I),
re.compile(r"'[^']' +returned error code: ?\d+"),
]
supports_multiplexing = False
def on_open_shell(self):
try:
for cmd in ['terminal length 0', 'terminal exec prompt no-timestamp']:
self._connection.exec_command(cmd)
except AnsibleConnectionFailure:
raise AnsibleConnectionFailure('unable to set terminal parameters')
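# Illustrative sketch (not part of the plugin): the prompt patterns above are
# intended to match typical IOS-XR CLI prompts. The sample prompt strings
# below are invented for demonstration.
if __name__ == '__main__':
    samples = ['RP/0/RSP0/CPU0:ios#', 'router(config-if)#', '% Error: bad command']
    for sample in samples:
        matched = any(regex.search(sample) for regex in TerminalModule.terminal_prompts_re)
        print('%-30s prompt=%s' % (sample, matched))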
| gpl-3.0 | -3,848,447,093,796,002,000 | 33.259259 | 82 | 0.637838 | false |
knowsis/django | tests/mail/tests.py | 48 | 36821 | # coding: utf-8
from __future__ import unicode_literals
import asyncore
from email.mime.text import MIMEText
import os
import shutil
import smtpd
import sys
import tempfile
import threading
from django.core import mail
from django.core.mail import (EmailMessage, mail_admins, mail_managers,
EmailMultiAlternatives, send_mail, send_mass_mail)
from django.core.mail.backends import console, dummy, locmem, filebased, smtp
from django.core.mail.message import BadHeaderError
from django.test import SimpleTestCase
from django.test.utils import override_settings
from django.utils.encoding import force_text, force_bytes
from django.utils.six import PY3, StringIO, binary_type
from django.utils.translation import ugettext_lazy
if PY3:
from email.utils import parseaddr
from email import message_from_bytes, message_from_binary_file
else:
from email.Utils import parseaddr
from email import (message_from_string as message_from_bytes,
message_from_file as message_from_binary_file)
class HeadersCheckMixin(object):
def assertMessageHasHeaders(self, message, headers):
"""
Check that :param message: has all :param headers: headers.
:param message: can be an instance of an email.Message subclass or a
string with the contents of an email message.
:param headers: should be a set of (header-name, header-value) tuples.
"""
if isinstance(message, binary_type):
message = message_from_bytes(message)
msg_headers = set(message.items())
self.assertTrue(headers.issubset(msg_headers), msg='Message is missing '
'the following headers: %s' % (headers - msg_headers),)
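# Illustrative aside (not part of the test suite): the helper above is a plain
# set-subset check over message.items(); the MIMEText message below is a
# made-up sample showing the same idea standalone.
if __name__ == '__main__':
    sample = MIMEText('body')
    sample['Subject'] = 'hello'
    wanted = {('Subject', 'hello'), ('Content-Transfer-Encoding', '7bit')}
    print(wanted.issubset(set(sample.items())))  # True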
class MailTests(HeadersCheckMixin, SimpleTestCase):
"""
Non-backend specific tests.
"""
def test_ascii(self):
email = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]'])
message = email.message()
self.assertEqual(message['Subject'], 'Subject')
self.assertEqual(message.get_payload(), 'Content')
self.assertEqual(message['From'], '[email protected]')
self.assertEqual(message['To'], '[email protected]')
def test_multiple_recipients(self):
email = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]', '[email protected]'])
message = email.message()
self.assertEqual(message['Subject'], 'Subject')
self.assertEqual(message.get_payload(), 'Content')
self.assertEqual(message['From'], '[email protected]')
self.assertEqual(message['To'], '[email protected], [email protected]')
def test_cc(self):
"""Regression test for #7722"""
email = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]'], cc=['[email protected]'])
message = email.message()
self.assertEqual(message['Cc'], '[email protected]')
self.assertEqual(email.recipients(), ['[email protected]', '[email protected]'])
# Test multiple CC with multiple To
email = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]', '[email protected]'], cc=['[email protected]', '[email protected]'])
message = email.message()
self.assertEqual(message['Cc'], '[email protected], [email protected]')
self.assertEqual(email.recipients(), ['[email protected]', '[email protected]', '[email protected]', '[email protected]'])
# Testing with Bcc
email = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]', '[email protected]'], cc=['[email protected]', '[email protected]'], bcc=['[email protected]'])
message = email.message()
self.assertEqual(message['Cc'], '[email protected], [email protected]')
self.assertEqual(email.recipients(), ['[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]'])
def test_recipients_as_tuple(self):
email = EmailMessage('Subject', 'Content', '[email protected]', ('[email protected]', '[email protected]'), cc=('[email protected]', '[email protected]'), bcc=('[email protected]',))
message = email.message()
self.assertEqual(message['Cc'], '[email protected], [email protected]')
self.assertEqual(email.recipients(), ['[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]'])
def test_header_injection(self):
email = EmailMessage('Subject\nInjection Test', 'Content', '[email protected]', ['[email protected]'])
self.assertRaises(BadHeaderError, email.message)
email = EmailMessage(ugettext_lazy('Subject\nInjection Test'), 'Content', '[email protected]', ['[email protected]'])
self.assertRaises(BadHeaderError, email.message)
def test_space_continuation(self):
"""
Test for space continuation character in long (ascii) subject headers (#7747)
"""
email = EmailMessage('Long subject lines that get wrapped should contain a space continuation character to get expected behavior in Outlook and Thunderbird', 'Content', '[email protected]', ['[email protected]'])
message = email.message()
# Note that in Python 3, maximum line length has increased from 76 to 78
self.assertEqual(message['Subject'].encode(), b'Long subject lines that get wrapped should contain a space continuation\n character to get expected behavior in Outlook and Thunderbird')
def test_message_header_overrides(self):
"""
Specifying dates or message-ids in the extra headers overrides the
default values (#9233)
"""
headers = {"date": "Fri, 09 Nov 2001 01:08:47 -0000", "Message-ID": "foo"}
email = EmailMessage('subject', 'content', '[email protected]', ['[email protected]'], headers=headers)
self.assertEqual(sorted(email.message().items()), [
('Content-Transfer-Encoding', '7bit'),
('Content-Type', 'text/plain; charset="utf-8"'),
('From', '[email protected]'),
('MIME-Version', '1.0'),
('Message-ID', 'foo'),
('Subject', 'subject'),
('To', '[email protected]'),
('date', 'Fri, 09 Nov 2001 01:08:47 -0000'),
])
def test_from_header(self):
"""
Make sure we can manually set the From header (#9214)
"""
email = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]'], headers={'From': '[email protected]'})
message = email.message()
self.assertEqual(message['From'], '[email protected]')
def test_to_header(self):
"""
Make sure we can manually set the To header (#17444)
"""
email = EmailMessage('Subject', 'Content', '[email protected]',
['[email protected]', '[email protected]'],
headers={'To': '[email protected]'})
message = email.message()
self.assertEqual(message['To'], '[email protected]')
self.assertEqual(email.to, ['[email protected]', '[email protected]'])
# If we don't set the To header manually, it should default to the `to` argument to the constructor
email = EmailMessage('Subject', 'Content', '[email protected]',
['[email protected]', '[email protected]'])
message = email.message()
self.assertEqual(message['To'], '[email protected], [email protected]')
self.assertEqual(email.to, ['[email protected]', '[email protected]'])
def test_multiple_message_call(self):
"""
Regression for #13259 - Make sure that headers are not changed when
calling EmailMessage.message()
"""
email = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]'], headers={'From': '[email protected]'})
message = email.message()
self.assertEqual(message['From'], '[email protected]')
message = email.message()
self.assertEqual(message['From'], '[email protected]')
def test_unicode_address_header(self):
"""
Regression for #11144 - When a to/from/cc header contains unicode,
make sure the email addresses are parsed correctly (especially with
regards to commas)
"""
email = EmailMessage('Subject', 'Content', '[email protected]', ['"Firstname Sürname" <[email protected]>', '[email protected]'])
self.assertEqual(email.message()['To'], '=?utf-8?q?Firstname_S=C3=BCrname?= <[email protected]>, [email protected]')
email = EmailMessage('Subject', 'Content', '[email protected]', ['"Sürname, Firstname" <[email protected]>', '[email protected]'])
self.assertEqual(email.message()['To'], '=?utf-8?q?S=C3=BCrname=2C_Firstname?= <[email protected]>, [email protected]')
def test_unicode_headers(self):
email = EmailMessage("Gżegżółka", "Content", "[email protected]", ["[email protected]"],
headers={"Sender": '"Firstname Sürname" <[email protected]>',
"Comments": 'My Sürname is non-ASCII'})
message = email.message()
self.assertEqual(message['Subject'], '=?utf-8?b?R8W8ZWfFvMOzxYJrYQ==?=')
self.assertEqual(message['Sender'], '=?utf-8?q?Firstname_S=C3=BCrname?= <[email protected]>')
self.assertEqual(message['Comments'], '=?utf-8?q?My_S=C3=BCrname_is_non-ASCII?=')
def test_safe_mime_multipart(self):
"""
Make sure headers can be set with a different encoding than utf-8 in
SafeMIMEMultipart as well
"""
headers = {"Date": "Fri, 09 Nov 2001 01:08:47 -0000", "Message-ID": "foo"}
subject, from_email, to = 'hello', '[email protected]', '"Sürname, Firstname" <[email protected]>'
text_content = 'This is an important message.'
html_content = '<p>This is an <strong>important</strong> message.</p>'
msg = EmailMultiAlternatives('Message from Firstname Sürname', text_content, from_email, [to], headers=headers)
msg.attach_alternative(html_content, "text/html")
msg.encoding = 'iso-8859-1'
self.assertEqual(msg.message()['To'], '=?iso-8859-1?q?S=FCrname=2C_Firstname?= <[email protected]>')
self.assertEqual(msg.message()['Subject'], '=?iso-8859-1?q?Message_from_Firstname_S=FCrname?=')
def test_encoding(self):
"""
Regression for #12791 - Encode body correctly with other encodings
than utf-8
"""
email = EmailMessage('Subject', 'Firstname Sürname is a great guy.', '[email protected]', ['[email protected]'])
email.encoding = 'iso-8859-1'
message = email.message()
self.assertTrue(message.as_string().startswith('Content-Type: text/plain; charset="iso-8859-1"\nMIME-Version: 1.0\nContent-Transfer-Encoding: quoted-printable\nSubject: Subject\nFrom: [email protected]\nTo: [email protected]'))
self.assertEqual(message.get_payload(), 'Firstname S=FCrname is a great guy.')
# Make sure MIME attachments also works correctly with other encodings than utf-8
text_content = 'Firstname Sürname is a great guy.'
html_content = '<p>Firstname Sürname is a <strong>great</strong> guy.</p>'
msg = EmailMultiAlternatives('Subject', text_content, '[email protected]', ['[email protected]'])
msg.encoding = 'iso-8859-1'
msg.attach_alternative(html_content, "text/html")
payload0 = msg.message().get_payload(0)
self.assertMessageHasHeaders(payload0, set((
('MIME-Version', '1.0'),
('Content-Type', 'text/plain; charset="iso-8859-1"'),
('Content-Transfer-Encoding', 'quoted-printable'))))
self.assertTrue(payload0.as_bytes().endswith(b'\n\nFirstname S=FCrname is a great guy.'))
payload1 = msg.message().get_payload(1)
self.assertMessageHasHeaders(payload1, set((
('MIME-Version', '1.0'),
('Content-Type', 'text/html; charset="iso-8859-1"'),
('Content-Transfer-Encoding', 'quoted-printable'))))
self.assertTrue(payload1.as_bytes().endswith(b'\n\n<p>Firstname S=FCrname is a <strong>great</strong> guy.</p>'))
def test_attachments(self):
"""Regression test for #9367"""
headers = {"Date": "Fri, 09 Nov 2001 01:08:47 -0000", "Message-ID": "foo"}
subject, from_email, to = 'hello', '[email protected]', '[email protected]'
text_content = 'This is an important message.'
html_content = '<p>This is an <strong>important</strong> message.</p>'
msg = EmailMultiAlternatives(subject, text_content, from_email, [to], headers=headers)
msg.attach_alternative(html_content, "text/html")
msg.attach("an attachment.pdf", b"%PDF-1.4.%...", mimetype="application/pdf")
msg_bytes = msg.message().as_bytes()
message = message_from_bytes(msg_bytes)
self.assertTrue(message.is_multipart())
self.assertEqual(message.get_content_type(), 'multipart/mixed')
self.assertEqual(message.get_default_type(), 'text/plain')
payload = message.get_payload()
self.assertEqual(payload[0].get_content_type(), 'multipart/alternative')
self.assertEqual(payload[1].get_content_type(), 'application/pdf')
def test_non_ascii_attachment_filename(self):
"""Regression test for #14964"""
headers = {"Date": "Fri, 09 Nov 2001 01:08:47 -0000", "Message-ID": "foo"}
subject, from_email, to = 'hello', '[email protected]', '[email protected]'
content = 'This is the message.'
msg = EmailMessage(subject, content, from_email, [to], headers=headers)
# Unicode in file name
msg.attach("une pièce jointe.pdf", b"%PDF-1.4.%...", mimetype="application/pdf")
msg_bytes = msg.message().as_bytes()
message = message_from_bytes(msg_bytes)
payload = message.get_payload()
self.assertEqual(payload[1].get_filename(), 'une pièce jointe.pdf')
def test_dummy_backend(self):
"""
Make sure that the dummy backend returns the correct number of sent messages
"""
connection = dummy.EmailBackend()
email = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]'], headers={'From': '[email protected]'})
self.assertEqual(connection.send_messages([email, email, email]), 3)
def test_arbitrary_keyword(self):
"""
Make sure that get_connection() accepts arbitrary keyword arguments that might be
used with custom backends.
"""
c = mail.get_connection(fail_silently=True, foo='bar')
self.assertTrue(c.fail_silently)
def test_custom_backend(self):
"""Test custom backend defined in this suite."""
conn = mail.get_connection('mail.custombackend.EmailBackend')
self.assertTrue(hasattr(conn, 'test_outbox'))
email = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]'], headers={'From': '[email protected]'})
conn.send_messages([email])
self.assertEqual(len(conn.test_outbox), 1)
def test_backend_arg(self):
"""Test backend argument of mail.get_connection()"""
self.assertIsInstance(mail.get_connection('django.core.mail.backends.smtp.EmailBackend'), smtp.EmailBackend)
self.assertIsInstance(mail.get_connection('django.core.mail.backends.locmem.EmailBackend'), locmem.EmailBackend)
self.assertIsInstance(mail.get_connection('django.core.mail.backends.dummy.EmailBackend'), dummy.EmailBackend)
self.assertIsInstance(mail.get_connection('django.core.mail.backends.console.EmailBackend'), console.EmailBackend)
tmp_dir = tempfile.mkdtemp()
try:
self.assertIsInstance(mail.get_connection('django.core.mail.backends.filebased.EmailBackend', file_path=tmp_dir), filebased.EmailBackend)
finally:
shutil.rmtree(tmp_dir)
self.assertIsInstance(mail.get_connection(), locmem.EmailBackend)
@override_settings(
EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend',
ADMINS=[('nobody', '[email protected]')],
MANAGERS=[('nobody', '[email protected]')])
def test_connection_arg(self):
"""Test connection argument to send_mail(), et. al."""
mail.outbox = []
# Send using non-default connection
connection = mail.get_connection('mail.custombackend.EmailBackend')
send_mail('Subject', 'Content', '[email protected]', ['[email protected]'], connection=connection)
self.assertEqual(mail.outbox, [])
self.assertEqual(len(connection.test_outbox), 1)
self.assertEqual(connection.test_outbox[0].subject, 'Subject')
connection = mail.get_connection('mail.custombackend.EmailBackend')
send_mass_mail([
('Subject1', 'Content1', '[email protected]', ['[email protected]']),
('Subject2', 'Content2', '[email protected]', ['[email protected]']),
], connection=connection)
self.assertEqual(mail.outbox, [])
self.assertEqual(len(connection.test_outbox), 2)
self.assertEqual(connection.test_outbox[0].subject, 'Subject1')
self.assertEqual(connection.test_outbox[1].subject, 'Subject2')
connection = mail.get_connection('mail.custombackend.EmailBackend')
mail_admins('Admin message', 'Content', connection=connection)
self.assertEqual(mail.outbox, [])
self.assertEqual(len(connection.test_outbox), 1)
self.assertEqual(connection.test_outbox[0].subject, '[Django] Admin message')
connection = mail.get_connection('mail.custombackend.EmailBackend')
mail_managers('Manager message', 'Content', connection=connection)
self.assertEqual(mail.outbox, [])
self.assertEqual(len(connection.test_outbox), 1)
self.assertEqual(connection.test_outbox[0].subject, '[Django] Manager message')
def test_dont_mangle_from_in_body(self):
# Regression for #13433 - Make sure that EmailMessage doesn't mangle
# 'From ' in message body.
email = EmailMessage('Subject', 'From the future', '[email protected]', ['[email protected]'], headers={'From': '[email protected]'})
self.assertFalse(b'>From the future' in email.message().as_bytes())
def test_dont_base64_encode(self):
# Ticket #3472
# Shouldn't use Base64 encoding at all
msg = EmailMessage('Subject', 'UTF-8 encoded body', '[email protected]', ['[email protected]'], headers={'From': '[email protected]'})
self.assertFalse(b'Content-Transfer-Encoding: base64' in msg.message().as_bytes())
# Ticket #11212
# Shouldn't use quoted printable, should detect it can represent content with 7 bit data
msg = EmailMessage('Subject', 'Body with only ASCII characters.', '[email protected]', ['[email protected]'], headers={'From': '[email protected]'})
s = msg.message().as_bytes()
self.assertFalse(b'Content-Transfer-Encoding: quoted-printable' in s)
self.assertTrue(b'Content-Transfer-Encoding: 7bit' in s)
# Shouldn't use quoted printable, should detect it can represent content with 8 bit data
msg = EmailMessage('Subject', 'Body with latin characters: àáä.', '[email protected]', ['[email protected]'], headers={'From': '[email protected]'})
s = msg.message().as_bytes()
self.assertFalse(b'Content-Transfer-Encoding: quoted-printable' in s)
self.assertTrue(b'Content-Transfer-Encoding: 8bit' in s)
msg = EmailMessage('Subject', 'Body with non latin characters: А Б В Г Д Е Ж Ѕ З И І К Л М Н О П.', '[email protected]', ['[email protected]'], headers={'From': '[email protected]'})
s = msg.message().as_bytes()
self.assertFalse(b'Content-Transfer-Encoding: quoted-printable' in s)
self.assertTrue(b'Content-Transfer-Encoding: 8bit' in s)
class BaseEmailBackendTests(object):
email_backend = None
def setUp(self):
self.settings_override = override_settings(EMAIL_BACKEND=self.email_backend)
self.settings_override.enable()
def tearDown(self):
self.settings_override.disable()
def assertStartsWith(self, first, second):
if not first.startswith(second):
self.longMessage = True
self.assertEqual(first[:len(second)], second, "First string doesn't start with the second.")
def get_mailbox_content(self):
raise NotImplementedError
def flush_mailbox(self):
raise NotImplementedError
def get_the_message(self):
mailbox = self.get_mailbox_content()
self.assertEqual(len(mailbox), 1,
"Expected exactly one message, got %d.\n%r" % (len(mailbox), [
m.as_string() for m in mailbox]))
return mailbox[0]
def test_send(self):
email = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]'])
num_sent = mail.get_connection().send_messages([email])
self.assertEqual(num_sent, 1)
message = self.get_the_message()
self.assertEqual(message["subject"], "Subject")
self.assertEqual(message.get_payload(), "Content")
self.assertEqual(message["from"], "[email protected]")
self.assertEqual(message.get_all("to"), ["[email protected]"])
def test_send_unicode(self):
email = EmailMessage('Chère maman', 'Je t\'aime très fort', '[email protected]', ['[email protected]'])
num_sent = mail.get_connection().send_messages([email])
self.assertEqual(num_sent, 1)
message = self.get_the_message()
self.assertEqual(message["subject"], '=?utf-8?q?Ch=C3=A8re_maman?=')
self.assertEqual(force_text(message.get_payload(decode=True)), 'Je t\'aime très fort')
def test_send_many(self):
email1 = EmailMessage('Subject', 'Content1', '[email protected]', ['[email protected]'])
email2 = EmailMessage('Subject', 'Content2', '[email protected]', ['[email protected]'])
num_sent = mail.get_connection().send_messages([email1, email2])
self.assertEqual(num_sent, 2)
messages = self.get_mailbox_content()
self.assertEqual(len(messages), 2)
self.assertEqual(messages[0].get_payload(), "Content1")
self.assertEqual(messages[1].get_payload(), "Content2")
def test_send_verbose_name(self):
email = EmailMessage("Subject", "Content", '"Firstname Sürname" <[email protected]>',
["[email protected]"])
email.send()
message = self.get_the_message()
self.assertEqual(message["subject"], "Subject")
self.assertEqual(message.get_payload(), "Content")
self.assertEqual(message["from"], "=?utf-8?q?Firstname_S=C3=BCrname?= <[email protected]>")
@override_settings(MANAGERS=[('nobody', '[email protected]')])
def test_html_mail_managers(self):
"""Test html_message argument to mail_managers"""
mail_managers('Subject', 'Content', html_message='HTML Content')
message = self.get_the_message()
self.assertEqual(message.get('subject'), '[Django] Subject')
self.assertEqual(message.get_all('to'), ['[email protected]'])
self.assertTrue(message.is_multipart())
self.assertEqual(len(message.get_payload()), 2)
self.assertEqual(message.get_payload(0).get_payload(), 'Content')
self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain')
self.assertEqual(message.get_payload(1).get_payload(), 'HTML Content')
self.assertEqual(message.get_payload(1).get_content_type(), 'text/html')
@override_settings(ADMINS=[('nobody', '[email protected]')])
def test_html_mail_admins(self):
"""Test html_message argument to mail_admins """
mail_admins('Subject', 'Content', html_message='HTML Content')
message = self.get_the_message()
self.assertEqual(message.get('subject'), '[Django] Subject')
self.assertEqual(message.get_all('to'), ['[email protected]'])
self.assertTrue(message.is_multipart())
self.assertEqual(len(message.get_payload()), 2)
self.assertEqual(message.get_payload(0).get_payload(), 'Content')
self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain')
self.assertEqual(message.get_payload(1).get_payload(), 'HTML Content')
self.assertEqual(message.get_payload(1).get_content_type(), 'text/html')
@override_settings(
ADMINS=[('nobody', '[email protected]')],
MANAGERS=[('nobody', '[email protected]')])
def test_manager_and_admin_mail_prefix(self):
"""
String prefix + lazy translated subject = bad output
Regression for #13494
"""
mail_managers(ugettext_lazy('Subject'), 'Content')
message = self.get_the_message()
self.assertEqual(message.get('subject'), '[Django] Subject')
self.flush_mailbox()
mail_admins(ugettext_lazy('Subject'), 'Content')
message = self.get_the_message()
self.assertEqual(message.get('subject'), '[Django] Subject')
@override_settings(ADMINS=(), MANAGERS=())
def test_empty_admins(self):
"""
Test that mail_admins/mail_managers don't connect to the mail server
if there are no recipients (#9383)
"""
mail_admins('hi', 'there')
self.assertEqual(self.get_mailbox_content(), [])
mail_managers('hi', 'there')
self.assertEqual(self.get_mailbox_content(), [])
def test_message_cc_header(self):
"""
Regression test for #7722
"""
email = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]'], cc=['[email protected]'])
mail.get_connection().send_messages([email])
message = self.get_the_message()
self.assertStartsWith(message.as_string(), 'MIME-Version: 1.0\nContent-Type: text/plain; charset="utf-8"\nContent-Transfer-Encoding: 7bit\nSubject: Subject\nFrom: [email protected]\nTo: [email protected]\nCc: [email protected]\nDate: ')
def test_idn_send(self):
"""
Regression test for #14301
"""
self.assertTrue(send_mail('Subject', 'Content', 'from@öäü.com', ['to@öäü.com']))
message = self.get_the_message()
self.assertEqual(message.get('subject'), 'Subject')
self.assertEqual(message.get('from'), '[email protected]')
self.assertEqual(message.get('to'), '[email protected]')
self.flush_mailbox()
m = EmailMessage('Subject', 'Content', 'from@öäü.com',
['to@öäü.com'], cc=['cc@öäü.com'])
m.send()
message = self.get_the_message()
self.assertEqual(message.get('subject'), 'Subject')
self.assertEqual(message.get('from'), '[email protected]')
self.assertEqual(message.get('to'), '[email protected]')
self.assertEqual(message.get('cc'), '[email protected]')
def test_recipient_without_domain(self):
"""
Regression test for #15042
"""
self.assertTrue(send_mail("Subject", "Content", "tester", ["django"]))
message = self.get_the_message()
self.assertEqual(message.get('subject'), 'Subject')
self.assertEqual(message.get('from'), "tester")
self.assertEqual(message.get('to'), "django")
def test_close_connection(self):
"""
Test that connection can be closed (even when not explicitly opened)
"""
conn = mail.get_connection(username='', password='')
try:
conn.close()
except Exception as e:
self.fail("close() unexpectedly raised an exception: %s" % e)
class LocmemBackendTests(BaseEmailBackendTests, SimpleTestCase):
email_backend = 'django.core.mail.backends.locmem.EmailBackend'
def get_mailbox_content(self):
return [m.message() for m in mail.outbox]
def flush_mailbox(self):
mail.outbox = []
def tearDown(self):
super(LocmemBackendTests, self).tearDown()
mail.outbox = []
def test_locmem_shared_messages(self):
"""
Make sure that the locmem backend populates the outbox.
"""
connection = locmem.EmailBackend()
connection2 = locmem.EmailBackend()
email = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]'], headers={'From': '[email protected]'})
connection.send_messages([email])
connection2.send_messages([email])
self.assertEqual(len(mail.outbox), 2)
def test_validate_multiline_headers(self):
# Ticket #18861 - Validate emails when using the locmem backend
with self.assertRaises(BadHeaderError):
send_mail('Subject\nMultiline', 'Content', '[email protected]', ['[email protected]'])
class FileBackendTests(BaseEmailBackendTests, SimpleTestCase):
email_backend = 'django.core.mail.backends.filebased.EmailBackend'
def setUp(self):
super(FileBackendTests, self).setUp()
self.tmp_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, self.tmp_dir)
self._settings_override = override_settings(EMAIL_FILE_PATH=self.tmp_dir)
self._settings_override.enable()
def tearDown(self):
self._settings_override.disable()
super(FileBackendTests, self).tearDown()
def flush_mailbox(self):
for filename in os.listdir(self.tmp_dir):
os.unlink(os.path.join(self.tmp_dir, filename))
def get_mailbox_content(self):
messages = []
for filename in os.listdir(self.tmp_dir):
with open(os.path.join(self.tmp_dir, filename), 'rb') as fp:
session = fp.read().split(force_bytes('\n' + ('-' * 79) + '\n', encoding='ascii'))
messages.extend(message_from_bytes(m) for m in session if m)
return messages
def test_file_sessions(self):
"""Make sure opening a connection creates a new file"""
msg = EmailMessage('Subject', 'Content', '[email protected]', ['[email protected]'], headers={'From': '[email protected]'})
connection = mail.get_connection()
connection.send_messages([msg])
self.assertEqual(len(os.listdir(self.tmp_dir)), 1)
with open(os.path.join(self.tmp_dir, os.listdir(self.tmp_dir)[0]), 'rb') as fp:
message = message_from_binary_file(fp)
self.assertEqual(message.get_content_type(), 'text/plain')
self.assertEqual(message.get('subject'), 'Subject')
self.assertEqual(message.get('from'), '[email protected]')
self.assertEqual(message.get('to'), '[email protected]')
connection2 = mail.get_connection()
connection2.send_messages([msg])
self.assertEqual(len(os.listdir(self.tmp_dir)), 2)
connection.send_messages([msg])
self.assertEqual(len(os.listdir(self.tmp_dir)), 2)
msg.connection = mail.get_connection()
self.assertTrue(connection.open())
msg.send()
self.assertEqual(len(os.listdir(self.tmp_dir)), 3)
msg.send()
self.assertEqual(len(os.listdir(self.tmp_dir)), 3)
connection.close()
class ConsoleBackendTests(HeadersCheckMixin, BaseEmailBackendTests, SimpleTestCase):
email_backend = 'django.core.mail.backends.console.EmailBackend'
def setUp(self):
super(ConsoleBackendTests, self).setUp()
self.__stdout = sys.stdout
self.stream = sys.stdout = StringIO()
def tearDown(self):
del self.stream
sys.stdout = self.__stdout
del self.__stdout
super(ConsoleBackendTests, self).tearDown()
def flush_mailbox(self):
self.stream = sys.stdout = StringIO()
def get_mailbox_content(self):
messages = self.stream.getvalue().split(str('\n' + ('-' * 79) + '\n'))
return [message_from_bytes(force_bytes(m)) for m in messages if m]
def test_console_stream_kwarg(self):
"""
Test that the console backend can be pointed at an arbitrary stream.
"""
s = StringIO()
connection = mail.get_connection('django.core.mail.backends.console.EmailBackend', stream=s)
send_mail('Subject', 'Content', '[email protected]', ['[email protected]'], connection=connection)
message = force_bytes(s.getvalue().split('\n' + ('-' * 79) + '\n')[0])
self.assertMessageHasHeaders(message, set((
('MIME-Version', '1.0'),
('Content-Type', 'text/plain; charset="utf-8"'),
('Content-Transfer-Encoding', '7bit'),
('Subject', 'Subject'),
('From', '[email protected]'),
('To', '[email protected]'))))
self.assertIn(b'\nDate: ', message)
class FakeSMTPServer(smtpd.SMTPServer, threading.Thread):
"""
Asyncore SMTP server wrapped into a thread. Based on DummyFTPServer from:
http://svn.python.org/view/python/branches/py3k/Lib/test/test_ftplib.py?revision=86061&view=markup
"""
def __init__(self, *args, **kwargs):
threading.Thread.__init__(self)
smtpd.SMTPServer.__init__(self, *args, **kwargs)
self._sink = []
self.active = False
self.active_lock = threading.Lock()
self.sink_lock = threading.Lock()
def process_message(self, peer, mailfrom, rcpttos, data):
if PY3:
data = data.encode('utf-8')
m = message_from_bytes(data)
maddr = parseaddr(m.get('from'))[1]
if mailfrom != maddr:
return "553 '%s' != '%s'" % (mailfrom, maddr)
with self.sink_lock:
self._sink.append(m)
def get_sink(self):
with self.sink_lock:
return self._sink[:]
def flush_sink(self):
with self.sink_lock:
self._sink[:] = []
def start(self):
assert not self.active
self.__flag = threading.Event()
threading.Thread.start(self)
self.__flag.wait()
def run(self):
self.active = True
self.__flag.set()
while self.active and asyncore.socket_map:
with self.active_lock:
asyncore.loop(timeout=0.1, count=1)
asyncore.close_all()
def stop(self):
if self.active:
self.active = False
self.join()
class SMTPBackendTests(BaseEmailBackendTests, SimpleTestCase):
email_backend = 'django.core.mail.backends.smtp.EmailBackend'
@classmethod
def setUpClass(cls):
cls.server = FakeSMTPServer(('127.0.0.1', 0), None)
cls._settings_override = override_settings(
EMAIL_HOST="127.0.0.1",
EMAIL_PORT=cls.server.socket.getsockname()[1])
cls._settings_override.enable()
cls.server.start()
@classmethod
def tearDownClass(cls):
cls._settings_override.disable()
cls.server.stop()
def setUp(self):
super(SMTPBackendTests, self).setUp()
self.server.flush_sink()
def tearDown(self):
self.server.flush_sink()
super(SMTPBackendTests, self).tearDown()
def flush_mailbox(self):
self.server.flush_sink()
def get_mailbox_content(self):
return self.server.get_sink()
@override_settings(EMAIL_HOST_USER="not empty username",
EMAIL_HOST_PASSWORD="not empty password")
def test_email_authentication_use_settings(self):
backend = smtp.EmailBackend()
self.assertEqual(backend.username, 'not empty username')
self.assertEqual(backend.password, 'not empty password')
@override_settings(EMAIL_HOST_USER="not empty username",
EMAIL_HOST_PASSWORD="not empty password")
def test_email_authentication_override_settings(self):
backend = smtp.EmailBackend(username='username', password='password')
self.assertEqual(backend.username, 'username')
self.assertEqual(backend.password, 'password')
@override_settings(EMAIL_HOST_USER="not empty username",
EMAIL_HOST_PASSWORD="not empty password")
def test_email_disabled_authentication(self):
backend = smtp.EmailBackend(username='', password='')
self.assertEqual(backend.username, '')
self.assertEqual(backend.password, '')
def test_server_stopped(self):
"""
Test that closing the backend while the SMTP server is stopped doesn't
raise an exception.
"""
backend = smtp.EmailBackend(username='', password='')
backend.open()
self.server.stop()
try:
backend.close()
except Exception as e:
self.fail("close() unexpectedly raised an exception: %s" % e)
| bsd-3-clause | 3,697,639,605,785,755,600 | 46.016624 | 237 | 0.6347 | false |
IONISx/edx-platform | openedx/core/lib/extract_tar.py | 135 | 2427 | """
Safe version of tarfile.extractall which does not extract any files that would
be, or symlink to a file that is, outside of the directory extracted in.
Adapted from:
http://stackoverflow.com/questions/10060069/safely-extract-zip-or-tar-using-python
"""
from os.path import abspath, realpath, dirname, join as joinpath
from django.core.exceptions import SuspiciousOperation
from django.conf import settings
import logging
log = logging.getLogger(__name__)
def resolved(rpath):
"""
Returns the canonical absolute path of `rpath`.
"""
return realpath(abspath(rpath))
def _is_bad_path(path, base):
"""
Is (the canonical absolute path of) `path` outside `base`?
"""
return not resolved(joinpath(base, path)).startswith(base)
def _is_bad_link(info, base):
"""
Does the file sym- or hard-link to files outside `base`?
"""
# Links are interpreted relative to the directory containing the link
tip = resolved(joinpath(base, dirname(info.name)))
return _is_bad_path(info.linkname, base=tip)
def safemembers(members, base):
"""
Check that all elements of a tar file are safe.
"""
base = resolved(base)
# check that we're not trying to import outside of the data_dir
if not base.startswith(resolved(settings.DATA_DIR)):
raise SuspiciousOperation("Attempted to import course outside of data dir")
for finfo in members:
if _is_bad_path(finfo.name, base):
log.debug("File %r is blocked (illegal path)", finfo.name)
raise SuspiciousOperation("Illegal path")
elif finfo.issym() and _is_bad_link(finfo, base):
log.debug("File %r is blocked: Hard link to %r", finfo.name, finfo.linkname)
raise SuspiciousOperation("Hard link")
elif finfo.islnk() and _is_bad_link(finfo, base):
log.debug("File %r is blocked: Symlink to %r", finfo.name,
finfo.linkname)
raise SuspiciousOperation("Symlink")
elif finfo.isdev():
log.debug("File %r is blocked: FIFO, device or character file",
finfo.name)
raise SuspiciousOperation("Dev file")
return members
def safetar_extractall(tar_file, path=".", members=None): # pylint: disable=unused-argument
"""
Safe version of `tar_file.extractall()`.
"""
return tar_file.extractall(path, safemembers(tar_file, path))
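# Example usage (an illustrative sketch; the archive name and target path below
# are hypothetical, not part of this module):
#
#     import tarfile
#     with tarfile.open("course_export.tar.gz") as tar:
#         safetar_extractall(tar, path="/edx/var/edxapp/data/course_import")
#
# Members that resolve outside the target directory (or outside DATA_DIR)
# raise SuspiciousOperation instead of being written to disk.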
| agpl-3.0 | 5,292,224,991,190,633,000 | 32.246575 | 92 | 0.660486 | false |
EliteTK/PyBot | Modules/requests/packages/urllib3/util/retry.py | 699 | 9924 | import time
import logging
from ..exceptions import (
ConnectTimeoutError,
MaxRetryError,
ProtocolError,
ReadTimeoutError,
ResponseError,
)
from ..packages import six
log = logging.getLogger(__name__)
class Retry(object):
""" Retry configuration.
Each retry attempt will create a new Retry object with updated values, so
they can be safely reused.
Retries can be defined as a default for a pool::
retries = Retry(connect=5, read=2, redirect=5)
http = PoolManager(retries=retries)
response = http.request('GET', 'http://example.com/')
Or per-request (which overrides the default for the pool)::
response = http.request('GET', 'http://example.com/', retries=Retry(10))
Retries can be disabled by passing ``False``::
response = http.request('GET', 'http://example.com/', retries=False)
Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless
retries are disabled, in which case the causing exception will be raised.
:param int total:
Total number of retries to allow. Takes precedence over other counts.
Set to ``None`` to remove this constraint and fall back on other
counts. It's a good idea to set this to some sensibly-high value to
account for unexpected edge cases and avoid infinite retry loops.
Set to ``0`` to fail on the first retry.
Set to ``False`` to disable and imply ``raise_on_redirect=False``.
:param int connect:
How many connection-related errors to retry on.
These are errors raised before the request is sent to the remote server,
which we assume has not triggered the server to process the request.
Set to ``0`` to fail on the first retry of this type.
:param int read:
How many times to retry on read errors.
These errors are raised after the request was sent to the server, so the
request may have side-effects.
Set to ``0`` to fail on the first retry of this type.
:param int redirect:
How many redirects to perform. Limit this to avoid infinite redirect
loops.
        A redirect is an HTTP response with a status code 301, 302, 303, 307 or
308.
Set to ``0`` to fail on the first retry of this type.
Set to ``False`` to disable and imply ``raise_on_redirect=False``.
:param iterable method_whitelist:
Set of uppercased HTTP method verbs that we should retry on.
By default, we only retry on methods which are considered to be
        idempotent (multiple requests with the same parameters end with the
same state). See :attr:`Retry.DEFAULT_METHOD_WHITELIST`.
:param iterable status_forcelist:
A set of HTTP status codes that we should force a retry on.
By default, this is disabled with ``None``.
:param float backoff_factor:
A backoff factor to apply between attempts. urllib3 will sleep for::
{backoff factor} * (2 ^ ({number of total retries} - 1))
seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep
        for [0.1s, 0.2s, 0.4s, ...] between retries. It will never be longer
        than :attr:`Retry.BACKOFF_MAX`.
        By default, backoff is disabled (set to 0); a worked example follows
        this docstring.
:param bool raise_on_redirect: Whether, if the number of redirects is
exhausted, to raise a MaxRetryError, or to return a response with a
response code in the 3xx range.
"""
DEFAULT_METHOD_WHITELIST = frozenset([
'HEAD', 'GET', 'PUT', 'DELETE', 'OPTIONS', 'TRACE'])
#: Maximum backoff time.
BACKOFF_MAX = 120
def __init__(self, total=10, connect=None, read=None, redirect=None,
method_whitelist=DEFAULT_METHOD_WHITELIST, status_forcelist=None,
backoff_factor=0, raise_on_redirect=True, _observed_errors=0):
self.total = total
self.connect = connect
self.read = read
if redirect is False or total is False:
redirect = 0
raise_on_redirect = False
self.redirect = redirect
self.status_forcelist = status_forcelist or set()
self.method_whitelist = method_whitelist
self.backoff_factor = backoff_factor
self.raise_on_redirect = raise_on_redirect
self._observed_errors = _observed_errors # TODO: use .history instead?
def new(self, **kw):
params = dict(
total=self.total,
connect=self.connect, read=self.read, redirect=self.redirect,
method_whitelist=self.method_whitelist,
status_forcelist=self.status_forcelist,
backoff_factor=self.backoff_factor,
raise_on_redirect=self.raise_on_redirect,
_observed_errors=self._observed_errors,
)
params.update(kw)
return type(self)(**params)
@classmethod
def from_int(cls, retries, redirect=True, default=None):
""" Backwards-compatibility for the old retries format."""
if retries is None:
retries = default if default is not None else cls.DEFAULT
if isinstance(retries, Retry):
return retries
redirect = bool(redirect) and None
new_retries = cls(retries, redirect=redirect)
log.debug("Converted retries value: %r -> %r" % (retries, new_retries))
return new_retries
def get_backoff_time(self):
""" Formula for computing the current backoff
:rtype: float
"""
if self._observed_errors <= 1:
return 0
backoff_value = self.backoff_factor * (2 ** (self._observed_errors - 1))
return min(self.BACKOFF_MAX, backoff_value)
def sleep(self):
""" Sleep between retry attempts using an exponential backoff.
By default, the backoff factor is 0 and this method will return
immediately.
"""
backoff = self.get_backoff_time()
if backoff <= 0:
return
time.sleep(backoff)
def _is_connection_error(self, err):
""" Errors when we're fairly sure that the server did not receive the
request, so it should be safe to retry.
"""
return isinstance(err, ConnectTimeoutError)
def _is_read_error(self, err):
""" Errors that occur after the request has been started, so we should
assume that the server began processing it.
"""
return isinstance(err, (ReadTimeoutError, ProtocolError))
def is_forced_retry(self, method, status_code):
""" Is this method/status code retryable? (Based on method/codes whitelists)
"""
if self.method_whitelist and method.upper() not in self.method_whitelist:
return False
return self.status_forcelist and status_code in self.status_forcelist
def is_exhausted(self):
""" Are we out of retries? """
retry_counts = (self.total, self.connect, self.read, self.redirect)
retry_counts = list(filter(None, retry_counts))
if not retry_counts:
return False
return min(retry_counts) < 0
def increment(self, method=None, url=None, response=None, error=None, _pool=None, _stacktrace=None):
""" Return a new Retry object with incremented retry counters.
:param response: A response object, or None, if the server did not
return a response.
:type response: :class:`~urllib3.response.HTTPResponse`
:param Exception error: An error encountered during the request, or
None if the response was received successfully.
:return: A new ``Retry`` object.
"""
if self.total is False and error:
# Disabled, indicate to re-raise the error.
raise six.reraise(type(error), error, _stacktrace)
total = self.total
if total is not None:
total -= 1
_observed_errors = self._observed_errors
connect = self.connect
read = self.read
redirect = self.redirect
cause = 'unknown'
if error and self._is_connection_error(error):
# Connect retry?
if connect is False:
raise six.reraise(type(error), error, _stacktrace)
elif connect is not None:
connect -= 1
_observed_errors += 1
elif error and self._is_read_error(error):
# Read retry?
if read is False:
raise six.reraise(type(error), error, _stacktrace)
elif read is not None:
read -= 1
_observed_errors += 1
elif response and response.get_redirect_location():
# Redirect retry?
if redirect is not None:
redirect -= 1
cause = 'too many redirects'
else:
# Incrementing because of a server error like a 500 in
            # status_forcelist and the given method is in the whitelist
_observed_errors += 1
cause = ResponseError.GENERIC_ERROR
if response and response.status:
cause = ResponseError.SPECIFIC_ERROR.format(
status_code=response.status)
new_retry = self.new(
total=total,
connect=connect, read=read, redirect=redirect,
_observed_errors=_observed_errors)
if new_retry.is_exhausted():
raise MaxRetryError(_pool, url, error or ResponseError(cause))
log.debug("Incremented Retry for (url='%s'): %r" % (url, new_retry))
return new_retry
def __repr__(self):
return ('{cls.__name__}(total={self.total}, connect={self.connect}, '
'read={self.read}, redirect={self.redirect})').format(
cls=type(self), self=self)
# For backwards compatibility (equivalent to pre-v1.9):
Retry.DEFAULT = Retry(3)
| gpl-3.0 | -7,587,537,545,977,619,000 | 33.821053 | 104 | 0.6184 | false |
samthor/intellij-community | python/lib/Lib/_strptime.py | 90 | 19538 | """Strptime-related classes and functions.
CLASSES:
LocaleTime -- Discovers and stores locale-specific time information
TimeRE -- Creates regexes for pattern matching a string of text containing
time information
FUNCTIONS:
_getlang -- Figure out what language is being used for the locale
strptime -- Calculates the time struct represented by the passed-in string
"""
import time
import locale
import calendar
from re import compile as re_compile
from re import IGNORECASE
from re import escape as re_escape
from datetime import date as datetime_date
try:
from thread import allocate_lock as _thread_allocate_lock
except:
from dummy_thread import allocate_lock as _thread_allocate_lock
__author__ = "Brett Cannon"
__email__ = "[email protected]"
__all__ = ['strptime']
def _getlang():
# Figure out what the current language is set to.
return locale.getlocale(locale.LC_TIME)
class LocaleTime(object):
"""Stores and handles locale-specific information related to time.
ATTRIBUTES:
f_weekday -- full weekday names (7-item list)
a_weekday -- abbreviated weekday names (7-item list)
f_month -- full month names (13-item list; dummy value in [0], which
is added by code)
a_month -- abbreviated month names (13-item list, dummy value in
[0], which is added by code)
am_pm -- AM/PM representation (2-item list)
LC_date_time -- format string for date/time representation (string)
LC_date -- format string for date representation (string)
LC_time -- format string for time representation (string)
timezone -- daylight- and non-daylight-savings timezone representation
(2-item list of sets)
lang -- Language used by instance (2-item tuple)
"""
def __init__(self):
"""Set all attributes.
Order of methods called matters for dependency reasons.
        The locale language is set at the outset and then checked again before
exiting. This is to make sure that the attributes were not set with a
mix of information from more than one locale. This would most likely
happen when using threads where one thread calls a locale-dependent
function while another thread changes the locale while the function in
the other thread is still running. Proper coding would call for
locks to prevent changing the locale while locale-dependent code is
running. The check here is done in case someone does not think about
doing this.
        The only other possible issue is if someone changed the timezone and did
        not call time.tzset(). That is an issue for the programmer, though,
since changing the timezone is worthless without that call.
"""
self.lang = _getlang()
self.__calc_weekday()
self.__calc_month()
self.__calc_am_pm()
self.__calc_timezone()
self.__calc_date_time()
if _getlang() != self.lang:
raise ValueError("locale changed during initialization")
def __pad(self, seq, front):
# Add '' to seq to either the front (is True), else the back.
seq = list(seq)
if front:
seq.insert(0, '')
else:
seq.append('')
return seq
def __calc_weekday(self):
# Set self.a_weekday and self.f_weekday using the calendar
# module.
a_weekday = [calendar.day_abbr[i].lower() for i in range(7)]
f_weekday = [calendar.day_name[i].lower() for i in range(7)]
self.a_weekday = a_weekday
self.f_weekday = f_weekday
def __calc_month(self):
# Set self.f_month and self.a_month using the calendar module.
a_month = [calendar.month_abbr[i].lower() for i in range(13)]
f_month = [calendar.month_name[i].lower() for i in range(13)]
self.a_month = a_month
self.f_month = f_month
def __calc_am_pm(self):
# Set self.am_pm by using time.strftime().
# The magic date (1999,3,17,hour,44,55,2,76,0) is not really that
# magical; just happened to have used it everywhere else where a
# static date was needed.
am_pm = []
for hour in (01,22):
time_tuple = time.struct_time((1999,3,17,hour,44,55,2,76,0))
am_pm.append(time.strftime("%p", time_tuple).lower())
self.am_pm = am_pm
def __calc_date_time(self):
# Set self.date_time, self.date, & self.time by using
# time.strftime().
# Use (1999,3,17,22,44,55,2,76,0) for magic date because the amount of
# overloaded numbers is minimized. The order in which searches for
# values within the format string is very important; it eliminates
# possible ambiguity for what something represents.
time_tuple = time.struct_time((1999,3,17,22,44,55,2,76,0))
date_time = [None, None, None]
date_time[0] = time.strftime("%c", time_tuple).lower()
date_time[1] = time.strftime("%x", time_tuple).lower()
date_time[2] = time.strftime("%X", time_tuple).lower()
replacement_pairs = [('%', '%%'), (self.f_weekday[2], '%A'),
(self.f_month[3], '%B'), (self.a_weekday[2], '%a'),
(self.a_month[3], '%b'), (self.am_pm[1], '%p'),
('1999', '%Y'), ('99', '%y'), ('22', '%H'),
('44', '%M'), ('55', '%S'), ('76', '%j'),
('17', '%d'), ('03', '%m'), ('3', '%m'),
# '3' needed for when no leading zero.
('2', '%w'), ('10', '%I')]
replacement_pairs.extend([(tz, "%Z") for tz_values in self.timezone
for tz in tz_values])
for offset,directive in ((0,'%c'), (1,'%x'), (2,'%X')):
current_format = date_time[offset]
for old, new in replacement_pairs:
# Must deal with possible lack of locale info
# manifesting itself as the empty string (e.g., Swedish's
# lack of AM/PM info) or a platform returning a tuple of empty
# strings (e.g., MacOS 9 having timezone as ('','')).
if old:
current_format = current_format.replace(old, new)
            # If %W is used, then Sunday, 1999-01-03 will fall on week 0 since
            # 1999-01-03 occurs before the first Monday of the year. Otherwise
# %U is used.
time_tuple = time.struct_time((1999,1,3,1,1,1,6,3,0))
if '00' in time.strftime(directive, time_tuple):
U_W = '%W'
else:
U_W = '%U'
date_time[offset] = current_format.replace('11', U_W)
self.LC_date_time = date_time[0]
self.LC_date = date_time[1]
self.LC_time = date_time[2]
def __calc_timezone(self):
# Set self.timezone by using time.tzname.
# Do not worry about possibility of time.tzname[0] == timetzname[1]
# and time.daylight; handle that in strptime .
try:
time.tzset()
except AttributeError:
pass
no_saving = frozenset(["utc", "gmt", time.tzname[0].lower()])
if time.daylight:
has_saving = frozenset([time.tzname[1].lower()])
else:
has_saving = frozenset()
self.timezone = (no_saving, has_saving)
class TimeRE(dict):
"""Handle conversion from format directives to regexes."""
def __init__(self, locale_time=None):
"""Create keys/values.
Order of execution is important for dependency reasons.
"""
if locale_time:
self.locale_time = locale_time
else:
self.locale_time = LocaleTime()
base = super(TimeRE, self)
base.__init__({
# The " \d" part of the regex is to make %c from ANSI C work
'd': r"(?P<d>3[0-1]|[1-2]\d|0[1-9]|[1-9]| [1-9])",
'H': r"(?P<H>2[0-3]|[0-1]\d|\d)",
'I': r"(?P<I>1[0-2]|0[1-9]|[1-9])",
'j': r"(?P<j>36[0-6]|3[0-5]\d|[1-2]\d\d|0[1-9]\d|00[1-9]|[1-9]\d|0[1-9]|[1-9])",
'm': r"(?P<m>1[0-2]|0[1-9]|[1-9])",
'M': r"(?P<M>[0-5]\d|\d)",
'S': r"(?P<S>6[0-1]|[0-5]\d|\d)",
'U': r"(?P<U>5[0-3]|[0-4]\d|\d)",
'w': r"(?P<w>[0-6])",
# W is set below by using 'U'
'y': r"(?P<y>\d\d)",
#XXX: Does 'Y' need to worry about having less or more than
# 4 digits?
'Y': r"(?P<Y>\d\d\d\d)",
'A': self.__seqToRE(self.locale_time.f_weekday, 'A'),
'a': self.__seqToRE(self.locale_time.a_weekday, 'a'),
'B': self.__seqToRE(self.locale_time.f_month[1:], 'B'),
'b': self.__seqToRE(self.locale_time.a_month[1:], 'b'),
'p': self.__seqToRE(self.locale_time.am_pm, 'p'),
'Z': self.__seqToRE((tz for tz_names in self.locale_time.timezone
for tz in tz_names),
'Z'),
'%': '%'})
base.__setitem__('W', base.__getitem__('U').replace('U', 'W'))
base.__setitem__('c', self.pattern(self.locale_time.LC_date_time))
base.__setitem__('x', self.pattern(self.locale_time.LC_date))
base.__setitem__('X', self.pattern(self.locale_time.LC_time))
def __seqToRE(self, to_convert, directive):
"""Convert a list to a regex string for matching a directive.
Want possible matching values to be from longest to shortest. This
        prevents the possibility of a match occurring for a value that is also
        a substring of a larger value that should have matched (e.g., 'abc'
matching when 'abcdef' should have been the match).
"""
to_convert = sorted(to_convert, key=len, reverse=True)
for value in to_convert:
if value != '':
break
else:
return ''
regex = '|'.join(re_escape(stuff) for stuff in to_convert)
regex = '(?P<%s>%s' % (directive, regex)
return '%s)' % regex
def pattern(self, format):
"""Return regex pattern for the format string.
Need to make sure that any characters that might be interpreted as
regex syntax are escaped.
"""
processed_format = ''
# The sub() call escapes all characters that might be misconstrued
# as regex syntax. Cannot use re.escape since we have to deal with
# format directives (%m, etc.).
regex_chars = re_compile(r"([\\.^$*+?\(\){}\[\]|])")
format = regex_chars.sub(r"\\\1", format)
whitespace_replacement = re_compile('\s+')
format = whitespace_replacement.sub('\s+', format)
while '%' in format:
directive_index = format.index('%')+1
processed_format = "%s%s%s" % (processed_format,
format[:directive_index-1],
self[format[directive_index]])
format = format[directive_index+1:]
return "%s%s" % (processed_format, format)
def compile(self, format):
"""Return a compiled re object for the format string."""
return re_compile(self.pattern(format), IGNORECASE)
_cache_lock = _thread_allocate_lock()
# DO NOT modify _TimeRE_cache or _regex_cache without acquiring the cache lock
# first!
_TimeRE_cache = TimeRE()
_CACHE_MAX_SIZE = 5 # Max number of regexes stored in _regex_cache
_regex_cache = {}
def _calc_julian_from_U_or_W(year, week_of_year, day_of_week, week_starts_Mon):
"""Calculate the Julian day based on the year, week of the year, and day of
the week, with week_start_day representing whether the week of the year
assumes the week starts on Sunday or Monday (6 or 0)."""
first_weekday = datetime_date(year, 1, 1).weekday()
# If we are dealing with the %U directive (week starts on Sunday), it's
# easier to just shift the view to Sunday being the first day of the
# week.
if not week_starts_Mon:
first_weekday = (first_weekday + 1) % 7
day_of_week = (day_of_week + 1) % 7
# Need to watch out for a week 0 (when the first day of the year is not
# the same as that specified by %U or %W).
week_0_length = (7 - first_weekday) % 7
if week_of_year == 0:
return 1 + day_of_week - first_weekday
else:
days_to_week = week_0_length + (7 * (week_of_year - 1))
return 1 + days_to_week + day_of_week
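# Worked example (illustrative): for 1999, Jan 1 is a Friday, so
# first_weekday == 4 and week_0_length == 3.  Asking for %W week 1, Monday
# (week_of_year=1, day_of_week=0, week_starts_Mon=True) therefore gives
#     julian = 1 + (3 + 7 * 0) + 0 == 4
# i.e. 1999-01-04, the first Monday of that year.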
def strptime(data_string, format="%a %b %d %H:%M:%S %Y"):
"""Return a time struct based on the input string and the format string."""
global _TimeRE_cache, _regex_cache
_cache_lock.acquire()
try:
if _getlang() != _TimeRE_cache.locale_time.lang:
_TimeRE_cache = TimeRE()
_regex_cache.clear()
if len(_regex_cache) > _CACHE_MAX_SIZE:
_regex_cache.clear()
locale_time = _TimeRE_cache.locale_time
format_regex = _regex_cache.get(format)
if not format_regex:
try:
format_regex = _TimeRE_cache.compile(format)
# KeyError raised when a bad format is found; can be specified as
# \\, in which case it was a stray % but with a space after it
except KeyError, err:
bad_directive = err.args[0]
if bad_directive == "\\":
bad_directive = "%"
del err
raise ValueError("'%s' is a bad directive in format '%s'" %
(bad_directive, format))
# IndexError only occurs when the format string is "%"
except IndexError:
raise ValueError("stray %% in format '%s'" % format)
_regex_cache[format] = format_regex
finally:
_cache_lock.release()
found = format_regex.match(data_string)
if not found:
raise ValueError("time data did not match format: data=%s fmt=%s" %
(data_string, format))
if len(data_string) != found.end():
raise ValueError("unconverted data remains: %s" %
data_string[found.end():])
year = 1900
month = day = 1
hour = minute = second = 0
tz = -1
# Default to -1 to signify that values not known; not critical to have,
# though
week_of_year = -1
week_of_year_start = -1
# weekday and julian defaulted to -1 so as to signal need to calculate
# values
weekday = julian = -1
found_dict = found.groupdict()
for group_key in found_dict.iterkeys():
# Directives not explicitly handled below:
# c, x, X
# handled by making out of other directives
# U, W
# worthless without day of the week
if group_key == 'y':
year = int(found_dict['y'])
# Open Group specification for strptime() states that a %y
            # value in the range of [00, 68] is in the century 2000, while
            # [69, 99] is in the century 1900
if year <= 68:
year += 2000
else:
year += 1900
elif group_key == 'Y':
year = int(found_dict['Y'])
elif group_key == 'm':
month = int(found_dict['m'])
elif group_key == 'B':
month = locale_time.f_month.index(found_dict['B'].lower())
elif group_key == 'b':
month = locale_time.a_month.index(found_dict['b'].lower())
elif group_key == 'd':
day = int(found_dict['d'])
elif group_key == 'H':
hour = int(found_dict['H'])
elif group_key == 'I':
hour = int(found_dict['I'])
ampm = found_dict.get('p', '').lower()
# If there was no AM/PM indicator, we'll treat this like AM
if ampm in ('', locale_time.am_pm[0]):
# We're in AM so the hour is correct unless we're
# looking at 12 midnight.
# 12 midnight == 12 AM == hour 0
if hour == 12:
hour = 0
elif ampm == locale_time.am_pm[1]:
# We're in PM so we need to add 12 to the hour unless
# we're looking at 12 noon.
# 12 noon == 12 PM == hour 12
if hour != 12:
hour += 12
elif group_key == 'M':
minute = int(found_dict['M'])
elif group_key == 'S':
second = int(found_dict['S'])
elif group_key == 'A':
weekday = locale_time.f_weekday.index(found_dict['A'].lower())
elif group_key == 'a':
weekday = locale_time.a_weekday.index(found_dict['a'].lower())
elif group_key == 'w':
weekday = int(found_dict['w'])
if weekday == 0:
weekday = 6
else:
weekday -= 1
elif group_key == 'j':
julian = int(found_dict['j'])
elif group_key in ('U', 'W'):
week_of_year = int(found_dict[group_key])
if group_key == 'U':
# U starts week on Sunday.
week_of_year_start = 6
else:
# W starts week on Monday.
week_of_year_start = 0
elif group_key == 'Z':
# Since -1 is default value only need to worry about setting tz if
# it can be something other than -1.
found_zone = found_dict['Z'].lower()
for value, tz_values in enumerate(locale_time.timezone):
if found_zone in tz_values:
# Deal with bad locale setup where timezone names are the
# same and yet time.daylight is true; too ambiguous to
# be able to tell what timezone has daylight savings
if (time.tzname[0] == time.tzname[1] and
time.daylight and found_zone not in ("utc", "gmt")):
break
else:
tz = value
break
# If we know the week of the year and what day of that week, we can figure
# out the Julian day of the year.
if julian == -1 and week_of_year != -1 and weekday != -1:
week_starts_Mon = True if week_of_year_start == 0 else False
julian = _calc_julian_from_U_or_W(year, week_of_year, weekday,
week_starts_Mon)
# Cannot pre-calculate datetime_date() since can change in Julian
# calculation and thus could have different value for the day of the week
# calculation.
if julian == -1:
# Need to add 1 to result since first day of the year is 1, not 0.
julian = datetime_date(year, month, day).toordinal() - \
datetime_date(year, 1, 1).toordinal() + 1
else: # Assume that if they bothered to include Julian day it will
# be accurate.
datetime_result = datetime_date.fromordinal((julian - 1) + datetime_date(year, 1, 1).toordinal())
year = datetime_result.year
month = datetime_result.month
day = datetime_result.day
if weekday == -1:
weekday = datetime_date(year, month, day).weekday()
return time.struct_time((year, month, day,
hour, minute, second,
weekday, julian, tz))
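# Example usage (an illustrative comment; mirrors time.strptime):
#
#     import _strptime
#     _strptime.strptime("30 Nov 00", "%d %b %y")
#     # -> time.struct_time for 2000-11-30 (a Thursday, yday 335)
#
# A ValueError is raised if the data string does not match the format or if
# unconverted data remains.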
| apache-2.0 | 1,566,149,884,057,280,000 | 42.321508 | 105 | 0.546832 | false |
vitorio/bite-project | deps/gdata-python-client/samples/apps/marketplace_sample/gdata/tlslite/api.py | 359 | 2965 | """Import this module for easy access to TLS Lite objects.
The TLS Lite API consists of classes, functions, and variables spread
throughout this package. Instead of importing them individually with::
from tlslite.TLSConnection import TLSConnection
from tlslite.HandshakeSettings import HandshakeSettings
from tlslite.errors import *
.
.
It's easier to do::
from tlslite.api import *
This imports all the important objects (TLSConnection, Checker,
HandshakeSettings, etc.) into the global namespace. In particular, it
imports::
from constants import AlertLevel, AlertDescription, Fault
from errors import *
from Checker import Checker
from HandshakeSettings import HandshakeSettings
from Session import Session
from SessionCache import SessionCache
from SharedKeyDB import SharedKeyDB
from TLSConnection import TLSConnection
from VerifierDB import VerifierDB
from X509 import X509
from X509CertChain import X509CertChain
from integration.HTTPTLSConnection import HTTPTLSConnection
from integration.POP3_TLS import POP3_TLS
from integration.IMAP4_TLS import IMAP4_TLS
from integration.SMTP_TLS import SMTP_TLS
from integration.XMLRPCTransport import XMLRPCTransport
from integration.TLSSocketServerMixIn import TLSSocketServerMixIn
from integration.TLSAsyncDispatcherMixIn import TLSAsyncDispatcherMixIn
from integration.TLSTwistedProtocolWrapper import TLSTwistedProtocolWrapper
from utils.cryptomath import cryptlibpyLoaded, m2cryptoLoaded,
gmpyLoaded, pycryptoLoaded, prngName
from utils.keyfactory import generateRSAKey, parsePEMKey, parseXMLKey,
parseAsPublicKey, parsePrivateKey
"""
from constants import AlertLevel, AlertDescription, Fault
from errors import *
from Checker import Checker
from HandshakeSettings import HandshakeSettings
from Session import Session
from SessionCache import SessionCache
from SharedKeyDB import SharedKeyDB
from TLSConnection import TLSConnection
from VerifierDB import VerifierDB
from X509 import X509
from X509CertChain import X509CertChain
from integration.HTTPTLSConnection import HTTPTLSConnection
from integration.TLSSocketServerMixIn import TLSSocketServerMixIn
from integration.TLSAsyncDispatcherMixIn import TLSAsyncDispatcherMixIn
from integration.POP3_TLS import POP3_TLS
from integration.IMAP4_TLS import IMAP4_TLS
from integration.SMTP_TLS import SMTP_TLS
from integration.XMLRPCTransport import XMLRPCTransport
try:
import twisted
del(twisted)
from integration.TLSTwistedProtocolWrapper import TLSTwistedProtocolWrapper
except ImportError:
pass
from utils.cryptomath import cryptlibpyLoaded, m2cryptoLoaded, gmpyLoaded, \
pycryptoLoaded, prngName
from utils.keyfactory import generateRSAKey, parsePEMKey, parseXMLKey, \
parseAsPublicKey, parsePrivateKey
| apache-2.0 | 6,781,431,970,549,082,000 | 38.533333 | 79 | 0.790556 | false |
kitefishlabs/CorpusDB2 | corpusdb2/nodegraph.py | 1 | 6336 | # nodegraph.py - nodegraphs
# CorpusDB2 - Corpus-based processing for audio.
"""
Graph of Nodes.
Nodes encapsulate audio processing.
1:M relationship to source file (optional).
1:1 relationship to (potential) DataCollections.
"""
__version__ = '1.0'
__author__ = 'Thomas Stoll'
__copyright__ = "Copyright (C) 2014 Thomas Stoll, Kitefish Labs, All Rights Reserved"
__license__ = "gpl 2.0 or higher"
__email__ = '[email protected]'
import os, json
import numpy as np
from bregman.features import LinearFrequencySpectrum, LogFrequencySpectrum, MelFrequencySpectrum, MelFrequencyCepstrum, Chromagram, dBPower
from scikits.audiolab import wavread
# DEFAULT_IMAGESC_KWARGS={'origin':'upper', 'cmap':P.cm.hot, 'aspect':'auto', 'interpolation':'nearest'}
"""
These are the default params/metadata for the feature extractors:
{
'rootpath' : '~/comp/corpusdb2/fulltest/',
'filename' : 'cage.wav',
'feature' : LinearFrequencySpectrum,
'sr': 44100, # The audio sample rate
'nbpo': 12, # Number of Bands Per Octave for front-end filterbank
'ncoef' : 10, # Number of cepstral coefficients to use for cepstral features
'lcoef' : 1, # Starting cepstral coefficient
'lo': 62.5, # Lowest band edge frequency of filterbank
'hi': 16000, # Highest band edge frequency of filterbank
'nfft': 16384, # FFT length for filterbank
'wfft': 8192, # FFT signal window length
'nhop': 4410, # FFT hop size
'window' : 'hamm', # FFT window type
'log10': False, # Whether to use log output
'magnitude': True, # Whether to use magnitude (False=power)
'power_ext': ".power", # File extension for power files
'intensify' : False, # Whether to use critical band masking in chroma extraction
'verbosity' : 1, # How much to tell the user about extraction
'available_features' : [
LinearFrequencySpectrum, # all 6 available Bregman features
LogFrequencySpectrum,
MelFrequencySpectrum,
MelFrequencyCepstrum,
Chromagram,
dBPower]
}
"""
class BregmanNodeGraph(object):
"""
Based on Features class of Bregman Audio Toolkit.
"""
rawaudio = None
sr = 0
fmt = ''
X = None
# _feature = None
def __init__(self, arg=None, metadata=None):
self._initialize(metadata)
def _initialize(self, metadata):
"""
Initialize important parameters
"""
self.metadata = self.default_metadata()
self._check_metadata(metadata)
@staticmethod
def default_metadata():
""" metadata == analysis params + available features """
metadata = {
'rootpath' : '~/comp/corpusdb2/fulltest/',
'filename' : 'cage.wav',
'feature' : LinearFrequencySpectrum,
'sr': 44100,
'fmt' : 'pcm16',
'nbpo': 12,
'ncoef' : 10,
'lcoef' : 1,
'lo': 62.5,
'hi': 16000,
'nfft': 16384,
'wfft': 8192,
'nhop': 4410,
'window' : 'hamm',
'intensify' : False,
'verbosity' : 0,
'available_features' : {
'LinearFrequencySpectrum' : '.linfreqspeq',
'LogFrequencySpectrum' : '.logfreqspeq',
'MelFrequencySpectrum' : '.melfreqspeq',
'MelFrequencyCepstrum' : '.mfcc',
'Chroma' : '.chroma',
'dBPower' : '.dbp'
}
}
return metadata
def _check_metadata(self, metadata=None):
self.metadata = metadata if metadata is not None else self.metadata
md = self.default_metadata()
for k in md.keys():
self.metadata[k] = self.metadata.get(k, md[k])
self.__setattr__(k, self.metadata[k])
return self.metadata
def __repr__(self):
return "%s | %s | %s" % (self.rootpath, self.filename, self.feature)
def _read_wav_file(self):
"""
Simply read raw audio data into class var.
"""
fullsndpath = os.path.join(os.path.expanduser(self.rootpath), 'snd', self.filename)
try:
self.rawaudio, self.sr, self.fmt = wavread(fullsndpath)
except IOError:
return "IOError! WAV read failed!"
return self.rawaudio
def get_full_ngpath(self, mflag=False, alt=None):
# basename, just in case?
dir = 'ng'
if alt is not None:
dir = str(alt)
filename = os.path.basename(self.filename)
extstring = self.available_features[self.feature.__class__.__name__] # well aren't we clever
# print 'dir: ', dir
if mflag:
extstring += ".json"
return os.path.join(
os.path.expanduser(self.rootpath),
dir,
(str(filename)+extstring))
    def process_wav_file(self, filename=None, ftr=None):
        # An explicit filename argument overrides the one stored in the metadata
        if filename is not None:
            self.filename = os.path.basename(filename)
            self.metadata['filename'] = self.filename
        # Read the raw audio if it has not been loaded yet
        if self.rawaudio is None:
            self._read_wav_file()
# ftr != None means the feature name was passed in as an arg
if ftr is None:
ftr = self.feature
if self.rawaudio is not None:
# ftr is a class
self.feature = ftr(self.rawaudio)
self.X = self.feature.X
self.dims = np.shape(self.X)
extstring = self.available_features[self.feature.__class__.__name__] # well aren't we clever
md_filepath = self.get_full_ngpath(mflag=True)
            # Dump a copy of the metadata, with the feature given by its class
            # name so that it is JSON-serializable
            clean_md = dict(self.metadata)
            clean_md['feature'] = self.feature.__class__.__name__
            j = json.dumps(clean_md, indent=4)
f = open(md_filepath, 'w')
print >> f, j
f.close()
return self.X, self.dims
else:
return None
"""
if type(arg)==P.ndarray:
self.set_audio(arg, sr=self.sr)
self.extract()
elif type(arg)==str:
if arg:
self.load_audio(arg) # open file as MONO signal
self.extract()
""" | gpl-3.0 | -8,220,952,945,913,133,000 | 34.601124 | 139 | 0.559343 | false |
hynekcer/django | tests/admin_inlines/models.py | 276 | 6824 | """
Testing of admin inline formsets.
"""
from __future__ import unicode_literals
import random
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Parent(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Teacher(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Child(models.Model):
name = models.CharField(max_length=50)
teacher = models.ForeignKey(Teacher, models.CASCADE)
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
parent = GenericForeignKey()
def __str__(self):
return 'I am %s, a child of %s' % (self.name, self.parent)
class Book(models.Model):
name = models.CharField(max_length=50)
class Author(models.Model):
name = models.CharField(max_length=50)
books = models.ManyToManyField(Book)
class NonAutoPKBook(models.Model):
rand_pk = models.IntegerField(primary_key=True, editable=False)
author = models.ForeignKey(Author, models.CASCADE)
title = models.CharField(max_length=50)
def save(self, *args, **kwargs):
while not self.rand_pk:
test_pk = random.randint(1, 99999)
if not NonAutoPKBook.objects.filter(rand_pk=test_pk).exists():
self.rand_pk = test_pk
super(NonAutoPKBook, self).save(*args, **kwargs)
class EditablePKBook(models.Model):
manual_pk = models.IntegerField(primary_key=True)
author = models.ForeignKey(Author, models.CASCADE)
title = models.CharField(max_length=50)
class Holder(models.Model):
dummy = models.IntegerField()
class Inner(models.Model):
dummy = models.IntegerField()
holder = models.ForeignKey(Holder, models.CASCADE)
readonly = models.CharField("Inner readonly label", max_length=1)
def get_absolute_url(self):
return '/inner/'
class Holder2(models.Model):
dummy = models.IntegerField()
class Inner2(models.Model):
dummy = models.IntegerField()
holder = models.ForeignKey(Holder2, models.CASCADE)
class Holder3(models.Model):
dummy = models.IntegerField()
class Inner3(models.Model):
dummy = models.IntegerField()
holder = models.ForeignKey(Holder3, models.CASCADE)
# Models for ticket #8190
class Holder4(models.Model):
dummy = models.IntegerField()
class Inner4Stacked(models.Model):
dummy = models.IntegerField(help_text="Awesome stacked help text is awesome.")
holder = models.ForeignKey(Holder4, models.CASCADE)
class Inner4Tabular(models.Model):
dummy = models.IntegerField(help_text="Awesome tabular help text is awesome.")
holder = models.ForeignKey(Holder4, models.CASCADE)
# Models for #12749
class Person(models.Model):
firstname = models.CharField(max_length=15)
class OutfitItem(models.Model):
name = models.CharField(max_length=15)
class Fashionista(models.Model):
person = models.OneToOneField(Person, models.CASCADE, primary_key=True)
weaknesses = models.ManyToManyField(OutfitItem, through='ShoppingWeakness', blank=True)
class ShoppingWeakness(models.Model):
fashionista = models.ForeignKey(Fashionista, models.CASCADE)
item = models.ForeignKey(OutfitItem, models.CASCADE)
# Models for #13510
class TitleCollection(models.Model):
pass
class Title(models.Model):
collection = models.ForeignKey(TitleCollection, models.SET_NULL, blank=True, null=True)
title1 = models.CharField(max_length=100)
title2 = models.CharField(max_length=100)
# Models for #15424
class Poll(models.Model):
name = models.CharField(max_length=40)
class Question(models.Model):
poll = models.ForeignKey(Poll, models.CASCADE)
class Novel(models.Model):
name = models.CharField(max_length=40)
class Chapter(models.Model):
name = models.CharField(max_length=40)
novel = models.ForeignKey(Novel, models.CASCADE)
class FootNote(models.Model):
"""
Model added for ticket 19838
"""
chapter = models.ForeignKey(Chapter, models.PROTECT)
note = models.CharField(max_length=40)
# Models for #16838
class CapoFamiglia(models.Model):
name = models.CharField(max_length=100)
class Consigliere(models.Model):
name = models.CharField(max_length=100, help_text='Help text for Consigliere')
capo_famiglia = models.ForeignKey(CapoFamiglia, models.CASCADE, related_name='+')
class SottoCapo(models.Model):
name = models.CharField(max_length=100)
capo_famiglia = models.ForeignKey(CapoFamiglia, models.CASCADE, related_name='+')
class ReadOnlyInline(models.Model):
name = models.CharField(max_length=100, help_text='Help text for ReadOnlyInline')
capo_famiglia = models.ForeignKey(CapoFamiglia, models.CASCADE)
# Models for #18433
class ParentModelWithCustomPk(models.Model):
my_own_pk = models.CharField(max_length=100, primary_key=True)
name = models.CharField(max_length=100)
class ChildModel1(models.Model):
my_own_pk = models.CharField(max_length=100, primary_key=True)
name = models.CharField(max_length=100)
parent = models.ForeignKey(ParentModelWithCustomPk, models.CASCADE)
def get_absolute_url(self):
return '/child_model1/'
class ChildModel2(models.Model):
my_own_pk = models.CharField(max_length=100, primary_key=True)
name = models.CharField(max_length=100)
parent = models.ForeignKey(ParentModelWithCustomPk, models.CASCADE)
def get_absolute_url(self):
return '/child_model2/'
# Models for #19425
class BinaryTree(models.Model):
name = models.CharField(max_length=100)
parent = models.ForeignKey('self', models.SET_NULL, null=True, blank=True)
# Models for #19524
class LifeForm(models.Model):
pass
class ExtraTerrestrial(LifeForm):
name = models.CharField(max_length=100)
class Sighting(models.Model):
et = models.ForeignKey(ExtraTerrestrial, models.CASCADE)
place = models.CharField(max_length=100)
# Models for #18263
class SomeParentModel(models.Model):
name = models.CharField(max_length=1)
class SomeChildModel(models.Model):
name = models.CharField(max_length=1)
position = models.PositiveIntegerField()
parent = models.ForeignKey(SomeParentModel, models.CASCADE)
# Other models
class ProfileCollection(models.Model):
pass
class Profile(models.Model):
collection = models.ForeignKey(ProfileCollection, models.SET_NULL, blank=True, null=True)
first_name = models.CharField(max_length=100)
last_name = models.CharField(max_length=100)
| bsd-3-clause | -4,537,177,001,815,998,000 | 24.750943 | 93 | 0.722743 | false |
giacomov/3ML | threeML/utils/spectrum/pha_spectrum.py | 1 | 34989 | from __future__ import division
from builtins import range
from past.utils import old_div
import collections
import astropy.io.fits as fits
import numpy as np
import os
import warnings
import six
from threeML.io.progress_bar import progress_bar
from threeML.utils.OGIP.response import OGIPResponse, InstrumentResponse
from threeML.utils.OGIP.pha import PHAII
from threeML.utils.spectrum.binned_spectrum import BinnedSpectrumWithDispersion, Quality
from threeML.utils.spectrum.binned_spectrum_set import BinnedSpectrumSet
from threeML.utils.time_interval import TimeIntervalSet
_required_keywords = {}
_required_keywords["observed"] = (
"mission:TELESCOP,instrument:INSTRUME,filter:FILTER,"
+ "exposure:EXPOSURE,backfile:BACKFILE,"
+ "respfile:RESPFILE,"
+ "ancrfile:ANCRFILE,hduclass:HDUCLASS,"
+ "hduclas1:HDUCLAS1,poisserr:POISSERR,"
+ "chantype:CHANTYPE,detchans:DETCHANS,"
"backscal:BACKSCAL"
).split(",")
# python types, not fits
_required_keyword_types = {"POISSERR": bool}
# hduvers:HDUVERS
_required_keywords["background"] = (
"mission:TELESCOP,instrument:INSTRUME,filter:FILTER,"
+ "exposure:EXPOSURE,"
+ "hduclass:HDUCLASS,"
+ "hduclas1:HDUCLAS1,poisserr:POISSERR,"
+ "chantype:CHANTYPE,detchans:DETCHANS,"
"backscal:BACKSCAL"
).split(",")
# hduvers:HDUVERS
_might_be_columns = {}
_might_be_columns["observed"] = (
"EXPOSURE,BACKFILE," + "CORRFILE,CORRSCAL," + "RESPFILE,ANCRFILE," "BACKSCAL"
).split(",")
_might_be_columns["background"] = ("EXPOSURE,BACKSCAL").split(",")
def _read_pha_or_pha2_file(
pha_file_or_instance,
spectrum_number=None,
file_type="observed",
rsp_file=None,
arf_file=None,
treat_as_time_series=False,
):
"""
A function to extract information from pha and pha2 files. It is kept separate because the same method is
used for reading time series (MUCH faster than building a lot of individual spectra) and single spectra.
:param pha_file_or_instance: either a PHA file name or threeML.plugins.OGIP.pha.PHAII instance
:param spectrum_number: (optional) the spectrum number of the TypeII file to be used
:param file_type: observed or background
:param rsp_file: RMF filename or threeML.plugins.OGIP.response.InstrumentResponse instance
:param arf_file: (optional) and ARF filename
:param treat_as_time_series:
:return:
"""
assert isinstance(pha_file_or_instance, six.string_types) or isinstance(
pha_file_or_instance, PHAII
), "Must provide a FITS file name or PHAII instance"
if isinstance(pha_file_or_instance, six.string_types):
ext = os.path.splitext(pha_file_or_instance)[-1]
if "{" in ext:
spectrum_number = int(ext.split("{")[-1].replace("}", ""))
pha_file_or_instance = pha_file_or_instance.split("{")[0]
# Read the data
filename = pha_file_or_instance
# create a FITS_FILE instance
pha_file_or_instance = PHAII.from_fits_file(pha_file_or_instance)
# If this is already a FITS_FILE instance,
elif isinstance(pha_file_or_instance, PHAII):
# we simply create a dummy filename
filename = "pha_instance"
else:
raise RuntimeError("This is a bug")
file_name = filename
assert file_type.lower() in [
"observed",
"background",
], "Unrecognized filetype keyword value"
file_type = file_type.lower()
try:
HDUidx = pha_file_or_instance.index_of("SPECTRUM")
except:
raise RuntimeError(
"The input file %s is not in PHA format" % (pha_file_or_instance)
)
# spectrum_number = spectrum_number
spectrum = pha_file_or_instance[HDUidx]
data = spectrum.data
header = spectrum.header
# We don't support yet the rescaling
if "CORRFILE" in header:
if (header.get("CORRFILE").upper().strip() != "NONE") and (
header.get("CORRFILE").upper().strip() != ""
):
raise RuntimeError("CORRFILE is not yet supported")
# See if there is there is a QUALITY==0 in the header
if "QUALITY" in header:
has_quality_column = False
if header["QUALITY"] == 0:
is_all_data_good = True
else:
is_all_data_good = False
else:
if "QUALITY" in data.columns.names:
has_quality_column = True
is_all_data_good = False
else:
has_quality_column = False
is_all_data_good = True
warnings.warn(
"Could not find QUALITY in columns or header of PHA file. This is not a valid OGIP file. Assuming QUALITY =0 (good)"
)
# looking for tstart and tstop
tstart = None
tstop = None
has_tstart = False
has_tstop = False
has_telapse = False
if "TSTART" in header:
has_tstart_column = False
has_tstart = True
else:
if "TSTART" in data.columns.names:
has_tstart_column = True
has_tstart = True
if "TELAPSE" in header:
has_telapse_column = False
has_telapse = True
else:
if "TELAPSE" in data.columns.names:
has_telapse_column = True
has_telapse = True
if "TSTOP" in header:
has_tstop_column = False
has_tstop = True
else:
if "TSTOP" in data.columns.names:
has_tstop_column = True
has_tstop = True
if has_tstop and has_telapse:
warnings.warn("Found TSTOP and TELAPSE. This file is invalid. Using TSTOP.")
has_telapse = False
# Determine if this file contains COUNTS or RATES
if "COUNTS" in data.columns.names:
has_rates = False
data_column_name = "COUNTS"
elif "RATE" in data.columns.names:
has_rates = True
data_column_name = "RATE"
else:
raise RuntimeError(
"This file does not contain a RATE nor a COUNTS column. "
"This is not a valid PHA file"
)
# Determine if this is a PHA I or PHA II
if len(data.field(data_column_name).shape) == 2:
typeII = True
        if spectrum_number is None and not treat_as_time_series:
raise RuntimeError(
"This is a PHA Type II file. You have to provide a spectrum number"
)
else:
typeII = False
# Collect information from mandatory keywords
keys = _required_keywords[file_type]
gathered_keywords = {}
for k in keys:
internal_name, keyname = k.split(":")
key_has_been_collected = False
if keyname in header:
if (
keyname in _required_keyword_types
and type(header.get(keyname)) is not _required_keyword_types[keyname]
):
warnings.warn(
"unexpected type of %(keyname)s, expected %(expected_type)s\n found %(found_type)s: %(found_value)s"
% dict(
keyname=keyname,
expected_type=_required_keyword_types[keyname],
found_type=type(header.get(keyname)),
found_value=header.get(keyname),
)
)
else:
gathered_keywords[internal_name] = header.get(keyname)
# Fix "NONE" in None
if (
gathered_keywords[internal_name] == "NONE"
or gathered_keywords[internal_name] == "none"
):
gathered_keywords[internal_name] = None
key_has_been_collected = True
# Note that we check again because the content of the column can override the content of the header
if keyname in _might_be_columns[file_type] and typeII:
# Check if there is a column with this name
if keyname in data.columns.names:
# This will set the exposure, among other things
if not treat_as_time_series:
# if we just want a single spectrum
gathered_keywords[internal_name] = data[keyname][
spectrum_number - 1
]
else:
# else get all the columns
gathered_keywords[internal_name] = data[keyname]
# Fix "NONE" in None
if (
gathered_keywords[internal_name] == "NONE"
or gathered_keywords[internal_name] == "none"
):
gathered_keywords[internal_name] = None
key_has_been_collected = True
if not key_has_been_collected:
# The keyword POISSERR is a special case, because even if it is missing,
# it is assumed to be False if there is a STAT_ERR column in the file
if keyname == "POISSERR" and "STAT_ERR" in data.columns.names:
warnings.warn(
"POISSERR is not set. Assuming non-poisson errors as given in the "
"STAT_ERR column"
)
gathered_keywords["poisserr"] = False
elif keyname == "ANCRFILE":
# Some non-compliant files have no ARF because they don't need one. Don't fail, but issue a
# warning
warnings.warn(
"ANCRFILE is not set. This is not a compliant OGIP file. Assuming no ARF."
)
gathered_keywords["ancrfile"] = None
elif keyname == "FILTER":
# Some non-compliant files have no FILTER because they don't need one. Don't fail, but issue a
# warning
warnings.warn(
"FILTER is not set. This is not a compliant OGIP file. Assuming no FILTER."
)
gathered_keywords["filter"] = None
else:
raise RuntimeError(
"Keyword %s not found. File %s is not a proper PHA "
"file" % (keyname, filename)
)
is_poisson = gathered_keywords["poisserr"]
exposure = gathered_keywords["exposure"]
# now we need to get the response file so that we can extract the EBOUNDS
if file_type == "observed":
if rsp_file is None:
# this means it should be specified in the header
rsp_file = gathered_keywords["respfile"]
if arf_file is None:
arf_file = gathered_keywords["ancrfile"]
# Read in the response
if isinstance(rsp_file, six.string_types) or isinstance(rsp_file, str):
rsp = OGIPResponse(rsp_file, arf_file=arf_file)
else:
# assume a fully formed OGIPResponse
rsp = rsp_file
if file_type == "background":
# we need the rsp ebounds from response to build the histogram
assert isinstance(
rsp_file, InstrumentResponse
), "You must supply and OGIPResponse to extract the energy bounds"
rsp = rsp_file
# Now get the data (counts or rates) and their errors. If counts, transform them in rates
if typeII:
# PHA II file
if has_rates:
if not treat_as_time_series:
rates = data.field(data_column_name)[spectrum_number - 1, :]
rate_errors = None
if not is_poisson:
rate_errors = data.field("STAT_ERR")[spectrum_number - 1, :]
else:
rates = data.field(data_column_name)
rate_errors = None
if not is_poisson:
rate_errors = data.field("STAT_ERR")
else:
if not treat_as_time_series:
rates = old_div(
data.field(data_column_name)[spectrum_number - 1, :], exposure
)
rate_errors = None
if not is_poisson:
rate_errors = old_div(
data.field("STAT_ERR")[spectrum_number - 1, :], exposure
)
else:
rates = old_div(data.field(data_column_name), np.atleast_2d(exposure).T)
rate_errors = None
if not is_poisson:
rate_errors = old_div(
data.field("STAT_ERR"), np.atleast_2d(exposure).T
)
if "SYS_ERR" in data.columns.names:
if not treat_as_time_series:
sys_errors = data.field("SYS_ERR")[spectrum_number - 1, :]
else:
sys_errors = data.field("SYS_ERR")
else:
sys_errors = np.zeros(rates.shape)
if has_quality_column:
if not treat_as_time_series:
try:
quality = data.field("QUALITY")[spectrum_number - 1, :]
except (IndexError):
# GBM CSPEC files do not follow OGIP conventions and instead
# list simply QUALITY=0 for each spectrum
# so we have to read them differently
quality_element = data.field("QUALITY")[spectrum_number - 1]
warnings.warn(
"The QUALITY column has the wrong shape. This PHAII file does not follow OGIP standards"
)
if quality_element == 0:
quality = np.zeros_like(rates, dtype=int)
else:
quality = np.zeros_like(rates, dtype=int) + 5
else:
# we need to be careful again because the QUALITY column is not always the correct shape
quality_element = data.field("QUALITY")
if quality_element.shape == rates.shape:
# This is the proper way for the quality to be stored
quality = quality_element
else:
quality = np.zeros_like(rates, dtype=int)
for i, q in enumerate(quality_element):
if q != 0:
quality[i, :] = 5
else:
if is_all_data_good:
quality = np.zeros_like(rates, dtype=int)
else:
quality = np.zeros_like(rates, dtype=int) + 5
if has_tstart:
if has_tstart_column:
if not treat_as_time_series:
tstart = data.field("TSTART")[spectrum_number - 1]
else:
tstart = data.field("TSTART")
if has_tstop:
if has_tstop_column:
if not treat_as_time_series:
tstop = data.field("TSTOP")[spectrum_number - 1]
else:
tstop = data.field("TSTOP")
if has_telapse:
if has_telapse_column:
if not treat_as_time_series:
tstop = tstart + data.field("TELAPSE")[spectrum_number - 1]
else:
tstop = tstart + data.field("TELAPSE")
elif typeII == False:
assert (
not treat_as_time_series
), "This is not a PHAII file but you specified to treat it as a time series"
# PHA 1 file
if has_rates:
rates = data.field(data_column_name)
rate_errors = None
if not is_poisson:
rate_errors = data.field("STAT_ERR")
else:
rates = old_div(data.field(data_column_name), exposure)
rate_errors = None
if not is_poisson:
rate_errors = old_div(data.field("STAT_ERR"), exposure)
if "SYS_ERR" in data.columns.names:
sys_errors = data.field("SYS_ERR")
else:
sys_errors = np.zeros(rates.shape)
if has_quality_column:
quality = data.field("QUALITY")
else:
if is_all_data_good:
quality = np.zeros_like(rates, dtype=int)
else:
quality = np.zeros_like(rates, dtype=int) + 5
# read start and stop times if needed
if has_tstart:
if has_tstart_column:
tstart = data.field("TSTART")
else:
tstart = header["TSTART"]
if has_tstop:
if has_tstop_column:
tstop = data.field("TSTOP")
else:
tstop = header["TSTOP"]
if has_telapse:
if has_telapse_column:
tstop = tstart + data.field("TELAPSE")
else:
tstop = tstart + header["TELAPSE"]
# Now that we have read it, some safety checks
assert rates.shape[0] == gathered_keywords["detchans"], (
"The data column (RATES or COUNTS) has a different number of entries than the "
"DETCHANS declared in the header"
)
quality = Quality.from_ogip(quality)
if not treat_as_time_series:
counts = rates * exposure
if not is_poisson:
count_errors = rate_errors * exposure
else:
count_errors = None
else:
exposure = np.atleast_2d(exposure).T
counts = rates * exposure
if not is_poisson:
count_errors = rate_errors * exposure
else:
count_errors = None
out = collections.OrderedDict(
counts=counts,
count_errors=count_errors,
rates=rates,
rate_errors=rate_errors,
sys_errors=sys_errors,
exposure=exposure,
is_poisson=is_poisson,
rsp=rsp,
gathered_keywords=gathered_keywords,
quality=quality,
file_name=file_name,
tstart=tstart,
tstop=tstop,
)
return out
class PHASpectrum(BinnedSpectrumWithDispersion):
def __init__(
self,
pha_file_or_instance,
spectrum_number=None,
file_type="observed",
rsp_file=None,
arf_file=None,
):
"""
        A spectrum with dispersion built from an OGIP-compliant PHA FITS file. Both Type I & II files can be read. Type II
spectra are selected either by specifying the spectrum_number or via the {spectrum_number} file name convention used
in XSPEC. If the file_type is background, a 3ML InstrumentResponse or subclass must be passed so that the energy
bounds can be obtained.
:param pha_file_or_instance: either a PHA file name or threeML.plugins.OGIP.pha.PHAII instance
:param spectrum_number: (optional) the spectrum number of the TypeII file to be used
:param file_type: observed or background
:param rsp_file: RMF filename or threeML.plugins.OGIP.response.InstrumentResponse instance
        :param arf_file: (optional) an ARF filename
"""
# extract the spectrum number if needed
assert isinstance(pha_file_or_instance, six.string_types) or isinstance(
pha_file_or_instance, PHAII
), "Must provide a FITS file name or PHAII instance"
pha_information = _read_pha_or_pha2_file(
pha_file_or_instance,
spectrum_number,
file_type,
rsp_file,
arf_file,
treat_as_time_series=False,
)
# default the grouping to all open bins
# this will only be altered if the spectrum is rebinned
self._grouping = np.ones_like(pha_information["counts"])
# this saves the extra properties to the class
self._gathered_keywords = pha_information["gathered_keywords"]
self._file_type = file_type
self._file_name = pha_information["file_name"]
# pass the needed spectrum values back up
# remember that Spectrum reads counts, but returns
# rates!
super(PHASpectrum, self).__init__(
counts=pha_information["counts"],
exposure=pha_information["exposure"],
response=pha_information["rsp"],
count_errors=pha_information["count_errors"],
sys_errors=pha_information["sys_errors"],
is_poisson=pha_information["is_poisson"],
quality=pha_information["quality"],
mission=pha_information["gathered_keywords"]["mission"],
instrument=pha_information["gathered_keywords"]["instrument"],
tstart=pha_information["tstart"],
tstop=pha_information["tstop"],
)
def _return_file(self, key):
if key in self._gathered_keywords:
return self._gathered_keywords[key]
else:
return None
def set_ogip_grouping(self, grouping):
"""
If the counts are rebinned, this updates the grouping
:param grouping:
"""
self._grouping = grouping
@property
def filename(self):
return self._file_name
@property
def background_file(self):
"""
        Returns the background file defined in the header, or None if there is none defined
:return: a path to a file, or None
"""
back_file = self._return_file('backfile')
if back_file == "":
back_file = None
return back_file
@property
def scale_factor(self):
"""
This is a scale factor (in the BACKSCAL keyword) which must be used to rescale background and source
regions
:return:
"""
return self._gathered_keywords["backscal"]
@property
def response_file(self):
"""
        Returns the response file defined in the header, or None if there is none defined
:return: a path to a file, or None
"""
return self._return_file("respfile")
@property
def ancillary_file(self):
"""
        Returns the ancillary file defined in the header, or None if there is none defined
:return: a path to a file, or None
"""
return self._return_file("ancrfile")
@property
def grouping(self):
return self._grouping
def clone(
self,
new_counts=None,
new_count_errors=None,
new_exposure=None,
new_scale_factor=None,
):
"""
make a new spectrum with new counts and errors and all other
parameters the same
:param new_exposure: the new exposure for the clone
:param new_scale_factor: the new scale factor for the clone
:param new_counts: new counts for the spectrum
:param new_count_errors: new errors from the spectrum
:return: new pha spectrum
"""
if new_exposure is None:
new_exposure = self.exposure
if new_counts is None:
new_counts = self.counts
new_count_errors = self.count_errors
if new_count_errors is None:
stat_err = None
else:
stat_err = old_div(new_count_errors, new_exposure)
if self._tstart is None:
tstart = 0
else:
tstart = self._tstart
if self._tstop is None:
telapse = new_exposure
else:
telapse = self._tstop - tstart
if new_scale_factor is None:
new_scale_factor = self.scale_factor
# create a new PHAII instance
pha = PHAII(
instrument_name=self.instrument,
telescope_name=self.mission,
tstart=tstart,
telapse=telapse,
channel=list(range(1, len(self) + 1)),
rate=old_div(new_counts, self.exposure),
stat_err=stat_err,
quality=self.quality.to_ogip(),
grouping=self.grouping,
exposure=new_exposure,
backscale=new_scale_factor,
respfile=None,
ancrfile=None,
is_poisson=self.is_poisson,
)
return pha
@classmethod
def from_dispersion_spectrum(cls, dispersion_spectrum, file_type="observed"):
# type: (BinnedSpectrumWithDispersion, str) -> PHASpectrum
if dispersion_spectrum.is_poisson:
rate_errors = None
else:
rate_errors = dispersion_spectrum.rate_errors
if dispersion_spectrum.tstart is None:
tstart = 0
else:
tstart = dispersion_spectrum.tstart
if dispersion_spectrum.tstop is None:
telapse = dispersion_spectrum.exposure
else:
telapse = dispersion_spectrum.tstop - tstart
pha = PHAII(
instrument_name=dispersion_spectrum.instrument,
telescope_name=dispersion_spectrum.mission,
tstart=tstart, # TODO: add this in so that we have proper time!
telapse=telapse,
channel=list(range(1, len(dispersion_spectrum) + 1)),
rate=dispersion_spectrum.rates,
stat_err=rate_errors,
quality=dispersion_spectrum.quality.to_ogip(),
grouping=np.ones(len(dispersion_spectrum)),
exposure=dispersion_spectrum.exposure,
backscale=dispersion_spectrum.scale_factor,
respfile=None,
ancrfile=None,
is_poisson=dispersion_spectrum.is_poisson,
)
return cls(
pha_file_or_instance=pha,
spectrum_number=1,
file_type=file_type,
rsp_file=dispersion_spectrum.response,
)
class PHASpectrumSet(BinnedSpectrumSet):
def __init__(
self, pha_file_or_instance, file_type="observed", rsp_file=None, arf_file=None
):
"""
        A set of spectra with dispersion built from an OGIP-compliant PHA Type II FITS file. All spectra in the file
        are read and collected into a BinnedSpectrumSet. If the file_type is background, a 3ML InstrumentResponse or
        subclass must be passed so that the energy bounds can be obtained.
        :param pha_file_or_instance: either a PHA file name or threeML.plugins.OGIP.pha.PHAII instance
        :param file_type: observed or background
        :param rsp_file: RMF filename or threeML.plugins.OGIP.response.InstrumentResponse instance
        :param arf_file: (optional) an ARF filename
"""
# extract the spectrum number if needed
assert isinstance(pha_file_or_instance, six.string_types) or isinstance(
pha_file_or_instance, PHAII
), "Must provide a FITS file name or PHAII instance"
with fits.open(pha_file_or_instance) as f:
try:
HDUidx = f.index_of("SPECTRUM")
except:
raise RuntimeError(
"The input file %s is not in PHA format" % (pha2_file)
)
spectrum = f[HDUidx]
data = spectrum.data
if "COUNTS" in data.columns.names:
has_rates = False
data_column_name = "COUNTS"
elif "RATE" in data.columns.names:
has_rates = True
data_column_name = "RATE"
else:
raise RuntimeError(
"This file does not contain a RATE nor a COUNTS column. "
"This is not a valid PHA file"
)
# Determine if this is a PHA I or PHA II
if len(data.field(data_column_name).shape) == 2:
num_spectra = data.field(data_column_name).shape[0]
else:
raise RuntimeError("This appears to be a PHA I and not PHA II file")
pha_information = _read_pha_or_pha2_file(
pha_file_or_instance,
None,
file_type,
rsp_file,
arf_file,
treat_as_time_series=True,
)
# default the grouping to all open bins
# this will only be altered if the spectrum is rebinned
self._grouping = np.ones_like(pha_information["counts"])
# this saves the extra properties to the class
self._gathered_keywords = pha_information["gathered_keywords"]
self._file_type = file_type
# need to see if we have count errors, tstart, tstop
        # if not, we create a list of None
if pha_information["count_errors"] is None:
count_errors = [None] * num_spectra
else:
count_errors = pha_information["count_errors"]
if pha_information["tstart"] is None:
tstart = [None] * num_spectra
else:
tstart = pha_information["tstart"]
if pha_information["tstop"] is None:
tstop = [None] * num_spectra
else:
tstop = pha_information["tstop"]
# now build the list of binned spectra
list_of_binned_spectra = []
with progress_bar(num_spectra, title="Loading PHAII spectra") as p:
for i in range(num_spectra):
list_of_binned_spectra.append(
BinnedSpectrumWithDispersion(
counts=pha_information["counts"][i],
exposure=pha_information["exposure"][i, 0],
response=pha_information["rsp"],
count_errors=count_errors[i],
sys_errors=pha_information["sys_errors"][i],
is_poisson=pha_information["is_poisson"],
quality=pha_information["quality"].get_slice(i),
mission=pha_information["gathered_keywords"]["mission"],
instrument=pha_information["gathered_keywords"]["instrument"],
tstart=tstart[i],
tstop=tstop[i],
)
)
p.increase()
# now get the time intervals
start_times = data.field("TIME")
stop_times = data.field("ENDTIME")
time_intervals = TimeIntervalSet.from_starts_and_stops(start_times, stop_times)
reference_time = 0
# see if there is a reference time in the file
if "TRIGTIME" in spectrum.header:
reference_time = spectrum.header["TRIGTIME"]
for t_number in range(spectrum.header["TFIELDS"]):
if "TZERO%d" % t_number in spectrum.header:
reference_time = spectrum.header["TZERO%d" % t_number]
super(PHASpectrumSet, self).__init__(
list_of_binned_spectra,
reference_time=reference_time,
time_intervals=time_intervals,
)
def _return_file(self, key):
if key in self._gathered_keywords:
return self._gathered_keywords[key]
else:
return None
def set_ogip_grouping(self, grouping):
"""
If the counts are rebinned, this updates the grouping
:param grouping:
"""
self._grouping = grouping
@property
def filename(self):
return self._file_name
@property
def background_file(self):
"""
        Returns the background file defined in the header, or None if there is none defined
:return: a path to a file, or None
"""
return self._return_file("backfile")
@property
def scale_factor(self):
"""
This is a scale factor (in the BACKSCAL keyword) which must be used to rescale background and source
regions
:return:
"""
return self._gathered_keywords["backscal"]
@property
def response_file(self):
"""
        Returns the response file defined in the header, or None if there is none defined
:return: a path to a file, or None
"""
return self._return_file("respfile")
@property
def ancillary_file(self):
"""
        Returns the ancillary file defined in the header, or None if there is none defined
:return: a path to a file, or None
"""
return self._return_file("ancrfile")
@property
def grouping(self):
return self._grouping
def clone(
self, new_counts=None, new_count_errors=None,
):
"""
make a new spectrum with new counts and errors and all other
parameters the same
:param new_counts: new counts for the spectrum
:param new_count_errors: new errors from the spectrum
:return: new pha spectrum
"""
if new_counts is None:
new_counts = self.counts
new_count_errors = self.count_errors
if new_count_errors is None:
stat_err = None
else:
stat_err = old_div(new_count_errors, self.exposure)
# create a new PHAII instance
pha = PHAII(
instrument_name=self.instrument,
telescope_name=self.mission,
tstart=0,
telapse=self.exposure,
channel=list(range(1, len(self) + 1)),
rate=old_div(new_counts, self.exposure),
stat_err=stat_err,
quality=self.quality.to_ogip(),
grouping=self.grouping,
exposure=self.exposure,
backscale=self.scale_factor,
respfile=None,
ancrfile=None,
is_poisson=self.is_poisson,
)
return pha
@classmethod
def from_dispersion_spectrum(cls, dispersion_spectrum, file_type="observed"):
# type: (BinnedSpectrumWithDispersion, str) -> PHASpectrum
if dispersion_spectrum.is_poisson:
rate_errors = None
else:
rate_errors = dispersion_spectrum.rate_errors
pha = PHAII(
instrument_name=dispersion_spectrum.instrument,
telescope_name=dispersion_spectrum.mission,
tstart=dispersion_spectrum.tstart,
telapse=dispersion_spectrum.tstop - dispersion_spectrum.tstart,
channel=list(range(1, len(dispersion_spectrum) + 1)),
rate=dispersion_spectrum.rates,
stat_err=rate_errors,
quality=dispersion_spectrum.quality.to_ogip(),
grouping=np.ones(len(dispersion_spectrum)),
exposure=dispersion_spectrum.exposure,
backscale=dispersion_spectrum.scale_factor,
respfile=None,
ancrfile=None,
is_poisson=dispersion_spectrum.is_poisson,
)
return cls(
pha_file_or_instance=pha,
spectrum_number=1,
file_type=file_type,
rsp_file=dispersion_spectrum.response,
)
| bsd-3-clause | 7,735,432,735,798,420,000 | 26.25 | 132 | 0.559604 | false |
pjuu/pjuu | tests/test_parser.py | 1 | 5540 | # -*- coding: utf8 -*-
"""Post backend tests.
:license: AGPL v3, see LICENSE for more details
:copyright: 2014-2021 Joe Doherty
"""
from pjuu.auth.backend import create_account, activate
from pjuu.lib.parser import (parse_hashtags, parse_links, parse_mentions,
parse_post)
from tests import BackendTestCase
class ParserTests(BackendTestCase):
"""Ensure the text parser, parses correctly."""
def test_simple_url_http(self):
"""Simple HTTP urls"""
links = parse_links('Hello http://pjuu.com')
self.assertEqual(links[0]['link'], 'http://pjuu.com')
def test_simple_url_https(self):
"""Simpe HTTPS urls"""
links = parse_links('Hello https://pjuu.com')
self.assertEqual(links[0]['link'], 'https://pjuu.com')
def test_urls_are_fixed(self):
"""Ensure simple link are fixed up."""
links = parse_links('Hello pjuu.com')
self.assertEqual(links[0]['link'], 'http://pjuu.com')
self.assertEqual(links[0]['span'], (6, 14))
def test_anchors_in_urls(self):
"""Query strings and anchor points"""
links = parse_links('https://pjuu.com/joe?page=2#hello')
self.assertEqual(links[0]['link'], 'https://pjuu.com/joe?page=2#hello')
def test_weird_query_strings(self):
"""Ensure strange characters are handled"""
links = parse_links(
'http://pjuu.com:5000/a/post/url?page=1&q=abc,def#something')
self.assertEqual(
links[0]['link'],
'http://pjuu.com:5000/a/post/url?page=1&q=abc,def#something')
def test_hashtags_are_not_parsed(self):
"""Ensure achors are not parsed as hashtags"""
hashtags = parse_hashtags(
'http://pjuu.com:5000/a/post/url?page=1&q=abc,def#something')
self.assertEqual(len(hashtags), 0)
def test_urls_and_hashtags(self):
"""Hashtags intermixed with urls"""
links, mentions, hashtags = parse_post('pjuu.com/#bottom #plop')
self.assertEqual(links[0]['link'], 'http://pjuu.com/#bottom')
self.assertEqual(hashtags[0]['hashtag'], 'plop')
def test_short_hashtags(self):
"""Hashtags musy be more than 1 character long."""
hashtags = parse_hashtags('#cheese #j #jo #joe')
self.assertEqual(hashtags[0]['hashtag'], 'cheese')
self.assertEqual(hashtags[1]['hashtag'], 'jo')
self.assertEqual(hashtags[2]['hashtag'], 'joe')
def test_mention_no_user(self):
"""Find a user mention (doens't exist)"""
mentions = parse_mentions('@joe @ant', check_user=False)
self.assertEqual(mentions[0]['username'], 'joe')
self.assertEqual(mentions[0]['user_id'], 'NA')
self.assertEqual(mentions[0]['span'], (0, 4))
self.assertEqual(mentions[1]['username'], 'ant')
self.assertEqual(mentions[1]['user_id'], 'NA')
self.assertEqual(mentions[1]['span'], (5, 9))
def test_mention_real_user(self):
"""Find a user mentions (user does exist)"""
user1 = create_account('user1', '[email protected]', 'Password1')
activate(user1)
mentions = parse_mentions('@user1 @user2')
self.assertEqual(len(mentions), 1)
self.assertEqual(mentions[0]['username'], 'user1')
self.assertEqual(mentions[0]['user_id'], user1)
self.assertEqual(mentions[0]['span'], (0, 6))
def test_unicode_character(self):
"""Do unicode characters break things."""
user1 = create_account('user1', '[email protected]', 'Password1')
activate(user1)
links, mentions, hashtags = parse_post('၍ @user1, ☂pjuu.com, 㒅 #hash')
self.assertEqual(links[0]['link'], 'http://pjuu.com')
self.assertEqual(mentions[0]['username'], 'user1')
self.assertEqual(hashtags[0]['hashtag'], 'hash')
def test_surrounding_characters(self):
"""Can parse objects be in parenthesis"""
user1 = create_account('user1', '[email protected]', 'Password1')
activate(user1)
links, mentions, hashtags = parse_post('(@user1), (pjuu.com), (#hash)')
self.assertEqual(links[0]['link'], 'http://pjuu.com')
self.assertEqual(mentions[0]['username'], 'user1')
self.assertEqual(hashtags[0]['hashtag'], 'hash')
def test_parenethesis_in_paths(self):
"""Handle URLs surrounded by parenthesis and containing them."""
links = parse_links('(https://pjuu.com/user1)')
self.assertEqual(links[0]['link'], 'https://pjuu.com/user1')
links = parse_links('https://pjuu.com/user1(awesome)')
self.assertEqual(links[0]['link'], 'https://pjuu.com/user1(awesome)')
def test_quoting_mentions_hashtags(self):
"""Parenthesis around items"""
links = parse_links('"https://pjuu.com/user1"')
self.assertEqual(links[0]['link'], 'https://pjuu.com/user1')
hashtags = parse_hashtags('"#pjuu"')
self.assertEqual(hashtags[0]['hashtag'], 'pjuu')
mentions = parse_mentions('"@joe"', check_user=False)
self.assertEqual(mentions[0]['username'], 'joe')
def test_delimited(self):
"""Ensure hashtags can be delimited"""
hashtags = parse_hashtags('#pjuu\'s test')
self.assertEqual(hashtags[0]['hashtag'], 'pjuu')
user1 = create_account('user1', '[email protected]', 'Password1')
activate(user1)
mentions = parse_mentions('@user1\'s')
self.assertEqual(mentions[0]['username'], 'user1')
self.assertEqual(mentions[0]['user_id'], user1)
| agpl-3.0 | -6,101,324,134,235,354,000 | 41.244275 | 79 | 0.612035 | false |
hunch/hunch-gift-app | django/contrib/gis/db/backends/postgis/creation.py | 12 | 2905 | from django.conf import settings
from django.db.backends.postgresql.creation import DatabaseCreation
class PostGISCreation(DatabaseCreation):
geom_index_type = 'GIST'
geom_index_opts = 'GIST_GEOMETRY_OPS'
def sql_indexes_for_field(self, model, f, style):
"Return any spatial index creation SQL for the field."
from django.contrib.gis.db.models.fields import GeometryField
output = super(PostGISCreation, self).sql_indexes_for_field(model, f, style)
if isinstance(f, GeometryField):
gqn = self.connection.ops.geo_quote_name
qn = self.connection.ops.quote_name
db_table = model._meta.db_table
if f.geography:
                # Geography columns are created normally.
pass
else:
# Geometry columns are created by `AddGeometryColumn`
# stored procedure.
output.append(style.SQL_KEYWORD('SELECT ') +
style.SQL_TABLE('AddGeometryColumn') + '(' +
style.SQL_TABLE(gqn(db_table)) + ', ' +
style.SQL_FIELD(gqn(f.column)) + ', ' +
style.SQL_FIELD(str(f.srid)) + ', ' +
style.SQL_COLTYPE(gqn(f.geom_type)) + ', ' +
style.SQL_KEYWORD(str(f.dim)) + ');')
if not f.null:
# Add a NOT NULL constraint to the field
output.append(style.SQL_KEYWORD('ALTER TABLE ') +
style.SQL_TABLE(qn(db_table)) +
style.SQL_KEYWORD(' ALTER ') +
style.SQL_FIELD(qn(f.column)) +
style.SQL_KEYWORD(' SET NOT NULL') + ';')
if f.spatial_index:
# Spatial indexes created the same way for both Geometry and
# Geography columns
if f.geography:
index_opts = ''
else:
index_opts = ' ' + style.SQL_KEYWORD(self.geom_index_opts)
output.append(style.SQL_KEYWORD('CREATE INDEX ') +
style.SQL_TABLE(qn('%s_%s_id' % (db_table, f.column))) +
style.SQL_KEYWORD(' ON ') +
style.SQL_TABLE(qn(db_table)) +
style.SQL_KEYWORD(' USING ') +
style.SQL_COLTYPE(self.geom_index_type) + ' ( ' +
style.SQL_FIELD(qn(f.column)) + index_opts + ' );')
return output
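    # For a hypothetical two-dimensional POINT column "point" with SRID 4326 on table "geo_place",
    # the statements emitted above look roughly like:
    #   SELECT AddGeometryColumn('geo_place', 'point', 4326, 'POINT', 2);
    #   CREATE INDEX "geo_place_point_id" ON "geo_place" USING GIST ( "point" GIST_GEOMETRY_OPS );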
def sql_table_creation_suffix(self):
qn = self.connection.ops.quote_name
return ' TEMPLATE %s' % qn(getattr(settings, 'POSTGIS_TEMPLATE', 'template_postgis'))
| mit | 1,325,786,248,027,895,300 | 46.416667 | 93 | 0.481239 | false |
PiafPowaz/MultiCode | multicode.py | 1 | 2682 | #DEFINE_TYPE_CODE#py
#sDEFINE_TYPE_CODE#py
# -*- coding: utf-8 -*-
import platform
import os
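# Descriptive note: this script prompts for a source file that mixes several languages with
# #DEFINE_TYPE_CODE#<ext> ... #END_DEFINE_TYPE_CODE#<ext> marker pairs and writes each delimited
# block to its own <name>.<ext> file. The optional DEFINE_PATH_TYPE_CODE and
# DEFINE_NAME_FILE_TYPE_CODE markers override the output path or file name, and OS_CMD runs a
# shell command through os.system.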
def main():
if int(platform.python_version_tuple()[0]) < 3:
fullPathFile = raw_input("File's path :")
else:
fullPathFile = input("File's path :")
pathFileNoExt = fullPathFile.split('.')[0]
nameFileNoExt = pathFileNoExt.split('\\')[-1]
pathFile = '/'.join(pathFileNoExt.split('\\')[:-1]) + '/'
if pathFile == []:
pathFile = '/'.join(pathFileNoExt.split('/')[:-1]) + '/'
        nameFileNoExt = pathFileNoExt.split('/')[-1]
newF = None
fileClosed = True
totNewFile = 0
fullPathNewFiles = []
if pathFile == '/':
pathFile = ''
pathNewFile = pathFile
nameNewFile = None
fullPathNewFile = None
with open(fullPathFile, 'r') as f:
for line in f:
define = line.split('#')
if fileClosed:
last_word = define[-1]
last_word = last_word.split('\n')
del define[-1]
define += last_word
for word in define:
if word == 'DEFINE_PATH_TYPE_CODE' and len(define) >= define.index(word)+2:
if nameNewFile == None:
nameCode = '.' + str(define[define.index(word)+1])
nameNewFile = nameFileNoExt + nameCode
pathNewFile = str(define[define.index(word)+2])
fullPathNewFile = pathNewFile + nameNewFile
if word == 'DEFINE_NAME_FILE_TYPE_CODE' and len(define) >= define.index(word)+2:
nameCode = '.' + str(define[define.index(word)+1])
nameNewFile = str(define[define.index(word)+2]) + nameCode
fullPathNewFile = pathNewFile + nameNewFile
if word == 'DEFINE_TYPE_CODE' and len(define) > define.index(word):
if fullPathNewFile == None:
if nameNewFile == None:
nameCode = '.' + str(define[define.index(word)+1])
nameNewFile = nameFileNoExt + nameCode
pathNewFile = pathFile
fullPathNewFile = pathNewFile + nameNewFile
newF = open(fullPathNewFile, 'w')
totNewFile += 1
fullPathNewFiles.append(fullPathNewFile)
fileClosed = False
firstLine = True
if word == 'END_DEFINE_TYPE_CODE' and len(define) > define.index(word):
if not fileClosed:
newF.close()
nameCode = None
fileClosed = True
pathNewFile = pathFile
nameNewFile = None
fullPathNewFile = None
if word == 'OS_CMD' and len(define) > define.index(word):
os.system(str(define[define.index(word)+1]))
if newF != None and not fileClosed:
if not firstLine:
newF.write(line)
else:
firstLine = False
print('New files :', totNewFile)
for fullPathNewFile in fullPathNewFiles:
print(fullPathNewFile)
main()
#sEND_DEFINE_TYPE_CODE#py
#END_DEFINE_TYPE_CODE#py
| gpl-3.0 | 4,394,695,285,595,441,700 | 32.384615 | 84 | 0.629381 | false |
nigelb/simpleui | simpleui/cli_impl/impl.py | 1 | 2102 | # simpleui implements a number of simple UI patterns with fallback to CLI if the
# selected GUI fails.
#
# Copyright (C) 2012 NigelB
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import getpass
class cli_impl:
ui_type = "cli_impl"
def prompt_credentials(self, service):
print "Please enter you your Credentials for %s: "%service
username = raw_input("Username: ")
password = getpass.getpass("Password: ")
return (True, username, password)
def prompt_file_selector(self, title="Enter the filename:", start_dir=".", type=""):
return [raw_input(title)]
def prompt_yes_no(self, message):
input = raw_input("%s [Y/N]: "%message)
if not len(input): return self.prompt_yes_no(message)
try:
return {"Y":True,"N":false}[input[0].upper()]
except Exception as e:
return self.prompt_yes_no(message)
def prompt_list(self, title, prompt, data, multi_select=False):
print(title)
for item in range(len(data)):
print ("\t%i. %s"%(item,data[item]))
toRet = []
if multi_select is False:
return [int(raw_input(prompt))]
else:
print ()
print ('Enter as many as required then enter "f" to finish.')
print ()
try:
while True:
toRet.append(int(raw_input("%s "%prompt)))
except ValueError as v:
pass
print ()
return toRet
| gpl-3.0 | 2,391,133,582,813,218,300 | 33.459016 | 88 | 0.62274 | false |
lowitty/sendtrap | lib/pysnmp/entity/rfc3413/ntforg.py | 3 | 16905 | import sys
from pyasn1.compat.octets import null
from pysnmp.entity.rfc3413 import config
from pysnmp.proto.proxy import rfc2576
from pysnmp.proto.api import v2c
from pysnmp.proto import error
from pysnmp import nextid
from pysnmp import debug
getNextHandle = nextid.Integer(0x7fffffff)
class NotificationOriginator:
acmID = 3 # default MIB access control method to use
def __init__(self, snmpContext):
self.__pendingReqs = {}
self.__pendingNotifications = {}
self.snmpContext = snmpContext
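    # Descriptive overview: sendNotification() expands the notification target tag into one or
    # more transport targets and sends a TRAP or INFORM PDU (notifyType 1 or 2) to each of them.
    # INFORM requests are tracked in self.__pendingReqs and re-sent from processResponsePdu()
    # until a response arrives or the retry count is exhausted; either way the user callback
    # (cbFun) is eventually invoked through _handleResponse().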
def processResponsePdu(
self,
snmpEngine,
messageProcessingModel,
securityModel,
securityName,
securityLevel,
contextEngineId,
contextName,
pduVersion,
PDU,
statusInformation,
sendPduHandle,
cbInfo
):
(cbFun, cbCtx) = cbInfo
# 3.3.6d
if sendPduHandle not in self.__pendingReqs:
raise error.ProtocolError('Missing sendPduHandle %s' % sendPduHandle)
( origTransportDomain,
origTransportAddress,
origMessageProcessingModel,
origSecurityModel,
origSecurityName,
origSecurityLevel,
origContextEngineId,
origContextName,
origPdu,
origTimeout,
origRetryCount,
origRetries,
metaSendPduHandle
) = self.__pendingReqs[sendPduHandle]
del self.__pendingReqs[sendPduHandle]
self.__pendingNotifications[metaSendPduHandle] -= 1
snmpEngine.transportDispatcher.jobFinished(id(self))
if statusInformation:
debug.logger & debug.flagApp and debug.logger('processResponsePdu: metaSendPduHandle %s, sendPduHandle %s statusInformation %s' % (metaSendPduHandle, sendPduHandle, statusInformation))
if origRetries == origRetryCount:
debug.logger & debug.flagApp and debug.logger('processResponsePdu: metaSendPduHandle %s, sendPduHandle %s retry count %d exceeded' % (metaSendPduHandle, sendPduHandle, origRetries))
if not self.__pendingNotifications[metaSendPduHandle]:
del self.__pendingNotifications[metaSendPduHandle]
self._handleResponse(
metaSendPduHandle,
statusInformation['errorIndication'],
0, 0, (),
cbFun,
cbCtx
)
return
# Convert timeout in seconds into timeout in timer ticks
timeoutInTicks = float(origTimeout)/100/snmpEngine.transportDispatcher.getTimerResolution()
# User-side API assumes SMIv2
if messageProcessingModel == 0:
reqPDU = rfc2576.v2ToV1(origPdu)
pduVersion = 0
else:
reqPDU = origPdu
pduVersion = 1
# 3.3.6a
try:
sendPduHandle = snmpEngine.msgAndPduDsp.sendPdu(
snmpEngine,
origTransportDomain,
origTransportAddress,
origMessageProcessingModel,
origSecurityModel,
origSecurityName,
origSecurityLevel,
origContextEngineId,
origContextName,
pduVersion,
reqPDU,
1, # expectResponse
timeoutInTicks,
self.processResponsePdu,
(cbFun, cbCtx)
)
except error.StatusInformation:
statusInformation = sys.exc_info()[1]
debug.logger & debug.flagApp and debug.logger('processResponsePdu: metaSendPduHandle %s: sendPdu() failed with %r ' % (metaSendPduHandle, statusInformation))
if not self.__pendingNotifications[metaSendPduHandle]:
del self.__pendingNotifications[metaSendPduHandle]
self._handleResponse(
metaSendPduHandle,
statusInformation['errorIndication'],
0, 0, (),
cbFun,
cbCtx
)
return
self.__pendingNotifications[metaSendPduHandle] += 1
snmpEngine.transportDispatcher.jobStarted(id(self))
debug.logger & debug.flagApp and debug.logger('processResponsePdu: metaSendPduHandle %s, sendPduHandle %s, timeout %d, retry %d of %d' % (metaSendPduHandle, sendPduHandle, origTimeout, origRetries, origRetryCount))
# 3.3.6b
self.__pendingReqs[sendPduHandle] = (
origTransportDomain,
origTransportAddress,
origMessageProcessingModel,
origSecurityModel,
origSecurityName,
origSecurityLevel,
origContextEngineId,
origContextName,
origPdu,
origTimeout,
origRetryCount,
origRetries + 1,
metaSendPduHandle
)
return
# 3.3.6c
if not self.__pendingNotifications[metaSendPduHandle]:
del self.__pendingNotifications[metaSendPduHandle]
# User-side API assumes SMIv2
if messageProcessingModel == 0:
PDU = rfc2576.v1ToV2(PDU, origPdu)
self._handleResponse(metaSendPduHandle, None,
v2c.apiPDU.getErrorStatus(PDU),
v2c.apiPDU.getErrorIndex(PDU,muteErrors=True),
v2c.apiPDU.getVarBinds(PDU),
cbFun, cbCtx)
def _handleResponse(self,
sendRequestHandle,
errorIndication,
errorStatus, errorIndex,
varBinds,
cbFun, cbCtx):
try:
# we need to pass response PDU information to user for INFORMs
cbFun(sendRequestHandle, errorIndication,
errorStatus, errorIndex, varBinds, cbCtx)
except TypeError:
# a backward compatible way of calling user function
cbFun(sendRequestHandle, errorIndication, cbCtx)
def sendNotification(
self,
snmpEngine,
notificationTarget,
notificationName,
additionalVarBinds=(),
cbFun=None,
cbCtx=None,
contextName=null,
instanceIndex=None
):
debug.logger & debug.flagApp and debug.logger('sendNotification: notificationTarget %s, notificationName %s, additionalVarBinds %s, contextName "%s", instanceIndex %s' % (notificationTarget, notificationName, additionalVarBinds, contextName, instanceIndex))
if contextName:
__SnmpAdminString, = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('SNMP-FRAMEWORK-MIB', 'SnmpAdminString')
contextName = __SnmpAdminString(contextName)
# 3.3
( notifyTag,
notifyType ) = config.getNotificationInfo(
snmpEngine, notificationTarget
)
metaSendPduHandle = getNextHandle()
debug.logger & debug.flagApp and debug.logger('sendNotification: metaSendPduHandle %s, notifyTag %s, notifyType %s' % (metaSendPduHandle, notifyTag, notifyType))
contextMibInstrumCtl = self.snmpContext.getMibInstrum(contextName)
additionalVarBinds = [ (v2c.ObjectIdentifier(x),y) for x,y in additionalVarBinds ]
for targetAddrName in config.getTargetNames(snmpEngine, notifyTag):
( transportDomain,
transportAddress,
timeout,
retryCount,
params ) = config.getTargetAddr(snmpEngine, targetAddrName)
( messageProcessingModel,
securityModel,
securityName,
securityLevel ) = config.getTargetParams(snmpEngine, params)
debug.logger & debug.flagApp and debug.logger('sendNotification: metaSendPduHandle %s, notifyTag %s yields: transportDomain %s, transportAddress %r, securityModel %s, securityName %s, securityLevel %s' % (metaSendPduHandle, notifyTag, transportDomain, transportAddress, securityModel, securityName, securityLevel))
# 3.3.1 XXX
# XXX filtering's yet to be implemented
# filterProfileName = config.getNotifyFilterProfile(params)
# ( filterSubtree,
# filterMask,
# filterType ) = config.getNotifyFilter(filterProfileName)
varBinds = []
# 3.3.2 & 3.3.3
sysUpTime, = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__SNMPv2-MIB', 'sysUpTime')
for varName, varVal in additionalVarBinds:
if varName == sysUpTime.name:
varBinds.append((varName, varVal))
break
if not varBinds:
varBinds.append((sysUpTime.name,
sysUpTime.syntax.clone())) # for actual value
snmpTrapOid, = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__SNMPv2-MIB', 'snmpTrapOID')
if len(notificationName) == 2: # ('MIB', 'symbol')
notificationTypeObject, = contextMibInstrumCtl.mibBuilder.importSymbols(*notificationName)
varBinds.append((snmpTrapOid.name, v2c.ObjectIdentifier(notificationTypeObject.name)))
debug.logger & debug.flagApp and debug.logger('sendNotification: notification type object is %s' % notificationTypeObject)
for notificationObject in notificationTypeObject.getObjects():
mibNode, = contextMibInstrumCtl.mibBuilder.importSymbols(*notificationObject)
if instanceIndex:
mibNode = mibNode.getNode(mibNode.name + instanceIndex)
else:
mibNode = mibNode.getNextNode(mibNode.name)
varBinds.append((mibNode.name, mibNode.syntax))
debug.logger & debug.flagApp and debug.logger('sendNotification: processed notification object %s, instance index %s, var-bind %s' % (notificationObject, instanceIndex is None and "<first>" or instanceIndex, mibNode))
elif notificationName: # numeric OID
varBinds.append(
(snmpTrapOid.name,
snmpTrapOid.syntax.clone(notificationName))
)
else:
varBinds.append((snmpTrapOid.name, snmpTrapOid.syntax))
for varName, varVal in additionalVarBinds:
if varName in (sysUpTime.name, snmpTrapOid.name):
continue
try:
snmpEngine.accessControlModel[self.acmID].isAccessAllowed(
snmpEngine, securityModel, securityName,
securityLevel, 'notify', contextName, varName
)
except error.StatusInformation:
                debug.logger & debug.flagApp and debug.logger('sendNotification: OID %s not allowed for %s, dropping notification' % (varName, securityName))
return
else:
varBinds.append((varName, varVal))
# 3.3.4
if notifyType == 1:
pdu = v2c.SNMPv2TrapPDU()
elif notifyType == 2:
pdu = v2c.InformRequestPDU()
else:
raise RuntimeError()
v2c.apiPDU.setDefaults(pdu)
v2c.apiPDU.setVarBinds(pdu, varBinds)
# User-side API assumes SMIv2
if messageProcessingModel == 0:
reqPDU = rfc2576.v2ToV1(pdu)
pduVersion = 0
else:
reqPDU = pdu
pduVersion = 1
# 3.3.5
if notifyType == 1:
try:
snmpEngine.msgAndPduDsp.sendPdu(
snmpEngine,
transportDomain,
transportAddress,
messageProcessingModel,
securityModel,
securityName,
securityLevel,
self.snmpContext.contextEngineId,
contextName,
pduVersion,
reqPDU,
None
)
except error.StatusInformation:
statusInformation = sys.exc_info()[1]
debug.logger & debug.flagApp and debug.logger('sendReq: metaSendPduHandle %s: sendPdu() failed with %r' % (metaSendPduHandle, statusInformation))
if metaSendPduHandle not in self.__pendingNotifications or \
not self.__pendingNotifications[metaSendPduHandle]:
if metaSendPduHandle in self.__pendingNotifications:
del self.__pendingNotifications[metaSendPduHandle]
self._handleResponse(
metaSendPduHandle,
statusInformation['errorIndication'],
0, 0, (),
cbFun,
cbCtx
)
return metaSendPduHandle
else:
# Convert timeout in seconds into timeout in timer ticks
timeoutInTicks = float(timeout)/100/snmpEngine.transportDispatcher.getTimerResolution()
# 3.3.6a
try:
sendPduHandle = snmpEngine.msgAndPduDsp.sendPdu(
snmpEngine,
transportDomain,
transportAddress,
messageProcessingModel,
securityModel,
securityName,
securityLevel,
self.snmpContext.contextEngineId,
contextName,
pduVersion,
reqPDU,
1, # expectResponse
timeoutInTicks,
self.processResponsePdu,
(cbFun, cbCtx)
)
except error.StatusInformation:
statusInformation = sys.exc_info()[1]
debug.logger & debug.flagApp and debug.logger('sendReq: metaSendPduHandle %s: sendPdu() failed with %r' % (metaSendPduHandle, statusInformation))
if metaSendPduHandle not in self.__pendingNotifications or \
not self.__pendingNotifications[metaSendPduHandle]:
if metaSendPduHandle in self.__pendingNotifications:
del self.__pendingNotifications[metaSendPduHandle]
self._handleResponse(
metaSendPduHandle,
statusInformation['errorIndication'],
0, 0, (),
cbFun,
cbCtx
)
return metaSendPduHandle
debug.logger & debug.flagApp and debug.logger('sendNotification: metaSendPduHandle %s, sendPduHandle %s, timeout %d' % (metaSendPduHandle, sendPduHandle, timeout))
# 3.3.6b
self.__pendingReqs[sendPduHandle] = (
transportDomain,
transportAddress,
messageProcessingModel,
securityModel,
securityName,
securityLevel,
self.snmpContext.contextEngineId,
contextName,
pdu,
timeout,
retryCount,
1,
metaSendPduHandle
)
if metaSendPduHandle not in self.__pendingNotifications:
self.__pendingNotifications[metaSendPduHandle] = 0
self.__pendingNotifications[metaSendPduHandle] += 1
snmpEngine.transportDispatcher.jobStarted(id(self))
debug.logger & debug.flagApp and debug.logger('sendNotification: metaSendPduHandle %s, notification(s) sent' % metaSendPduHandle)
return metaSendPduHandle
# XXX
# move/group/implement config setting/retrieval at a stand-alone module
| mit | 5,982,526,749,757,127,000 | 42.235294 | 326 | 0.542325 | false |
40223148/2015cda_g5 | static/Brython3.1.1-20150328-091302/Lib/urllib/parse.py | 735 | 35170 | """Parse (absolute and relative) URLs.
urlparse module is based upon the following RFC specifications.
RFC 3986 (STD66): "Uniform Resource Identifiers" by T. Berners-Lee, R. Fielding
and L. Masinter, January 2005.
RFC 2732 : "Format for Literal IPv6 Addresses in URL's by R.Hinden, B.Carpenter
and L.Masinter, December 1999.
RFC 2396: "Uniform Resource Identifiers (URI)": Generic Syntax by T.
Berners-Lee, R. Fielding, and L. Masinter, August 1998.
RFC 2368: "The mailto URL scheme", by P.Hoffman , L Masinter, J. Zawinski, July 1998.
RFC 1808: "Relative Uniform Resource Locators", by R. Fielding, UC Irvine, June
1995.
RFC 1738: "Uniform Resource Locators (URL)" by T. Berners-Lee, L. Masinter, M.
McCahill, December 1994
RFC 3986 is considered the current standard and any future changes to
urlparse module should conform with it. The urlparse module is
currently not entirely compliant with this RFC due to defacto
scenarios for parsing, and for backward compatibility purposes, some
parsing quirks from older RFCs are retained. The testcases in
test_urlparse.py provides a good indicator of parsing behavior.
"""
import re
import sys
import collections
__all__ = ["urlparse", "urlunparse", "urljoin", "urldefrag",
"urlsplit", "urlunsplit", "urlencode", "parse_qs",
"parse_qsl", "quote", "quote_plus", "quote_from_bytes",
"unquote", "unquote_plus", "unquote_to_bytes"]
# A classification of schemes ('' means apply by default)
uses_relative = ['ftp', 'http', 'gopher', 'nntp', 'imap',
'wais', 'file', 'https', 'shttp', 'mms',
'prospero', 'rtsp', 'rtspu', '', 'sftp',
'svn', 'svn+ssh']
uses_netloc = ['ftp', 'http', 'gopher', 'nntp', 'telnet',
'imap', 'wais', 'file', 'mms', 'https', 'shttp',
'snews', 'prospero', 'rtsp', 'rtspu', 'rsync', '',
'svn', 'svn+ssh', 'sftp', 'nfs', 'git', 'git+ssh']
uses_params = ['ftp', 'hdl', 'prospero', 'http', 'imap',
'https', 'shttp', 'rtsp', 'rtspu', 'sip', 'sips',
'mms', '', 'sftp', 'tel']
# These are not actually used anymore, but should stay for backwards
# compatibility. (They are undocumented, but have a public-looking name.)
non_hierarchical = ['gopher', 'hdl', 'mailto', 'news',
'telnet', 'wais', 'imap', 'snews', 'sip', 'sips']
uses_query = ['http', 'wais', 'imap', 'https', 'shttp', 'mms',
'gopher', 'rtsp', 'rtspu', 'sip', 'sips', '']
uses_fragment = ['ftp', 'hdl', 'http', 'gopher', 'news',
'nntp', 'wais', 'https', 'shttp', 'snews',
'file', 'prospero', '']
# Characters valid in scheme names
scheme_chars = ('abcdefghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
'0123456789'
'+-.')
# XXX: Consider replacing with functools.lru_cache
MAX_CACHE_SIZE = 20
_parse_cache = {}
def clear_cache():
"""Clear the parse cache and the quoters cache."""
_parse_cache.clear()
_safe_quoters.clear()
# Helpers for bytes handling
# For 3.2, we deliberately require applications that
# handle improperly quoted URLs to do their own
# decoding and encoding. If valid use cases are
# presented, we may relax this by using latin-1
# decoding internally for 3.3
_implicit_encoding = 'ascii'
_implicit_errors = 'strict'
def _noop(obj):
return obj
def _encode_result(obj, encoding=_implicit_encoding,
errors=_implicit_errors):
return obj.encode(encoding, errors)
def _decode_args(args, encoding=_implicit_encoding,
errors=_implicit_errors):
return tuple(x.decode(encoding, errors) if x else '' for x in args)
def _coerce_args(*args):
# Invokes decode if necessary to create str args
# and returns the coerced inputs along with
# an appropriate result coercion function
# - noop for str inputs
# - encoding function otherwise
str_input = isinstance(args[0], str)
for arg in args[1:]:
# We special-case the empty string to support the
# "scheme=''" default argument to some functions
if arg and isinstance(arg, str) != str_input:
raise TypeError("Cannot mix str and non-str arguments")
if str_input:
return args + (_noop,)
return _decode_args(args) + (_encode_result,)
# Result objects are more helpful than simple tuples
class _ResultMixinStr(object):
"""Standard approach to encoding parsed results from str to bytes"""
__slots__ = ()
def encode(self, encoding='ascii', errors='strict'):
return self._encoded_counterpart(*(x.encode(encoding, errors) for x in self))
class _ResultMixinBytes(object):
"""Standard approach to decoding parsed results from bytes to str"""
__slots__ = ()
def decode(self, encoding='ascii', errors='strict'):
return self._decoded_counterpart(*(x.decode(encoding, errors) for x in self))
class _NetlocResultMixinBase(object):
"""Shared methods for the parsed result objects containing a netloc element"""
__slots__ = ()
@property
def username(self):
return self._userinfo[0]
@property
def password(self):
return self._userinfo[1]
@property
def hostname(self):
hostname = self._hostinfo[0]
if not hostname:
hostname = None
elif hostname is not None:
hostname = hostname.lower()
return hostname
@property
def port(self):
port = self._hostinfo[1]
if port is not None:
port = int(port, 10)
# Return None on an illegal port
if not ( 0 <= port <= 65535):
return None
return port
class _NetlocResultMixinStr(_NetlocResultMixinBase, _ResultMixinStr):
__slots__ = ()
@property
def _userinfo(self):
netloc = self.netloc
userinfo, have_info, hostinfo = netloc.rpartition('@')
if have_info:
username, have_password, password = userinfo.partition(':')
if not have_password:
password = None
else:
username = password = None
return username, password
@property
def _hostinfo(self):
netloc = self.netloc
_, _, hostinfo = netloc.rpartition('@')
_, have_open_br, bracketed = hostinfo.partition('[')
if have_open_br:
hostname, _, port = bracketed.partition(']')
_, have_port, port = port.partition(':')
else:
hostname, have_port, port = hostinfo.partition(':')
if not have_port:
port = None
return hostname, port
class _NetlocResultMixinBytes(_NetlocResultMixinBase, _ResultMixinBytes):
__slots__ = ()
@property
def _userinfo(self):
netloc = self.netloc
userinfo, have_info, hostinfo = netloc.rpartition(b'@')
if have_info:
username, have_password, password = userinfo.partition(b':')
if not have_password:
password = None
else:
username = password = None
return username, password
@property
def _hostinfo(self):
netloc = self.netloc
_, _, hostinfo = netloc.rpartition(b'@')
_, have_open_br, bracketed = hostinfo.partition(b'[')
if have_open_br:
hostname, _, port = bracketed.partition(b']')
_, have_port, port = port.partition(b':')
else:
hostname, have_port, port = hostinfo.partition(b':')
if not have_port:
port = None
return hostname, port
from collections import namedtuple
_DefragResultBase = namedtuple('DefragResult', 'url fragment')
_SplitResultBase = namedtuple('SplitResult', 'scheme netloc path query fragment')
_ParseResultBase = namedtuple('ParseResult', 'scheme netloc path params query fragment')
# For backwards compatibility, alias _NetlocResultMixinStr
# ResultBase is no longer part of the documented API, but it is
# retained since deprecating it isn't worth the hassle
ResultBase = _NetlocResultMixinStr
# Structured result objects for string data
class DefragResult(_DefragResultBase, _ResultMixinStr):
__slots__ = ()
def geturl(self):
if self.fragment:
return self.url + '#' + self.fragment
else:
return self.url
class SplitResult(_SplitResultBase, _NetlocResultMixinStr):
__slots__ = ()
def geturl(self):
return urlunsplit(self)
class ParseResult(_ParseResultBase, _NetlocResultMixinStr):
__slots__ = ()
def geturl(self):
return urlunparse(self)
# Structured result objects for bytes data
class DefragResultBytes(_DefragResultBase, _ResultMixinBytes):
__slots__ = ()
def geturl(self):
if self.fragment:
return self.url + b'#' + self.fragment
else:
return self.url
class SplitResultBytes(_SplitResultBase, _NetlocResultMixinBytes):
__slots__ = ()
def geturl(self):
return urlunsplit(self)
class ParseResultBytes(_ParseResultBase, _NetlocResultMixinBytes):
__slots__ = ()
def geturl(self):
return urlunparse(self)
# Set up the encode/decode result pairs
def _fix_result_transcoding():
_result_pairs = (
(DefragResult, DefragResultBytes),
(SplitResult, SplitResultBytes),
(ParseResult, ParseResultBytes),
)
for _decoded, _encoded in _result_pairs:
_decoded._encoded_counterpart = _encoded
_encoded._decoded_counterpart = _decoded
_fix_result_transcoding()
del _fix_result_transcoding
def urlparse(url, scheme='', allow_fragments=True):
"""Parse a URL into 6 components:
<scheme>://<netloc>/<path>;<params>?<query>#<fragment>
Return a 6-tuple: (scheme, netloc, path, params, query, fragment).
Note that we don't break the components up in smaller bits
(e.g. netloc is a single string) and we don't expand % escapes."""
url, scheme, _coerce_result = _coerce_args(url, scheme)
splitresult = urlsplit(url, scheme, allow_fragments)
scheme, netloc, url, query, fragment = splitresult
if scheme in uses_params and ';' in url:
url, params = _splitparams(url)
else:
params = ''
result = ParseResult(scheme, netloc, url, params, query, fragment)
return _coerce_result(result)
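# Illustrative example of the parse performed above:
#   urlparse('http://netloc/path;param?q=1#frag') returns
#   ParseResult(scheme='http', netloc='netloc', path='/path', params='param',
#               query='q=1', fragment='frag')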
def _splitparams(url):
if '/' in url:
i = url.find(';', url.rfind('/'))
if i < 0:
return url, ''
else:
i = url.find(';')
return url[:i], url[i+1:]
def _splitnetloc(url, start=0):
delim = len(url) # position of end of domain part of url, default is end
for c in '/?#': # look for delimiters; the order is NOT important
wdelim = url.find(c, start) # find first of this delim
if wdelim >= 0: # if found
delim = min(delim, wdelim) # use earliest delim position
return url[start:delim], url[delim:] # return (domain, rest)
def urlsplit(url, scheme='', allow_fragments=True):
"""Parse a URL into 5 components:
<scheme>://<netloc>/<path>?<query>#<fragment>
Return a 5-tuple: (scheme, netloc, path, query, fragment).
Note that we don't break the components up in smaller bits
(e.g. netloc is a single string) and we don't expand % escapes."""
url, scheme, _coerce_result = _coerce_args(url, scheme)
allow_fragments = bool(allow_fragments)
key = url, scheme, allow_fragments, type(url), type(scheme)
cached = _parse_cache.get(key, None)
if cached:
return _coerce_result(cached)
if len(_parse_cache) >= MAX_CACHE_SIZE: # avoid runaway growth
clear_cache()
netloc = query = fragment = ''
i = url.find(':')
if i > 0:
if url[:i] == 'http': # optimize the common case
scheme = url[:i].lower()
url = url[i+1:]
if url[:2] == '//':
netloc, url = _splitnetloc(url, 2)
if (('[' in netloc and ']' not in netloc) or
(']' in netloc and '[' not in netloc)):
raise ValueError("Invalid IPv6 URL")
if allow_fragments and '#' in url:
url, fragment = url.split('#', 1)
if '?' in url:
url, query = url.split('?', 1)
v = SplitResult(scheme, netloc, url, query, fragment)
_parse_cache[key] = v
return _coerce_result(v)
for c in url[:i]:
if c not in scheme_chars:
break
else:
# make sure "url" is not actually a port number (in which case
# "scheme" is really part of the path)
rest = url[i+1:]
if not rest or any(c not in '0123456789' for c in rest):
# not a port number
scheme, url = url[:i].lower(), rest
if url[:2] == '//':
netloc, url = _splitnetloc(url, 2)
if (('[' in netloc and ']' not in netloc) or
(']' in netloc and '[' not in netloc)):
raise ValueError("Invalid IPv6 URL")
if allow_fragments and '#' in url:
url, fragment = url.split('#', 1)
if '?' in url:
url, query = url.split('?', 1)
v = SplitResult(scheme, netloc, url, query, fragment)
_parse_cache[key] = v
return _coerce_result(v)
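# Illustrative example:
#   urlsplit('http://www.example.com:80/index.html?x=1#top') returns
#   SplitResult(scheme='http', netloc='www.example.com:80', path='/index.html',
#               query='x=1', fragment='top')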
def urlunparse(components):
"""Put a parsed URL back together again. This may result in a
slightly different, but equivalent URL, if the URL that was parsed
originally had redundant delimiters, e.g. a ? with an empty query
(the draft states that these are equivalent)."""
scheme, netloc, url, params, query, fragment, _coerce_result = (
_coerce_args(*components))
if params:
url = "%s;%s" % (url, params)
return _coerce_result(urlunsplit((scheme, netloc, url, query, fragment)))
def urlunsplit(components):
"""Combine the elements of a tuple as returned by urlsplit() into a
complete URL as a string. The data argument can be any five-item iterable.
This may result in a slightly different, but equivalent URL, if the URL that
was parsed originally had unnecessary delimiters (for example, a ? with an
empty query; the RFC states that these are equivalent)."""
scheme, netloc, url, query, fragment, _coerce_result = (
_coerce_args(*components))
if netloc or (scheme and scheme in uses_netloc and url[:2] != '//'):
if url and url[:1] != '/': url = '/' + url
url = '//' + (netloc or '') + url
if scheme:
url = scheme + ':' + url
if query:
url = url + '?' + query
if fragment:
url = url + '#' + fragment
return _coerce_result(url)
def urljoin(base, url, allow_fragments=True):
"""Join a base URL and a possibly relative URL to form an absolute
interpretation of the latter."""
if not base:
return url
if not url:
return base
base, url, _coerce_result = _coerce_args(base, url)
bscheme, bnetloc, bpath, bparams, bquery, bfragment = \
urlparse(base, '', allow_fragments)
scheme, netloc, path, params, query, fragment = \
urlparse(url, bscheme, allow_fragments)
if scheme != bscheme or scheme not in uses_relative:
return _coerce_result(url)
if scheme in uses_netloc:
if netloc:
return _coerce_result(urlunparse((scheme, netloc, path,
params, query, fragment)))
netloc = bnetloc
if path[:1] == '/':
return _coerce_result(urlunparse((scheme, netloc, path,
params, query, fragment)))
if not path and not params:
path = bpath
params = bparams
if not query:
query = bquery
return _coerce_result(urlunparse((scheme, netloc, path,
params, query, fragment)))
segments = bpath.split('/')[:-1] + path.split('/')
# XXX The stuff below is bogus in various ways...
if segments[-1] == '.':
segments[-1] = ''
while '.' in segments:
segments.remove('.')
while 1:
i = 1
n = len(segments) - 1
while i < n:
if (segments[i] == '..'
and segments[i-1] not in ('', '..')):
del segments[i-1:i+1]
break
i = i+1
else:
break
if segments == ['', '..']:
segments[-1] = ''
elif len(segments) >= 2 and segments[-1] == '..':
segments[-2:] = ['']
return _coerce_result(urlunparse((scheme, netloc, '/'.join(segments),
params, query, fragment)))
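# Illustrative examples of the relative resolution above (per RFC 3986):
#   urljoin('http://a/b/c/d;p?q', 'g')    == 'http://a/b/c/g'
#   urljoin('http://a/b/c/d;p?q', '../g') == 'http://a/b/g'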
def urldefrag(url):
"""Removes any existing fragment from URL.
Returns a tuple of the defragmented URL and the fragment. If
the URL contained no fragments, the second element is the
empty string.
"""
url, _coerce_result = _coerce_args(url)
if '#' in url:
s, n, p, a, q, frag = urlparse(url)
defrag = urlunparse((s, n, p, a, q, ''))
else:
frag = ''
defrag = url
return _coerce_result(DefragResult(defrag, frag))
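# Illustrative example:
#   urldefrag('http://python.org/faq.html#general') returns
#   DefragResult(url='http://python.org/faq.html', fragment='general')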
_hexdig = '0123456789ABCDEFabcdef'
_hextobyte = {(a + b).encode(): bytes([int(a + b, 16)])
for a in _hexdig for b in _hexdig}
def unquote_to_bytes(string):
"""unquote_to_bytes('abc%20def') -> b'abc def'."""
# Note: strings are encoded as UTF-8. This is only an issue if it contains
# unescaped non-ASCII characters, which URIs should not.
if not string:
# Is it a string-like object?
string.split
return b''
if isinstance(string, str):
string = string.encode('utf-8')
bits = string.split(b'%')
if len(bits) == 1:
return string
res = [bits[0]]
append = res.append
for item in bits[1:]:
try:
append(_hextobyte[item[:2]])
append(item[2:])
except KeyError:
append(b'%')
append(item)
return b''.join(res)
_asciire = re.compile('([\x00-\x7f]+)')
def unquote(string, encoding='utf-8', errors='replace'):
"""Replace %xx escapes by their single-character equivalent. The optional
encoding and errors parameters specify how to decode percent-encoded
sequences into Unicode characters, as accepted by the bytes.decode()
method.
By default, percent-encoded sequences are decoded with UTF-8, and invalid
sequences are replaced by a placeholder character.
unquote('abc%20def') -> 'abc def'.
"""
if '%' not in string:
string.split
return string
if encoding is None:
encoding = 'utf-8'
if errors is None:
errors = 'replace'
bits = _asciire.split(string)
res = [bits[0]]
append = res.append
for i in range(1, len(bits), 2):
append(unquote_to_bytes(bits[i]).decode(encoding, errors))
append(bits[i + 1])
return ''.join(res)
def parse_qs(qs, keep_blank_values=False, strict_parsing=False,
encoding='utf-8', errors='replace'):
"""Parse a query given as a string argument.
Arguments:
qs: percent-encoded query string to be parsed
keep_blank_values: flag indicating whether blank values in
percent-encoded queries should be treated as blank strings.
A true value indicates that blanks should be retained as
blank strings. The default false value indicates that
blank values are to be ignored and treated as if they were
not included.
strict_parsing: flag indicating what to do with parsing errors.
If false (the default), errors are silently ignored.
If true, errors raise a ValueError exception.
encoding and errors: specify how to decode percent-encoded sequences
into Unicode characters, as accepted by the bytes.decode() method.
"""
parsed_result = {}
pairs = parse_qsl(qs, keep_blank_values, strict_parsing,
encoding=encoding, errors=errors)
for name, value in pairs:
if name in parsed_result:
parsed_result[name].append(value)
else:
parsed_result[name] = [value]
return parsed_result
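# Illustrative example:
#   parse_qs('key=v1&key=v2&x=3') returns {'key': ['v1', 'v2'], 'x': ['3']}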
def parse_qsl(qs, keep_blank_values=False, strict_parsing=False,
encoding='utf-8', errors='replace'):
"""Parse a query given as a string argument.
Arguments:
qs: percent-encoded query string to be parsed
keep_blank_values: flag indicating whether blank values in
percent-encoded queries should be treated as blank strings. A
true value indicates that blanks should be retained as blank
strings. The default false value indicates that blank values
are to be ignored and treated as if they were not included.
strict_parsing: flag indicating what to do with parsing errors. If
false (the default), errors are silently ignored. If true,
errors raise a ValueError exception.
encoding and errors: specify how to decode percent-encoded sequences
into Unicode characters, as accepted by the bytes.decode() method.
Returns a list, as G-d intended.
"""
qs, _coerce_result = _coerce_args(qs)
pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
r = []
for name_value in pairs:
if not name_value and not strict_parsing:
continue
nv = name_value.split('=', 1)
if len(nv) != 2:
if strict_parsing:
raise ValueError("bad query field: %r" % (name_value,))
# Handle case of a control-name with no equal sign
if keep_blank_values:
nv.append('')
else:
continue
if len(nv[1]) or keep_blank_values:
name = nv[0].replace('+', ' ')
name = unquote(name, encoding=encoding, errors=errors)
name = _coerce_result(name)
value = nv[1].replace('+', ' ')
value = unquote(value, encoding=encoding, errors=errors)
value = _coerce_result(value)
r.append((name, value))
return r
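# Illustrative example:
#   parse_qsl('key=v1&key=v2&x=3') returns [('key', 'v1'), ('key', 'v2'), ('x', '3')]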
def unquote_plus(string, encoding='utf-8', errors='replace'):
"""Like unquote(), but also replace plus signs by spaces, as required for
unquoting HTML form values.
unquote_plus('%7e/abc+def') -> '~/abc def'
"""
string = string.replace('+', ' ')
return unquote(string, encoding, errors)
_ALWAYS_SAFE = frozenset(b'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
b'abcdefghijklmnopqrstuvwxyz'
b'0123456789'
b'_.-')
_ALWAYS_SAFE_BYTES = bytes(_ALWAYS_SAFE)
_safe_quoters = {}
class Quoter(collections.defaultdict):
"""A mapping from bytes (in range(0,256)) to strings.
    String values are percent-encoded byte values, unless the key is < 128 and
    in the "safe" set (either the specified safe set or the default set), in
    which case the literal character is used.
"""
# Keeps a cache internally, using defaultdict, for efficiency (lookups
# of cached keys don't call Python code at all).
def __init__(self, safe):
"""safe: bytes object."""
self.safe = _ALWAYS_SAFE.union(safe)
def __repr__(self):
# Without this, will just display as a defaultdict
return "<Quoter %r>" % dict(self)
def __missing__(self, b):
# Handle a cache miss. Store quoted string in cache and return.
res = chr(b) if b in self.safe else '%{:02X}'.format(b)
self[b] = res
return res
def quote(string, safe='/', encoding=None, errors=None):
"""quote('abc def') -> 'abc%20def'
Each part of a URL, e.g. the path info, the query, etc., has a
different set of reserved characters that must be quoted.
RFC 2396 Uniform Resource Identifiers (URI): Generic Syntax lists
the following reserved characters.
reserved = ";" | "/" | "?" | ":" | "@" | "&" | "=" | "+" |
"$" | ","
Each of these characters is reserved in some component of a URL,
but not necessarily in all of them.
By default, the quote function is intended for quoting the path
section of a URL. Thus, it will not encode '/'. This character
is reserved, but in typical usage the quote function is being
called on a path where the existing slash characters are used as
reserved characters.
string and safe may be either str or bytes objects. encoding must
not be specified if string is a str.
The optional encoding and errors parameters specify how to deal with
non-ASCII characters, as accepted by the str.encode method.
By default, encoding='utf-8' (characters are encoded with UTF-8), and
errors='strict' (unsupported characters raise a UnicodeEncodeError).
"""
if isinstance(string, str):
if not string:
return string
if encoding is None:
encoding = 'utf-8'
if errors is None:
errors = 'strict'
string = string.encode(encoding, errors)
else:
if encoding is not None:
raise TypeError("quote() doesn't support 'encoding' for bytes")
if errors is not None:
raise TypeError("quote() doesn't support 'errors' for bytes")
return quote_from_bytes(string, safe)
def quote_plus(string, safe='', encoding=None, errors=None):
"""Like quote(), but also replace ' ' with '+', as required for quoting
HTML form values. Plus signs in the original string are escaped unless
they are included in safe. It also does not have safe default to '/'.
"""
# Check if ' ' in string, where string may either be a str or bytes. If
# there are no spaces, the regular quote will produce the right answer.
if ((isinstance(string, str) and ' ' not in string) or
(isinstance(string, bytes) and b' ' not in string)):
return quote(string, safe, encoding, errors)
if isinstance(safe, str):
space = ' '
else:
space = b' '
string = quote(string, safe + space, encoding, errors)
return string.replace(' ', '+')
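# --- Illustrative sketch (editor's addition) ---
# quote() keeps '/' safe by default and encodes spaces as %20; quote_plus() uses
# '+' for spaces and does not treat '/' as safe, so it gets escaped.
def _demo_quote_plus():
    assert quote('abc def/ghi') == 'abc%20def/ghi'
    assert quote_plus('abc def/ghi') == 'abc+def%2Fghi'
    assert quote('El Ni\u00f1o') == 'El%20Ni%C3%B1o'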
def quote_from_bytes(bs, safe='/'):
"""Like quote(), but accepts a bytes object rather than a str, and does
not perform string-to-bytes encoding. It always returns an ASCII string.
    quote_from_bytes(b'abc def\x3f') -> 'abc%20def%3F'
"""
if not isinstance(bs, (bytes, bytearray)):
raise TypeError("quote_from_bytes() expected bytes")
if not bs:
return ''
if isinstance(safe, str):
# Normalize 'safe' by converting to bytes and removing non-ASCII chars
safe = safe.encode('ascii', 'ignore')
else:
safe = bytes([c for c in safe if c < 128])
if not bs.rstrip(_ALWAYS_SAFE_BYTES + safe):
return bs.decode()
try:
quoter = _safe_quoters[safe]
except KeyError:
_safe_quoters[safe] = quoter = Quoter(safe).__getitem__
return ''.join([quoter(char) for char in bs])
def urlencode(query, doseq=False, safe='', encoding=None, errors=None):
"""Encode a sequence of two-element tuples or dictionary into a URL query string.
If any values in the query arg are sequences and doseq is true, each
sequence element is converted to a separate parameter.
If the query arg is a sequence of two-element tuples, the order of the
parameters in the output will match the order of parameters in the
input.
    The query arg may be either a string or a bytes type. When the query arg is
    a string, the safe, encoding and errors parameters are passed to quote_plus()
    for encoding.
"""
if hasattr(query, "items"):
query = query.items()
else:
# It's a bother at times that strings and string-like objects are
# sequences.
try:
# non-sequence items should not work with len()
# non-empty strings will fail this
if len(query) and not isinstance(query[0], tuple):
raise TypeError
# Zero-length sequences of all types will get here and succeed,
# but that's a minor nit. Since the original implementation
# allowed empty dicts that type of behavior probably should be
# preserved for consistency
except TypeError:
ty, va, tb = sys.exc_info()
raise TypeError("not a valid non-string sequence "
"or mapping object").with_traceback(tb)
l = []
if not doseq:
for k, v in query:
if isinstance(k, bytes):
k = quote_plus(k, safe)
else:
k = quote_plus(str(k), safe, encoding, errors)
if isinstance(v, bytes):
v = quote_plus(v, safe)
else:
v = quote_plus(str(v), safe, encoding, errors)
l.append(k + '=' + v)
else:
for k, v in query:
if isinstance(k, bytes):
k = quote_plus(k, safe)
else:
k = quote_plus(str(k), safe, encoding, errors)
if isinstance(v, bytes):
v = quote_plus(v, safe)
l.append(k + '=' + v)
elif isinstance(v, str):
v = quote_plus(v, safe, encoding, errors)
l.append(k + '=' + v)
else:
try:
# Is this a sufficient test for sequence-ness?
x = len(v)
except TypeError:
# not a sequence
v = quote_plus(str(v), safe, encoding, errors)
l.append(k + '=' + v)
else:
# loop over the sequence
for elt in v:
if isinstance(elt, bytes):
elt = quote_plus(elt, safe)
else:
elt = quote_plus(str(elt), safe, encoding, errors)
l.append(k + '=' + elt)
return '&'.join(l)
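# --- Illustrative sketch (editor's addition) ---
# urlencode() preserves the order of a sequence of pairs; with doseq=True each
# element of a sequence value becomes its own key=value pair.
def _demo_urlencode():
    assert urlencode([('q', 'a b'), ('n', 1)]) == 'q=a+b&n=1'
    assert urlencode([('k', [1, 2])], doseq=True) == 'k=1&k=2'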
# Utilities to parse URLs (most of these return None for missing parts):
# unwrap('<URL:type://host/path>') --> 'type://host/path'
# splittype('type:opaquestring') --> 'type', 'opaquestring'
# splithost('//host[:port]/path') --> 'host[:port]', '/path'
# splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'
# splitpasswd('user:passwd') -> 'user', 'passwd'
# splitport('host:port') --> 'host', 'port'
# splitquery('/path?query') --> '/path', 'query'
# splittag('/path#tag') --> '/path', 'tag'
# splitattr('/path;attr1=value1;attr2=value2;...') ->
# '/path', ['attr1=value1', 'attr2=value2', ...]
# splitvalue('attr=value') --> 'attr', 'value'
# urllib.parse.unquote('abc%20def') -> 'abc def'
# quote('abc def') -> 'abc%20def'
def to_bytes(url):
"""to_bytes(u"URL") --> 'URL'."""
# Most URL schemes require ASCII. If that changes, the conversion
# can be relaxed.
# XXX get rid of to_bytes()
if isinstance(url, str):
try:
url = url.encode("ASCII").decode()
except UnicodeError:
raise UnicodeError("URL " + repr(url) +
" contains non-ASCII characters")
return url
def unwrap(url):
"""unwrap('<URL:type://host/path>') --> 'type://host/path'."""
url = str(url).strip()
if url[:1] == '<' and url[-1:] == '>':
url = url[1:-1].strip()
if url[:4] == 'URL:': url = url[4:].strip()
return url
_typeprog = None
def splittype(url):
"""splittype('type:opaquestring') --> 'type', 'opaquestring'."""
global _typeprog
if _typeprog is None:
import re
_typeprog = re.compile('^([^/:]+):')
match = _typeprog.match(url)
if match:
scheme = match.group(1)
return scheme.lower(), url[len(scheme) + 1:]
return None, url
_hostprog = None
def splithost(url):
"""splithost('//host[:port]/path') --> 'host[:port]', '/path'."""
global _hostprog
if _hostprog is None:
import re
_hostprog = re.compile('^//([^/?]*)(.*)$')
match = _hostprog.match(url)
if match:
host_port = match.group(1)
path = match.group(2)
if path and not path.startswith('/'):
path = '/' + path
return host_port, path
return None, url
_userprog = None
def splituser(host):
"""splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'."""
global _userprog
if _userprog is None:
import re
_userprog = re.compile('^(.*)@(.*)$')
match = _userprog.match(host)
if match: return match.group(1, 2)
return None, host
_passwdprog = None
def splitpasswd(user):
"""splitpasswd('user:passwd') -> 'user', 'passwd'."""
global _passwdprog
if _passwdprog is None:
import re
        _passwdprog = re.compile('^([^:]*):(.*)$', re.S)
match = _passwdprog.match(user)
if match: return match.group(1, 2)
return user, None
# splittag('/path#tag') --> '/path', 'tag'
_portprog = None
def splitport(host):
"""splitport('host:port') --> 'host', 'port'."""
global _portprog
if _portprog is None:
import re
_portprog = re.compile('^(.*):([0-9]+)$')
match = _portprog.match(host)
if match: return match.group(1, 2)
return host, None
_nportprog = None
def splitnport(host, defport=-1):
"""Split host and port, returning numeric port.
Return given default port if no ':' found; defaults to -1.
Return numerical port if a valid number are found after ':'.
Return None if ':' but not a valid number."""
global _nportprog
if _nportprog is None:
import re
_nportprog = re.compile('^(.*):(.*)$')
match = _nportprog.match(host)
if match:
host, port = match.group(1, 2)
try:
if not port: raise ValueError("no digits")
nport = int(port)
except ValueError:
nport = None
return host, nport
return host, defport
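# --- Illustrative sketch (editor's addition) ---
# splitport() only separates an all-digit port and leaves it as a string, while
# splitnport() converts it to an int, falling back to the default (-1) when no
# ':' is present and to None when the port is not a valid number.
def _demo_splitnport():
    assert splitport('example.com:8080') == ('example.com', '8080')
    assert splitport('example.com') == ('example.com', None)
    assert splitnport('example.com') == ('example.com', -1)
    assert splitnport('example.com:abc') == ('example.com', None)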
_queryprog = None
def splitquery(url):
"""splitquery('/path?query') --> '/path', 'query'."""
global _queryprog
if _queryprog is None:
import re
        _queryprog = re.compile(r'^(.*)\?([^?]*)$')
match = _queryprog.match(url)
if match: return match.group(1, 2)
return url, None
_tagprog = None
def splittag(url):
"""splittag('/path#tag') --> '/path', 'tag'."""
global _tagprog
if _tagprog is None:
import re
_tagprog = re.compile('^(.*)#([^#]*)$')
match = _tagprog.match(url)
if match: return match.group(1, 2)
return url, None
def splitattr(url):
"""splitattr('/path;attr1=value1;attr2=value2;...') ->
'/path', ['attr1=value1', 'attr2=value2', ...]."""
words = url.split(';')
return words[0], words[1:]
_valueprog = None
def splitvalue(attr):
"""splitvalue('attr=value') --> 'attr', 'value'."""
global _valueprog
if _valueprog is None:
import re
_valueprog = re.compile('^([^=]*)=(.*)$')
match = _valueprog.match(attr)
if match: return match.group(1, 2)
return attr, None
| gpl-3.0 | -5,291,321,667,042,135,000 | 35.10883 | 88 | 0.588939 | false |
kosgroup/odoo | addons/stock/models/stock_location.py | 4 | 13345 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from datetime import datetime
from dateutil import relativedelta
from odoo import api, fields, models, _
from odoo.tools import DEFAULT_SERVER_DATETIME_FORMAT
class Location(models.Model):
_name = "stock.location"
_description = "Inventory Locations"
_parent_name = "location_id"
_parent_store = True
_parent_order = 'name'
_order = 'parent_left'
_rec_name = 'complete_name'
@api.model
def default_get(self, fields):
res = super(Location, self).default_get(fields)
if 'barcode' in fields and 'barcode' not in res and res.get('complete_name'):
res['barcode'] = res['complete_name']
return res
name = fields.Char('Location Name', required=True, translate=True)
    # TDE CLEANME: unnecessary field, use name_get instead
complete_name = fields.Char("Full Location Name", compute='_compute_complete_name', store=True)
active = fields.Boolean('Active', default=True, help="By unchecking the active field, you may hide a location without deleting it.")
usage = fields.Selection([
('supplier', 'Vendor Location'),
('view', 'View'),
('internal', 'Internal Location'),
('customer', 'Customer Location'),
('inventory', 'Inventory Loss'),
('procurement', 'Procurement'),
('production', 'Production'),
('transit', 'Transit Location')], string='Location Type',
default='internal', index=True, required=True,
help="* Vendor Location: Virtual location representing the source location for products coming from your vendors"
"\n* View: Virtual location used to create a hierarchical structures for your warehouse, aggregating its child locations ; can't directly contain products"
"\n* Internal Location: Physical locations inside your own warehouses,"
"\n* Customer Location: Virtual location representing the destination location for products sent to your customers"
"\n* Inventory Loss: Virtual location serving as counterpart for inventory operations used to correct stock levels (Physical inventories)"
"\n* Procurement: Virtual location serving as temporary counterpart for procurement operations when the source (vendor or production) is not known yet. This location should be empty when the procurement scheduler has finished running."
"\n* Production: Virtual counterpart location for production operations: this location consumes the raw material and produces finished products"
"\n* Transit Location: Counterpart location that should be used in inter-companies or inter-warehouses operations")
location_id = fields.Many2one(
'stock.location', 'Parent Location', index=True, ondelete='cascade',
help="The parent location that includes this location. Example : The 'Dispatch Zone' is the 'Gate 1' parent location.")
child_ids = fields.One2many('stock.location', 'location_id', 'Contains')
partner_id = fields.Many2one('res.partner', 'Owner', help="Owner of the location if not internal")
comment = fields.Text('Additional Information')
posx = fields.Integer('Corridor (X)', default=0, help="Optional localization details, for information purpose only")
posy = fields.Integer('Shelves (Y)', default=0, help="Optional localization details, for information purpose only")
posz = fields.Integer('Height (Z)', default=0, help="Optional localization details, for information purpose only")
parent_left = fields.Integer('Left Parent', index=True)
parent_right = fields.Integer('Right Parent', index=True)
company_id = fields.Many2one(
'res.company', 'Company',
default=lambda self: self.env['res.company']._company_default_get('stock.location'), index=True,
help='Let this field empty if this location is shared between companies')
scrap_location = fields.Boolean('Is a Scrap Location?', default=False, help='Check this box to allow using this location to put scrapped/damaged goods.')
return_location = fields.Boolean('Is a Return Location?', help='Check this box to allow using this location as a return location.')
removal_strategy_id = fields.Many2one('product.removal', 'Removal Strategy', help="Defines the default method used for suggesting the exact location (shelf) where to take the products from, which lot etc. for this location. This method can be enforced at the product category level, and a fallback is made on the parent locations if none is set here.")
putaway_strategy_id = fields.Many2one('product.putaway', 'Put Away Strategy', help="Defines the default method used for suggesting the exact location (shelf) where to store the products. This method can be enforced at the product category level, and a fallback is made on the parent locations if none is set here.")
barcode = fields.Char('Barcode', copy=False, oldname='loc_barcode')
_sql_constraints = [('barcode_company_uniq', 'unique (barcode,company_id)', 'The barcode for a location must be unique per company !')]
@api.one
@api.depends('name', 'location_id')
def _compute_complete_name(self):
""" Forms complete name of location from parent location to child location. """
name = self.name
current = self
while current.location_id and current.usage != 'view':
current = current.location_id
name = '%s/%s' % (current.name, name)
self.complete_name = name
@api.multi
def name_get(self):
return [(location.id, location.complete_name) for location in self]
def get_putaway_strategy(self, product):
''' Returns the location where the product has to be put, if any compliant putaway strategy is found. Otherwise returns None.'''
current_location = self
putaway_location = self.env['stock.location']
while current_location and not putaway_location:
if current_location.putaway_strategy_id:
putaway_location = current_location.putaway_strategy_id.putaway_apply(product)
current_location = current_location.location_id
return putaway_location
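    # Editor's note (illustrative, not from the original model): because the loop
    # above walks up location_id parents, a putaway strategy defined on a parent
    # location applies to all of its children unless a closer location overrides
    # it.  A hypothetical call such as shelf.get_putaway_strategy(product) returns
    # either a stock.location record or an empty recordset when nothing matches.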
@api.multi
@api.returns('stock.warehouse', lambda value: value.id)
def get_warehouse(self):
""" Returns warehouse id of warehouse that contains location """
return self.env['stock.warehouse'].search([
('view_location_id.parent_left', '<=', self.parent_left),
('view_location_id.parent_right', '>=', self.parent_left)], limit=1)
class Route(models.Model):
_name = 'stock.location.route'
_description = "Inventory Routes"
_order = 'sequence'
name = fields.Char('Route Name', required=True, translate=True)
active = fields.Boolean('Active', default=True, help="If the active field is set to False, it will allow you to hide the route without removing it.")
sequence = fields.Integer('Sequence', default=0)
pull_ids = fields.One2many('procurement.rule', 'route_id', 'Procurement Rules', copy=True)
push_ids = fields.One2many('stock.location.path', 'route_id', 'Push Rules', copy=True)
product_selectable = fields.Boolean('Applicable on Product', default=True, help="When checked, the route will be selectable in the Inventory tab of the Product form. It will take priority over the Warehouse route. ")
product_categ_selectable = fields.Boolean('Applicable on Product Category', help="When checked, the route will be selectable on the Product Category. It will take priority over the Warehouse route. ")
warehouse_selectable = fields.Boolean('Applicable on Warehouse', help="When a warehouse is selected for this route, this route should be seen as the default route when products pass through this warehouse. This behaviour can be overridden by the routes on the Product/Product Categories or by the Preferred Routes on the Procurement")
supplied_wh_id = fields.Many2one('stock.warehouse', 'Supplied Warehouse')
supplier_wh_id = fields.Many2one('stock.warehouse', 'Supplying Warehouse')
company_id = fields.Many2one(
'res.company', 'Company',
default=lambda self: self.env['res.company']._company_default_get('stock.location.route'), index=True,
help='Leave this field empty if this route is shared between all companies')
product_ids = fields.Many2many('product.template', 'stock_route_product', 'route_id', 'product_id', 'Products')
categ_ids = fields.Many2many('product.category', 'stock_location_route_categ', 'route_id', 'categ_id', 'Product Categories')
warehouse_ids = fields.Many2many('stock.warehouse', 'stock_route_warehouse', 'route_id', 'warehouse_id', 'Warehouses')
@api.multi
def write(self, values):
'''when a route is deactivated, deactivate also its pull and push rules'''
res = super(Route, self).write(values)
if 'active' in values:
self.mapped('push_ids').filtered(lambda path: path.active != values['active']).write({'active': values['active']})
self.mapped('pull_ids').filtered(lambda rule: rule.active != values['active']).write({'active': values['active']})
return res
@api.multi
def view_product_ids(self):
return {
'name': _('Products'),
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'product.template',
'type': 'ir.actions.act_window',
'domain': [('route_ids', 'in', self.ids)],
}
@api.multi
def view_categ_ids(self):
return {
'name': _('Product Categories'),
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'product.category',
'type': 'ir.actions.act_window',
'domain': [('route_ids', 'in', self.ids)],
}
class PushedFlow(models.Model):
_name = "stock.location.path"
_description = "Pushed Flow"
_order = "name"
name = fields.Char('Operation Name', required=True)
company_id = fields.Many2one(
'res.company', 'Company',
default=lambda self: self.env['res.company']._company_default_get('procurement.order'), index=True)
route_id = fields.Many2one('stock.location.route', 'Route')
location_from_id = fields.Many2one(
'stock.location', 'Source Location', index=True, ondelete='cascade', required=True,
help="This rule can be applied when a move is confirmed that has this location as destination location")
location_dest_id = fields.Many2one(
'stock.location', 'Destination Location', index=True, ondelete='cascade', required=True,
help="The new location where the goods need to go")
delay = fields.Integer('Delay (days)', default=0, help="Number of days needed to transfer the goods")
picking_type_id = fields.Many2one(
'stock.picking.type', 'Picking Type', required=True,
help="This is the picking type that will be put on the stock moves")
auto = fields.Selection([
('manual', 'Manual Operation'),
('transparent', 'Automatic No Step Added')], string='Automatic Move',
default='manual', index=True, required=True,
help="The 'Manual Operation' value will create a stock move after the current one."
"With 'Automatic No Step Added', the location is replaced in the original move.")
propagate = fields.Boolean('Propagate cancel and split', default=True, help='If checked, when the previous move is cancelled or split, the move generated by this move will too')
active = fields.Boolean('Active', default=True)
warehouse_id = fields.Many2one('stock.warehouse', 'Warehouse')
route_sequence = fields.Integer('Route Sequence', related='route_id.sequence', store=True)
sequence = fields.Integer('Sequence')
def _apply(self, move):
new_date = (datetime.strptime(move.date_expected, DEFAULT_SERVER_DATETIME_FORMAT) + relativedelta.relativedelta(days=self.delay)).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
if self.auto == 'transparent':
move.write({
'date': new_date,
'date_expected': new_date,
'location_dest_id': self.location_dest_id.id})
# avoid looping if a push rule is not well configured; otherwise call again push_apply to see if a next step is defined
if self.location_dest_id != move.location_dest_id:
# TDE FIXME: should probably be done in the move model IMO
move._push_apply()
else:
new_move = move.copy({
'origin': move.origin or move.picking_id.name or "/",
'location_id': move.location_dest_id.id,
'location_dest_id': self.location_dest_id.id,
'date': new_date,
'date_expected': new_date,
'company_id': self.company_id.id,
'picking_id': False,
'picking_type_id': self.picking_type_id.id,
'propagate': self.propagate,
'push_rule_id': self.id,
'warehouse_id': self.warehouse_id.id,
'procurement_id': False,
})
move.write({'move_dest_id': new_move.id})
new_move.action_confirm()
| gpl-3.0 | -5,531,126,282,891,788,000 | 59.659091 | 356 | 0.667291 | false |
sfluo/Mr.Bot | crypto/pycrypto-2.6/build/lib.macosx-10.7-intel-2.7/Crypto/Hash/hashalgo.py | 124 | 3984 | # -*- coding: utf-8 -*-
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
from binascii import hexlify
class HashAlgo:
"""A generic class for an abstract cryptographic hash algorithm.
:undocumented: block_size
"""
#: The size of the resulting hash in bytes.
digest_size = None
#: The internal block size of the hash algorithm in bytes.
block_size = None
def __init__(self, hashFactory, data=None):
"""Initialize the hash object.
:Parameters:
hashFactory : callable
An object that will generate the actual hash implementation.
*hashFactory* must have a *new()* method, or must be directly
callable.
data : byte string
The very first chunk of the message to hash.
It is equivalent to an early call to `update()`.
"""
if hasattr(hashFactory, 'new'):
self._hash = hashFactory.new()
else:
self._hash = hashFactory()
if data:
self.update(data)
def update(self, data):
"""Continue hashing of a message by consuming the next chunk of data.
Repeated calls are equivalent to a single call with the concatenation
of all the arguments. In other words:
>>> m.update(a); m.update(b)
is equivalent to:
>>> m.update(a+b)
:Parameters:
data : byte string
The next chunk of the message being hashed.
"""
return self._hash.update(data)
def digest(self):
"""Return the **binary** (non-printable) digest of the message that has been hashed so far.
This method does not change the state of the hash object.
You can continue updating the object after calling this function.
:Return: A byte string of `digest_size` bytes. It may contain non-ASCII
characters, including null bytes.
"""
return self._hash.digest()
def hexdigest(self):
"""Return the **printable** digest of the message that has been hashed so far.
This method does not change the state of the hash object.
:Return: A string of 2* `digest_size` characters. It contains only
hexadecimal ASCII digits.
"""
return self._hash.hexdigest()
def copy(self):
"""Return a copy ("clone") of the hash object.
The copy will have the same internal state as the original hash
object.
This can be used to efficiently compute the digests of strings that
share a common initial substring.
:Return: A hash object of the same type
"""
return self._hash.copy()
def new(self, data=None):
"""Return a fresh instance of the hash object.
Unlike the `copy` method, the internal state of the object is empty.
:Parameters:
data : byte string
The next chunk of the message being hashed.
:Return: A hash object of the same type
"""
        # Concrete hash implementations are expected to override this method.
        pass
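# --- Illustrative sketch (editor's addition, not part of the original file) ---
# HashAlgo only wraps a factory object, so any hashlib-style constructor works.
# The helper below is purely for demonstration and is never called here.
def _demo_hashalgo():
    import hashlib
    h = HashAlgo(hashlib.sha256, b'abc')
    h.update(b'def')
    assert h.hexdigest() == hashlib.sha256(b'abcdef').hexdigest()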
| bsd-3-clause | 2,363,372,076,258,632,000 | 33.344828 | 99 | 0.610693 | false |
reddraggone9/youtube-dl | youtube_dl/extractor/infoq.py | 92 | 2315 | from __future__ import unicode_literals
import base64
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse_unquote,
compat_urlparse,
)
class InfoQIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?infoq\.com/(?:[^/]+/)+(?P<id>[^/]+)'
_TESTS = [{
'url': 'http://www.infoq.com/presentations/A-Few-of-My-Favorite-Python-Things',
'md5': 'b5ca0e0a8c1fed93b0e65e48e462f9a2',
'info_dict': {
'id': '12-jan-pythonthings',
'ext': 'mp4',
'description': 'Mike Pirnat presents some tips and tricks, standard libraries and third party packages that make programming in Python a richer experience.',
'title': 'A Few of My Favorite [Python] Things',
},
}, {
'url': 'http://www.infoq.com/fr/presentations/changez-avis-sur-javascript',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
video_title = self._html_search_regex(r'<title>(.*?)</title>', webpage, 'title')
video_description = self._html_search_meta('description', webpage, 'description')
# The server URL is hardcoded
video_url = 'rtmpe://video.infoq.com/cfx/st/'
# Extract video URL
encoded_id = self._search_regex(
r"jsclassref\s*=\s*'([^']*)'", webpage, 'encoded id')
real_id = compat_urllib_parse_unquote(base64.b64decode(encoded_id.encode('ascii')).decode('utf-8'))
playpath = 'mp4:' + real_id
video_filename = playpath.split('/')[-1]
video_id, extension = video_filename.split('.')
http_base = self._search_regex(
r'EXPRESSINSTALL_SWF\s*=\s*[^"]*"((?:https?:)?//[^/"]+/)', webpage,
'HTTP base URL')
formats = [{
'format_id': 'rtmp',
'url': video_url,
'ext': extension,
'play_path': playpath,
}, {
'format_id': 'http',
'url': compat_urlparse.urljoin(url, http_base) + real_id,
}]
self._sort_formats(formats)
return {
'id': video_id,
'title': video_title,
'description': video_description,
'formats': formats,
}
| unlicense | -1,128,485,824,751,588,400 | 33.044118 | 169 | 0.554212 | false |
phoenixstar7/libsvm | tools/subset.py | 124 | 3202 | #!/usr/bin/env python
import os, sys, math, random
from collections import defaultdict
if sys.version_info[0] >= 3:
xrange = range
def exit_with_help(argv):
print("""\
Usage: {0} [options] dataset subset_size [output1] [output2]
This script randomly selects a subset of the dataset.
options:
-s method : method of selection (default 0)
0 -- stratified selection (classification only)
1 -- random selection
output1 : the subset (optional)
output2 : rest of the data (optional)
If output1 is omitted, the subset will be printed on the screen.""".format(argv[0]))
exit(1)
def process_options(argv):
argc = len(argv)
if argc < 3:
exit_with_help(argv)
# default method is stratified selection
method = 0
subset_file = sys.stdout
rest_file = None
i = 1
while i < argc:
if argv[i][0] != "-":
break
if argv[i] == "-s":
i = i + 1
method = int(argv[i])
if method not in [0,1]:
print("Unknown selection method {0}".format(method))
exit_with_help(argv)
i = i + 1
dataset = argv[i]
subset_size = int(argv[i+1])
if i+2 < argc:
subset_file = open(argv[i+2],'w')
if i+3 < argc:
rest_file = open(argv[i+3],'w')
return dataset, subset_size, method, subset_file, rest_file
def random_selection(dataset, subset_size):
l = sum(1 for line in open(dataset,'r'))
return sorted(random.sample(xrange(l), subset_size))
def stratified_selection(dataset, subset_size):
labels = [line.split(None,1)[0] for line in open(dataset)]
label_linenums = defaultdict(list)
for i, label in enumerate(labels):
label_linenums[label] += [i]
l = len(labels)
remaining = subset_size
ret = []
# classes with fewer data are sampled first; otherwise
# some rare classes may not be selected
for label in sorted(label_linenums, key=lambda x: len(label_linenums[x])):
linenums = label_linenums[label]
label_size = len(linenums)
# at least one instance per class
s = int(min(remaining, max(1, math.ceil(label_size*(float(subset_size)/l)))))
if s == 0:
sys.stderr.write('''\
Error: failed to have at least one instance per class
1. You may have regression data.
2. Your classification data is unbalanced or too small.
Please use -s 1.
''')
sys.exit(-1)
remaining -= s
ret += [linenums[i] for i in random.sample(xrange(label_size), s)]
return sorted(ret)
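# Editor's note (illustrative): with 1000 lines of which 900 are labelled "+1"
# and 100 are labelled "-1", a subset_size of 50 keeps about
# ceil(100 * 50/1000) = 5 "-1" lines and 45 "+1" lines; sampling the smaller
# classes first guarantees every class at least one instance.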
def main(argv=sys.argv):
dataset, subset_size, method, subset_file, rest_file = process_options(argv)
#uncomment the following line to fix the random seed
#random.seed(0)
selected_lines = []
if method == 0:
selected_lines = stratified_selection(dataset, subset_size)
elif method == 1:
selected_lines = random_selection(dataset, subset_size)
#select instances based on selected_lines
dataset = open(dataset,'r')
prev_selected_linenum = -1
for i in xrange(len(selected_lines)):
for cnt in xrange(selected_lines[i]-prev_selected_linenum-1):
line = dataset.readline()
if rest_file:
rest_file.write(line)
subset_file.write(dataset.readline())
prev_selected_linenum = selected_lines[i]
subset_file.close()
if rest_file:
for line in dataset:
rest_file.write(line)
rest_file.close()
dataset.close()
if __name__ == '__main__':
main(sys.argv)
| bsd-3-clause | 946,341,884,544,384,600 | 25.683333 | 84 | 0.68426 | false |
anaran/kuma | vendor/packages/translate/filters/test_prefilters.py | 33 | 1123 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""tests decoration handling functions that are used by checks"""
from translate.filters import prefilters
def test_removekdecomments():
assert prefilters.removekdecomments(u"Some sṱring") == u"Some sṱring"
assert prefilters.removekdecomments(u"_: Commenṱ\\n\nSome sṱring") == u"Some sṱring"
assert prefilters.removekdecomments(u"_: Commenṱ\\n\n") == u""
def test_filterwordswithpunctuation():
string = u"Nothing in here."
filtered = prefilters.filterwordswithpunctuation(string)
assert filtered == string
# test listed words (start / end with apostrophe)
string = u"'n Boom het 'n tak."
filtered = prefilters.filterwordswithpunctuation(string)
assert filtered == "n Boom het n tak."
# test words containing apostrophe
string = u"It's in it's own place."
filtered = prefilters.filterwordswithpunctuation(string)
assert filtered == "Its in its own place."
# test strings in unicode
string = u"Iṱ'š"
filtered = prefilters.filterwordswithpunctuation(string)
assert filtered == u"Iṱš"
| mpl-2.0 | -6,775,069,672,364,273,000 | 35.833333 | 88 | 0.709502 | false |
hpcloud-mon/tempest | tempest/api_schema/response/compute/version.py | 16 | 2153 | # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
version = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'version': {
'type': 'object',
'properties': {
'id': {'type': 'string'},
'links': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'href': {'type': 'string', 'format': 'uri'},
'rel': {'type': 'string'},
'type': {'type': 'string'}
},
'required': ['href', 'rel']
}
},
'media-types': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'base': {'type': 'string'},
'type': {'type': 'string'}
},
'required': ['base', 'type']
}
},
'status': {'type': 'string'},
'updated': {'type': 'string', 'format': 'date-time'}
},
'required': ['id', 'links', 'media-types', 'status', 'updated']
}
},
'required': ['version']
}
}
| apache-2.0 | -8,302,505,869,527,729,000 | 38.145455 | 79 | 0.391547 | false |
peiyuwang/pants | tests/python/pants_test/backend/python/tasks/interpreter_cache_test_mixin.py | 17 | 1196 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
class InterpreterCacheTestMixin(object):
"""A mixin to allow tests to use the "real" interpreter cache.
This is so each test doesn't waste huge amounts of time recreating the cache on each run.
Note: Must be mixed in to a subclass of BaseTest.
"""
def setUp(self):
super(InterpreterCacheTestMixin, self).setUp()
# It would be nice to get the location of the real interpreter cache from PythonSetup,
# but unfortunately real subsystems aren't available here (for example, we have no access
# to the enclosing pants instance's options), so we have to hard-code it.
python_setup_workdir = os.path.join(self.real_build_root, '.pants.d', 'python-setup')
self.set_options_for_scope('python-setup',
interpreter_cache_dir=os.path.join(python_setup_workdir, 'interpreters'),
chroot_cache_dir=os.path.join(python_setup_workdir, 'chroots'))
| apache-2.0 | 7,725,585,480,457,705,000 | 41.714286 | 93 | 0.720736 | false |
implemento/domino | app/app/urls.py | 1 | 1107 | from django.conf.urls import include, url
from django.contrib import admin
from django.contrib.auth.models import User
from rest_framework import routers, serializers, viewsets
# Serializers define the API representation.
class UserSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = User
fields = ('url', 'username', 'email', 'is_staff')
# ViewSets define the view behavior.
class UserViewSet(viewsets.ModelViewSet):
queryset = User.objects.all()
serializer_class = UserSerializer
# Routers provide an easy way of automatically determining the URL conf.
router = routers.DefaultRouter()
router.register(r'users', UserViewSet)
# Wire up our API using automatic URL routing.
# Additionally, we include login URLs for the browsable API.
urlpatterns = [
url(r'^', include('domino.urls')),
url(r'^api/v1/', include(router.urls)),
url(r'^domino/', include('domino.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework'))
]
| gpl-3.0 | 9,174,950,371,001,252,000 | 34.709677 | 82 | 0.730804 | false |
dimmddr/roadSignsNN | prepare_images.py | 1 | 8513 | import cv2
import matplotlib.pyplot as plt
import numpy as np
from numpy.lib.stride_tricks import as_strided
import nn
from settings import COVER_PERCENT
IMG_WIDTH = 1025
IMG_HEIGHT = 523
IMG_LAYERS = 3
SUB_IMG_WIDTH = 48
SUB_IMG_HEIGHT = 48
SUB_IMG_LAYERS = 3
WIDTH = 2
HEIGHT = 1
LAYERS = 0
XMIN = 0
YMIN = 1
XMAX = 2
YMAX = 3
# TODO: rewrite everything either with the Rectangle namedtuple or with numpy, e.g. using a recarray
def compute_covering(window, label):
dx = min(window.xmax, label.xmax) - max(window.xmin, label.xmin)
dy = min(window.ymax, label.ymax) - max(window.ymin, label.ymin)
if (dx >= 0) and (dy >= 0):
label_cover = dx * dy / ((label.xmax - label.xmin) * (label.ymax - label.ymin))
window_cover = dx * dy / ((window.xmax - window.xmin) * (window.ymax - window.ymin))
return max(label_cover, window_cover)
else:
return 0
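# --- Illustrative sketch (editor's addition) ---
# Worked example for compute_covering(): a 48x48 window at (0, 0) and a 48x48
# label at (24, 24) overlap in a 24x24 patch, so both cover ratios equal
# 24*24 / (48*48) = 0.25 and 0.25 is returned.
def _demo_compute_covering():
    window = nn.Rectangle(0, 0, 48, 48)
    label = nn.Rectangle(24, 24, 72, 72)
    assert compute_covering(window, label) == 0.25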
def split_into_subimgs(img, sub_img_shape, debug, step=1):
shape = (int(np.floor((img.shape[HEIGHT] - sub_img_shape[HEIGHT]) / step)),
int(np.floor((img.shape[WIDTH] - sub_img_shape[WIDTH]) / step)),
SUB_IMG_LAYERS, SUB_IMG_HEIGHT, SUB_IMG_WIDTH)
# shape = (lbl_array.shape[0], SUB_IMG_LAYERS, SUB_IMG_HEIGHT, SUB_IMG_WIDTH)
result_array = as_strided(img, shape=shape,
strides=(
img.strides[1] * step + (img.shape[WIDTH] - sub_img_shape[WIDTH]) % step *
img.strides[2],
img.strides[2] * step,
img.strides[0], img.strides[1], img.strides[2]))
return result_array
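# --- Illustrative sketch (editor's addition) ---
# Shape bookkeeping for split_into_subimgs(): a channels-first (3, 523, 1025)
# image with a 48x48 window and step=2 yields a strided view of shape
# (floor((523-48)/2), floor((1025-48)/2), 3, 48, 48) without copying pixels.
def _demo_split_into_subimgs():
    img = np.zeros((3, 523, 1025))
    res = split_into_subimgs(img, sub_img_shape=(3, 48, 48), debug=False, step=2)
    assert res.shape == (237, 488, 3, 48, 48)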
def get_labels(labels, result_array_shape, step, sub_img_shape):
lbl_array = np.zeros(shape=(result_array_shape[0], result_array_shape[1]))
index = 0
for i in range(lbl_array.shape[0]):
for ii in range(lbl_array.shape[1]):
# Rectangle = namedtuple('Rectangle', ['xmin', 'ymin', 'xmax', 'ymax'])
window = nn.Rectangle(ii * step, i * step, ii * step + sub_img_shape[HEIGHT],
i * step + sub_img_shape[WIDTH])
cover = np.array([compute_covering(window=window,
label=nn.Rectangle(lbl[0], lbl[1], lbl[2], lbl[3])) for lbl in labels])
is_cover = int(np.any(cover > COVER_PERCENT))
lbl_array[i, ii] = is_cover
index += 1
return lbl_array
def prepare(img_path, labels, debug=False):
step = 2
img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
if debug:
print("Prepare image " + img_path)
print(img.shape)
print(labels)
res_img = img / 255
res_img = np.array([res_img[:, :, 0], res_img[:, :, 1], res_img[:, :, 2]])
res = split_into_subimgs(res_img, sub_img_shape=(SUB_IMG_LAYERS, SUB_IMG_HEIGHT, SUB_IMG_WIDTH),
step=step, debug=debug)
lbl_res = get_labels(labels=labels, result_array_shape=res.shape,
step=step, sub_img_shape=(SUB_IMG_LAYERS, SUB_IMG_HEIGHT, SUB_IMG_WIDTH))
return res, lbl_res
def prepare_calibration(img_path, labels, debug=False):
    # Returns labels in the form (yn, xn, wn, hn) used to calibrate the image bounding box:
    # if (x, y) are the coordinates of the top-left corner and (w, h) the width and height,
    # then the new box is (x - xn * w / wn, y - yn * h / hn), (w / wn, h / hn)
"""
:param img_path:
:param labels:
:param debug:
:return:
    @note: The first net should rescale the image within [16, 64], the second within [8, 128].
    Since the initial window is 32x32, the maximum correction factor should be 2 and the minimum 0.5.
    Three classes each for width and height would be convenient, but hardly practical; it is worth trying at least 5.
    An odd number is convenient so that three options are available: maximum, minimum and keep as is.
    The resulting multipliers are: [1/2, 3/4, 1, 6/4, 2]
    and correspondingly the sizes for the initial 32 are: [16, 24, 32, 48, 64]
"""
step = 2
img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
if debug:
print("Prepare image " + img_path)
print(img.shape)
print(labels)
res_img = img / 255
res_img = np.array([res_img[:, :, 0], res_img[:, :, 1], res_img[:, :, 2]])
res = split_into_subimgs(res_img, sub_img_shape=(SUB_IMG_LAYERS, SUB_IMG_HEIGHT, SUB_IMG_WIDTH),
step=step, debug=debug)
lbl_res = get_labels(labels=labels, result_array_shape=res.shape,
step=step, sub_img_shape=(SUB_IMG_LAYERS, SUB_IMG_HEIGHT, SUB_IMG_WIDTH))
    # TODO: decide what to do when there are several signs -
    # the coordinates of the right one have to be obtained somehow
xmin, ymin, xmax, ymax = labels[0]
for image in res[lbl_res == 1]:
pass
    # The image coordinates need to be extracted from the array of images and the array of their labels.
    # Width and height are known and identical during this initial preparation step.
    # The coordinates can be derived from the image index; that index just has to be retrieved.
return res, lbl_res
def show_sign(img_path, lbl):
print(img_path)
print(lbl)
img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
cv2.imshow("img", img[lbl[1]:lbl[3], lbl[0]:lbl[2], :])
cv2.waitKey(0)
cv2.destroyAllWindows()
cv2.rectangle(img, (lbl[0], lbl[1]), (lbl[2], lbl[3]), 2)
cv2.imshow("img", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
def show_roi(roi_list):
for roi in roi_list:
(r, g, b) = (roi[0], roi[1], roi[2])
roi = cv2.merge((r, g, b))
cv2.imshow("img", roi)
cv2.waitKey(0)
cv2.destroyAllWindows()
def show_rectangles(filename, rectangles_list, show_type='matplotlib'):
img = cv2.imread(filename, cv2.IMREAD_UNCHANGED)
for rect in rectangles_list:
if rect is not None:
cv2.rectangle(img, (rect[XMIN], rect[YMIN]), (rect[XMAX], rect[YMAX]), (0, 255, 0), 1)
if show_type == 'matplotlib':
(b, g, r) = cv2.split(img)
img = cv2.merge((r, g, b))
plt.imshow(img)
plt.show()
else:
cv2.imshow(filename, img)
cv2.waitKey()
# TODO: add saving of the results into a separate directory
def save_img_with_rectangles(dataset_path, filename, rectangles_list):
img = cv2.imread(dataset_path + filename, cv2.IMREAD_UNCHANGED)
for rect in rectangles_list:
if rect is not None:
cv2.rectangle(img, (rect[XMIN], rect[YMIN]), (rect[XMAX], rect[YMAX]), (0, 255, 0), 1)
cv2.imwrite(dataset_path + "results/" + filename + "_with_rects.jpg", img)
# Probably temp function before I fix localization
def get_roi_from_images(images, img_path):
res_roi = []
res_label = []
label_dict = dict()
for image in images:
img = cv2.imread(img_path + image.filename.decode('utf8'), cv2.IMREAD_UNCHANGED)
for sign in image.signs:
if sign.label not in label_dict:
label_dict[sign.label] = len(label_dict)
(x1, y1, x2, y2) = sign.coord
roi = img[y1:y2, x1:x2, :]
res_roi.append(np.array([roi[:, :, 0], roi[:, :, 1], roi[:, :, 2]]))
res_label.append(label_dict[sign.label])
return res_roi, res_label, label_dict
def create_synthetic_data(imgs):
# Create array of size mods [1, 4], step = 0.5
sizes = np.arange(start=1, stop=4.5, step=0.5)
total = imgs.shape[0] * sizes.shape[0] * 2 # *2
    res = []
    # Not implemented yet: the resized variants are never generated, so the input
    # images are currently returned unchanged.
    return imgs
| mit | 6,245,023,130,045,099,000 | 37.695431 | 118 | 0.599764 | false |
ZHAW-INES/rioxo-uClinux-dist | user/python/python-2.4.4/Lib/test/test_operations.py | 5 | 2028 | # Python test set -- part 3, built-in operations.
print '3. Operations'
print 'XXX Mostly not yet implemented'
print '3.1 Dictionary lookups succeed even if __cmp__() raises an exception'
class BadDictKey:
already_printed_raising_error = 0
def __hash__(self):
return hash(self.__class__)
def __cmp__(self, other):
if isinstance(other, self.__class__):
if not BadDictKey.already_printed_raising_error:
# How many times __cmp__ gets called depends on the hash
# code and the internals of the dict implementation; we
# know it will be called at least once, but that's it.
# already_printed_raising_error makes sure the expected-
# output file prints the msg at most once.
BadDictKey.already_printed_raising_error = 1
print "raising error"
raise RuntimeError, "gotcha"
return other
d = {}
x1 = BadDictKey()
x2 = BadDictKey()
d[x1] = 1
d[x2] = 2
print "No exception passed through."
# Dict resizing bug, found by Jack Jansen in 2.2 CVS development.
# This version got an assert failure in debug build, infinite loop in
# release build. Unfortunately, provoking this kind of stuff requires
# a mix of inserts and deletes hitting exactly the right hash codes in
# exactly the right order, and I can't think of a randomized approach
# that would be *likely* to hit a failing case in reasonable time.
d = {}
for i in range(5):
d[i] = i
for i in range(5):
del d[i]
for i in range(5, 9): # i==8 was the problem
d[i] = i
# Another dict resizing bug (SF bug #1456209).
# This caused Segmentation faults or Illegal instructions.
class X(object):
def __hash__(self):
return 5
def __eq__(self, other):
if resizing:
d.clear()
return False
d = {}
resizing = False
d[X()] = 1
d[X()] = 2
d[X()] = 3
d[X()] = 4
d[X()] = 5
# now trigger a resize
resizing = True
d[9] = 6
print 'resize bugs not triggered.'
| gpl-2.0 | -8,823,277,903,149,683,000 | 26.780822 | 76 | 0.627712 | false |
eric-haibin-lin/mxnet | example/profiler/profiler_ndarray.py | 27 | 11345 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import mxnet as mx
import numpy as np
import pickle as pkl
def _np_reduce(dat, axis, keepdims, numpy_reduce_func):
if isinstance(axis, int):
axis = [axis]
else:
axis = list(axis) if axis is not None else range(len(dat.shape))
ret = dat
for i in reversed(sorted(axis)):
ret = numpy_reduce_func(ret, axis=i)
if keepdims:
keepdims_shape = list(dat.shape)
for i in axis:
keepdims_shape[i] = 1
ret = ret.reshape(tuple(keepdims_shape))
return ret
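# --- Illustrative sketch (editor's addition) ---
# _np_reduce() applies the numpy reducer one axis at a time (highest axis first)
# and can restore the reduced axes as size-1 dimensions via keepdims.
def _demo_np_reduce():
    dat = np.ones((2, 3, 4))
    out = _np_reduce(dat, axis=(0, 2), keepdims=True, numpy_reduce_func=np.sum)
    assert out.shape == (1, 3, 1)
    assert (out == 8).all()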
def reldiff(a, b):
diff = np.abs(a - b)
norm = np.abs(a)
reldiff = np.max(diff / (norm + 1e-7))
return reldiff
def same(a, b):
return np.sum(a != b) == 0
def check_with_uniform(uf, arg_shapes, dim=None, npuf=None, rmin=-10, type_list=[np.float32]):
"""check function consistency with uniform random numbers"""
if isinstance(arg_shapes, int):
assert dim
shape = tuple(np.random.randint(1, int(1000**(1.0/dim)), size=dim))
arg_shapes = [shape] * arg_shapes
for dtype in type_list:
ndarray_arg = []
numpy_arg = []
for s in arg_shapes:
npy = np.random.uniform(rmin, 10, s).astype(dtype)
narr = mx.nd.array(npy, dtype=dtype)
ndarray_arg.append(narr)
numpy_arg.append(npy)
out1 = uf(*ndarray_arg)
if npuf is None:
out2 = uf(*numpy_arg).astype(dtype)
else:
out2 = npuf(*numpy_arg).astype(dtype)
assert out1.shape == out2.shape
if isinstance(out1, mx.nd.NDArray):
out1 = out1.asnumpy()
if dtype == np.float16:
assert reldiff(out1, out2) < 2e-3
else:
assert reldiff(out1, out2) < 1e-6
def random_ndarray(dim):
shape = tuple(np.random.randint(1, int(1000**(1.0/dim)), size=dim))
data = mx.nd.array(np.random.uniform(-10, 10, shape))
return data
def test_ndarray_elementwise():
np.random.seed(0)
nrepeat = 10
maxdim = 4
all_type = [np.float32, np.float64, np.float16, np.uint8, np.int32]
real_type = [np.float32, np.float64, np.float16]
for repeat in range(nrepeat):
for dim in range(1, maxdim):
check_with_uniform(lambda x, y: x + y, 2, dim, type_list=all_type)
check_with_uniform(lambda x, y: x - y, 2, dim, type_list=all_type)
check_with_uniform(lambda x, y: x * y, 2, dim, type_list=all_type)
check_with_uniform(lambda x, y: x / y, 2, dim, type_list=real_type)
check_with_uniform(lambda x, y: x / y, 2, dim, rmin=1, type_list=all_type)
check_with_uniform(mx.nd.sqrt, 1, dim, np.sqrt, rmin=0)
check_with_uniform(mx.nd.square, 1, dim, np.square, rmin=0)
check_with_uniform(lambda x: mx.nd.norm(x).asscalar(), 1, dim, np.linalg.norm)
def test_ndarray_negate():
npy = np.random.uniform(-10, 10, (2,3,4))
arr = mx.nd.array(npy)
assert reldiff(npy, arr.asnumpy()) < 1e-6
assert reldiff(-npy, (-arr).asnumpy()) < 1e-6
# a final check to make sure the negation (-) is not implemented
# as inplace operation, so the contents of arr does not change after
# we compute (-arr)
assert reldiff(npy, arr.asnumpy()) < 1e-6
def test_ndarray_choose():
shape = (100, 20)
npy = np.arange(np.prod(shape)).reshape(shape)
arr = mx.nd.array(npy)
nrepeat = 3
for repeat in range(nrepeat):
indices = np.random.randint(shape[1], size=shape[0])
assert same(npy[np.arange(shape[0]), indices],
mx.nd.choose_element_0index(arr, mx.nd.array(indices)).asnumpy())
def test_ndarray_fill():
shape = (100, 20)
npy = np.arange(np.prod(shape)).reshape(shape)
arr = mx.nd.array(npy)
new_npy = npy.copy()
nrepeat = 3
for repeat in range(nrepeat):
indices = np.random.randint(shape[1], size=shape[0])
val = np.random.randint(shape[1], size=shape[0])
new_npy[:] = npy
new_npy[np.arange(shape[0]), indices] = val
assert same(new_npy,
mx.nd.fill_element_0index(arr, mx.nd.array(val), mx.nd.array(indices)).asnumpy())
def test_ndarray_onehot():
shape = (100, 20)
npy = np.arange(np.prod(shape)).reshape(shape)
arr = mx.nd.array(npy)
nrepeat = 3
for repeat in range(nrepeat):
indices = np.random.randint(shape[1], size=shape[0])
npy[:] = 0.0
npy[np.arange(shape[0]), indices] = 1.0
mx.nd.onehot_encode(mx.nd.array(indices), out=arr)
assert same(npy, arr.asnumpy())
def test_ndarray_copy():
c = mx.nd.array(np.random.uniform(-10, 10, (10, 10)))
d = c.copyto(mx.Context('cpu', 0))
assert np.sum(np.abs(c.asnumpy() != d.asnumpy())) == 0.0
def test_ndarray_scalar():
c = mx.nd.empty((10,10))
d = mx.nd.empty((10,10))
c[:] = 0.5
d[:] = 1.0
d -= c * 2 / 3 * 6.0
c += 0.5
assert(np.sum(c.asnumpy()) - 100 < 1e-5)
assert(np.sum(d.asnumpy()) + 100 < 1e-5)
c[:] = 2
assert(np.sum(c.asnumpy()) - 200 < 1e-5)
d = -c + 2
assert(np.sum(d.asnumpy()) < 1e-5)
def test_ndarray_pickle():
np.random.seed(0)
maxdim = 5
nrepeat = 10
for repeat in range(nrepeat):
for dim in range(1, maxdim):
a = random_ndarray(dim)
b = mx.nd.empty(a.shape)
a[:] = np.random.uniform(-10, 10, a.shape)
b[:] = np.random.uniform(-10, 10, a.shape)
a = a + b
data = pkl.dumps(a)
a2 = pkl.loads(data)
assert np.sum(a.asnumpy() != a2.asnumpy()) == 0
def test_ndarray_saveload():
np.random.seed(0)
maxdim = 5
nrepeat = 10
fname = 'tmp_list.bin'
for repeat in range(nrepeat):
data = []
for i in range(10):
data.append(random_ndarray(np.random.randint(1, 5)))
mx.nd.save(fname, data)
data2 = mx.nd.load(fname)
assert len(data) == len(data2)
for x, y in zip(data, data2):
assert np.sum(x.asnumpy() != y.asnumpy()) == 0
dmap = {'ndarray xx %s' % i : x for i, x in enumerate(data)}
mx.nd.save(fname, dmap)
dmap2 = mx.nd.load(fname)
assert len(dmap2) == len(dmap)
for k, x in dmap.items():
y = dmap2[k]
assert np.sum(x.asnumpy() != y.asnumpy()) == 0
os.remove(fname)
def test_ndarray_slice():
shape = (10,)
A = mx.nd.array(np.random.uniform(-10, 10, shape))
A2 = A.asnumpy()
assert same(A[3:8].asnumpy(), A2[3:8])
A2[3:8] *= 10;
A[3:8] = A2[3:8]
assert same(A[3:8].asnumpy(), A2[3:8])
def test_ndarray_slice_along_axis():
arr = mx.nd.array(np.random.uniform(-10, 10, (3, 4, 2, 3)))
sub_arr = arr.slice(begin=(None, 1), end=(None, 3))
# test we sliced correctly
assert same(arr.asnumpy()[:, 1:3, :, :], sub_arr.asnumpy())
# test that slice is copy, instead of shared memory
sub_arr[:] = 0
assert not same(arr.asnumpy()[:, 1:3, :, :], sub_arr.asnumpy())
def test_clip():
shape = (10,)
A = mx.random.uniform(-10, 10, shape)
B = mx.nd.clip(A, -2, 2)
B1 = B.asnumpy()
for i in range(shape[0]):
assert B1[i] >= -2
assert B1[i] <= 2
def test_dot():
a = np.random.uniform(-3, 3, (3, 4))
b = np.random.uniform(-3, 3, (4, 5))
c = np.dot(a, b)
A = mx.nd.array(a)
B = mx.nd.array(b)
C = mx.nd.dot(A, B)
assert reldiff(c, C.asnumpy()) < 1e-5
def test_reduce():
sample_num = 200
def test_reduce_inner(numpy_reduce_func, nd_reduce_func):
for i in range(sample_num):
ndim = np.random.randint(1, 6)
shape = np.random.randint(1, 11, size=ndim)
axis_flags = np.random.randint(0, 2, size=ndim)
axes = []
for (axis, flag) in enumerate(axis_flags):
if flag:
axes.append(axis)
keepdims = np.random.randint(0, 2)
dat = np.random.rand(*shape) - 0.5
if 0 == len(axes):
axes = tuple(range(ndim))
else:
axes = tuple(axes)
numpy_ret = numpy_reduce_func(dat, axis=axes, keepdims=keepdims)
ndarray_ret = nd_reduce_func(mx.nd.array(dat), axis=axes, keepdims=keepdims)
if type(ndarray_ret) is mx.ndarray.NDArray:
ndarray_ret = ndarray_ret.asnumpy()
assert (ndarray_ret.shape == numpy_ret.shape) or \
(ndarray_ret.shape == (1,) and numpy_ret.shape == ()), "nd:%s, numpy:%s" \
%(ndarray_ret.shape, numpy_ret.shape)
err = np.square(ndarray_ret - numpy_ret).mean()
assert err < 1E-4
test_reduce_inner(lambda data, axis, keepdims:_np_reduce(data, axis, keepdims, np.sum),
mx.nd.sum)
test_reduce_inner(lambda data, axis, keepdims:_np_reduce(data, axis, keepdims, np.max),
mx.nd.max)
test_reduce_inner(lambda data, axis, keepdims:_np_reduce(data, axis, keepdims, np.min),
mx.nd.min)
def test_broadcast():
sample_num = 1000
def test_broadcast_to():
for i in range(sample_num):
ndim = np.random.randint(1, 6)
target_shape = np.random.randint(1, 11, size=ndim)
shape = target_shape.copy()
axis_flags = np.random.randint(0, 2, size=ndim)
axes = []
for (axis, flag) in enumerate(axis_flags):
if flag:
shape[axis] = 1
dat = np.random.rand(*shape) - 0.5
numpy_ret = dat
ndarray_ret = mx.nd.array(dat).broadcast_to(shape=target_shape)
if type(ndarray_ret) is mx.ndarray.NDArray:
ndarray_ret = ndarray_ret.asnumpy()
assert (ndarray_ret.shape == target_shape).all()
err = np.square(ndarray_ret - numpy_ret).mean()
assert err < 1E-8
test_broadcast_to()
if __name__ == '__main__':
mx.profiler.set_config(profile_all=True, filename='profile_ndarray.json')
mx.profiler.set_state('run')
test_ndarray_slice_along_axis()
test_broadcast()
test_ndarray_elementwise()
test_ndarray_slice()
test_ndarray_pickle()
test_ndarray_saveload()
test_ndarray_copy()
test_ndarray_negate()
test_ndarray_scalar()
test_clip()
test_dot()
test_ndarray_choose()
test_ndarray_onehot()
test_ndarray_fill()
test_reduce()
mx.profiler.set_state('stop')
| apache-2.0 | -7,061,829,489,534,903,000 | 32.764881 | 101 | 0.571617 | false |
coreos/autotest | client/setup.py | 3 | 2278 | from distutils.core import setup
import os
try:
import autotest.common as common
except ImportError:
import common
from autotest.client.shared import version
# Mostly needed when called one level up
if os.path.isdir('client'):
client_dir = 'client'
else:
client_dir = '.'
autotest_dir = os.path.join(client_dir, "..")
def _get_files(path):
'''
Given a path, return all the files in there to package
'''
flist=[]
for root, _, files in sorted(os.walk(path)):
for name in files:
fullname = os.path.join(root, name)
flist.append(fullname)
return flist
def get_filelist():
pd_filelist=['config/*' ]
pd_filelist.extend(_get_files(os.path.join(client_dir, 'profilers')))
pd_filelist.extend(_get_files(os.path.join(client_dir, 'tools')))
return pd_filelist
def get_packages():
return ['autotest.client.shared',
'autotest.client.shared.hosts',
'autotest.client.shared.test_utils',
'autotest.client.net',
'autotest.client.tools',
'autotest.client.profilers',
'autotest.client',
'autotest']
def get_scripts():
return [os.path.join(client_dir, 'autotest-local'),
os.path.join(client_dir, 'autotest-local-streamhandler'),
os.path.join(client_dir, 'autotest-daemon'),
os.path.join(client_dir, 'autotest-daemon-monitor')]
def get_data_files():
return [('/etc/autotest', [autotest_dir + '/global_config.ini',
autotest_dir + '/shadow_config.ini',]),]
def get_package_dir():
return {'autotest.client': client_dir, 'autotest' : autotest_dir}
def get_package_data():
return {'autotest.client' : get_filelist()}
def run():
setup(name='autotest',
description='Autotest test framework - local module',
maintainer='Lucas Meneghel Rodrigues',
author_email='[email protected]',
version=version.get_version(),
url='http://autotest.github.com',
package_dir=get_package_dir(),
package_data=get_package_data(),
packages= get_packages(),
scripts=get_scripts(),
data_files=get_data_files())
if __name__ == '__main__':
run()
| gpl-2.0 | 5,488,052,051,032,430,000 | 25.8 | 73 | 0.604917 | false |
leafjungle/luigi | luigi/mock.py | 48 | 5473 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module provides a class :class:`MockTarget`, an implementation of :py:class:`~luigi.target.Target`.
:class:`MockTarget` contains all data in-memory.
The main purpose is unit testing workflows without writing to disk.
"""
import multiprocessing
from io import BytesIO
import sys
import warnings
from luigi import six
from luigi import target
from luigi.format import get_default_format, MixedUnicodeBytes
class MockFileSystem(target.FileSystem):
"""
MockFileSystem inspects/modifies _data to simulate file system operations.
"""
_data = None
def get_all_data(self):
# This starts a server in the background, so we don't want to do it in the global scope
if MockFileSystem._data is None:
MockFileSystem._data = multiprocessing.Manager().dict()
return MockFileSystem._data
def get_data(self, fn):
return self.get_all_data()[fn]
def exists(self, path):
return MockTarget(path).exists()
def remove(self, path, recursive=True, skip_trash=True):
"""
Removes the given mockfile. skip_trash doesn't have any meaning.
"""
if recursive:
to_delete = []
for s in self.get_all_data().keys():
if s.startswith(path):
to_delete.append(s)
for s in to_delete:
self.get_all_data().pop(s)
else:
self.get_all_data().pop(path)
def listdir(self, path):
"""
listdir does a prefix match of self.get_all_data(), but doesn't yet support globs.
"""
return [s for s in self.get_all_data().keys()
if s.startswith(path)]
def isdir(self, path):
return any(self.listdir(path))
def mkdir(self, path, parents=True, raise_if_exists=False):
"""
mkdir is a noop.
"""
pass
def clear(self):
self.get_all_data().clear()
class MockTarget(target.FileSystemTarget):
fs = MockFileSystem()
def __init__(self, fn, is_tmp=None, mirror_on_stderr=False, format=None):
self._mirror_on_stderr = mirror_on_stderr
self._fn = fn
if format is None:
format = get_default_format()
        # Allow writing unicode to the file, for retrocompatibility
if six.PY2:
format = format >> MixedUnicodeBytes
self.format = format
    def exists(self):
return self._fn in self.fs.get_all_data()
def rename(self, path, raise_if_exists=False):
if raise_if_exists and path in self.fs.get_all_data():
raise RuntimeError('Destination exists: %s' % path)
contents = self.fs.get_all_data().pop(self._fn)
self.fs.get_all_data()[path] = contents
@property
def path(self):
return self._fn
def open(self, mode):
fn = self._fn
mock_target = self
class Buffer(BytesIO):
# Just to be able to do writing + reading from the same buffer
_write_line = True
def set_wrapper(self, wrapper):
self.wrapper = wrapper
def write(self, data):
if six.PY3:
stderrbytes = sys.stderr.buffer
else:
stderrbytes = sys.stderr
if mock_target._mirror_on_stderr:
if self._write_line:
sys.stderr.write(fn + ": ")
stderrbytes.write(data)
                    # bytes-safe check (data is bytes under PY3, str under PY2)
                    if data[-1:] == b'\n':
self._write_line = True
else:
self._write_line = False
super(Buffer, self).write(data)
def close(self):
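                # Closing a write handle flushes the format wrapper (if any)
                # and commits the buffer contents to the shared in-memory store.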
if mode == 'w':
try:
mock_target.wrapper.flush()
except AttributeError:
pass
mock_target.fs.get_all_data()[fn] = self.getvalue()
super(Buffer, self).close()
def __exit__(self, exc_type, exc_val, exc_tb):
if not exc_type:
self.close()
def __enter__(self):
return self
def readable(self):
return mode == 'r'
            def writable(self):
return mode == 'w'
def seekable(self):
return False
if mode == 'w':
wrapper = self.format.pipe_writer(Buffer())
wrapper.set_wrapper(wrapper)
return wrapper
else:
return self.format.pipe_reader(Buffer(self.fs.get_all_data()[fn]))
class MockFile(MockTarget):
def __init__(self, *args, **kwargs):
warnings.warn("MockFile has been renamed MockTarget", DeprecationWarning, stacklevel=2)
super(MockFile, self).__init__(*args, **kwargs)
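# Illustrative usage sketch (not part of the public API): MockTarget behaves
# like a file-backed Target, but all data lives in the shared in-memory store
# above, which is what makes it convenient in unit tests. The helper name
# below is hypothetical.
def _mock_target_example():
    target = MockTarget('/tmp/example.txt')
    with target.open('w') as handle:
        handle.write('hello\n')
    assert target.exists()
    with target.open('r') as handle:
        return handle.read()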
| apache-2.0 | -6,067,764,320,559,909,000 | 29.747191 | 104 | 0.565869 | false |
jsrudani/HadoopHDFSProject | dev-support/relnotes.py | 62 | 7865 | #!/usr/bin/python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import sys
from optparse import OptionParser
import httplib
import urllib
import cgi
try:
import json
except ImportError:
import simplejson as json
namePattern = re.compile(r' \([0-9]+\)')
def clean(str):
return quoteHtml(re.sub(namePattern, "", str))
def formatComponents(str):
str = re.sub(namePattern, '', str).replace("'", "")
if str != "":
ret = "(" + str + ")"
else:
ret = ""
return quoteHtml(ret)
def quoteHtml(str):
return cgi.escape(str).encode('ascii', 'xmlcharrefreplace')
def mstr(obj):
if (obj == None):
return ""
return unicode(obj)
class Version:
"""Represents a version number"""
def __init__(self, data):
self.mod = False
self.data = data
found = re.match('^((\d+)(\.\d+)*).*$', data)
if (found):
self.parts = [ int(p) for p in found.group(1).split('.') ]
else:
self.parts = []
# backfill version with zeroes if missing parts
self.parts.extend((0,) * (3 - len(self.parts)))
def decBugFix(self):
self.mod = True
self.parts[2] -= 1
return self
def __str__(self):
if (self.mod):
return '.'.join([ str(p) for p in self.parts ])
return self.data
def __cmp__(self, other):
return cmp(self.parts, other.parts)
class Jira:
"""A single JIRA"""
def __init__(self, data, parent):
self.key = data['key']
self.fields = data['fields']
self.parent = parent
self.notes = None
def getId(self):
return mstr(self.key)
def getDescription(self):
return mstr(self.fields['description'])
def getReleaseNote(self):
if (self.notes == None):
field = self.parent.fieldIdMap['Release Note']
if (self.fields.has_key(field)):
self.notes=mstr(self.fields[field])
else:
self.notes=self.getDescription()
return self.notes
def getPriority(self):
ret = ""
pri = self.fields['priority']
if(pri != None):
ret = pri['name']
return mstr(ret)
def getAssignee(self):
ret = ""
mid = self.fields['assignee']
if(mid != None):
ret = mid['displayName']
return mstr(ret)
def getComponents(self):
return " , ".join([ comp['name'] for comp in self.fields['components'] ])
def getSummary(self):
return self.fields['summary']
def getType(self):
ret = ""
mid = self.fields['issuetype']
if(mid != None):
ret = mid['name']
return mstr(ret)
def getReporter(self):
ret = ""
mid = self.fields['reporter']
if(mid != None):
ret = mid['displayName']
return mstr(ret)
def getProject(self):
ret = ""
mid = self.fields['project']
if(mid != None):
ret = mid['key']
return mstr(ret)
class JiraIter:
"""An Iterator of JIRAs"""
def __init__(self, versions):
self.versions = versions
resp = urllib.urlopen("https://issues.apache.org/jira/rest/api/2/field")
data = json.loads(resp.read())
self.fieldIdMap = {}
for part in data:
self.fieldIdMap[part['name']] = part['id']
self.jiras = []
at=0
end=1
count=100
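    # Page through the JIRA search REST API, "count" issues per request, until
    # every issue fixed in the requested versions has been collected.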
while (at < end):
params = urllib.urlencode({'jql': "project in (HADOOP,HDFS,MAPREDUCE,YARN) and fixVersion in ('"+"' , '".join(versions)+"') and resolution = Fixed", 'startAt':at, 'maxResults':count})
resp = urllib.urlopen("https://issues.apache.org/jira/rest/api/2/search?%s"%params)
data = json.loads(resp.read())
if (data.has_key('errorMessages')):
raise Exception(data['errorMessages'])
at = data['startAt'] + data['maxResults']
end = data['total']
self.jiras.extend(data['issues'])
self.iter = self.jiras.__iter__()
def __iter__(self):
return self
def next(self):
data = self.iter.next()
j = Jira(data, self)
return j
class Outputs:
"""Several different files to output to at the same time"""
def __init__(self, base_file_name, file_name_pattern, keys, params={}):
self.params = params
self.base = open(base_file_name%params, 'w')
self.others = {}
for key in keys:
both = dict(params)
both['key'] = key
self.others[key] = open(file_name_pattern%both, 'w')
def writeAll(self, pattern):
both = dict(self.params)
both['key'] = ''
self.base.write(pattern%both)
for key in self.others.keys():
both = dict(self.params)
both['key'] = key
self.others[key].write(pattern%both)
def writeKeyRaw(self, key, str):
self.base.write(str)
if (self.others.has_key(key)):
self.others[key].write(str)
def close(self):
self.base.close()
for fd in self.others.values():
fd.close()
def main():
parser = OptionParser(usage="usage: %prog [options] [USER-ignored] [PASSWORD-ignored] [VERSION]")
parser.add_option("-v", "--version", dest="versions",
action="append", type="string",
help="versions in JIRA to include in releasenotes", metavar="VERSION")
parser.add_option("--previousVer", dest="previousVer",
action="store", type="string",
help="previous version to include in releasenotes", metavar="VERSION")
(options, args) = parser.parse_args()
if (options.versions == None):
options.versions = []
if (len(args) > 2):
options.versions.append(args[2])
if (len(options.versions) <= 0):
parser.error("At least one version needs to be supplied")
  versions = [Version(v) for v in options.versions]
  versions.sort()
maxVersion = str(versions[-1])
if(options.previousVer == None):
options.previousVer = str(versions[0].decBugFix())
print >> sys.stderr, "WARNING: no previousVersion given, guessing it is "+options.previousVer
list = JiraIter(options.versions)
version = maxVersion
outputs = Outputs("releasenotes.%(ver)s.html",
"releasenotes.%(key)s.%(ver)s.html",
["HADOOP","HDFS","MAPREDUCE","YARN"], {"ver":maxVersion, "previousVer":options.previousVer})
head = '<META http-equiv="Content-Type" content="text/html; charset=UTF-8">\n' \
'<title>Hadoop %(key)s %(ver)s Release Notes</title>\n' \
'<STYLE type="text/css">\n' \
' H1 {font-family: sans-serif}\n' \
' H2 {font-family: sans-serif; margin-left: 7mm}\n' \
' TABLE {margin-left: 7mm}\n' \
'</STYLE>\n' \
'</head>\n' \
'<body>\n' \
'<h1>Hadoop %(key)s %(ver)s Release Notes</h1>\n' \
'These release notes include new developer and user-facing incompatibilities, features, and major improvements. \n' \
'<a name="changes"/>\n' \
'<h2>Changes since Hadoop %(previousVer)s</h2>\n' \
'<ul>\n'
outputs.writeAll(head)
for jira in list:
line = '<li> <a href="https://issues.apache.org/jira/browse/%s">%s</a>.\n' \
' %s %s reported by %s and fixed by %s %s<br>\n' \
' <b>%s</b><br>\n' \
' <blockquote>%s</blockquote></li>\n' \
% (quoteHtml(jira.getId()), quoteHtml(jira.getId()), clean(jira.getPriority()), clean(jira.getType()).lower(),
quoteHtml(jira.getReporter()), quoteHtml(jira.getAssignee()), formatComponents(jira.getComponents()),
quoteHtml(jira.getSummary()), quoteHtml(jira.getReleaseNote()))
outputs.writeKeyRaw(jira.getProject(), line)
outputs.writeAll("</ul>\n</body></html>\n")
outputs.close()
if __name__ == "__main__":
main()
| apache-2.0 | 6,230,308,597,149,682,000 | 27.70438 | 189 | 0.617165 | false |
wlach/treeherder | treeherder/model/error_summary.py | 2 | 8237 | import json
import logging
import re
from django.conf import settings
from django.core.urlresolvers import reverse
logger = logging.getLogger(__name__)
LEAK_RE = re.compile(r'\d+ bytes leaked \((.+)\)$')
CRASH_RE = re.compile(r'.+ application crashed \[@ (.+)\]$')
MOZHARNESS_RE = re.compile(
r'^\d+:\d+:\d+[ ]+(?:DEBUG|INFO|WARNING|ERROR|CRITICAL|FATAL) - [ ]?'
)
def get_error_summary(all_errors):
"""
Transform the error lines into the artifact format.
Add bug suggestions if they are found.
"""
error_summary = []
bugscache_uri = '{0}{1}'.format(
settings.API_HOSTNAME,
reverse("bugscache-list")
)
terms_requested = {}
for err in all_errors:
# remove the mozharness prefix
clean_line = get_mozharness_substring(err['line'])
search_terms = []
# get a meaningful search term out of the error line
search_term = get_error_search_term(clean_line)
bugs = dict(open_recent=[], all_others=[])
# collect open recent and all other bugs suggestions
if search_term:
search_terms.append(search_term)
if search_term not in terms_requested:
# retrieve the list of suggestions from the api
bugs = get_bugs_for_search_term(
search_term,
bugscache_uri
)
terms_requested[search_term] = bugs
else:
bugs = terms_requested[search_term]
if not bugs or not (bugs['open_recent'] or
bugs['all_others']):
# no suggestions, try to use
# the crash signature as search term
crash_signature = get_crash_signature(clean_line)
if crash_signature:
search_terms.append(crash_signature)
if crash_signature not in terms_requested:
bugs = get_bugs_for_search_term(
crash_signature,
bugscache_uri
)
terms_requested[crash_signature] = bugs
else:
bugs = terms_requested[crash_signature]
# TODO: Rename 'search' to 'error_text' or similar, since that's
# closer to what it actually represents (bug 1091060).
error_summary.append({
"search": clean_line,
"search_terms": search_terms,
"bugs": bugs
})
return error_summary
def get_mozharness_substring(line):
return MOZHARNESS_RE.sub('', line).strip()
def get_error_search_term(error_line):
"""
    Derive a search term from the given failure line, suitable for use
    in a bugscache full-text search.
"""
if not error_line:
return None
# This is strongly inspired by
# https://hg.mozilla.org/webtools/tbpl/file/tip/php/inc/AnnotatedSummaryGenerator.php#l73
tokens = error_line.split(" | ")
search_term = None
if len(tokens) >= 3:
# it's in the "FAILURE-TYPE | testNameOrFilePath | message" type format.
test_name_or_path = tokens[1]
message = tokens[2]
# Leak failure messages are of the form:
# leakcheck | .*\d+ bytes leaked (Object-1, Object-2, Object-3, ...)
match = LEAK_RE.search(message)
if match:
search_term = match.group(1)
else:
for splitter in ("/", "\\"):
# if this is a path, we are interested in the last part
test_name_or_path = test_name_or_path.split(splitter)[-1]
search_term = test_name_or_path
# If the failure line was not in the pipe symbol delimited format or the search term
# will likely return too many (or irrelevant) results (eg: too short or matches terms
# on the blacklist), then we fall back to searching for the entire failure line if
# it is suitable.
if not (search_term and is_helpful_search_term(search_term)):
if is_helpful_search_term(error_line):
search_term = error_line
else:
search_term = None
# Searching for extremely long search terms is undesirable, since:
# a) Bugzilla's max summary length is 256 characters, and once "Intermittent "
# and platform/suite information is prefixed, there are even fewer characters
# left for us to use for the failure string against which we need to match.
# b) For long search terms, the additional length does little to prevent against
# false positives, but means we're more susceptible to false negatives due to
# run-to-run variances in the error messages (eg paths, process IDs).
if search_term:
search_term = search_term[:100]
return search_term
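# Illustrative sketch (the sample lines below are made up, not taken from a
# real log): a pipe-delimited failure line yields the test file name as the
# search term, while a leak message yields the list of leaked objects.
def _search_term_examples():
    pipe_line = 'TEST-UNEXPECTED-FAIL | path/to/test_example.html | some assertion failed'
    leak_line = 'leakcheck | plugin process | 1024 bytes leaked (Foo, Bar)'
    return (get_error_search_term(pipe_line),  # -> 'test_example.html'
            get_error_search_term(leak_line))  # -> 'Foo, Bar'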
def get_crash_signature(error_line):
"""
Detect if the error_line contains a crash signature
and return it if it's a helpful search term
"""
search_term = None
match = CRASH_RE.match(error_line)
if match and is_helpful_search_term(match.group(1)):
search_term = match.group(1)
return search_term
def is_helpful_search_term(search_term):
# Search terms that will match too many bug summaries
# and so not result in useful suggestions.
search_term = search_term.strip()
blacklist = [
'automation.py',
'remoteautomation.py',
'Shutdown',
'undefined',
'Main app process exited normally',
'Traceback (most recent call last):',
'Return code: 0',
'Return code: 1',
'Return code: 2',
'Return code: 9',
'Return code: 10',
'Exiting 1',
'Exiting 9',
'CrashingThread(void *)',
'libSystem.B.dylib + 0xd7a',
'linux-gate.so + 0x424',
'TypeError: content is null',
'leakcheck'
]
return len(search_term) > 4 and not (search_term in blacklist)
def get_bugs_for_search_term(search, base_uri):
"""
    Fetch bug suggestions from the base_uri endpoint, filtering on the
    given search term.
"""
from treeherder.etl.common import fetch_json
params = {
'search': search
}
return fetch_json(base_uri, params=params)
def get_artifacts_that_need_bug_suggestions(artifact_list):
"""
    Return the ``text_log_summary`` artifacts that don't yet have a ``Bug suggestions`` artifact
"""
bs_guid_list = [x['job_guid'] for x in artifact_list if
x['name'] == 'Bug suggestions']
tls_list = [x for x in artifact_list if
x['name'] == 'text_log_summary' and
x['job_guid'] not in bs_guid_list]
return tls_list
def get_error_summary_artifacts(artifact_list):
"""
Create bug suggestions artifact(s) for any text_log_summary artifacts.
``artifact_list`` here is a list of artifacts that may contain one or more
    ``text_log_summary`` artifacts. If it does, we extract the error lines
from it. If there ARE error lines, then we generate the
``bug suggestions`` artifacts and return them.
"""
bug_suggestion_artifacts = []
for artifact in artifact_list:
# this is the only artifact name eligible to trigger generation of bug
# suggestions.
if artifact['name'] != 'text_log_summary':
continue
all_errors = get_all_errors(artifact)
bug_suggestion_artifacts.append({
"job_guid": artifact['job_guid'],
"name": 'Bug suggestions',
"type": 'json',
"blob": json.dumps(get_error_summary(all_errors))
})
return bug_suggestion_artifacts
def get_all_errors(artifact):
"""Extract the error lines from an artifact's blob field"""
artifact_blob = json.loads(artifact['blob'])
if isinstance(artifact_blob, dict):
return artifact_blob.get('step_data', {}).get('all_errors', [])
def load_error_summary(project, artifacts, job_id_lookup):
"""Load new bug suggestions artifacts if we generate them."""
from treeherder.model.derived import ArtifactsModel
bsa = get_error_summary_artifacts(artifacts)
if bsa:
with ArtifactsModel(project) as artifacts_model:
artifacts_model.load_job_artifacts(bsa, job_id_lookup)
| mpl-2.0 | 1,784,334,657,268,240,100 | 32.21371 | 93 | 0.606653 | false |
ambitioninc/kmatch | kmatch/tests/mixin_tests.py | 1 | 1208 | import unittest
from kmatch import KmatchTestMixin
class MixinTestUsingMixin(KmatchTestMixin, unittest.TestCase):
def test_matches(self):
"""
        Test .assertKmatches() using the mixin on a true match
"""
self.assertKmatches(['<=', 'f', 0], {'f': -1})
def test_matches_raises_error(self):
"""
        Test .assertKmatches() raises an AssertionError on a false match
"""
with self.assertRaises(AssertionError):
self.assertKmatches(['<=', 'f', 0], {'f': 1})
def test_not_matches(self):
"""
        Test .assertNotKmatches() using the mixin on a false match
"""
self.assertNotKmatches(['<=', 'f', 0], {'f': 1})
def test_not_matches_no_key_error(self):
"""
        Test .assertNotKmatches() when the pattern's key is missing from the value
"""
self.assertNotKmatches(['<=', 'f', 0], {'g': 1})
self.assertNotKmatches(['<=', 'f', 0], {'f': 1})
def test_not_matches_raises_error(self):
"""
        Test .assertNotKmatches() raises an AssertionError on a true match
"""
with self.assertRaises(AssertionError):
self.assertNotKmatches(['<=', 'f', 0], {'f': -1})
| mit | -4,141,386,362,831,977,000 | 29.974359 | 75 | 0.557947 | false |
mrkm4ntr/incubator-airflow | tests/test_utils/hdfs_utils.py | 8 | 7310 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
class FakeWebHDFSHook:
def __init__(self, conn_id):
self.conn_id = conn_id
def get_conn(self):
return self.conn_id
def check_for_path(self, hdfs_path):
return hdfs_path
class FakeSnakeBiteClientException(Exception):
pass
class FakeSnakeBiteClient:
def __init__(self):
self.started = True
def ls(self, path, include_toplevel=False): # pylint: disable=invalid-name
"""
        List paths like the real snakebite client would.
        :param path: the list of paths to test
        :param include_toplevel: whether to include the top-level directory info
        :return: a list of path-info dicts for the matching queries
"""
if path[0] == '/datadirectory/empty_directory' and not include_toplevel:
return []
elif path[0] == '/datadirectory/datafile':
return [
{
'group': 'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 0,
'blocksize': 134217728,
'owner': 'hdfs',
'path': '/datadirectory/datafile',
}
]
elif path[0] == '/datadirectory/empty_directory' and include_toplevel:
return [
{
'group': 'supergroup',
'permission': 493,
'file_type': 'd',
'access_time': 0,
'block_replication': 0,
'modification_time': 1481132141540,
'length': 0,
'blocksize': 0,
'owner': 'hdfs',
'path': '/datadirectory/empty_directory',
}
]
elif path[0] == '/datadirectory/not_empty_directory' and include_toplevel:
return [
{
'group': 'supergroup',
'permission': 493,
'file_type': 'd',
'access_time': 0,
'block_replication': 0,
'modification_time': 1481132141540,
'length': 0,
'blocksize': 0,
'owner': 'hdfs',
'path': '/datadirectory/empty_directory',
},
{
'group': 'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 0,
'blocksize': 134217728,
'owner': 'hdfs',
'path': '/datadirectory/not_empty_directory/test_file',
},
]
elif path[0] == '/datadirectory/not_empty_directory':
return [
{
'group': 'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 0,
'blocksize': 134217728,
'owner': 'hdfs',
'path': '/datadirectory/not_empty_directory/test_file',
}
]
elif path[0] == '/datadirectory/not_existing_file_or_directory':
raise FakeSnakeBiteClientException
elif path[0] == '/datadirectory/regex_dir':
return [
{
'group': 'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': 'hdfs',
'path': '/datadirectory/regex_dir/test1file',
},
{
'group': 'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': 'hdfs',
'path': '/datadirectory/regex_dir/test2file',
},
{
'group': 'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': 'hdfs',
'path': '/datadirectory/regex_dir/test3file',
},
{
'group': 'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': 'hdfs',
'path': '/datadirectory/regex_dir/copying_file_1.txt._COPYING_',
},
{
'group': 'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': 'hdfs',
'path': '/datadirectory/regex_dir/copying_file_3.txt.sftp',
},
]
else:
raise FakeSnakeBiteClientException
class FakeHDFSHook:
def __init__(self, conn_id=None):
self.conn_id = conn_id
def get_conn(self):
client = FakeSnakeBiteClient()
return client
| apache-2.0 | 5,413,529,356,039,544,000 | 36.295918 | 84 | 0.452668 | false |
tima/ansible | lib/ansible/modules/cloud/azure/azure_rm_virtualnetwork.py | 16 | 13352 | #!/usr/bin/python
#
# Copyright (c) 2016 Matt Davis, <[email protected]>
# Chris Houseknecht, <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = '''
---
module: azure_rm_virtualnetwork
version_added: "2.1"
short_description: Manage Azure virtual networks.
description:
    - Create, update or delete a virtual network. Allows setting and updating the available IPv4 address ranges
and setting custom DNS servers. Use the azure_rm_subnet module to associate subnets with a virtual network.
options:
resource_group:
description:
- name of resource group.
required: true
address_prefixes_cidr:
description:
- List of IPv4 address ranges where each is formatted using CIDR notation. Required when creating
a new virtual network or using purge_address_prefixes.
aliases:
- address_prefixes
default: null
required: false
dns_servers:
description:
- Custom list of DNS servers. Maximum length of two. The first server in the list will be treated
as the Primary server. This is an explicit list. Existing DNS servers will be replaced with the
specified list. Use the purge_dns_servers option to remove all custom DNS servers and revert to
default Azure servers.
default: null
required: false
location:
description:
- Valid azure location. Defaults to location of the resource group.
default: resource_group location
required: false
name:
description:
- name of the virtual network.
required: true
purge_address_prefixes:
description:
- Use with state present to remove any existing address_prefixes.
default: false
purge_dns_servers:
description:
- Use with state present to remove existing DNS servers, reverting to default Azure servers. Mutually
exclusive with dns_servers.
default: false
required: false
state:
description:
- Assert the state of the virtual network. Use 'present' to create or update and
'absent' to delete.
default: present
choices:
- absent
- present
required: false
extends_documentation_fragment:
- azure
- azure_tags
author:
- "Chris Houseknecht (@chouseknecht)"
- "Matt Davis (@nitzmahone)"
'''
EXAMPLES = '''
- name: Create a virtual network
azure_rm_virtualnetwork:
name: foobar
resource_group: Testing
address_prefixes_cidr:
- "10.1.0.0/16"
- "172.100.0.0/16"
dns_servers:
- "127.0.0.1"
- "127.0.0.2"
tags:
testing: testing
delete: on-exit
- name: Delete a virtual network
azure_rm_virtualnetwork:
name: foobar
resource_group: Testing
state: absent
'''
RETURN = '''
state:
description: Current state of the virtual network.
returned: always
type: dict
sample: {
"address_prefixes": [
"10.1.0.0/16",
"172.100.0.0/16"
],
"dns_servers": [
"127.0.0.1",
"127.0.0.3"
],
"etag": 'W/"0712e87c-f02f-4bb3-8b9e-2da0390a3886"',
"id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/virtualNetworks/my_test_network",
"location": "eastus",
"name": "my_test_network",
"provisioning_state": "Succeeded",
"tags": null,
"type": "Microsoft.Network/virtualNetworks"
}
'''
try:
from msrestazure.azure_exceptions import CloudError
except ImportError:
# This is handled in azure_rm_common
pass
from ansible.module_utils.azure_rm_common import AzureRMModuleBase, CIDR_PATTERN
def virtual_network_to_dict(vnet):
'''
Convert a virtual network object to a dict.
:param vnet: VirtualNet object
:return: dict
'''
results = dict(
id=vnet.id,
name=vnet.name,
location=vnet.location,
type=vnet.type,
tags=vnet.tags,
provisioning_state=vnet.provisioning_state,
etag=vnet.etag
)
if vnet.dhcp_options and len(vnet.dhcp_options.dns_servers) > 0:
results['dns_servers'] = []
for server in vnet.dhcp_options.dns_servers:
results['dns_servers'].append(server)
if vnet.address_space and len(vnet.address_space.address_prefixes) > 0:
results['address_prefixes'] = []
for space in vnet.address_space.address_prefixes:
results['address_prefixes'].append(space)
return results
class AzureRMVirtualNetwork(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(type='str', required=True),
name=dict(type='str', required=True),
state=dict(type='str', default='present', choices=['present', 'absent']),
location=dict(type='str'),
address_prefixes_cidr=dict(type='list', aliases=['address_prefixes']),
dns_servers=dict(type='list',),
purge_address_prefixes=dict(type='bool', default=False, aliases=['purge']),
purge_dns_servers=dict(type='bool', default=False),
)
mutually_exclusive = [
('dns_servers', 'purge_dns_servers')
]
required_if = [
('purge_address_prefixes', True, ['address_prefixes_cidr'])
]
self.resource_group = None
self.name = None
self.state = None
self.location = None
self.address_prefixes_cidr = None
self.purge_address_prefixes = None
self.dns_servers = None
self.purge_dns_servers = None
self.results = dict(
changed=False,
state=dict()
)
super(AzureRMVirtualNetwork, self).__init__(self.module_arg_spec,
mutually_exclusive=mutually_exclusive,
required_if=required_if,
supports_check_mode=True)
def exec_module(self, **kwargs):
for key in list(self.module_arg_spec.keys()) + ['tags']:
setattr(self, key, kwargs[key])
self.results['check_mode'] = self.check_mode
resource_group = self.get_resource_group(self.resource_group)
if not self.location:
# Set default location
self.location = resource_group.location
if self.state == 'present' and self.purge_address_prefixes:
for prefix in self.address_prefixes_cidr:
if not CIDR_PATTERN.match(prefix):
self.fail("Parameter error: invalid address prefix value {0}".format(prefix))
if self.dns_servers and len(self.dns_servers) > 2:
self.fail("Parameter error: You can provide a maximum of 2 DNS servers.")
changed = False
results = dict()
try:
self.log('Fetching vnet {0}'.format(self.name))
vnet = self.network_client.virtual_networks.get(self.resource_group, self.name)
results = virtual_network_to_dict(vnet)
self.log('Vnet exists {0}'.format(self.name))
self.log(results, pretty_print=True)
self.check_provisioning_state(vnet, self.state)
if self.state == 'present':
if self.address_prefixes_cidr:
existing_address_prefix_set = set(vnet.address_space.address_prefixes)
requested_address_prefix_set = set(self.address_prefixes_cidr)
missing_prefixes = requested_address_prefix_set - existing_address_prefix_set
extra_prefixes = existing_address_prefix_set - requested_address_prefix_set
if len(missing_prefixes) > 0:
self.log('CHANGED: there are missing address_prefixes')
changed = True
if not self.purge_address_prefixes:
# add the missing prefixes
for prefix in missing_prefixes:
results['address_prefixes'].append(prefix)
if len(extra_prefixes) > 0 and self.purge_address_prefixes:
self.log('CHANGED: there are address_prefixes to purge')
changed = True
# replace existing address prefixes with requested set
results['address_prefixes'] = self.address_prefixes_cidr
update_tags, results['tags'] = self.update_tags(results['tags'])
if update_tags:
changed = True
if self.dns_servers:
existing_dns_set = set(vnet.dhcp_options.dns_servers)
requested_dns_set = set(self.dns_servers)
if existing_dns_set != requested_dns_set:
self.log('CHANGED: replacing DNS servers')
changed = True
results['dns_servers'] = self.dns_servers
if self.purge_dns_servers and vnet.dhcp_options and len(vnet.dhcp_options.dns_servers) > 0:
self.log('CHANGED: purging existing DNS servers')
changed = True
results['dns_servers'] = []
elif self.state == 'absent':
self.log("CHANGED: vnet exists but requested state is 'absent'")
changed = True
except CloudError:
self.log('Vnet {0} does not exist'.format(self.name))
if self.state == 'present':
self.log("CHANGED: vnet {0} does not exist but requested state is 'present'".format(self.name))
changed = True
self.results['changed'] = changed
self.results['state'] = results
if self.check_mode:
return self.results
if changed:
if self.state == 'present':
if not results:
# create a new virtual network
self.log("Create virtual network {0}".format(self.name))
if not self.address_prefixes_cidr:
self.fail('Parameter error: address_prefixes_cidr required when creating a virtual network')
vnet = self.network_models.VirtualNetwork(
location=self.location,
address_space=self.network_models.AddressSpace(
address_prefixes=self.address_prefixes_cidr
)
)
if self.dns_servers:
vnet.dhcp_options = self.network_models.DhcpOptions(
dns_servers=self.dns_servers
)
if self.tags:
vnet.tags = self.tags
self.results['state'] = self.create_or_update_vnet(vnet)
else:
# update existing virtual network
self.log("Update virtual network {0}".format(self.name))
vnet = self.network_models.VirtualNetwork(
location=results['location'],
address_space=self.network_models.AddressSpace(
address_prefixes=results['address_prefixes']
),
tags=results['tags']
)
if results.get('dns_servers'):
vnet.dhcp_options = self.network_models.DhcpOptions(
dns_servers=results['dns_servers']
)
self.results['state'] = self.create_or_update_vnet(vnet)
elif self.state == 'absent':
self.delete_virtual_network()
self.results['state']['status'] = 'Deleted'
return self.results
def create_or_update_vnet(self, vnet):
try:
poller = self.network_client.virtual_networks.create_or_update(self.resource_group, self.name, vnet)
new_vnet = self.get_poller_result(poller)
except Exception as exc:
self.fail("Error creating or updating virtual network {0} - {1}".format(self.name, str(exc)))
return virtual_network_to_dict(new_vnet)
def delete_virtual_network(self):
try:
poller = self.network_client.virtual_networks.delete(self.resource_group, self.name)
result = self.get_poller_result(poller)
except Exception as exc:
self.fail("Error deleting virtual network {0} - {1}".format(self.name, str(exc)))
return result
def main():
AzureRMVirtualNetwork()
if __name__ == '__main__':
main()
| gpl-3.0 | -3,439,006,784,755,109,000 | 36.931818 | 148 | 0.56059 | false |
fredrik-johansson/mpmath | mpmath/calculus/quadrature.py | 1 | 42371 | import math
from ..libmp.backend import xrange
class QuadratureRule(object):
"""
Quadrature rules are implemented using this class, in order to
simplify the code and provide a common infrastructure
for tasks such as error estimation and node caching.
You can implement a custom quadrature rule by subclassing
:class:`QuadratureRule` and implementing the appropriate
methods. The subclass can then be used by :func:`~mpmath.quad` by
passing it as the *method* argument.
    :class:`QuadratureRule` instances are meant to be long-lived: each
    context keeps a single instance per rule (see :class:`QuadratureMethods`)
    and the computed nodes are cached on that instance between calls.
"""
def __init__(self, ctx):
self.ctx = ctx
self.standard_cache = {}
self.transformed_cache = {}
self.interval_count = {}
def clear(self):
"""
Delete cached node data.
"""
self.standard_cache = {}
self.transformed_cache = {}
self.interval_count = {}
def calc_nodes(self, degree, prec, verbose=False):
r"""
Compute nodes for the standard interval `[-1, 1]`. Subclasses
should probably implement only this method, and use
:func:`~mpmath.get_nodes` method to retrieve the nodes.
"""
raise NotImplementedError
def get_nodes(self, a, b, degree, prec, verbose=False):
"""
Return nodes for given interval, degree and precision. The
nodes are retrieved from a cache if already computed;
otherwise they are computed by calling :func:`~mpmath.calc_nodes`
and are then cached.
Subclasses should probably not implement this method,
but just implement :func:`~mpmath.calc_nodes` for the actual
node computation.
"""
key = (a, b, degree, prec)
if key in self.transformed_cache:
return self.transformed_cache[key]
orig = self.ctx.prec
try:
self.ctx.prec = prec+20
# Get nodes on standard interval
if (degree, prec) in self.standard_cache:
nodes = self.standard_cache[degree, prec]
else:
nodes = self.calc_nodes(degree, prec, verbose)
self.standard_cache[degree, prec] = nodes
# Transform to general interval
nodes = self.transform_nodes(nodes, a, b, verbose)
if key in self.interval_count:
self.transformed_cache[key] = nodes
else:
self.interval_count[key] = True
finally:
self.ctx.prec = orig
return nodes
def transform_nodes(self, nodes, a, b, verbose=False):
r"""
Rescale standardized nodes (for `[-1, 1]`) to a general
interval `[a, b]`. For a finite interval, a simple linear
change of variables is used. Otherwise, the following
transformations are used:
.. math ::
\lbrack a, \infty \rbrack : t = \frac{1}{x} + (a-1)
\lbrack -\infty, b \rbrack : t = (b+1) - \frac{1}{x}
\lbrack -\infty, \infty \rbrack : t = \frac{x}{\sqrt{1-x^2}}
"""
ctx = self.ctx
a = ctx.convert(a)
b = ctx.convert(b)
one = ctx.one
if (a, b) == (-one, one):
return nodes
half = ctx.mpf(0.5)
new_nodes = []
if ctx.isinf(a) or ctx.isinf(b):
if (a, b) == (ctx.ninf, ctx.inf):
p05 = -half
for x, w in nodes:
x2 = x*x
px1 = one-x2
spx1 = px1**p05
x = x*spx1
w *= spx1/px1
new_nodes.append((x, w))
elif a == ctx.ninf:
b1 = b+1
for x, w in nodes:
u = 2/(x+one)
x = b1-u
w *= half*u**2
new_nodes.append((x, w))
elif b == ctx.inf:
a1 = a-1
for x, w in nodes:
u = 2/(x+one)
x = a1+u
w *= half*u**2
new_nodes.append((x, w))
elif a == ctx.inf or b == ctx.ninf:
return [(x,-w) for (x,w) in self.transform_nodes(nodes, b, a, verbose)]
else:
raise NotImplementedError
else:
# Simple linear change of variables
C = (b-a)/2
D = (b+a)/2
for x, w in nodes:
new_nodes.append((D+C*x, C*w))
return new_nodes
def guess_degree(self, prec):
"""
Given a desired precision `p` in bits, estimate the degree `m`
of the quadrature required to accomplish full accuracy for
typical integrals. By default, :func:`~mpmath.quad` will perform up
to `m` iterations. The value of `m` should be a slight
overestimate, so that "slightly bad" integrals can be dealt
with automatically using a few extra iterations. On the
other hand, it should not be too big, so :func:`~mpmath.quad` can
quit within a reasonable amount of time when it is given
an "unsolvable" integral.
The default formula used by :func:`~mpmath.guess_degree` is tuned
for both :class:`TanhSinh` and :class:`GaussLegendre`.
The output is roughly as follows:
+---------+---------+
| `p` | `m` |
+=========+=========+
| 50 | 6 |
+---------+---------+
| 100 | 7 |
+---------+---------+
| 500 | 10 |
+---------+---------+
| 3000 | 12 |
+---------+---------+
This formula is based purely on a limited amount of
experimentation and will sometimes be wrong.
"""
# Expected degree
# XXX: use mag
g = int(4 + max(0, self.ctx.log(prec/30.0, 2)))
# Reasonable "worst case"
g += 2
return g
def estimate_error(self, results, prec, epsilon):
r"""
Given results from integrations `[I_1, I_2, \ldots, I_k]` done
with a quadrature of rule of degree `1, 2, \ldots, k`, estimate
the error of `I_k`.
For `k = 2`, we estimate `|I_{\infty}-I_2|` as `|I_2-I_1|`.
For `k > 2`, we extrapolate `|I_{\infty}-I_k| \approx |I_{k+1}-I_k|`
from `|I_k-I_{k-1}|` and `|I_k-I_{k-2}|` under the assumption
that each degree increment roughly doubles the accuracy of
the quadrature rule (this is true for both :class:`TanhSinh`
and :class:`GaussLegendre`). The extrapolation formula is given
by Borwein, Bailey & Girgensohn. Although not very conservative,
this method seems to be very robust in practice.
"""
if len(results) == 2:
return abs(results[0]-results[1])
try:
if results[-1] == results[-2] == results[-3]:
return self.ctx.zero
D1 = self.ctx.log(abs(results[-1]-results[-2]), 10)
D2 = self.ctx.log(abs(results[-1]-results[-3]), 10)
except ValueError:
return epsilon
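        # D3 floors the error exponent at the working precision; D4 then
        # extrapolates the next-level error from D1 and D2 as described above.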
D3 = -prec
D4 = min(0, max(D1**2/D2, 2*D1, D3))
return self.ctx.mpf(10) ** int(D4)
def summation(self, f, points, prec, epsilon, max_degree, verbose=False):
"""
Main integration function. Computes the 1D integral over
the interval specified by *points*. For each subinterval,
performs quadrature of degree from 1 up to *max_degree*
until :func:`~mpmath.estimate_error` signals convergence.
:func:`~mpmath.summation` transforms each subintegration to
the standard interval and then calls :func:`~mpmath.sum_next`.
"""
ctx = self.ctx
I = total_err = ctx.zero
for i in xrange(len(points)-1):
a, b = points[i], points[i+1]
if a == b:
continue
# XXX: we could use a single variable transformation,
# but this is not good in practice. We get better accuracy
# by having 0 as an endpoint.
if (a, b) == (ctx.ninf, ctx.inf):
_f = f
f = lambda x: _f(-x) + _f(x)
a, b = (ctx.zero, ctx.inf)
results = []
err = ctx.zero
for degree in xrange(1, max_degree+1):
nodes = self.get_nodes(a, b, degree, prec, verbose)
if verbose:
print("Integrating from %s to %s (degree %s of %s)" % \
(ctx.nstr(a), ctx.nstr(b), degree, max_degree))
result = self.sum_next(f, nodes, degree, prec, results, verbose)
results.append(result)
if degree > 1:
err = self.estimate_error(results, prec, epsilon)
if verbose:
print("Estimated error:", ctx.nstr(err), " epsilon:", ctx.nstr(epsilon), " result: ", ctx.nstr(result))
if err <= epsilon:
break
I += results[-1]
total_err += err
if total_err > epsilon:
if verbose:
print("Failed to reach full accuracy. Estimated error:", ctx.nstr(total_err))
return I, total_err
def sum_next(self, f, nodes, degree, prec, previous, verbose=False):
r"""
        Evaluates the step sum `\sum w_k f(x_k)` where the *nodes* list
        contains the `(x_k, w_k)` pairs.
        :func:`~mpmath.summation` will supply the list *previous* of
        values computed by :func:`~mpmath.sum_next` at previous degrees, in
        case the quadrature rule is able to reuse them.
"""
return self.ctx.fdot((w, f(x)) for (x,w) in nodes)
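# Minimal sketch of a custom rule, as described in the QuadratureRule
# docstring: subclass it, implement calc_nodes for the standard interval
# [-1, 1], and pass the class as quad's method argument. This composite
# midpoint rule is for illustration only (it converges far too slowly for
# real use) and is not part of mpmath.
class _MidpointRule(QuadratureRule):
    def calc_nodes(self, degree, prec, verbose=False):
        ctx = self.ctx
        n = 3 * 2**degree
        h = ctx.mpf(2)/n
        # Midpoints of n equal cells covering [-1, 1], each with weight h.
        return [(-ctx.one + (k + ctx.mpf(0.5))*h, h) for k in xrange(n)]
# Usage would be ``ctx.quad(f, [a, b], method=_MidpointRule)``; quad
# instantiates the class with the active context.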
class TanhSinh(QuadratureRule):
r"""
This class implements "tanh-sinh" or "doubly exponential"
quadrature. This quadrature rule is based on the Euler-Maclaurin
integral formula. By performing a change of variables involving
nested exponentials / hyperbolic functions (hence the name), the
derivatives at the endpoints vanish rapidly. Since the error term
in the Euler-Maclaurin formula depends on the derivatives at the
endpoints, a simple step sum becomes extremely accurate. In
practice, this means that doubling the number of evaluation
points roughly doubles the number of accurate digits.
Comparison to Gauss-Legendre:
* Initial computation of nodes is usually faster
* Handles endpoint singularities better
* Handles infinite integration intervals better
* Is slower for smooth integrands once nodes have been computed
The implementation of the tanh-sinh algorithm is based on the
description given in Borwein, Bailey & Girgensohn, "Experimentation
in Mathematics - Computational Paths to Discovery", A K Peters,
2003, pages 312-313. In the present implementation, a few
improvements have been made:
* A more efficient scheme is used to compute nodes (exploiting
recurrence for the exponential function)
* The nodes are computed successively instead of all at once
**References**
* [Bailey]_
* http://users.cs.dal.ca/~jborwein/tanh-sinh.pdf
"""
def sum_next(self, f, nodes, degree, prec, previous, verbose=False):
"""
Step sum for tanh-sinh quadrature of degree `m`. We exploit the
fact that half of the abscissas at degree `m` are precisely the
abscissas from degree `m-1`. Thus reusing the result from
the previous level allows a 2x speedup.
"""
h = self.ctx.mpf(2)**(-degree)
# Abscissas overlap, so reusing saves half of the time
if previous:
S = previous[-1]/(h*2)
else:
S = self.ctx.zero
S += self.ctx.fdot((w,f(x)) for (x,w) in nodes)
return h*S
def calc_nodes(self, degree, prec, verbose=False):
r"""
The abscissas and weights for tanh-sinh quadrature of degree
`m` are given by
.. math::
x_k = \tanh(\pi/2 \sinh(t_k))
w_k = \pi/2 \cosh(t_k) / \cosh(\pi/2 \sinh(t_k))^2
where `t_k = t_0 + hk` for a step length `h \sim 2^{-m}`. The
list of nodes is actually infinite, but the weights die off so
rapidly that only a few are needed.
"""
ctx = self.ctx
nodes = []
extra = 20
ctx.prec += extra
tol = ctx.ldexp(1, -prec-10)
pi4 = ctx.pi/4
# For simplicity, we work in steps h = 1/2^n, with the first point
# offset so that we can reuse the sum from the previous degree
# We define degree 1 to include the "degree 0" steps, including
# the point x = 0. (It doesn't work well otherwise; not sure why.)
t0 = ctx.ldexp(1, -degree)
if degree == 1:
#nodes.append((mpf(0), pi4))
#nodes.append((-mpf(0), pi4))
nodes.append((ctx.zero, ctx.pi/2))
h = t0
else:
h = t0*2
# Since h is fixed, we can compute the next exponential
# by simply multiplying by exp(h)
expt0 = ctx.exp(t0)
a = pi4 * expt0
b = pi4 / expt0
udelta = ctx.exp(h)
urdelta = 1/udelta
for k in xrange(0, 20*2**degree+1):
# Reference implementation:
# t = t0 + k*h
# x = tanh(pi/2 * sinh(t))
# w = pi/2 * cosh(t) / cosh(pi/2 * sinh(t))**2
# Fast implementation. Note that c = exp(pi/2 * sinh(t))
c = ctx.exp(a-b)
d = 1/c
co = (c+d)/2
si = (c-d)/2
x = si / co
w = (a+b) / co**2
diff = abs(x-1)
if diff <= tol:
break
nodes.append((x, w))
nodes.append((-x, w))
a *= udelta
b *= urdelta
if verbose and k % 300 == 150:
# Note: the number displayed is rather arbitrary. Should
# figure out how to print something that looks more like a
# percentage
print("Calculating nodes:", ctx.nstr(-ctx.log(diff, 10) / prec))
ctx.prec -= extra
return nodes
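# Small usage sketch for the rule above (illustrative, not part of the
# library): tanh-sinh copes well with the endpoint singularity of log at 0,
# as the class docstring explains. ``ctx`` is assumed to be an mpmath
# context such as ``mpmath.mp``.
def _tanh_sinh_example(ctx):
    return ctx.quad(ctx.log, [0, 1], method='tanh-sinh')  # ~ -1.0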
class GaussLegendre(QuadratureRule):
r"""
This class implements Gauss-Legendre quadrature, which is
exceptionally efficient for polynomials and polynomial-like (i.e.
very smooth) integrands.
The abscissas and weights are given by roots and values of
Legendre polynomials, which are the orthogonal polynomials
on `[-1, 1]` with respect to the unit weight
(see :func:`~mpmath.legendre`).
In this implementation, we take the "degree" `m` of the quadrature
to denote a Gauss-Legendre rule of degree `3 \cdot 2^m` (following
Borwein, Bailey & Girgensohn). This way we get quadratic, rather
than linear, convergence as the degree is incremented.
Comparison to tanh-sinh quadrature:
* Is faster for smooth integrands once nodes have been computed
* Initial computation of nodes is usually slower
* Handles endpoint singularities worse
* Handles infinite integration intervals worse
"""
def calc_nodes(self, degree, prec, verbose=False):
r"""
Calculates the abscissas and weights for Gauss-Legendre
quadrature of degree of given degree (actually `3 \cdot 2^m`).
"""
ctx = self.ctx
# It is important that the epsilon is set lower than the
# "real" epsilon
epsilon = ctx.ldexp(1, -prec-8)
# Fairly high precision might be required for accurate
# evaluation of the roots
orig = ctx.prec
ctx.prec = int(prec*1.5)
if degree == 1:
x = ctx.sqrt(ctx.mpf(3)/5)
w = ctx.mpf(5)/9
nodes = [(-x,w),(ctx.zero,ctx.mpf(8)/9),(x,w)]
ctx.prec = orig
return nodes
nodes = []
n = 3*2**(degree-1)
upto = n//2 + 1
for j in xrange(1, upto):
# Asymptotic formula for the roots
r = ctx.mpf(math.cos(math.pi*(j-0.25)/(n+0.5)))
# Newton iteration
while 1:
t1, t2 = 1, 0
# Evaluates the Legendre polynomial using its defining
# recurrence relation
for j1 in xrange(1,n+1):
t3, t2, t1 = t2, t1, ((2*j1-1)*r*t1 - (j1-1)*t2)/j1
t4 = n*(r*t1-t2)/(r**2-1)
a = t1/t4
r = r - a
if abs(a) < epsilon:
break
x = r
w = 2/((1-r**2)*t4**2)
if verbose and j % 30 == 15:
print("Computing nodes (%i of %i)" % (j, upto))
nodes.append((x, w))
nodes.append((-x, w))
ctx.prec = orig
return nodes
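# Companion sketch for Gauss-Legendre (illustrative, not part of the
# library): it shines on smooth integrands, and repeated integrations reuse
# the cached nodes. ``ctx`` is assumed to be an mpmath context such as
# ``mpmath.mp``.
def _gauss_legendre_example(ctx):
    return ctx.quad(ctx.cos, [0, 1], method='gauss-legendre')  # ~ sin(1)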
class QuadratureMethods(object):
def __init__(ctx, *args, **kwargs):
ctx._gauss_legendre = GaussLegendre(ctx)
ctx._tanh_sinh = TanhSinh(ctx)
def quad(ctx, f, *points, **kwargs):
r"""
Computes a single, double or triple integral over a given
1D interval, 2D rectangle, or 3D cuboid. A basic example::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> quad(sin, [0, pi])
2.0
A basic 2D integral::
>>> f = lambda x, y: cos(x+y/2)
>>> quad(f, [-pi/2, pi/2], [0, pi])
4.0
**Interval format**
The integration range for each dimension may be specified
using a list or tuple. Arguments are interpreted as follows:
``quad(f, [x1, x2])`` -- calculates
`\int_{x_1}^{x_2} f(x) \, dx`
``quad(f, [x1, x2], [y1, y2])`` -- calculates
`\int_{x_1}^{x_2} \int_{y_1}^{y_2} f(x,y) \, dy \, dx`
``quad(f, [x1, x2], [y1, y2], [z1, z2])`` -- calculates
`\int_{x_1}^{x_2} \int_{y_1}^{y_2} \int_{z_1}^{z_2} f(x,y,z)
\, dz \, dy \, dx`
Endpoints may be finite or infinite. An interval descriptor
may also contain more than two points. In this
case, the integration is split into subintervals, between
each pair of consecutive points. This is useful for
dealing with mid-interval discontinuities, or integrating
over large intervals where the function is irregular or
oscillates.
**Options**
:func:`~mpmath.quad` recognizes the following keyword arguments:
*method*
Chooses integration algorithm (described below).
*error*
If set to true, :func:`~mpmath.quad` returns `(v, e)` where `v` is the
integral and `e` is the estimated error.
*maxdegree*
Maximum degree of the quadrature rule to try before
quitting.
*verbose*
Print details about progress.
**Algorithms**
Mpmath presently implements two integration algorithms: tanh-sinh
quadrature and Gauss-Legendre quadrature. These can be selected
using *method='tanh-sinh'* or *method='gauss-legendre'* or by
passing the classes *method=TanhSinh*, *method=GaussLegendre*.
The functions :func:`~mpmath.quadts` and :func:`~mpmath.quadgl` are also available
as shortcuts.
Both algorithms have the property that doubling the number of
evaluation points roughly doubles the accuracy, so both are ideal
for high precision quadrature (hundreds or thousands of digits).
At high precision, computing the nodes and weights for the
integration can be expensive (more expensive than computing the
function values). To make repeated integrations fast, nodes
are automatically cached.
The advantages of the tanh-sinh algorithm are that it tends to
handle endpoint singularities well, and that the nodes are cheap
to compute on the first run. For these reasons, it is used by
:func:`~mpmath.quad` as the default algorithm.
Gauss-Legendre quadrature often requires fewer function
evaluations, and is therefore often faster for repeated use, but
the algorithm does not handle endpoint singularities as well and
the nodes are more expensive to compute. Gauss-Legendre quadrature
can be a better choice if the integrand is smooth and repeated
integrations are required (e.g. for multiple integrals).
See the documentation for :class:`TanhSinh` and
:class:`GaussLegendre` for additional details.
**Examples of 1D integrals**
Intervals may be infinite or half-infinite. The following two
examples evaluate the limits of the inverse tangent function
(`\int 1/(1+x^2) = \tan^{-1} x`), and the Gaussian integral
`\int_{\infty}^{\infty} \exp(-x^2)\,dx = \sqrt{\pi}`::
>>> mp.dps = 15
>>> quad(lambda x: 2/(x**2+1), [0, inf])
3.14159265358979
>>> quad(lambda x: exp(-x**2), [-inf, inf])**2
3.14159265358979
Integrals can typically be resolved to high precision.
The following computes 50 digits of `\pi` by integrating the
area of the half-circle defined by `x^2 + y^2 \le 1`,
`-1 \le x \le 1`, `y \ge 0`::
>>> mp.dps = 50
>>> 2*quad(lambda x: sqrt(1-x**2), [-1, 1])
3.1415926535897932384626433832795028841971693993751
One can just as well compute 1000 digits (output truncated)::
>>> mp.dps = 1000
>>> 2*quad(lambda x: sqrt(1-x**2), [-1, 1]) #doctest:+ELLIPSIS
3.141592653589793238462643383279502884...216420199
Complex integrals are supported. The following computes
a residue at `z = 0` by integrating counterclockwise along the
diamond-shaped path from `1` to `+i` to `-1` to `-i` to `1`::
>>> mp.dps = 15
>>> chop(quad(lambda z: 1/z, [1,j,-1,-j,1]))
(0.0 + 6.28318530717959j)
**Examples of 2D and 3D integrals**
Here are several nice examples of analytically solvable
2D integrals (taken from MathWorld [1]) that can be evaluated
to high precision fairly rapidly by :func:`~mpmath.quad`::
>>> mp.dps = 30
>>> f = lambda x, y: (x-1)/((1-x*y)*log(x*y))
>>> quad(f, [0, 1], [0, 1])
0.577215664901532860606512090082
>>> +euler
0.577215664901532860606512090082
>>> f = lambda x, y: 1/sqrt(1+x**2+y**2)
>>> quad(f, [-1, 1], [-1, 1])
3.17343648530607134219175646705
>>> 4*log(2+sqrt(3))-2*pi/3
3.17343648530607134219175646705
>>> f = lambda x, y: 1/(1-x**2 * y**2)
>>> quad(f, [0, 1], [0, 1])
1.23370055013616982735431137498
>>> pi**2 / 8
1.23370055013616982735431137498
>>> quad(lambda x, y: 1/(1-x*y), [0, 1], [0, 1])
1.64493406684822643647241516665
>>> pi**2 / 6
1.64493406684822643647241516665
Multiple integrals may be done over infinite ranges::
>>> mp.dps = 15
>>> print(quad(lambda x,y: exp(-x-y), [0, inf], [1, inf]))
0.367879441171442
>>> print(1/e)
0.367879441171442
For nonrectangular areas, one can call :func:`~mpmath.quad` recursively.
For example, we can replicate the earlier example of calculating
`\pi` by integrating over the unit-circle, and actually use double
quadrature to actually measure the area circle::
>>> f = lambda x: quad(lambda y: 1, [-sqrt(1-x**2), sqrt(1-x**2)])
>>> quad(f, [-1, 1])
3.14159265358979
Here is a simple triple integral::
>>> mp.dps = 15
>>> f = lambda x,y,z: x*y/(1+z)
>>> quad(f, [0,1], [0,1], [1,2], method='gauss-legendre')
0.101366277027041
>>> (log(3)-log(2))/4
0.101366277027041
**Singularities**
Both tanh-sinh and Gauss-Legendre quadrature are designed to
integrate smooth (infinitely differentiable) functions. Neither
algorithm copes well with mid-interval singularities (such as
mid-interval discontinuities in `f(x)` or `f'(x)`).
The best solution is to split the integral into parts::
>>> mp.dps = 15
>>> quad(lambda x: abs(sin(x)), [0, 2*pi]) # Bad
3.99900894176779
>>> quad(lambda x: abs(sin(x)), [0, pi, 2*pi]) # Good
4.0
The tanh-sinh rule often works well for integrands having a
singularity at one or both endpoints::
>>> mp.dps = 15
>>> quad(log, [0, 1], method='tanh-sinh') # Good
-1.0
>>> quad(log, [0, 1], method='gauss-legendre') # Bad
-0.999932197413801
However, the result may still be inaccurate for some functions::
>>> quad(lambda x: 1/sqrt(x), [0, 1], method='tanh-sinh')
1.99999999946942
This problem is not due to the quadrature rule per se, but to
numerical amplification of errors in the nodes. The problem can be
circumvented by temporarily increasing the precision::
>>> mp.dps = 30
>>> a = quad(lambda x: 1/sqrt(x), [0, 1], method='tanh-sinh')
>>> mp.dps = 15
>>> +a
2.0
**Highly variable functions**
For functions that are smooth (in the sense of being infinitely
differentiable) but contain sharp mid-interval peaks or many
"bumps", :func:`~mpmath.quad` may fail to provide full accuracy. For
example, with default settings, :func:`~mpmath.quad` is able to integrate
`\sin(x)` accurately over an interval of length 100 but not over
length 1000::
>>> quad(sin, [0, 100]); 1-cos(100) # Good
0.137681127712316
0.137681127712316
>>> quad(sin, [0, 1000]); 1-cos(1000) # Bad
-37.8587612408485
0.437620923709297
One solution is to break the integration into 10 intervals of
length 100::
>>> quad(sin, linspace(0, 1000, 10)) # Good
0.437620923709297
Another is to increase the degree of the quadrature::
>>> quad(sin, [0, 1000], maxdegree=10) # Also good
0.437620923709297
Whether splitting the interval or increasing the degree is
more efficient differs from case to case. Another example is the
function `1/(1+x^2)`, which has a sharp peak centered around
`x = 0`::
>>> f = lambda x: 1/(1+x**2)
>>> quad(f, [-100, 100]) # Bad
3.64804647105268
>>> quad(f, [-100, 100], maxdegree=10) # Good
3.12159332021646
>>> quad(f, [-100, 0, 100]) # Also good
3.12159332021646
**References**
1. http://mathworld.wolfram.com/DoubleIntegral.html
"""
rule = kwargs.get('method', 'tanh-sinh')
if type(rule) is str:
if rule == 'tanh-sinh':
rule = ctx._tanh_sinh
elif rule == 'gauss-legendre':
rule = ctx._gauss_legendre
else:
raise ValueError("unknown quadrature rule: %s" % rule)
else:
rule = rule(ctx)
verbose = kwargs.get('verbose')
dim = len(points)
orig = prec = ctx.prec
epsilon = ctx.eps/8
m = kwargs.get('maxdegree') or rule.guess_degree(prec)
points = [ctx._as_points(p) for p in points]
try:
ctx.prec += 20
if dim == 1:
v, err = rule.summation(f, points[0], prec, epsilon, m, verbose)
elif dim == 2:
v, err = rule.summation(lambda x: \
rule.summation(lambda y: f(x,y), \
points[1], prec, epsilon, m)[0],
points[0], prec, epsilon, m, verbose)
elif dim == 3:
v, err = rule.summation(lambda x: \
rule.summation(lambda y: \
rule.summation(lambda z: f(x,y,z), \
points[2], prec, epsilon, m)[0],
points[1], prec, epsilon, m)[0],
points[0], prec, epsilon, m, verbose)
else:
raise NotImplementedError("quadrature must have dim 1, 2 or 3")
finally:
ctx.prec = orig
if kwargs.get("error"):
return +v, err
return +v
def quadts(ctx, *args, **kwargs):
"""
Performs tanh-sinh quadrature. The call
quadts(func, *points, ...)
is simply a shortcut for:
quad(func, *points, ..., method=TanhSinh)
For example, a single integral and a double integral:
quadts(lambda x: exp(cos(x)), [0, 1])
quadts(lambda x, y: exp(cos(x+y)), [0, 1], [0, 1])
See the documentation for quad for information about how points
arguments and keyword arguments are parsed.
See documentation for TanhSinh for algorithmic information about
tanh-sinh quadrature.
"""
kwargs['method'] = 'tanh-sinh'
return ctx.quad(*args, **kwargs)
def quadgl(ctx, *args, **kwargs):
"""
Performs Gauss-Legendre quadrature. The call
quadgl(func, *points, ...)
is simply a shortcut for:
quad(func, *points, ..., method=GaussLegendre)
For example, a single integral and a double integral:
quadgl(lambda x: exp(cos(x)), [0, 1])
quadgl(lambda x, y: exp(cos(x+y)), [0, 1], [0, 1])
See the documentation for quad for information about how points
arguments and keyword arguments are parsed.
    See documentation for GaussLegendre for algorithmic information about
    Gauss-Legendre quadrature.
"""
kwargs['method'] = 'gauss-legendre'
return ctx.quad(*args, **kwargs)
def quadosc(ctx, f, interval, omega=None, period=None, zeros=None):
r"""
Calculates
.. math ::
I = \int_a^b f(x) dx
where at least one of `a` and `b` is infinite and where
`f(x) = g(x) \cos(\omega x + \phi)` for some slowly
decreasing function `g(x)`. With proper input, :func:`~mpmath.quadosc`
can also handle oscillatory integrals where the oscillation
rate is different from a pure sine or cosine wave.
In the standard case when `|a| < \infty, b = \infty`,
:func:`~mpmath.quadosc` works by evaluating the infinite series
.. math ::
I = \int_a^{x_1} f(x) dx +
\sum_{k=1}^{\infty} \int_{x_k}^{x_{k+1}} f(x) dx
where `x_k` are consecutive zeros (alternatively
some other periodic reference point) of `f(x)`.
Accordingly, :func:`~mpmath.quadosc` requires information about the
zeros of `f(x)`. For a periodic function, you can specify
the zeros by either providing the angular frequency `\omega`
(*omega*) or the *period* `2 \pi/\omega`. In general, you can
    specify the `n`-th zero by providing the *zeros* argument.
Below is an example of each::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> f = lambda x: sin(3*x)/(x**2+1)
>>> quadosc(f, [0,inf], omega=3)
0.37833007080198
>>> quadosc(f, [0,inf], period=2*pi/3)
0.37833007080198
>>> quadosc(f, [0,inf], zeros=lambda n: pi*n/3)
0.37833007080198
>>> (ei(3)*exp(-3)-exp(3)*ei(-3))/2 # Computed by Mathematica
0.37833007080198
Note that *zeros* was specified to multiply `n` by the
*half-period*, not the full period. In theory, it does not matter
whether each partial integral is done over a half period or a full
period. However, if done over half-periods, the infinite series
passed to :func:`~mpmath.nsum` becomes an *alternating series* and this
typically makes the extrapolation much more efficient.
Here is an example of an integration over the entire real line,
and a half-infinite integration starting at `-\infty`::
>>> quadosc(lambda x: cos(x)/(1+x**2), [-inf, inf], omega=1)
1.15572734979092
>>> pi/e
1.15572734979092
>>> quadosc(lambda x: cos(x)/x**2, [-inf, -1], period=2*pi)
-0.0844109505595739
>>> cos(1)+si(1)-pi/2
-0.0844109505595738
Of course, the integrand may contain a complex exponential just as
well as a real sine or cosine::
>>> quadosc(lambda x: exp(3*j*x)/(1+x**2), [-inf,inf], omega=3)
(0.156410688228254 + 0.0j)
>>> pi/e**3
0.156410688228254
>>> quadosc(lambda x: exp(3*j*x)/(2+x+x**2), [-inf,inf], omega=3)
(0.00317486988463794 - 0.0447701735209082j)
>>> 2*pi/sqrt(7)/exp(3*(j+sqrt(7))/2)
(0.00317486988463794 - 0.0447701735209082j)
**Non-periodic functions**
If `f(x) = g(x) h(x)` for some function `h(x)` that is not
strictly periodic, *omega* or *period* might not work, and it might
be necessary to use *zeros*.
A notable exception can be made for Bessel functions which, though not
periodic, are "asymptotically periodic" in a sufficiently strong sense
that the sum extrapolation will work out::
>>> quadosc(j0, [0, inf], period=2*pi)
1.0
>>> quadosc(j1, [0, inf], period=2*pi)
1.0
More properly, one should provide the exact Bessel function zeros::
>>> j0zero = lambda n: findroot(j0, pi*(n-0.25))
>>> quadosc(j0, [0, inf], zeros=j0zero)
1.0
For an example where *zeros* becomes necessary, consider the
complete Fresnel integrals
.. math ::
\int_0^{\infty} \cos x^2\,dx = \int_0^{\infty} \sin x^2\,dx
= \sqrt{\frac{\pi}{8}}.
Although the integrands do not decrease in magnitude as
`x \to \infty`, the integrals are convergent since the oscillation
rate increases (causing consecutive periods to asymptotically
cancel out). These integrals are virtually impossible to calculate
to any kind of accuracy using standard quadrature rules. However,
if one provides the correct asymptotic distribution of zeros
(`x_n \sim \sqrt{n}`), :func:`~mpmath.quadosc` works::
>>> mp.dps = 30
>>> f = lambda x: cos(x**2)
>>> quadosc(f, [0,inf], zeros=lambda n:sqrt(pi*n))
0.626657068657750125603941321203
>>> f = lambda x: sin(x**2)
>>> quadosc(f, [0,inf], zeros=lambda n:sqrt(pi*n))
0.626657068657750125603941321203
>>> sqrt(pi/8)
0.626657068657750125603941321203
(Interestingly, these integrals can still be evaluated if one
places some other constant than `\pi` in the square root sign.)
In general, if `f(x) \sim g(x) \cos(h(x))`, the zeros follow
the inverse-function distribution `h^{-1}(x)`::
>>> mp.dps = 15
>>> f = lambda x: sin(exp(x))
>>> quadosc(f, [1,inf], zeros=lambda n: log(n))
-0.25024394235267
>>> pi/2-si(e)
-0.250243942352671
**Non-alternating functions**
If the integrand oscillates around a positive value, without
alternating signs, the extrapolation might fail. A simple trick
that sometimes works is to multiply or divide the frequency by 2::
>>> f = lambda x: 1/x**2+sin(x)/x**4
>>> quadosc(f, [1,inf], omega=1) # Bad
1.28642190869861
>>> quadosc(f, [1,inf], omega=0.5) # Perfect
1.28652953559617
>>> 1+(cos(1)+ci(1)+sin(1))/6
1.28652953559617
**Fast decay**
:func:`~mpmath.quadosc` is primarily useful for slowly decaying
integrands. If the integrand decreases exponentially or faster,
:func:`~mpmath.quad` will likely handle it without trouble (and generally be
much faster than :func:`~mpmath.quadosc`)::
>>> quadosc(lambda x: cos(x)/exp(x), [0, inf], omega=1)
0.5
>>> quad(lambda x: cos(x)/exp(x), [0, inf])
0.5
"""
a, b = ctx._as_points(interval)
a = ctx.convert(a)
b = ctx.convert(b)
if [omega, period, zeros].count(None) != 2:
raise ValueError( \
"must specify exactly one of omega, period, zeros")
if a == ctx.ninf and b == ctx.inf:
s1 = ctx.quadosc(f, [a, 0], omega=omega, zeros=zeros, period=period)
s2 = ctx.quadosc(f, [0, b], omega=omega, zeros=zeros, period=period)
return s1 + s2
if a == ctx.ninf:
if zeros:
return ctx.quadosc(lambda x:f(-x), [-b,-a], lambda n: zeros(-n))
else:
return ctx.quadosc(lambda x:f(-x), [-b,-a], omega=omega, period=period)
if b != ctx.inf:
raise ValueError("quadosc requires an infinite integration interval")
if not zeros:
if omega:
period = 2*ctx.pi/omega
zeros = lambda n: n*period/2
#for n in range(1,10):
# p = zeros(n)
# if p > a:
# break
#if n >= 9:
# raise ValueError("zeros do not appear to be correctly indexed")
n = 1
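    # Integrate directly from a to the first zero, then sum the integrals over
    # consecutive zero-to-zero (half-period) intervals with nsum(); the terms
    # typically alternate in sign, which speeds up the extrapolation.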
s = ctx.quadgl(f, [a, zeros(n)])
def term(k):
return ctx.quadgl(f, [zeros(k), zeros(k+1)])
s += ctx.nsum(term, [n, ctx.inf])
return s
def quadsubdiv(ctx, f, interval, tol=None, maxintervals=None, **kwargs):
"""
Computes the integral of *f* over the interval or path specified
by *interval*, using :func:`~mpmath.quad` together with adaptive
subdivision of the interval.
This function gives an accurate answer for some integrals where
:func:`~mpmath.quad` fails::
>>> mp.dps = 15; mp.pretty = True
>>> quad(lambda x: abs(sin(x)), [0, 2*pi])
3.99900894176779
>>> quadsubdiv(lambda x: abs(sin(x)), [0, 2*pi])
4.0
>>> quadsubdiv(sin, [0, 1000])
0.437620923709297
>>> quadsubdiv(lambda x: 1/(1+x**2), [-100, 100])
3.12159332021646
>>> quadsubdiv(lambda x: ceil(x), [0, 100])
5050.0
>>> quadsubdiv(lambda x: sin(x+exp(x)), [0,8])
0.347400172657248
The argument *maxintervals* can be set to limit the permissible
subdivision::
>>> quadsubdiv(lambda x: sin(x**2), [0,100], maxintervals=5, error=True)
(-5.40487904307774, 5.011)
>>> quadsubdiv(lambda x: sin(x**2), [0,100], maxintervals=100, error=True)
(0.631417921866934, 1.10101120134116e-17)
    Subdivision does not guarantee a correct answer since the error
estimate on subintervals may be inaccurate::
>>> quadsubdiv(lambda x: sech(10*x-2)**2 + sech(100*x-40)**4 + sech(1000*x-600)**6, [0,1], error=True)
(0.209736068833883, 1.00011000000001e-18)
>>> mp.dps = 20
>>> quadsubdiv(lambda x: sech(10*x-2)**2 + sech(100*x-40)**4 + sech(1000*x-600)**6, [0,1], error=True)
(0.21080273550054927738, 2.200000001e-24)
The second answer is correct. We can get an accurate result at lower
precision by forcing a finer initial subdivision::
>>> quadsubdiv(lambda x: sech(10*x-2)**2 + sech(100*x-40)**4 + sech(1000*x-600)**6, linspace(0,1,5))
0.210802735500549
The following integral is too oscillatory for convergence, but we can get a
reasonable estimate::
>>> v, err = fp.quadsubdiv(lambda x: fp.sin(1/x), [0,1], error=True)
>>> round(v, 6), round(err, 6)
(0.504067, 1e-06)
>>> sin(1) - ci(1)
0.504067061906928
"""
queue = []
for i in range(len(interval)-1):
queue.append((interval[i], interval[i+1]))
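    # Each queue entry is one subinterval; intervals whose error estimate does
    # not meet tol are bisected and pushed back until the estimate passes or
    # maxintervals is exceeded.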
total = ctx.zero
total_error = ctx.zero
if maxintervals is None:
maxintervals = 10 * ctx.prec
count = 0
quad_args = kwargs.copy()
quad_args["verbose"] = False
quad_args["error"] = True
if tol is None:
tol = +ctx.eps
orig = ctx.prec
try:
ctx.prec += 5
while queue:
a, b = queue.pop()
s, err = ctx.quad(f, [a, b], **quad_args)
if kwargs.get("verbose"):
print("subinterval", count, a, b, err)
if err < tol or count > maxintervals:
total += s
total_error += err
else:
count += 1
if count == maxintervals and kwargs.get("verbose"):
print("warning: number of intervals exceeded maxintervals")
if a == -ctx.inf and b == ctx.inf:
m = 0
elif a == -ctx.inf:
m = min(b-1, 2*b)
elif b == ctx.inf:
m = max(a+1, 2*a)
else:
m = a + (b - a) / 2
queue.append((a, m))
queue.append((m, b))
finally:
ctx.prec = orig
if kwargs.get("error"):
return +total, +total_error
else:
return +total
if __name__ == '__main__':
import doctest
doctest.testmod()
| bsd-3-clause | -3,030,196,217,709,281,000 | 37.069182 | 127 | 0.54103 | false |
Endika/OpenUpgrade | addons/l10n_fr/report/compute_resultant_report.py | 374 | 4004 | # -*- coding: utf-8 -*-
#
#
# Copyright (c) 2008 JAILLET Simon - CrysaLEAD - www.crysalead.fr
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
#
import base_report
from openerp.osv import osv
class cdr(base_report.base_report):
def __init__(self, cr, uid, name, context):
super(cdr, self).__init__(cr, uid, name, context)
def set_context(self, objects, data, ids):
super(cdr, self).set_context(objects, data, ids)
self._load('cdr', self.localcontext['data']['form'])
self._set_variable(
'ct1',
self.localcontext['cdrc1']+self.localcontext['cdrc2']+self.localcontext['cdrc3']+
self.localcontext['cdrc4']+self.localcontext['cdrc5']+self.localcontext['cdrc6']+
self.localcontext['cdrc7']+self.localcontext['cdrc8']+self.localcontext['cdrc9']+
self.localcontext['cdrc10']+self.localcontext['cdrc11']+self.localcontext['cdrc12']+
self.localcontext['cdrc13']+self.localcontext['cdrc14']+self.localcontext['cdrc15']
)
self._set_variable(
'ct3',
self.localcontext['cdrc17']+self.localcontext['cdrc18']+self.localcontext['cdrc19']+
self.localcontext['cdrc20']
)
self._set_variable(
'ct4',
self.localcontext['cdrc21']+self.localcontext['cdrc22']+self.localcontext['cdrc23']
)
self._set_variable(
'charges',
self.localcontext['ct1']+self.localcontext['cdrc16']+self.localcontext['ct3']+
self.localcontext['ct4']+self.localcontext['cdrc24']+self.localcontext['cdrc25']
)
self._set_variable(
'pta',
self.localcontext['cdrp1']+self.localcontext['cdrp2']
)
self._set_variable(
'ptb',
self.localcontext['cdrp3']+self.localcontext['cdrp4']+self.localcontext['cdrp5']+
self.localcontext['cdrp6']+self.localcontext['cdrp7']
)
self._set_variable(
'pt1',
self.localcontext['pta']+self.localcontext['ptb']
)
self._set_variable(
'pt3',
self.localcontext['cdrp9']+self.localcontext['cdrp10']+self.localcontext['cdrp11']+
self.localcontext['cdrp12']+self.localcontext['cdrp13']+self.localcontext['cdrp14']
)
self._set_variable(
'pt4',
self.localcontext['cdrp15']+self.localcontext['cdrp16']+self.localcontext['cdrp17']
)
self._set_variable(
'produits',
self.localcontext['pt1']+self.localcontext['cdrp8']+self.localcontext['pt3']+
self.localcontext['pt4']
)
class wrapped_report_resultat(osv.AbstractModel):
_name = 'report.l10n_fr.report_l10nfrresultat'
_inherit = 'report.abstract_report'
_template = 'l10n_fr.report_l10nfrresultat'
_wrapped_report_class = cdr
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -6,236,081,410,930,613,000 | 39.857143 | 96 | 0.648352 | false |
justathoughtor2/atomicApe | cygwin/lib/python2.7/site-packages/pylint/checkers/newstyle.py | 3 | 6974 | # Copyright (c) 2005-2014 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:[email protected]
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""check for new / old style related problems
"""
import sys
import astroid
from pylint.interfaces import IAstroidChecker, INFERENCE, INFERENCE_FAILURE, HIGH
from pylint.checkers import BaseChecker
from pylint.checkers.utils import (
check_messages,
node_frame_class,
has_known_bases
)
MSGS = {
'E1001': ('Use of __slots__ on an old style class',
'slots-on-old-class',
'Used when an old style class uses the __slots__ attribute.',
{'maxversion': (3, 0)}),
'E1002': ('Use of super on an old style class',
'super-on-old-class',
'Used when an old style class uses the super builtin.',
{'maxversion': (3, 0)}),
'E1003': ('Bad first argument %r given to super()',
'bad-super-call',
'Used when another argument than the current class is given as \
first argument of the super builtin.'),
'E1004': ('Missing argument to super()',
'missing-super-argument',
'Used when the super builtin didn\'t receive an \
argument.',
{'maxversion': (3, 0)}),
'W1001': ('Use of "property" on an old style class',
'property-on-old-class',
'Used when Pylint detect the use of the builtin "property" \
on an old style class while this is relying on new style \
classes features.',
{'maxversion': (3, 0)}),
'C1001': ('Old-style class defined.',
'old-style-class',
'Used when a class is defined that does not inherit from another'
'class and does not inherit explicitly from "object".',
{'maxversion': (3, 0)})
}
class NewStyleConflictChecker(BaseChecker):
"""checks for usage of new style capabilities on old style classes and
other new/old styles conflicts problems
* use of property, __slots__, super
* "super" usage
"""
__implements__ = (IAstroidChecker,)
# configuration section name
name = 'newstyle'
# messages
msgs = MSGS
priority = -2
# configuration options
options = ()
@check_messages('slots-on-old-class', 'old-style-class')
def visit_classdef(self, node):
""" Check __slots__ in old style classes and old
style class definition.
"""
if '__slots__' in node and not node.newstyle:
confidence = (INFERENCE if has_known_bases(node)
else INFERENCE_FAILURE)
self.add_message('slots-on-old-class', node=node,
confidence=confidence)
# The node type could be class, exception, metaclass, or
# interface. Presumably, the non-class-type nodes would always
# have an explicit base class anyway.
if not node.bases and node.type == 'class' and not node.metaclass():
# We use confidence HIGH here because this message should only ever
            # be emitted for classes at the root of the inheritance hierarchy.
self.add_message('old-style-class', node=node, confidence=HIGH)
@check_messages('property-on-old-class')
def visit_call(self, node):
"""check property usage"""
parent = node.parent.frame()
if (isinstance(parent, astroid.ClassDef) and
not parent.newstyle and
isinstance(node.func, astroid.Name)):
confidence = (INFERENCE if has_known_bases(parent)
else INFERENCE_FAILURE)
name = node.func.name
if name == 'property':
self.add_message('property-on-old-class', node=node,
confidence=confidence)
@check_messages('super-on-old-class', 'bad-super-call', 'missing-super-argument')
def visit_functiondef(self, node):
"""check use of super"""
# ignore actual functions or method within a new style class
if not node.is_method():
return
klass = node.parent.frame()
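        # Inspect every call made inside this method: super() calls are checked
        # for use on old-style classes, for a missing argument, and for a first
        # argument that is not the enclosing class.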
for stmt in node.nodes_of_class(astroid.Call):
if node_frame_class(stmt) != node_frame_class(node):
# Don't look down in other scopes.
continue
expr = stmt.func
if not isinstance(expr, astroid.Attribute):
continue
call = expr.expr
# skip the test if using super
if not (isinstance(call, astroid.Call) and
isinstance(call.func, astroid.Name) and
call.func.name == 'super'):
continue
if not klass.newstyle and has_known_bases(klass):
# super should not be used on an old style class
self.add_message('super-on-old-class', node=node)
else:
# super first arg should be the class
if not call.args and sys.version_info[0] == 3:
# unless Python 3
continue
try:
supcls = (call.args and next(call.args[0].infer())
or None)
except astroid.InferenceError:
continue
if supcls is None:
self.add_message('missing-super-argument', node=call)
continue
if klass is not supcls:
name = None
                    # if supcls is not YES, then supcls was inferred
# and use its name. Otherwise, try to look
# for call.args[0].name
if supcls is not astroid.YES:
name = supcls.name
else:
if hasattr(call.args[0], 'name'):
name = call.args[0].name
if name is not None:
self.add_message('bad-super-call', node=call, args=(name, ))
visit_asyncfunctiondef = visit_functiondef
def register(linter):
"""required method to auto register this checker """
linter.register_checker(NewStyleConflictChecker(linter))
| gpl-3.0 | -1,876,628,342,176,921,000 | 40.266272 | 85 | 0.575136 | false |
piffey/ansible | lib/ansible/modules/cloud/amazon/aws_ses_identity_policy.py | 78 | 7303 | #!/usr/bin/python
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: aws_ses_identity_policy
short_description: Manages SES sending authorization policies
description:
- This module allows the user to manage sending authorization policies associated with an SES identity (email or domain).
    - SES sending authorization policies can be used to control what actors are able to send email
on behalf of the validated identity and what conditions must be met by the sent emails.
version_added: "2.6"
author: Ed Costello (@orthanc)
options:
identity:
description: |
The SES identity to attach or remove a policy from. This can be either the full ARN or just
the verified email or domain.
required: true
policy_name:
description: The name used to identify the policy within the scope of the identity it's attached to.
required: true
policy:
        description: A properly formatted JSON sending authorization policy. Required when I(state=present).
state:
description: Whether to create(or update) or delete the authorization policy on the identity.
default: present
choices: [ 'present', 'absent' ]
requirements: [ 'botocore', 'boto3' ]
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: add sending authorization policy to domain identity
aws_ses_identity_policy:
identity: example.com
policy_name: ExamplePolicy
policy: "{{ lookup('template', 'policy.json.j2') }}"
state: present
- name: add sending authorization policy to email identity
aws_ses_identity_policy:
identity: [email protected]
policy_name: ExamplePolicy
policy: "{{ lookup('template', 'policy.json.j2') }}"
state: present
- name: add sending authorization policy to identity using ARN
aws_ses_identity_policy:
identity: "arn:aws:ses:us-east-1:12345678:identity/example.com"
policy_name: ExamplePolicy
policy: "{{ lookup('template', 'policy.json.j2') }}"
state: present
- name: remove sending authorization policy
aws_ses_identity_policy:
identity: example.com
policy_name: ExamplePolicy
state: absent
'''
RETURN = '''
policies:
description: A list of all policies present on the identity after the operation.
returned: success
type: list
sample: [ExamplePolicy]
'''
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import compare_policies, AWSRetry
import json
try:
from botocore.exceptions import BotoCoreError, ClientError
except ImportError:
pass # caught by imported HAS_BOTO3
def get_identity_policy(connection, module, identity, policy_name):
try:
response = connection.get_identity_policies(Identity=identity, PolicyNames=[policy_name], aws_retry=True)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg='Failed to retrieve identity policy {policy}'.format(policy=policy_name))
policies = response['Policies']
if policy_name in policies:
return policies[policy_name]
return None
def create_or_update_identity_policy(connection, module):
identity = module.params.get('identity')
policy_name = module.params.get('policy_name')
required_policy = module.params.get('policy')
required_policy_dict = json.loads(required_policy)
changed = False
policy = get_identity_policy(connection, module, identity, policy_name)
policy_dict = json.loads(policy) if policy else None
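    # compare_policies() returns True when the two policy documents differ
    # semantically, so the policy is only PUT when an update is actually needed.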
if compare_policies(policy_dict, required_policy_dict):
changed = True
try:
if not module.check_mode:
connection.put_identity_policy(Identity=identity, PolicyName=policy_name, Policy=required_policy, aws_retry=True)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg='Failed to put identity policy {policy}'.format(policy=policy_name))
# Load the list of applied policies to include in the response.
# In principle we should be able to just return the response, but given
# eventual consistency behaviours in AWS it's plausible that we could
# end up with a list that doesn't contain the policy we just added.
# So out of paranoia check for this case and if we're missing the policy
# just make sure it's present.
#
# As a nice side benefit this also means the return is correct in check mode
try:
policies_present = connection.list_identity_policies(Identity=identity, aws_retry=True)['PolicyNames']
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg='Failed to list identity policies')
if policy_name is not None and policy_name not in policies_present:
policies_present = list(policies_present)
policies_present.append(policy_name)
module.exit_json(
changed=changed,
policies=policies_present,
)
def delete_identity_policy(connection, module):
identity = module.params.get('identity')
policy_name = module.params.get('policy_name')
changed = False
try:
policies_present = connection.list_identity_policies(Identity=identity, aws_retry=True)['PolicyNames']
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg='Failed to list identity policies')
if policy_name in policies_present:
try:
if not module.check_mode:
connection.delete_identity_policy(Identity=identity, PolicyName=policy_name, aws_retry=True)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg='Failed to delete identity policy {policy}'.format(policy=policy_name))
changed = True
policies_present = list(policies_present)
policies_present.remove(policy_name)
module.exit_json(
changed=changed,
policies=policies_present,
)
def main():
module = AnsibleAWSModule(
argument_spec={
'identity': dict(required=True, type='str'),
'state': dict(default='present', choices=['present', 'absent']),
'policy_name': dict(required=True, type='str'),
'policy': dict(type='json', default=None),
},
required_if=[['state', 'present', ['policy']]],
supports_check_mode=True,
)
# SES APIs seem to have a much lower throttling threshold than most of the rest of the AWS APIs.
# Docs say 1 call per second. This shouldn't actually be a big problem for normal usage, but
# the ansible build runs multiple instances of the test in parallel that's caused throttling
# failures so apply a jittered backoff to call SES calls.
connection = module.client('ses', retry_decorator=AWSRetry.jittered_backoff())
state = module.params.get("state")
if state == 'present':
create_or_update_identity_policy(connection, module)
else:
delete_identity_policy(connection, module)
if __name__ == '__main__':
main()
| gpl-3.0 | 216,714,438,025,741,980 | 36.64433 | 129 | 0.689853 | false |
oktie/linkedct | ctdjango/chardet/universaldetector.py | 190 | 6635 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import constants, sys
from latin1prober import Latin1Prober # windows-1252
from mbcsgroupprober import MBCSGroupProber # multi-byte character sets
from sbcsgroupprober import SBCSGroupProber # single-byte character sets
from escprober import EscCharSetProber # ISO-2022, etc.
import re
MINIMUM_THRESHOLD = 0.20
ePureAscii = 0
eEscAscii = 1
eHighbyte = 2
class UniversalDetector:
def __init__(self):
self._highBitDetector = re.compile(r'[\x80-\xFF]')
self._escDetector = re.compile(r'(\033|~{)')
self._mEscCharSetProber = None
self._mCharSetProbers = []
self.reset()
def reset(self):
self.result = {'encoding': None, 'confidence': 0.0}
self.done = constants.False
self._mStart = constants.True
self._mGotData = constants.False
self._mInputState = ePureAscii
self._mLastChar = ''
if self._mEscCharSetProber:
self._mEscCharSetProber.reset()
for prober in self._mCharSetProbers:
prober.reset()
def feed(self, aBuf):
if self.done: return
aLen = len(aBuf)
if not aLen: return
if not self._mGotData:
# If the data starts with BOM, we know it is UTF
if aBuf[:3] == '\xEF\xBB\xBF':
# EF BB BF UTF-8 with BOM
self.result = {'encoding': "UTF-8", 'confidence': 1.0}
elif aBuf[:4] == '\xFF\xFE\x00\x00':
# FF FE 00 00 UTF-32, little-endian BOM
self.result = {'encoding': "UTF-32LE", 'confidence': 1.0}
elif aBuf[:4] == '\x00\x00\xFE\xFF':
# 00 00 FE FF UTF-32, big-endian BOM
self.result = {'encoding': "UTF-32BE", 'confidence': 1.0}
elif aBuf[:4] == '\xFE\xFF\x00\x00':
# FE FF 00 00 UCS-4, unusual octet order BOM (3412)
self.result = {'encoding': "X-ISO-10646-UCS-4-3412", 'confidence': 1.0}
elif aBuf[:4] == '\x00\x00\xFF\xFE':
# 00 00 FF FE UCS-4, unusual octet order BOM (2143)
self.result = {'encoding': "X-ISO-10646-UCS-4-2143", 'confidence': 1.0}
elif aBuf[:2] == '\xFF\xFE':
# FF FE UTF-16, little endian BOM
self.result = {'encoding': "UTF-16LE", 'confidence': 1.0}
elif aBuf[:2] == '\xFE\xFF':
# FE FF UTF-16, big endian BOM
self.result = {'encoding': "UTF-16BE", 'confidence': 1.0}
self._mGotData = constants.True
if self.result['encoding'] and (self.result['confidence'] > 0.0):
self.done = constants.True
return
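        # No BOM detected: classify the stream as pure ASCII, escape-sequence
        # based (ISO-2022 style), or high-byte, and feed the matching prober(s)
        # below.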
if self._mInputState == ePureAscii:
if self._highBitDetector.search(aBuf):
self._mInputState = eHighbyte
elif (self._mInputState == ePureAscii) and self._escDetector.search(self._mLastChar + aBuf):
self._mInputState = eEscAscii
self._mLastChar = aBuf[-1]
if self._mInputState == eEscAscii:
if not self._mEscCharSetProber:
self._mEscCharSetProber = EscCharSetProber()
if self._mEscCharSetProber.feed(aBuf) == constants.eFoundIt:
self.result = {'encoding': self._mEscCharSetProber.get_charset_name(),
'confidence': self._mEscCharSetProber.get_confidence()}
self.done = constants.True
elif self._mInputState == eHighbyte:
if not self._mCharSetProbers:
self._mCharSetProbers = [MBCSGroupProber(), SBCSGroupProber(), Latin1Prober()]
for prober in self._mCharSetProbers:
if prober.feed(aBuf) == constants.eFoundIt:
self.result = {'encoding': prober.get_charset_name(),
'confidence': prober.get_confidence()}
self.done = constants.True
break
def close(self):
if self.done: return
if not self._mGotData:
if constants._debug:
sys.stderr.write('no data received!\n')
return
self.done = constants.True
if self._mInputState == ePureAscii:
self.result = {'encoding': 'ascii', 'confidence': 1.0}
return self.result
if self._mInputState == eHighbyte:
proberConfidence = None
maxProberConfidence = 0.0
maxProber = None
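            # Report the prober with the highest confidence, but only if it
            # clears MINIMUM_THRESHOLD; otherwise optionally dump all scores
            # for debugging.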
for prober in self._mCharSetProbers:
if not prober: continue
proberConfidence = prober.get_confidence()
if proberConfidence > maxProberConfidence:
maxProberConfidence = proberConfidence
maxProber = prober
if maxProber and (maxProberConfidence > MINIMUM_THRESHOLD):
self.result = {'encoding': maxProber.get_charset_name(),
'confidence': maxProber.get_confidence()}
return self.result
if constants._debug:
            sys.stderr.write('no probers hit minimum threshold\n')
for prober in self._mCharSetProbers[0].mProbers:
if not prober: continue
sys.stderr.write('%s confidence = %s\n' % \
(prober.get_charset_name(), \
prober.get_confidence()))
| apache-2.0 | 8,797,377,409,774,949,000 | 42.084416 | 104 | 0.577091 | false |
OBIGOGIT/etch | binding-python/runtime/src/test/python/tests/binding/support/TestValidator_string.py | 6 | 1079 | # Licensed to the Apache Software Foundation (ASF) under one *
# or more contributor license agreements. See the NOTICE file *
# distributed with this work for additional information *
# regarding copyright ownership. The ASF licenses this file *
# to you under the Apache License, Version 2.0 (the *
# "License"); you may not use this file except in compliance *
# with the License. You may obtain a copy of the License at *
# *
# http://www.apache.org/licenses/LICENSE-2.0 *
# *
# Unless required by applicable law or agreed to in writing, *
# software distributed under the License is distributed on an *
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
# KIND, either express or implied. See the License for the *
# specific language governing permissions and limitations *
# under the License.
import unittest
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 8,494,853,113,510,465,000 | 48.045455 | 65 | 0.595922 | false |
Observer-Wu/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/steps/preparechangelogforrevert.py | 121 | 3180 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from webkitpy.common.checkout.changelog import ChangeLog
from webkitpy.common.config import urls
from webkitpy.tool.grammar import join_with_separators
from webkitpy.tool.steps.abstractstep import AbstractStep
class PrepareChangeLogForRevert(AbstractStep):
@classmethod
def _message_for_revert(cls, revision_list, reason, bug_url=None):
message = "Unreviewed, rolling out %s.\n" % join_with_separators(['r' + str(revision) for revision in revision_list])
for revision in revision_list:
message += "%s\n" % urls.view_revision_url(revision)
if bug_url:
message += "%s\n" % bug_url
# Add an extra new line after the rollout links, before any reason.
message += "\n"
if reason:
message += "%s\n\n" % reason
return message
def run(self, state):
# This could move to prepare-ChangeLog by adding a --revert= option.
self._tool.executive.run_and_throw_if_fail(self._tool.deprecated_port().prepare_changelog_command(), cwd=self._tool.scm().checkout_root)
changelog_paths = self._tool.checkout().modified_changelogs(git_commit=None)
bug_url = self._tool.bugs.bug_url_for_bug_id(state["bug_id"]) if state["bug_id"] else None
message = self._message_for_revert(state["revision_list"], state["reason"], bug_url)
for changelog_path in changelog_paths:
# FIXME: Seems we should prepare the message outside of changelogs.py and then just pass in
# text that we want to use to replace the reviewed by line.
ChangeLog(changelog_path).update_with_unreviewed_message(message)
| bsd-3-clause | 4,755,179,142,633,721,000 | 53.827586 | 144 | 0.726101 | false |
skarra/CalDAVClientLibrary | caldavclientlibrary/client/account.py | 1 | 1580 | ##
# Copyright (c) 2006-2013 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from caldavclientlibrary.client.clientsession import CalDAVSession
from caldavclientlibrary.client.principal import principalCache
class CalDAVAccount(object):
def __init__(self, server, port=None, ssl=False, user="", pswd="", principal=None, root=None, logging=False):
self.session = CalDAVSession(server, port, ssl, user, pswd, principal, root, logging)
self.principal = principalCache.getPrincipal(self.session, self.session.principalPath)
def setUserPswd(self, user, pswd):
self.session.setUserPswd(user, pswd)
self.principal = principalCache.getPrincipal(self.session, self.session.principalPath)
def getPrincipal(self, path=None, refresh=False):
if path:
return principalCache.getPrincipal(self.session, path, refresh=refresh)
elif refresh:
self.principal = principalCache.getPrincipal(self.session, self.session.principalPath, refresh=refresh)
return self.principal
| apache-2.0 | -2,167,797,317,913,153,500 | 39.512821 | 115 | 0.739873 | false |
pierreg/tensorflow | tensorflow/python/kernel_tests/summary_ops_test.py | 10 | 3656 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for summary ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
class SummaryOpsTest(tf.test.TestCase):
def _AsSummary(self, s):
summ = tf.Summary()
summ.ParseFromString(s)
return summ
def testScalarSummary(self):
with self.test_session() as sess:
const = tf.constant([10.0, 20.0])
summ = tf.scalar_summary(["c1", "c2"], const, name="mysumm")
value = sess.run(summ)
self.assertEqual([], summ.get_shape())
self.assertProtoEquals("""
value { tag: "c1" simple_value: 10.0 }
value { tag: "c2" simple_value: 20.0 }
""", self._AsSummary(value))
def testScalarSummaryDefaultName(self):
with self.test_session() as sess:
const = tf.constant([10.0, 20.0])
summ = tf.scalar_summary(["c1", "c2"], const)
value = sess.run(summ)
self.assertEqual([], summ.get_shape())
self.assertProtoEquals("""
value { tag: "c1" simple_value: 10.0 }
value { tag: "c2" simple_value: 20.0 }
""", self._AsSummary(value))
def testMergeSummary(self):
with self.test_session() as sess:
const = tf.constant(10.0)
summ1 = tf.summary.histogram("h", const)
summ2 = tf.scalar_summary("c", const)
merge = tf.summary.merge([summ1, summ2])
value = sess.run(merge)
self.assertEqual([], merge.get_shape())
self.assertProtoEquals("""
value {
tag: "h"
histo {
min: 10.0
max: 10.0
num: 1.0
sum: 10.0
sum_squares: 100.0
bucket_limit: 9.93809490288
bucket_limit: 10.9319043932
bucket_limit: 1.7976931348623157e+308
bucket: 0.0
bucket: 1.0
bucket: 0.0
}
}
value { tag: "c" simple_value: 10.0 }
""", self._AsSummary(value))
def testMergeAllSummaries(self):
with tf.Graph().as_default():
const = tf.constant(10.0)
summ1 = tf.summary.histogram("h", const)
summ2 = tf.summary.scalar("o", const, collections=["foo_key"])
summ3 = tf.summary.scalar("c", const)
merge = tf.summary.merge_all()
self.assertEqual("MergeSummary", merge.op.type)
self.assertEqual(2, len(merge.op.inputs))
self.assertEqual(summ1, merge.op.inputs[0])
self.assertEqual(summ3, merge.op.inputs[1])
merge = tf.merge_all_summaries("foo_key")
self.assertEqual("MergeSummary", merge.op.type)
self.assertEqual(1, len(merge.op.inputs))
self.assertEqual(summ2, merge.op.inputs[0])
self.assertTrue(tf.merge_all_summaries("bar_key") is None)
def testHistogramSummaryTypes(self):
with tf.Graph().as_default():
for dtype in (tf.int8, tf.uint8, tf.int16, tf.int32,
tf.float32, tf.float64):
const = tf.constant(10, dtype=dtype)
tf.summary.histogram("h", const)
if __name__ == "__main__":
tf.test.main()
| apache-2.0 | 7,716,234,374,586,960,000 | 33.490566 | 80 | 0.61488 | false |
marc-sensenich/ansible | lib/ansible/modules/database/influxdb/influxdb_user.py | 15 | 5857 | #!/usr/bin/python
# (c) 2017, Vitaliy Zhhuta <zhhuta () gmail.com>
# inspired by Kamil Szczygiel <kamil.szczygiel () intel.com> influxdb_database module
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: influxdb_user
short_description: Manage InfluxDB users
description:
- Manage InfluxDB users
version_added: 2.5
author: "Vitaliy Zhhuta (@zhhuta)"
requirements:
- "python >= 2.6"
- "influxdb >= 0.9"
options:
user_name:
description:
- Name of the user.
required: True
user_password:
description:
- Password to be set for the user.
required: false
admin:
description:
- Whether the user should be in the admin role or not.
- Since version 2.8, the role will also be updated.
default: no
type: bool
state:
description:
- State of the user.
choices: [ present, absent ]
default: present
extends_documentation_fragment: influxdb
'''
EXAMPLES = '''
- name: Create a user on localhost using default login credentials
influxdb_user:
user_name: john
user_password: s3cr3t
- name: Create a user on localhost using custom login credentials
influxdb_user:
user_name: john
user_password: s3cr3t
login_username: "{{ influxdb_username }}"
login_password: "{{ influxdb_password }}"
- name: Create an admin user on a remote host using custom login credentials
influxdb_user:
user_name: john
user_password: s3cr3t
admin: yes
hostname: "{{ influxdb_hostname }}"
login_username: "{{ influxdb_username }}"
login_password: "{{ influxdb_password }}"
- name: Destroy a user using custom login credentials
influxdb_user:
user_name: john
login_username: "{{ influxdb_username }}"
login_password: "{{ influxdb_password }}"
state: absent
'''
RETURN = '''
#only defaults
'''
import ansible.module_utils.urls
from ansible.module_utils.basic import AnsibleModule
import ansible.module_utils.influxdb as influx
def find_user(module, client, user_name):
user_result = None
try:
users = client.get_list_users()
for user in users:
if user['user'] == user_name:
user_result = user
break
except (ansible.module_utils.urls.ConnectionError, influx.exceptions.InfluxDBClientError) as e:
module.fail_json(msg=str(e))
return user_result
def check_user_password(module, client, user_name, user_password):
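    # Verify the password by switching to the target user and issuing a
    # harmless query; a 401 response means the stored password differs.
    # The original login credentials are restored in the finally block.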
try:
client.switch_user(user_name, user_password)
client.get_list_users()
except influx.exceptions.InfluxDBClientError as e:
if e.code == 401:
return False
except ansible.module_utils.urls.ConnectionError as e:
module.fail_json(msg=str(e))
finally:
# restore previous user
client.switch_user(module.params['username'], module.params['password'])
return True
def set_user_password(module, client, user_name, user_password):
if not module.check_mode:
try:
client.set_user_password(user_name, user_password)
except ansible.module_utils.urls.ConnectionError as e:
module.fail_json(msg=str(e))
def create_user(module, client, user_name, user_password, admin):
if not module.check_mode:
try:
client.create_user(user_name, user_password, admin)
except ansible.module_utils.urls.ConnectionError as e:
module.fail_json(msg=str(e))
module.exit_json(changed=True)
def drop_user(module, client, user_name):
if not module.check_mode:
try:
client.drop_user(user_name)
except influx.exceptions.InfluxDBClientError as e:
module.fail_json(msg=e.content)
module.exit_json(changed=True)
def main():
argument_spec = influx.InfluxDb.influxdb_argument_spec()
argument_spec.update(
state=dict(default='present', type='str', choices=['present', 'absent']),
user_name=dict(required=True, type='str'),
user_password=dict(required=False, type='str', no_log=True),
admin=dict(default='False', type='bool')
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
state = module.params['state']
user_name = module.params['user_name']
user_password = module.params['user_password']
admin = module.params['admin']
influxdb = influx.InfluxDb(module)
client = influxdb.connect_to_influxdb()
user = find_user(module, client, user_name)
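    # state=present updates the existing user's password/admin role in place
    # (or creates the user); state=absent drops the user only if it exists.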
if state == 'present':
if user:
changed = False
if not check_user_password(module, client, user_name, user_password) and user_password is not None:
set_user_password(module, client, user_name, user_password)
changed = True
try:
if admin and not user['admin']:
client.grant_admin_privileges(user_name)
changed = True
elif not admin and user['admin']:
client.revoke_admin_privileges(user_name)
changed = True
except influx.exceptions.InfluxDBClientError as e:
module.fail_json(msg=str(e))
module.exit_json(changed=changed)
else:
user_password = user_password or ''
create_user(module, client, user_name, user_password, admin)
if state == 'absent':
if user:
drop_user(module, client, user_name)
else:
module.exit_json(changed=False)
if __name__ == '__main__':
main()
| gpl-3.0 | -6,453,436,341,432,425,000 | 28.730964 | 111 | 0.632406 | false |
googleinterns/learnbase | learnbase/src/main/webapp/WEB-INF/Lib/macurl2path.py | 332 | 3275 | """Macintosh-specific module for conversion between pathnames and URLs.
Do not import directly; use urllib instead."""
import urllib
import os
__all__ = ["url2pathname","pathname2url"]
def url2pathname(pathname):
"""OS-specific conversion from a relative URL of the 'file' scheme
to a file system path; not recommended for general use."""
#
# XXXX The .. handling should be fixed...
#
tp = urllib.splittype(pathname)[0]
if tp and tp != 'file':
raise RuntimeError, 'Cannot convert non-local URL to pathname'
# Turn starting /// into /, an empty hostname means current host
if pathname[:3] == '///':
pathname = pathname[2:]
elif pathname[:2] == '//':
raise RuntimeError, 'Cannot convert non-local URL to pathname'
components = pathname.split('/')
# Remove . and embedded ..
i = 0
while i < len(components):
if components[i] == '.':
del components[i]
elif components[i] == '..' and i > 0 and \
components[i-1] not in ('', '..'):
del components[i-1:i+1]
i = i-1
elif components[i] == '' and i > 0 and components[i-1] != '':
del components[i]
else:
i = i+1
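    # Classic Mac OS paths use ':' as separator: an absolute path starts with
    # the volume name, while a leading ':' marks a relative path.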
if not components[0]:
# Absolute unix path, don't start with colon
rv = ':'.join(components[1:])
else:
# relative unix path, start with colon. First replace
# leading .. by empty strings (giving ::file)
i = 0
while i < len(components) and components[i] == '..':
components[i] = ''
i = i + 1
rv = ':' + ':'.join(components)
# and finally unquote slashes and other funny characters
return urllib.unquote(rv)
def pathname2url(pathname):
"""OS-specific conversion from a file system path to a relative URL
of the 'file' scheme; not recommended for general use."""
if '/' in pathname:
raise RuntimeError, "Cannot convert pathname containing slashes"
components = pathname.split(':')
# Remove empty first and/or last component
if components[0] == '':
del components[0]
if components[-1] == '':
del components[-1]
# Replace empty string ('::') by .. (will result in '/../' later)
for i in range(len(components)):
if components[i] == '':
components[i] = '..'
# Truncate names longer than 31 bytes
components = map(_pncomp2url, components)
if os.path.isabs(pathname):
return '/' + '/'.join(components)
else:
return '/'.join(components)
def _pncomp2url(component):
component = urllib.quote(component[:31], safe='') # We want to quote slashes
return component
def test():
for url in ["index.html",
"bar/index.html",
"/foo/bar/index.html",
"/foo/bar/",
"/"]:
print '%r -> %r' % (url, url2pathname(url))
for path in ["drive:",
"drive:dir:",
"drive:dir:file",
"drive:file",
"file",
":file",
":dir:",
":dir:file"]:
print '%r -> %r' % (path, pathname2url(path))
if __name__ == '__main__':
test()
| apache-2.0 | 3,826,687,261,916,262,400 | 32.762887 | 81 | 0.544122 | false |
cschenck/blender_sim | fluid_sim_deps/blender-2.69/2.69/scripts/addons/netrender/slave.py | 2 | 17536 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import sys, os, platform, shutil
import http, http.client, http.server
import subprocess, time, threading
import json
import bpy
from netrender.utils import *
import netrender.model
import netrender.repath
import netrender.baking
import netrender.thumbnail as thumbnail
BLENDER_PATH = sys.argv[0]
CANCEL_POLL_SPEED = 2
MAX_TIMEOUT = 10
INCREMENT_TIMEOUT = 1
MAX_CONNECT_TRY = 10
def clearSlave(path):
shutil.rmtree(path)
def slave_Info(netsettings):
sysname, nodename, release, version, machine, processor = platform.uname()
slave = netrender.model.RenderSlave()
slave.name = nodename
slave.stats = sysname + " " + release + " " + machine + " " + processor
if netsettings.slave_tags:
slave.tags = set(netsettings.slave_tags.split(";"))
if netsettings.slave_bake:
slave.tags.add(netrender.model.TAG_BAKING)
if netsettings.slave_render:
slave.tags.add(netrender.model.TAG_RENDER)
return slave
def testCancel(conn, job_id, frame_number):
with ConnectionContext():
conn.request("HEAD", "/status", headers={"job-id":job_id, "job-frame": str(frame_number)})
# canceled if job isn't found anymore
if responseStatus(conn) == http.client.NO_CONTENT:
return True
else:
return False
def testFile(conn, job_id, slave_id, rfile, job_prefix, main_path=None):
job_full_path = createLocalPath(rfile, job_prefix, main_path, rfile.force)
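    # Reuse a previously downloaded copy only when it exists and its hash
    # matches the signature sent by the master; otherwise download the file
    # into a temp file and move it into place.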
found = os.path.exists(job_full_path)
if found and rfile.signature != None:
found_signature = hashFile(job_full_path)
found = found_signature == rfile.signature
if not found:
print("Found file %s at %s but signature mismatch!" % (rfile.filepath, job_full_path))
os.remove(job_full_path)
if not found:
# Force prefix path if not found
job_full_path = createLocalPath(rfile, job_prefix, main_path, True)
print("Downloading", job_full_path)
temp_path = os.path.join(job_prefix, "slave.temp")
with ConnectionContext():
conn.request("GET", fileURL(job_id, rfile.index), headers={"slave-id":slave_id})
response = conn.getresponse()
if response.status != http.client.OK:
return None # file for job not returned by server, need to return an error code to server
f = open(temp_path, "wb")
buf = response.read(1024)
while buf:
f.write(buf)
buf = response.read(1024)
f.close()
os.renames(temp_path, job_full_path)
rfile.filepath = job_full_path
return job_full_path
def breakable_timeout(timeout):
for i in range(timeout):
time.sleep(1)
if engine.test_break():
break
def render_slave(engine, netsettings, threads):
bisleep = BreakableIncrementedSleep(INCREMENT_TIMEOUT, 1, MAX_TIMEOUT, engine.test_break)
engine.update_stats("", "Network render node initiation")
slave_path = bpy.path.abspath(netsettings.path)
if not os.path.exists(slave_path):
print("Slave working path ( %s ) doesn't exist" % netsettings.path)
return
if not os.access(slave_path, os.W_OK):
print("Slave working path ( %s ) is not writable" % netsettings.path)
return
conn = clientConnection(netsettings)
if not conn:
print("Connection failed, will try connecting again at most %i times" % MAX_CONNECT_TRY)
bisleep.reset()
for i in range(MAX_CONNECT_TRY):
bisleep.sleep()
conn = clientConnection(netsettings)
if conn or engine.test_break():
break
print("Retry %i failed, waiting %is before retrying" % (i + 1, bisleep.current))
if conn:
with ConnectionContext():
conn.request("POST", "/slave", json.dumps(slave_Info(netsettings).serialize()))
response = conn.getresponse()
response.read()
slave_id = response.getheader("slave-id")
NODE_PREFIX = os.path.join(slave_path, "slave_" + slave_id)
verifyCreateDir(NODE_PREFIX)
engine.update_stats("", "Network render connected to master, waiting for jobs")
while not engine.test_break():
with ConnectionContext():
conn.request("GET", "/job", headers={"slave-id":slave_id})
response = conn.getresponse()
if response.status == http.client.OK:
bisleep.reset()
job = netrender.model.RenderJob.materialize(json.loads(str(response.read(), encoding='utf8')))
engine.update_stats("", "Network render processing job from master")
job_prefix = os.path.join(NODE_PREFIX, "job_" + job.id)
verifyCreateDir(job_prefix)
# set tempdir for fsaa temp files
# have to set environ var because render is done in a subprocess and that's the easiest way to propagate the setting
os.environ["TMP"] = job_prefix
if job.type == netrender.model.JOB_BLENDER:
job_path = job.files[0].original_path # original path of the first file
main_path, main_file = os.path.split(job_path)
job_full_path = testFile(conn, job.id, slave_id, job.files[0], job_prefix)
print("Fullpath", job_full_path)
print("File:", main_file, "and %i other files" % (len(job.files) - 1,))
for rfile in job.files[1:]:
testFile(conn, job.id, slave_id, rfile, job_prefix, main_path)
print("\t", rfile.filepath)
netrender.repath.update(job)
engine.update_stats("", "Render File " + main_file + " for job " + job.id)
elif job.type == netrender.model.JOB_VCS:
if not job.version_info:
# Need to return an error to server, incorrect job type
pass
job_path = job.files[0].filepath # path of main file
main_path, main_file = os.path.split(job_path)
job.version_info.update()
# For VCS jobs, file path is relative to the working copy path
job_full_path = os.path.join(job.version_info.wpath, job_path)
engine.update_stats("", "Render File " + main_file + " for job " + job.id)
# announce log to master
logfile = netrender.model.LogFile(job.id, slave_id, [frame.number for frame in job.frames])
with ConnectionContext():
conn.request("POST", "/log", bytes(json.dumps(logfile.serialize()), encoding='utf8'))
response = conn.getresponse()
response.read()
first_frame = job.frames[0].number
# start render
start_t = time.time()
if job.rendersWithBlender():
frame_args = []
for frame in job.frames:
print("frame", frame.number)
frame_args += ["-f", str(frame.number)]
with NoErrorDialogContext():
process = subprocess.Popen([BLENDER_PATH, "-b", "-noaudio", job_full_path, "-t", str(threads), "-o", os.path.join(job_prefix, "######"), "-E", job.render, "-F", "MULTILAYER"] + frame_args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
elif job.subtype == netrender.model.JOB_SUB_BAKING:
tasks = []
for frame in job.frames:
tasks.append(netrender.baking.commandToTask(frame.command))
with NoErrorDialogContext():
process = netrender.baking.bake(job, tasks)
elif job.type == netrender.model.JOB_PROCESS:
command = job.frames[0].command
with NoErrorDialogContext():
process = subprocess.Popen(command.split(" "), stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
headers = {"slave-id":slave_id}
results = []
line = ""
class ProcessData:
def __init__(self):
self.lock = threading.Lock()
self.stdout = bytes()
self.cancelled = False
self.start_time = time.time()
self.last_time = time.time()
data = ProcessData()
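                # Read the renderer's stdout on a separate thread so the main
                # loop below can poll for cancellation and push incremental
                # logs to the master without blocking on process I/O.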
def run_process(process, data):
while not data.cancelled and process.poll() is None:
buf = process.stdout.read(1024)
data.lock.acquire()
data.stdout += buf
data.lock.release()
process_thread = threading.Thread(target=run_process, args=(process, data))
process_thread.start()
while not data.cancelled and process_thread.is_alive():
time.sleep(CANCEL_POLL_SPEED / 2)
current_time = time.time()
data.cancelled = engine.test_break()
if current_time - data.last_time > CANCEL_POLL_SPEED:
data.lock.acquire()
# update logs if needed
if data.stdout:
# (only need to update on one frame, they are linked
with ConnectionContext():
conn.request("PUT", logURL(job.id, first_frame), data.stdout, headers=headers)
responseStatus(conn)
stdout_text = str(data.stdout, encoding='utf8')
# Also output on console
if netsettings.use_slave_output_log:
print(stdout_text, end="")
lines = stdout_text.split("\n")
lines[0] = line + lines[0]
line = lines.pop()
if job.subtype == netrender.model.JOB_SUB_BAKING:
results.extend(netrender.baking.resultsFromOuput(lines))
data.stdout = bytes()
data.lock.release()
data.last_time = current_time
if testCancel(conn, job.id, first_frame):
engine.update_stats("", "Job canceled by Master")
data.cancelled = True
process_thread.join()
del process_thread
if job.type == netrender.model.JOB_BLENDER:
netrender.repath.reset(job)
# read leftovers if needed
data.stdout += process.stdout.read()
if data.cancelled:
# kill process if needed
if process.poll() is None:
try:
process.terminate()
except OSError:
pass
continue # to next frame
# flush the rest of the logs
if data.stdout:
stdout_text = str(data.stdout, encoding='utf8')
# Also output on console
if netsettings.use_slave_output_log:
print(stdout_text, end="")
lines = stdout_text.split("\n")
lines[0] = line + lines[0]
if job.subtype == netrender.model.JOB_SUB_BAKING:
results.extend(netrender.baking.resultsFromOuput(lines))
# (only need to update on one frame, they are linked
with ConnectionContext():
conn.request("PUT", logURL(job.id, first_frame), data.stdout, headers=headers)
if responseStatus(conn) == http.client.NO_CONTENT:
continue
total_t = time.time() - data.start_time
avg_t = total_t / len(job.frames)
status = process.returncode
print("status", status)
headers = {"job-id":job.id, "slave-id":slave_id, "job-time":str(avg_t)}
if status == 0: # non zero status is error
headers["job-result"] = str(netrender.model.FRAME_DONE)
for frame in job.frames:
headers["job-frame"] = str(frame.number)
if job.hasRenderResult():
# send image back to server
filename = os.path.join(job_prefix, "%06d.exr" % frame.number)
# thumbnail first
if netsettings.use_slave_thumb:
thumbname = thumbnail.generate(filename)
if thumbname:
f = open(thumbname, 'rb')
with ConnectionContext():
conn.request("PUT", "/thumb", f, headers=headers)
f.close()
responseStatus(conn)
f = open(filename, 'rb')
with ConnectionContext():
conn.request("PUT", "/render", f, headers=headers)
f.close()
if responseStatus(conn) == http.client.NO_CONTENT:
continue
elif job.subtype == netrender.model.JOB_SUB_BAKING:
index = job.frames.index(frame)
frame_results = [result_filepath for task_index, result_filepath in results if task_index == index]
for result_filepath in frame_results:
result_path, result_filename = os.path.split(result_filepath)
headers["result-filename"] = result_filename
headers["job-finished"] = str(result_filepath == frame_results[-1])
f = open(result_filepath, 'rb')
with ConnectionContext():
conn.request("PUT", "/result", f, headers=headers)
f.close()
if responseStatus(conn) == http.client.NO_CONTENT:
continue
elif job.type == netrender.model.JOB_PROCESS:
with ConnectionContext():
conn.request("PUT", "/render", headers=headers)
if responseStatus(conn) == http.client.NO_CONTENT:
continue
else:
headers["job-result"] = str(netrender.model.FRAME_ERROR)
for frame in job.frames:
headers["job-frame"] = str(frame.number)
# send error result back to server
with ConnectionContext():
conn.request("PUT", "/render", headers=headers)
if responseStatus(conn) == http.client.NO_CONTENT:
continue
engine.update_stats("", "Network render connected to master, waiting for jobs")
else:
bisleep.sleep()
conn.close()
if netsettings.use_slave_clear:
clearSlave(NODE_PREFIX)
if __name__ == "__main__":
pass
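# --- Illustrative sketch (not part of the original netrender sources) ---
# The slave loop above drains the renderer's stdout on a background thread
# behind a lock while the main thread polls for cancellation.  A minimal,
# self-contained version of that pattern is sketched below; the command,
# poll interval and cancel callback are placeholders, not values taken from
# netrender.
import subprocess
import threading
import time


def drain_subprocess(cmd, poll_interval=0.5, should_cancel=lambda: False):
    """Run *cmd*, collecting stdout on a helper thread while polling for cancellation."""
    process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    lock = threading.Lock()
    chunks = []

    def _reader():
        # Blocking reads happen here, so the polling loop below never stalls.
        while process.poll() is None:
            buf = process.stdout.read(1024)
            if not buf:
                break
            with lock:
                chunks.append(buf)

    reader = threading.Thread(target=_reader)
    reader.start()
    while reader.is_alive():
        time.sleep(poll_interval)
        if should_cancel() and process.poll() is None:
            # Terminating the process unblocks the reader thread via EOF.
            try:
                process.terminate()
            except OSError:
                pass
    reader.join()
    leftover = process.stdout.read()
    if leftover:
        chunks.append(leftover)
    process.wait()
    return process.returncode, b"".join(chunks)

# Example: code, output = drain_subprocess(["echo", "done"])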
| gpl-3.0 | -6,414,949,882,943,779,000 | 40.261176 | 262 | 0.495096 | false |
diegoguimaraes/django | tests/validators/tests.py | 14 | 14725 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from datetime import datetime, timedelta
import re
import types
from unittest import TestCase
from django.core.exceptions import ValidationError
from django.core.validators import (
BaseValidator, EmailValidator, MaxLengthValidator, MaxValueValidator,
MinLengthValidator, MinValueValidator, RegexValidator, URLValidator,
validate_comma_separated_integer_list, validate_email, validate_integer,
validate_ipv46_address, validate_ipv4_address, validate_ipv6_address,
validate_slug,
)
from django.test import SimpleTestCase
from django.test.utils import str_prefix
NOW = datetime.now()
EXTENDED_SCHEMES = ['http', 'https', 'ftp', 'ftps', 'git', 'file']
TEST_DATA = (
# (validator, value, expected),
(validate_integer, '42', None),
(validate_integer, '-42', None),
(validate_integer, -42, None),
(validate_integer, -42.5, None),
(validate_integer, None, ValidationError),
(validate_integer, 'a', ValidationError),
(validate_email, '[email protected]', None),
(validate_email, '[email protected]', None),
(validate_email, 'email@[127.0.0.1]', None),
(validate_email, 'email@[2001:dB8::1]', None),
(validate_email, 'email@[2001:dB8:0:0:0:0:0:1]', None),
(validate_email, 'email@[::fffF:127.0.0.1]', None),
(validate_email, '[email protected]', None),
(validate_email, '[email protected]', None),
(validate_email, '[email protected].उदाहरण.परीक्षा', None),
(validate_email, 'email@localhost', None),
(EmailValidator(whitelist=['localdomain']), 'email@localdomain', None),
(validate_email, '"test@test"@example.com', None),
(validate_email, None, ValidationError),
(validate_email, '', ValidationError),
(validate_email, 'abc', ValidationError),
(validate_email, 'abc@', ValidationError),
(validate_email, 'abc@bar', ValidationError),
(validate_email, 'a @x.cz', ValidationError),
(validate_email, '[email protected]', ValidationError),
(validate_email, 'something@@somewhere.com', ValidationError),
(validate_email, '[email protected]', ValidationError),
(validate_email, 'email@[127.0.0.256]', ValidationError),
(validate_email, 'email@[2001:db8::12345]', ValidationError),
(validate_email, 'email@[2001:db8:0:0:0:0:1]', ValidationError),
(validate_email, 'email@[::ffff:127.0.0.256]', ValidationError),
(validate_email, '[email protected]', ValidationError),
(validate_email, '[email protected]', ValidationError),
(validate_email, '[email protected]', ValidationError),
(validate_email, '[email protected]', ValidationError),
(validate_email, '[email protected]', ValidationError),
(validate_email, '[email protected]\n\n<script src="x.js">', ValidationError),
# Quoted-string format (CR not allowed)
(validate_email, '"\\\011"@here.com', None),
(validate_email, '"\\\012"@here.com', ValidationError),
(validate_email, '[email protected].', ValidationError),
# Max length of domain name in email is 249 (see validator for calculation)
(validate_email, 'a@%s.us' % ('a' * 249), None),
(validate_email, 'a@%s.us' % ('a' * 250), ValidationError),
(validate_slug, 'slug-ok', None),
(validate_slug, 'longer-slug-still-ok', None),
(validate_slug, '--------', None),
(validate_slug, 'nohyphensoranything', None),
(validate_slug, '', ValidationError),
(validate_slug, ' text ', ValidationError),
(validate_slug, ' ', ValidationError),
(validate_slug, '[email protected]', ValidationError),
(validate_slug, '你好', ValidationError),
(validate_slug, '\n', ValidationError),
(validate_ipv4_address, '1.1.1.1', None),
(validate_ipv4_address, '255.0.0.0', None),
(validate_ipv4_address, '0.0.0.0', None),
(validate_ipv4_address, '256.1.1.1', ValidationError),
(validate_ipv4_address, '25.1.1.', ValidationError),
(validate_ipv4_address, '25,1,1,1', ValidationError),
(validate_ipv4_address, '25.1 .1.1', ValidationError),
# validate_ipv6_address uses django.utils.ipv6, which
# is tested in much greater detail in its own testcase
(validate_ipv6_address, 'fe80::1', None),
(validate_ipv6_address, '::1', None),
(validate_ipv6_address, '1:2:3:4:5:6:7:8', None),
(validate_ipv6_address, '1:2', ValidationError),
(validate_ipv6_address, '::zzz', ValidationError),
(validate_ipv6_address, '12345::', ValidationError),
(validate_ipv46_address, '1.1.1.1', None),
(validate_ipv46_address, '255.0.0.0', None),
(validate_ipv46_address, '0.0.0.0', None),
(validate_ipv46_address, 'fe80::1', None),
(validate_ipv46_address, '::1', None),
(validate_ipv46_address, '1:2:3:4:5:6:7:8', None),
(validate_ipv46_address, '256.1.1.1', ValidationError),
(validate_ipv46_address, '25.1.1.', ValidationError),
(validate_ipv46_address, '25,1,1,1', ValidationError),
(validate_ipv46_address, '25.1 .1.1', ValidationError),
(validate_ipv46_address, '1:2', ValidationError),
(validate_ipv46_address, '::zzz', ValidationError),
(validate_ipv46_address, '12345::', ValidationError),
(validate_comma_separated_integer_list, '1', None),
(validate_comma_separated_integer_list, '1,2,3', None),
(validate_comma_separated_integer_list, '1,2,3,', None),
(validate_comma_separated_integer_list, '', ValidationError),
(validate_comma_separated_integer_list, 'a,b,c', ValidationError),
(validate_comma_separated_integer_list, '1, 2, 3', ValidationError),
(MaxValueValidator(10), 10, None),
(MaxValueValidator(10), -10, None),
(MaxValueValidator(10), 0, None),
(MaxValueValidator(NOW), NOW, None),
(MaxValueValidator(NOW), NOW - timedelta(days=1), None),
(MaxValueValidator(0), 1, ValidationError),
(MaxValueValidator(NOW), NOW + timedelta(days=1), ValidationError),
(MinValueValidator(-10), -10, None),
(MinValueValidator(-10), 10, None),
(MinValueValidator(-10), 0, None),
(MinValueValidator(NOW), NOW, None),
(MinValueValidator(NOW), NOW + timedelta(days=1), None),
(MinValueValidator(0), -1, ValidationError),
(MinValueValidator(NOW), NOW - timedelta(days=1), ValidationError),
(MaxLengthValidator(10), '', None),
(MaxLengthValidator(10), 10 * 'x', None),
(MaxLengthValidator(10), 15 * 'x', ValidationError),
(MinLengthValidator(10), 15 * 'x', None),
(MinLengthValidator(10), 10 * 'x', None),
(MinLengthValidator(10), '', ValidationError),
(URLValidator(), 'http://www.djangoproject.com/', None),
(URLValidator(), 'HTTP://WWW.DJANGOPROJECT.COM/', None),
(URLValidator(), 'http://localhost/', None),
(URLValidator(), 'http://example.com/', None),
(URLValidator(), 'http://www.example.com/', None),
(URLValidator(), 'http://www.example.com:8000/test', None),
(URLValidator(), 'http://valid-with-hyphens.com/', None),
(URLValidator(), 'http://subdomain.example.com/', None),
(URLValidator(), 'http://200.8.9.10/', None),
(URLValidator(), 'http://200.8.9.10:8000/test', None),
(URLValidator(), 'http://valid-----hyphens.com/', None),
(URLValidator(), 'http://example.com?something=value', None),
(URLValidator(), 'http://example.com/index.php?something=value&another=value2', None),
(URLValidator(), 'https://example.com/', None),
(URLValidator(), 'ftp://example.com/', None),
(URLValidator(), 'ftps://example.com/', None),
(URLValidator(EXTENDED_SCHEMES), 'file://localhost/path', None),
(URLValidator(EXTENDED_SCHEMES), 'git://example.com/', None),
(URLValidator(), 'foo', ValidationError),
(URLValidator(), 'http://', ValidationError),
(URLValidator(), 'http://example', ValidationError),
(URLValidator(), 'http://example.', ValidationError),
(URLValidator(), 'http://.com', ValidationError),
(URLValidator(), 'http://invalid-.com', ValidationError),
(URLValidator(), 'http://-invalid.com', ValidationError),
(URLValidator(), 'http://invalid.com-', ValidationError),
(URLValidator(), 'http://inv-.alid-.com', ValidationError),
(URLValidator(), 'http://inv-.-alid.com', ValidationError),
(URLValidator(), 'file://localhost/path', ValidationError),
(URLValidator(), 'git://example.com/', ValidationError),
(URLValidator(EXTENDED_SCHEMES), 'git://-invalid.com', ValidationError),
(BaseValidator(True), True, None),
(BaseValidator(True), False, ValidationError),
(RegexValidator(), '', None),
(RegexValidator(), 'x1x2', None),
(RegexValidator('[0-9]+'), 'xxxxxx', ValidationError),
(RegexValidator('[0-9]+'), '1234', None),
(RegexValidator(re.compile('[0-9]+')), '1234', None),
(RegexValidator('.*'), '', None),
(RegexValidator(re.compile('.*')), '', None),
(RegexValidator('.*'), 'xxxxx', None),
(RegexValidator('x'), 'y', ValidationError),
(RegexValidator(re.compile('x')), 'y', ValidationError),
(RegexValidator('x', inverse_match=True), 'y', None),
(RegexValidator(re.compile('x'), inverse_match=True), 'y', None),
(RegexValidator('x', inverse_match=True), 'x', ValidationError),
(RegexValidator(re.compile('x'), inverse_match=True), 'x', ValidationError),
(RegexValidator('x', flags=re.IGNORECASE), 'y', ValidationError),
(RegexValidator('a'), 'A', ValidationError),
(RegexValidator('a', flags=re.IGNORECASE), 'A', None),
)
def create_simple_test_method(validator, expected, value, num):
if expected is not None and issubclass(expected, Exception):
test_mask = 'test_%s_raises_error_%d'
def test_func(self):
# assertRaises not used, so as to be able to produce an error message
# containing the tested value
try:
validator(value)
except expected:
pass
else:
self.fail("%s not raised when validating '%s'" % (
expected.__name__, value))
else:
test_mask = 'test_%s_%d'
def test_func(self):
try:
self.assertEqual(expected, validator(value))
except ValidationError as e:
self.fail("Validation of '%s' failed. Error message was: %s" % (
value, str(e)))
if isinstance(validator, types.FunctionType):
val_name = validator.__name__
else:
val_name = validator.__class__.__name__
test_name = test_mask % (val_name, num)
return test_name, test_func
# Dynamically assemble a test class with the contents of TEST_DATA
class TestSimpleValidators(SimpleTestCase):
def test_single_message(self):
v = ValidationError('Not Valid')
self.assertEqual(str(v), str_prefix("[%(_)s'Not Valid']"))
self.assertEqual(repr(v), str_prefix("ValidationError([%(_)s'Not Valid'])"))
def test_message_list(self):
v = ValidationError(['First Problem', 'Second Problem'])
self.assertEqual(str(v), str_prefix("[%(_)s'First Problem', %(_)s'Second Problem']"))
self.assertEqual(repr(v), str_prefix("ValidationError([%(_)s'First Problem', %(_)s'Second Problem'])"))
def test_message_dict(self):
v = ValidationError({'first': ['First Problem']})
self.assertEqual(str(v), str_prefix("{%(_)s'first': [%(_)s'First Problem']}"))
self.assertEqual(repr(v), str_prefix("ValidationError({%(_)s'first': [%(_)s'First Problem']})"))
def test_regex_validator_flags(self):
try:
RegexValidator(re.compile('a'), flags=re.IGNORECASE)
except TypeError:
pass
else:
self.fail("TypeError not raised when flags and pre-compiled regex in RegexValidator")
def test_max_length_validator_message(self):
v = MaxLengthValidator(16, message='"%(value)s" has more than %(limit_value)d characters.')
with self.assertRaisesMessage(ValidationError, '"djangoproject.com" has more than 16 characters.'):
v('djangoproject.com')
test_counter = 0
for validator, value, expected in TEST_DATA:
name, method = create_simple_test_method(validator, expected, value, test_counter)
setattr(TestSimpleValidators, name, method)
test_counter += 1
class TestValidatorEquality(TestCase):
"""
Tests that validators have valid equality operators (#21638)
"""
def test_regex_equality(self):
self.assertEqual(
RegexValidator(r'^(?:[a-z0-9\.\-]*)://'),
RegexValidator(r'^(?:[a-z0-9\.\-]*)://'),
)
self.assertNotEqual(
RegexValidator(r'^(?:[a-z0-9\.\-]*)://'),
RegexValidator(r'^(?:[0-9\.\-]*)://'),
)
self.assertEqual(
RegexValidator(r'^(?:[a-z0-9\.\-]*)://', "oh noes", "invalid"),
RegexValidator(r'^(?:[a-z0-9\.\-]*)://', "oh noes", "invalid"),
)
self.assertNotEqual(
RegexValidator(r'^(?:[a-z0-9\.\-]*)://', "oh", "invalid"),
RegexValidator(r'^(?:[a-z0-9\.\-]*)://', "oh noes", "invalid"),
)
self.assertNotEqual(
RegexValidator(r'^(?:[a-z0-9\.\-]*)://', "oh noes", "invalid"),
RegexValidator(r'^(?:[a-z0-9\.\-]*)://'),
)
self.assertNotEqual(
RegexValidator('', flags=re.IGNORECASE),
RegexValidator(''),
)
self.assertNotEqual(
RegexValidator(''),
RegexValidator('', inverse_match=True),
)
def test_regex_equality_nocache(self):
pattern = r'^(?:[a-z0-9\.\-]*)://'
left = RegexValidator(pattern)
re.purge()
right = RegexValidator(pattern)
self.assertEqual(
left,
right,
)
def test_regex_equality_blank(self):
self.assertEqual(
RegexValidator(),
RegexValidator(),
)
def test_email_equality(self):
self.assertEqual(
EmailValidator(),
EmailValidator(),
)
self.assertNotEqual(
EmailValidator(message="BAD EMAIL"),
EmailValidator(),
)
self.assertEqual(
EmailValidator(message="BAD EMAIL", code="bad"),
EmailValidator(message="BAD EMAIL", code="bad"),
)
def test_basic_equality(self):
self.assertEqual(
MaxValueValidator(44),
MaxValueValidator(44),
)
self.assertNotEqual(
MaxValueValidator(44),
MinValueValidator(44),
)
self.assertNotEqual(
MinValueValidator(45),
MinValueValidator(11),
)
| bsd-3-clause | 3,520,677,181,863,817,700 | 39.482094 | 111 | 0.618033 | false |
heli522/scikit-learn | examples/model_selection/plot_roc.py | 96 | 4487 | """
=======================================
Receiver Operating Characteristic (ROC)
=======================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
Multiclass settings
-------------------
ROC curves are typically used in binary classification to study the output of
a classifier. In order to extend ROC curve and ROC area to multi-class
or multi-label classification, it is necessary to binarize the output. One ROC
curve can be drawn per label, but one can also draw a ROC curve by considering
each element of the label indicator matrix as a binary prediction
(micro-averaging).
Another evaluation measure for multi-class classification is
macro-averaging, which gives equal weight to the classification of each
label.
.. note::
See also :func:`sklearn.metrics.roc_auc_score`,
:ref:`example_model_selection_plot_roc_crossval.py`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
# Import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Binarize the output
y = label_binarize(y, classes=[0, 1, 2])
n_classes = y.shape[1]
# Add noisy features to make the problem harder
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# shuffle and split training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
# Learn to predict each class against the other
classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,
random_state=random_state))
y_score = classifier.fit(X_train, y_train).decision_function(X_test)
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
##############################################################################
# Plot of a ROC curve for a specific class
plt.figure()
plt.plot(fpr[2], tpr[2], label='ROC curve (area = %0.2f)' % roc_auc[2])
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
##############################################################################
# Plot ROC curves for the multiclass problem
# Compute macro-average ROC curve and ROC area
fpr["macro"] = np.mean([fpr[i] for i in range(n_classes)], axis=0)
tpr["macro"] = np.mean([tpr[i] for i in range(n_classes)], axis=0)
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
plt.figure()
plt.plot(fpr["micro"], tpr["micro"],
label='micro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["micro"]),
linewidth=2)
plt.plot(fpr["macro"], tpr["macro"],
label='macro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["macro"]),
linewidth=2)
for i in range(n_classes):
plt.plot(fpr[i], tpr[i], label='ROC curve of class {0} (area = {1:0.2f})'
''.format(i, roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Some extension of Receiver operating characteristic to multi-class')
plt.legend(loc="lower right")
plt.show()
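# Cross-check sketch (not part of the original example): if the installed
# scikit-learn supports multilabel-indicator input here, the micro- and
# macro-averaged areas can also be obtained directly from the scores.  Note
# the macro value can differ slightly from the curve-averaging above, since
# roc_auc_score averages per-class areas rather than the curves themselves.
from sklearn.metrics import roc_auc_score

print("micro-average AUC:", roc_auc_score(y_test, y_score, average="micro"))
print("macro-average AUC:", roc_auc_score(y_test, y_score, average="macro"))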
| bsd-3-clause | -6,613,291,094,440,805,000 | 34.054688 | 79 | 0.646534 | false |
yinsu/grpc | src/python/grpcio_test/grpc_test/_junkdrawer/math_pb2.py | 47 | 8463 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# TODO(nathaniel): Remove this from source control after having made
# generation from the math.proto source part of GRPC's build-and-test
# process.
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: math.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='math.proto',
package='math',
serialized_pb=_b('\n\nmath.proto\x12\x04math\",\n\x07\x44ivArgs\x12\x10\n\x08\x64ividend\x18\x01 \x02(\x03\x12\x0f\n\x07\x64ivisor\x18\x02 \x02(\x03\"/\n\x08\x44ivReply\x12\x10\n\x08quotient\x18\x01 \x02(\x03\x12\x11\n\tremainder\x18\x02 \x02(\x03\"\x18\n\x07\x46ibArgs\x12\r\n\x05limit\x18\x01 \x01(\x03\"\x12\n\x03Num\x12\x0b\n\x03num\x18\x01 \x02(\x03\"\x19\n\x08\x46ibReply\x12\r\n\x05\x63ount\x18\x01 \x02(\x03\x32\xa4\x01\n\x04Math\x12&\n\x03\x44iv\x12\r.math.DivArgs\x1a\x0e.math.DivReply\"\x00\x12.\n\x07\x44ivMany\x12\r.math.DivArgs\x1a\x0e.math.DivReply\"\x00(\x01\x30\x01\x12#\n\x03\x46ib\x12\r.math.FibArgs\x1a\t.math.Num\"\x00\x30\x01\x12\x1f\n\x03Sum\x12\t.math.Num\x1a\t.math.Num\"\x00(\x01')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_DIVARGS = _descriptor.Descriptor(
name='DivArgs',
full_name='math.DivArgs',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='dividend', full_name='math.DivArgs.dividend', index=0,
number=1, type=3, cpp_type=2, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='divisor', full_name='math.DivArgs.divisor', index=1,
number=2, type=3, cpp_type=2, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=20,
serialized_end=64,
)
_DIVREPLY = _descriptor.Descriptor(
name='DivReply',
full_name='math.DivReply',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='quotient', full_name='math.DivReply.quotient', index=0,
number=1, type=3, cpp_type=2, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='remainder', full_name='math.DivReply.remainder', index=1,
number=2, type=3, cpp_type=2, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=66,
serialized_end=113,
)
_FIBARGS = _descriptor.Descriptor(
name='FibArgs',
full_name='math.FibArgs',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='limit', full_name='math.FibArgs.limit', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=115,
serialized_end=139,
)
_NUM = _descriptor.Descriptor(
name='Num',
full_name='math.Num',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='num', full_name='math.Num.num', index=0,
number=1, type=3, cpp_type=2, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=141,
serialized_end=159,
)
_FIBREPLY = _descriptor.Descriptor(
name='FibReply',
full_name='math.FibReply',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='count', full_name='math.FibReply.count', index=0,
number=1, type=3, cpp_type=2, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=161,
serialized_end=186,
)
DESCRIPTOR.message_types_by_name['DivArgs'] = _DIVARGS
DESCRIPTOR.message_types_by_name['DivReply'] = _DIVREPLY
DESCRIPTOR.message_types_by_name['FibArgs'] = _FIBARGS
DESCRIPTOR.message_types_by_name['Num'] = _NUM
DESCRIPTOR.message_types_by_name['FibReply'] = _FIBREPLY
DivArgs = _reflection.GeneratedProtocolMessageType('DivArgs', (_message.Message,), dict(
DESCRIPTOR = _DIVARGS,
__module__ = 'math_pb2'
# @@protoc_insertion_point(class_scope:math.DivArgs)
))
_sym_db.RegisterMessage(DivArgs)
DivReply = _reflection.GeneratedProtocolMessageType('DivReply', (_message.Message,), dict(
DESCRIPTOR = _DIVREPLY,
__module__ = 'math_pb2'
# @@protoc_insertion_point(class_scope:math.DivReply)
))
_sym_db.RegisterMessage(DivReply)
FibArgs = _reflection.GeneratedProtocolMessageType('FibArgs', (_message.Message,), dict(
DESCRIPTOR = _FIBARGS,
__module__ = 'math_pb2'
# @@protoc_insertion_point(class_scope:math.FibArgs)
))
_sym_db.RegisterMessage(FibArgs)
Num = _reflection.GeneratedProtocolMessageType('Num', (_message.Message,), dict(
DESCRIPTOR = _NUM,
__module__ = 'math_pb2'
# @@protoc_insertion_point(class_scope:math.Num)
))
_sym_db.RegisterMessage(Num)
FibReply = _reflection.GeneratedProtocolMessageType('FibReply', (_message.Message,), dict(
DESCRIPTOR = _FIBREPLY,
__module__ = 'math_pb2'
# @@protoc_insertion_point(class_scope:math.FibReply)
))
_sym_db.RegisterMessage(FibReply)
# @@protoc_insertion_point(module_scope)
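# Illustrative usage sketch (not part of the protoc-generated output above):
# the generated classes are ordinary protobuf messages, so they can be built
# with keyword arguments, serialized, and parsed back.  The field values are
# arbitrary examples.
if __name__ == "__main__":
  example = DivArgs(dividend=7, divisor=2)
  wire_bytes = example.SerializeToString()
  parsed = DivArgs()
  parsed.ParseFromString(wire_bytes)
  assert (parsed.dividend, parsed.divisor) == (7, 2)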
| bsd-3-clause | -5,199,041,853,896,959,000 | 30.815789 | 709 | 0.707551 | false |
Nowheresly/odoo | addons/l10n_at/account_wizard.py | 379 | 1234 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) conexus.at
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
from openerp.osv import osv
from openerp import addons
class AccountWizard_cd(osv.osv_memory):
_inherit='wizard.multi.charts.accounts'
_defaults = {
'code_digits' : 0,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -4,378,851,601,850,910,700 | 34.257143 | 79 | 0.628039 | false |
blitzagency/flowbee | flowbee/deciders/events.py | 1 | 13786 | """SWF Event Types
Possible Decider Events:
http://boto3.readthedocs.org/en/latest/reference/services/swf.html#SWF.Client.poll_for_decision_task
WorkflowExecutionStarted
WorkflowExecutionCancelRequested
WorkflowExecutionCompleted
CompleteWorkflowExecutionFailed
WorkflowExecutionFailed
FailWorkflowExecutionFailed
WorkflowExecutionTimedOut
WorkflowExecutionCanceled
CancelWorkflowExecutionFailed
WorkflowExecutionContinuedAsNew
ContinueAsNewWorkflowExecutionFailed
WorkflowExecutionTerminated
DecisionTaskScheduled
DecisionTaskStarted
DecisionTaskCompleted
DecisionTaskTimedOut
ActivityTaskScheduled
ScheduleActivityTaskFailed
ActivityTaskStarted
ActivityTaskCompleted
ActivityTaskFailed
ActivityTaskTimedOut
ActivityTaskCanceled
ActivityTaskCancelRequested
RequestCancelActivityTaskFailed
WorkflowExecutionSignaled
MarkerRecorded
RecordMarkerFailed
TimerStarted
StartTimerFailed
TimerFired
TimerCanceled
CancelTimerFailed
StartChildWorkflowExecutionInitiated
StartChildWorkflowExecutionFailed
ChildWorkflowExecutionStarted
ChildWorkflowExecutionCompleted
ChildWorkflowExecutionFailed
ChildWorkflowExecutionTimedOut
ChildWorkflowExecutionCanceled
ChildWorkflowExecutionTerminated
SignalExternalWorkflowExecutionInitiated
SignalExternalWorkflowExecutionFailed
ExternalWorkflowExecutionSignaled
RequestCancelExternalWorkflowExecutionInitiated
RequestCancelExternalWorkflowExecutionFailed
ExternalWorkflowExecutionCancelRequested
LambdaFunctionScheduled
LambdaFunctionStarted
LambdaFunctionCompleted
LambdaFunctionFailed
LambdaFunctionTimedOut
ScheduleLambdaFunctionFailed
StartLambdaFunctionFailed
"""
import logging
from .. import exceptions
from .. import compression
from .. import utils
log = logging.getLogger(__name__)
class DeciderEvent(object):
def __init__(
self, meta, event, event_history):
self.client = utils.get_client()
self.meta = meta
self.type = event["eventType"]
self.event = event
self.event_history = event_history
self.payload = None
self.prepare_event()
    def prepare_event(self):
raise NotImplementedError()
def deserialize(self, data):
return compression.decompress_b64_json(data)
def serialize(self, data):
return compression.compress_b64_json(data)
class WorkflowExecutionStarted(DeciderEvent):
"""WorkflowExecutionStarted Event
{
u'eventId': 1,
u'eventTimestamp': datetime.datetime(2016, 3, 26, 22, 20, 7, 17000, tzinfo=tzlocal()),
u'eventType': u'WorkflowExecutionStarted',
u'workflowExecutionStartedEventAttributes': {
u'childPolicy': u'TERMINATE',
u'executionStartToCloseTimeout': u'60',
u'input': u'H4sIADdu91YC/6tWyk0tSVSyUqiu1VFQSkmEsJVApFKSkg6IsFIwrK0FALiLFCcoAAAA',
u'parentInitiatedEventId': 0,
u'taskList': {u'name': u'flowbee-test-tasks'},
u'taskPriority': u'0',
u'taskStartToCloseTimeout': u'10',
u'workflowType': {
u'name': u'MyWorkflow.MyActivities',
u'version': u'0.0.1'}
}
}
"""
def prepare_event(self):
try:
attributes = self.event["workflowExecutionStartedEventAttributes"]
except KeyError as e:
message = "Unable to lookup '{0}' in {1}".format(e.message, self.event)
log.error(message)
raise exceptions.EventException(message=message)
data = attributes.get("input", None)
try:
self.workflow_name = attributes["workflowType"]["name"]
self.workflow_version = attributes["workflowType"]["version"]
except KeyError as e:
message = "Unable to lookup '{0}' in {1}".format(e.message, attributes)
log.error(message)
raise exceptions.EventException(message=message)
if data is not None:
self.payload = self.deserialize(data)
else:
self.payload = None
class ActivityAbstractFailure(DeciderEvent):
def retry(self):
log.info(
"Retrying task '%s@%s'. Retry attempt: %s",
self.task_name, self.task_version, self.num_retries
)
utils.schedule_activity(
client=self.client,
tasklist=self.tasklist,
activity_id=self.activity_id,
task_token=self.meta.task_token,
name=self.task_name,
version=self.task_version,
payload=self.payload,
attempt=self.num_retries
)
def process_history(self, attributes):
try:
scheduled_event_id = attributes["scheduledEventId"]
except KeyError as e:
message = "Unable to lookup '{0}' in {1}".format(e.message, self.event)
log.error(message)
raise exceptions.EventException(message=message)
try:
scheduled_activity_event = [evt for evt in self.event_history if evt["eventId"] == scheduled_event_id][0]
except IndexError:
message = "Unable to find event id '{0}' in event_history".format(scheduled_event_id)
log.error(message)
raise exceptions.EventException(message=message)
try:
activity = scheduled_activity_event["activityTaskScheduledEventAttributes"]
except KeyError as e:
message = "Unable to lookup '{0}' in {1}".format(e.message, scheduled_activity_event)
log.error(message)
raise exceptions.EventException(message=message)
try:
self.activity_id = activity["activityId"].rsplit("-", 1)[0]
self.tasklist = activity["taskList"]["name"]
self.task_name = activity["activityType"]["name"]
self.task_version = activity["activityType"]["version"]
self.payload = activity["input"]
except KeyError as e:
message = "Unable to find key '{0}' in 'activityTaskScheduledEventAttributes'".format(e.message)
log.error(message)
raise exceptions.EventException(message=message)
self.num_retries = sum([
1 for evt in self.event_history
if evt["eventType"] == "ActivityTaskScheduled" and
evt["activityTaskScheduledEventAttributes"]["activityId"].startswith(self.activity_id)
])
class ActivityTaskScheduled(DeciderEvent):
"""ActivityTaskScheduled Event
{
u'activityTaskScheduledEventAttributes': {
u'activityId': u'com.flowbee-test.MyWorkflow.MyWorkflow.MyActivities-eb4d44a2c088452a8de053caf50209f7.23gHXuoeTXnzl8Xts+14bNNscjpxZaCJmit8tr2y2Ofzs=.stage1@0.0.1-0',
u'activityType': {
u'name': u'stage1',
u'version': u'0.0.1'},
u'decisionTaskCompletedEventId': 9,
u'heartbeatTimeout': u'NONE',
u'input': u'H4sIADxu91YC/6tWSixKL1ayUohWyilNrlSK1VFQyi6HilUrpeXng+lEIKmUpKQDIqwUDGtrawHg8m1aOQAAAA==',
u'scheduleToCloseTimeout': u'10',
u'scheduleToStartTimeout': u'10',
u'startToCloseTimeout': u'NONE',
u'taskList': {u'name': u'flowbee-test-tasks'},
u'taskPriority': u'0'},
u'eventId': 10,
u'eventTimestamp': datetime.datetime(2016, 3, 26, 22, 20, 12, 560000, tzinfo=tzlocal()),
u'eventType': u'ActivityTaskScheduled'
}
"""
def prepare_event(self):
try:
attributes = self.event["activityTaskScheduledEventAttributes"]
        except KeyError as e:
            message = "Unable to lookup '{0}' in {1}".format(e.message, self.event)
log.error(message)
raise exceptions.EventException(message=message)
self.tasklist = attributes["taskList"]["name"]
self.priority = attributes["taskPriority"]
self.name = attributes["activityType"]["name"]
self.version = attributes["activityType"]["version"]
self.activity_id = attributes["activityId"]
data = attributes.get("input", None)
if data is not None:
self.payload = self.deserialize(data)
else:
self.payload = None
class ActivityTaskStarted(DeciderEvent):
"""ActivityTaskStarted
{
u'activityTaskStartedEventAttributes': {
u'identity': u'MyWorkflow',
u'scheduledEventId': 10},
u'eventId': 11,
u'eventTimestamp': datetime.datetime(2016, 3, 26, 22, 20, 12, 599000, tzinfo=tzlocal()),
u'eventType': u'ActivityTaskStarted'
}
"""
def prepare_event(self):
return
class ActivityTaskCompleted(DeciderEvent):
"""ActivityTaskCompleted Event
{
u'eventId': 15,
u'eventType': u'ActivityTaskCompleted',
u'activityTaskCompletedEventAttributes': {
u'startedEventId': 14,
u'scheduledEventId': 13,
u'result': u'H4sIABZt91YC/1MqLilKLE9KLSrKTC1WAgBhRJKGDgAAAA=='},
u'eventTimestamp': datetime.datetime(2016, 3, 26, 22, 15, 17, 771000, tzinfo=tzlocal())
}
"""
def prepare_event(self):
data = self.event \
.get("activityTaskCompletedEventAttributes", {}) \
.get("result", None)
if data is not None:
self.payload = self.deserialize(data)
else:
self.payload = None
class ActivityTaskTimedOut(ActivityAbstractFailure):
"""ActivityTaskTimedOut Event
{
u'activityTaskTimedOutEventAttributes': {
u'scheduledEventId': 16,
u'startedEventId': 17,
u'timeoutType': u'SCHEDULE_TO_CLOSE'},
u'eventId': 18,
u'eventTimestamp': datetime.datetime(2016, 3, 26, 22, 29, 57, 609000, tzinfo=tzlocal()),
u'eventType': u'ActivityTaskTimedOut'
}
"""
def prepare_event(self):
self.num_retries = 0
attributes = self.event.get("activityTaskTimedOutEventAttributes")
self.process_history(attributes)
class ActivityTaskFailed(ActivityAbstractFailure):
def prepare_event(self):
self.num_retries = 0
attributes = self.event.get("activityTaskFailedEventAttributes")
self.process_history(attributes)
class ScheduleActivityTaskFailed(DeciderEvent):
def prepare_event(self):
        attributes = self.event["scheduleActivityTaskFailedEventAttributes"]
activity_id = attributes.get("activityId", "unknown activity id")
activity_name = attributes.get("activityType", {}).get("name", "unknown name")
activity_version = attributes.get("activityType", {}).get("version", "unknown version")
cause = attributes.get("cause", "unknown")
        message = "Failed to schedule activity [{0}@{1}]: {2} - {3}".format(
            activity_name, activity_version, cause, activity_id)
log.error(message)
raise exceptions.EventException(message=message)
class TimerStarted(DeciderEvent):
"""TimerStarted Event
{
u'eventId': 5,
u'eventTimestamp': datetime.datetime(2016, 3, 26, 22, 20, 7, 363000, tzinfo=tzlocal()),
u'eventType': u'TimerStarted',
u'timerStartedEventAttributes': {
u'decisionTaskCompletedEventId': 4,
u'startToFireTimeout': u'5',
u'timerId': u'com.flowbee-test.MyWorkflow.MyWorkflow.MyActivities-eb4d44a2c088452a8de053caf50209f7.23gHXuoeTXnzl8Xts+14bNNscjpxZaCJmit8tr2y2Ofzs='}
}
"""
def prepare_event(self):
try:
attributes = self.event["timerStartedEventAttributes"]
except KeyError:
message = "Unable to locate 'timerStartedEventAttributes' on {0}".format(self.event)
log.error(message)
raise exceptions.EventException(message=message)
self.timer_id = attributes["timerId"]
self.seconds = int(attributes["startToFireTimeout"])
try:
data = attributes["control"]
except KeyError:
data = None
if data is None:
self.payload = None
else:
self.payload = self.deserialize(data)
class TimerFired(DeciderEvent):
"""TimerFired Event
{
u'eventId': 6,
u'eventTimestamp': datetime.datetime(2016, 3, 26, 22, 20, 12, 367000, tzinfo=tzlocal()),
u'eventType': u'TimerFired',
u'timerFiredEventAttributes': {
u'startedEventId': 5,
u'timerId': u'com.flowbee-test.MyWorkflow.MyWorkflow.MyActivities-eb4d44a2c088452a8de053caf50209f7.23gHXuoeTXnzl8Xts+14bNNscjpxZaCJmit8tr2y2Ofzs='}
}
"""
def prepare_event(self):
timer_id = self.event.get("timerFiredEventAttributes", {}).get("timerId")
self.timer_id = timer_id
if timer_id is None:
message = "Unable to locate 'timerId' on 'timerFiredEventAttributes'"
log.error(message)
raise exceptions.EventException(message=message)
try:
timer_started_event = [
x for x in self.event_history
if x["eventType"] == "TimerStarted" and
x["timerStartedEventAttributes"]["timerId"] == timer_id][0]
except KeyError as e:
message = "Failed to find key in event_history '{0}'".format(e.message)
log.error(message)
raise exceptions.EventException(message=message)
except IndexError as e:
message = "Failed to locate corresponding 'TimerStarted' event with id '{0}'".format(timer_id)
log.error(message)
raise exceptions.EventException(message=message)
data = timer_started_event \
.get("timerStartedEventAttributes", {}) \
.get("control", None)
if data is not None:
self.payload = self.deserialize(data)
else:
self.payload = None
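# Illustrative sketch (not part of the original module): the decider loop that
# consumes these classes lives outside this file, but a minimal dispatch from
# SWF eventType strings to the handlers defined above could look like the
# mapping below.  The set of handled events and the fallback behaviour are
# assumptions made for the example only.
_EXAMPLE_EVENT_CLASSES = {
    "WorkflowExecutionStarted": WorkflowExecutionStarted,
    "ActivityTaskScheduled": ActivityTaskScheduled,
    "ActivityTaskStarted": ActivityTaskStarted,
    "ActivityTaskCompleted": ActivityTaskCompleted,
    "ActivityTaskTimedOut": ActivityTaskTimedOut,
    "ActivityTaskFailed": ActivityTaskFailed,
    "ScheduleActivityTaskFailed": ScheduleActivityTaskFailed,
    "TimerStarted": TimerStarted,
    "TimerFired": TimerFired,
}


def build_event_handler(meta, event, event_history):
    """Return a handler instance for ``event``, or ``None`` when unhandled (sketch only)."""
    klass = _EXAMPLE_EVENT_CLASSES.get(event.get("eventType"))
    if klass is None:
        log.debug("No example handler registered for event type %s", event.get("eventType"))
        return None
    return klass(meta=meta, event=event, event_history=event_history)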
| mit | 6,722,834,459,077,588,000 | 33.123762 | 177 | 0.65066 | false |
afaheem88/tempest_neutron | tempest/api/compute/admin/test_networks.py | 8 | 2055 | # Copyright 2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest import config
CONF = config.CONF
class NetworksTest(base.BaseComputeAdminTest):
_api_version = 2
"""
Tests Nova Networks API that usually requires admin privileges.
API docs:
http://developer.openstack.org/api-ref-compute-v2-ext.html#ext-os-networks
"""
@classmethod
def resource_setup(cls):
super(NetworksTest, cls).resource_setup()
cls.client = cls.os_adm.networks_client
def test_get_network(self):
resp, networks = self.client.list_networks()
configured_network = [x for x in networks if x['label'] ==
CONF.compute.fixed_network_name]
self.assertEqual(1, len(configured_network),
"{0} networks with label {1}".format(
len(configured_network),
CONF.compute.fixed_network_name))
configured_network = configured_network[0]
_, network = self.client.get_network(configured_network['id'])
self.assertEqual(configured_network['label'], network['label'])
def test_list_all_networks(self):
_, networks = self.client.list_networks()
# Check the configured network is in the list
configured_network = CONF.compute.fixed_network_name
self.assertIn(configured_network, [x['label'] for x in networks])
| apache-2.0 | -1,241,243,410,981,666,800 | 38.519231 | 78 | 0.6618 | false |
Davidjohnwilson/sympy | sympy/functions/special/tests/test_delta_functions.py | 32 | 2856 | from sympy import (
adjoint, conjugate, DiracDelta, Heaviside, nan, pi, sign, sqrt,
symbols, transpose, Symbol, Piecewise, I, S, Eq
)
from sympy.utilities.pytest import raises
from sympy.core.function import ArgumentIndexError
x, y = symbols('x y')
def test_DiracDelta():
assert DiracDelta(1) == 0
assert DiracDelta(5.1) == 0
assert DiracDelta(-pi) == 0
assert DiracDelta(5, 7) == 0
assert DiracDelta(nan) == nan
assert DiracDelta(0).func is DiracDelta
assert DiracDelta(x).func is DiracDelta
assert adjoint(DiracDelta(x)) == DiracDelta(x)
assert adjoint(DiracDelta(x - y)) == DiracDelta(x - y)
assert conjugate(DiracDelta(x)) == DiracDelta(x)
assert conjugate(DiracDelta(x - y)) == DiracDelta(x - y)
assert transpose(DiracDelta(x)) == DiracDelta(x)
assert transpose(DiracDelta(x - y)) == DiracDelta(x - y)
assert DiracDelta(x).diff(x) == DiracDelta(x, 1)
assert DiracDelta(x, 1).diff(x) == DiracDelta(x, 2)
assert DiracDelta(x).is_simple(x) is True
assert DiracDelta(3*x).is_simple(x) is True
assert DiracDelta(x**2).is_simple(x) is False
assert DiracDelta(sqrt(x)).is_simple(x) is False
assert DiracDelta(x).is_simple(y) is False
assert DiracDelta(x*y).simplify(x) == DiracDelta(x)/abs(y)
assert DiracDelta(x*y).simplify(y) == DiracDelta(y)/abs(x)
assert DiracDelta(x**2*y).simplify(x) == DiracDelta(x**2*y)
assert DiracDelta(y).simplify(x) == DiracDelta(y)
assert DiracDelta((x - 1)*(x - 2)*(x - 3)).simplify(x) == \
DiracDelta(x - 3)/2 + DiracDelta(x - 2) + DiracDelta(x - 1)/2
raises(ArgumentIndexError, lambda: DiracDelta(x).fdiff(2))
raises(ValueError, lambda: DiracDelta(x, -1))
def test_heaviside():
assert Heaviside(0).func == Heaviside
assert Heaviside(-5) == 0
assert Heaviside(1) == 1
assert Heaviside(nan) == nan
assert adjoint(Heaviside(x)) == Heaviside(x)
assert adjoint(Heaviside(x - y)) == Heaviside(x - y)
assert conjugate(Heaviside(x)) == Heaviside(x)
assert conjugate(Heaviside(x - y)) == Heaviside(x - y)
assert transpose(Heaviside(x)) == Heaviside(x)
assert transpose(Heaviside(x - y)) == Heaviside(x - y)
assert Heaviside(x).diff(x) == DiracDelta(x)
assert Heaviside(x + I).is_Function is True
assert Heaviside(I*x).is_Function is True
raises(ArgumentIndexError, lambda: Heaviside(x).fdiff(2))
raises(ValueError, lambda: Heaviside(I))
raises(ValueError, lambda: Heaviside(2 + 3*I))
def test_rewrite():
x, y = Symbol('x', real=True), Symbol('y')
assert Heaviside(x).rewrite(Piecewise) == \
Piecewise((1, x > 0), (S(1)/2, Eq(x, 0)), (0, True))
assert Heaviside(y).rewrite(Piecewise) == Heaviside(y)
assert Heaviside(x).rewrite(sign) == (sign(x)+1)/2
assert Heaviside(y).rewrite(sign) == Heaviside(y)
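# Illustrative sketch (not part of the original test module): beyond the
# identities asserted above, the unit impulse integrates to one across the
# origin, which is a quick way to sanity-check DiracDelta by hand.
def _delta_integration_sketch():
    from sympy import integrate
    assert integrate(DiracDelta(x), (x, -1, 1)) == 1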
| bsd-3-clause | 8,411,965,287,998,585,000 | 35.615385 | 69 | 0.655812 | false |
zubron/servo | tests/wpt/web-platform-tests/cors/resources/status.py | 220 | 1223 | def main(request, response):
response.headers.set("Access-Control-Allow-Origin", request.headers.get("origin") )
response.headers.set("Access-Control-Expose-Headers", "X-Request-Method")
if request.method == 'OPTIONS':
response.headers.set("Access-Control-Allow-Methods", "GET, CHICKEN, HEAD, POST, PUT")
if 'headers' in request.GET:
response.headers.set("Access-Control-Allow-Headers", request.GET.first('headers'))
response.headers.set("X-Request-Method", request.method)
response.headers.set("X-A-C-Request-Method", request.headers.get("Access-Control-Request-Method", ""));
#This should reasonably work for most response codes.
try:
code = int(request.GET.first("code", 200))
except ValueError:
code = 200
text = request.GET.first("text", "OMG")
if request.method == "OPTIONS" and "preflight" in request.GET:
try:
code = int(request.GET.first('preflight'))
        except (KeyError, ValueError):
pass
status = code, text
if "type" in request.GET:
response.headers.set("Content-Type", request.GET.first('type'))
body = request.GET.first('content', "")
return status, [], body
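# Example query strings this handler understands (illustrative, derived from
# the parameters read above; the values are arbitrary):
#   status.py?code=204                       -> respond with status 204
#   status.py?code=401&text=Unauthorized     -> custom status line text
#   status.py?content=hello&type=text/plain  -> custom body and Content-Type
#   status.py?preflight=200                  -> status used for the OPTIONS preflight
#   status.py?headers=x-custom               -> echoed in Access-Control-Allow-Headers (preflight only)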
| mpl-2.0 | -7,800,282,483,235,899,000 | 32.054054 | 107 | 0.649223 | false |
xodus7/tensorflow | tensorflow/contrib/bigtable/python/ops/bigtable_api.py | 4 | 28480 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Python API for TensorFlow's Cloud Bigtable integration.
TensorFlow has support for reading from and writing to Cloud Bigtable. To use
TensorFlow + Cloud Bigtable integration, first create a BigtableClient to
configure your connection to Cloud Bigtable, and then create a BigtableTable
object to allow you to create numerous `tf.data.Dataset`s to read data, or
write a `tf.data.Dataset` object to the underlying Cloud Bigtable table.
For background on Cloud Bigtable, see: https://cloud.google.com/bigtable .
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six import iteritems
from six import string_types
from tensorflow.contrib.bigtable.ops import gen_bigtable_ops
from tensorflow.contrib.data.python.ops import interleave_ops
from tensorflow.contrib.util import loader
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.platform import resource_loader
_bigtable_so = loader.load_op_library(
resource_loader.get_path_to_datafile("_bigtable.so"))
class BigtableClient(object):
"""BigtableClient is the entrypoint for interacting with Cloud Bigtable in TF.
BigtableClient encapsulates a connection to Cloud Bigtable, and exposes the
`table` method to open a Bigtable table.
"""
def __init__(self,
project_id,
instance_id,
connection_pool_size=None,
max_receive_message_size=None):
"""Creates a BigtableClient that can be used to open connections to tables.
Args:
project_id: A string representing the GCP project id to connect to.
instance_id: A string representing the Bigtable instance to connect to.
connection_pool_size: (Optional.) A number representing the number of
concurrent connections to the Cloud Bigtable service to make.
max_receive_message_size: (Optional.) The maximum bytes received in a
single gRPC response.
Raises:
ValueError: if the arguments are invalid (e.g. wrong type, or out of
expected ranges (e.g. negative).)
"""
if not isinstance(project_id, str):
raise ValueError("`project_id` must be a string")
self._project_id = project_id
if not isinstance(instance_id, str):
raise ValueError("`instance_id` must be a string")
self._instance_id = instance_id
if connection_pool_size is None:
connection_pool_size = -1
elif connection_pool_size < 1:
raise ValueError("`connection_pool_size` must be positive")
if max_receive_message_size is None:
max_receive_message_size = -1
elif max_receive_message_size < 1:
raise ValueError("`max_receive_message_size` must be positive")
self._connection_pool_size = connection_pool_size
self._resource = gen_bigtable_ops.bigtable_client(
project_id, instance_id, connection_pool_size, max_receive_message_size)
def table(self, name, snapshot=None):
"""Opens a table and returns a `tf.contrib.bigtable.BigtableTable` object.
Args:
name: A `tf.string` `tf.Tensor` name of the table to open.
snapshot: Either a `tf.string` `tf.Tensor` snapshot id, or `True` to
request the creation of a snapshot. (Note: currently unimplemented.)
Returns:
A `tf.contrib.bigtable.BigtableTable` Python object representing the
operations available on the table.
"""
# TODO(saeta): Implement snapshot functionality.
table = gen_bigtable_ops.bigtable_table(self._resource, name)
return BigtableTable(name, snapshot, table)
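# A minimal end-to-end usage sketch (illustrative only): the project, instance,
# and table names below are placeholders, and actually running this requires
# Cloud Bigtable credentials to be available in the environment.
#
#   client = BigtableClient("my-project", "my-instance")
#   table = client.table("my-table")
#   keys = table.keys_by_prefix_dataset("train/")
#   rows = keys.apply(table.lookup_columns(cf1="image", cf2="label"))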
class BigtableTable(object):
"""BigtableTable is the entrypoint for reading and writing data in Cloud
Bigtable.
This BigtableTable class is the Python representation of the Cloud Bigtable
table within TensorFlow. Methods on this class allow data to be read from and
written to the Cloud Bigtable service in flexible and high performance
manners.
"""
# TODO(saeta): Investigate implementing tf.contrib.lookup.LookupInterface.
# TODO(saeta): Consider variant tensors instead of resources (while supporting
# connection pooling).
def __init__(self, name, snapshot, resource):
self._name = name
self._snapshot = snapshot
self._resource = resource
def lookup_columns(self, *args, **kwargs):
"""Retrieves the values of columns for a dataset of keys.
Example usage:
```python
table = bigtable_client.table("my_table")
key_dataset = table.get_keys_prefix("imagenet")
images = key_dataset.apply(table.lookup_columns(("cf1", "image"),
("cf2", "label"),
("cf2", "boundingbox")))
training_data = images.map(parse_and_crop, num_parallel_calls=64).batch(128)
```
Alternatively, you can use keyword arguments to specify the columns to
capture. Example (same as above, rewritten):
```python
table = bigtable_client.table("my_table")
key_dataset = table.get_keys_prefix("imagenet")
images = key_dataset.apply(table.lookup_columns(
cf1="image", cf2=("label", "boundingbox")))
training_data = images.map(parse_and_crop, num_parallel_calls=64).batch(128)
```
Note: certain `kwargs` keys are reserved, and thus, some column families
cannot be identified using the `kwargs` syntax. Instead, please use the
`args` syntax. This list includes:
- 'name'
Note: this list can change at any time.
Args:
*args: A list of tuples containing (column family, column name) pairs.
**kwargs: Column families (keys) and column qualifiers (values).
Returns:
A function that can be passed to `tf.data.Dataset.apply` to retrieve the
values of columns for the rows.
"""
table = self # Capture self
normalized = args
if normalized is None:
normalized = []
if isinstance(normalized, tuple):
normalized = list(normalized)
for key, value in iteritems(kwargs):
if key == "name":
continue
if isinstance(value, str):
normalized.append((key, value))
continue
for col in value:
normalized.append((key, col))
def _apply_fn(dataset):
# TODO(saeta): Verify dataset's types are correct!
return _BigtableLookupDataset(dataset, table, normalized)
return _apply_fn
def keys_by_range_dataset(self, start, end):
"""Retrieves all row keys between start and end.
Note: it does NOT retrieve the values of columns.
Args:
start: The start row key. The row keys for rows after start (inclusive)
will be retrieved.
end: (Optional.) The end row key. Rows up to (but not including) end will
be retrieved. If end is None, all subsequent row keys will be retrieved.
Returns:
A `tf.data.Dataset` containing `tf.string` Tensors corresponding to all
of the row keys between `start` and `end`.
"""
# TODO(saeta): Make inclusive / exclusive configurable?
if end is None:
end = ""
return _BigtableRangeKeyDataset(self, start, end)
def keys_by_prefix_dataset(self, prefix):
"""Retrieves the row keys matching a given prefix.
Args:
prefix: All row keys that begin with `prefix` in the table will be
retrieved.
Returns:
A `tf.data.Dataset`. containing `tf.string` Tensors corresponding to all
of the row keys matching that prefix.
"""
return _BigtablePrefixKeyDataset(self, prefix)
def sample_keys(self):
"""Retrieves a sampling of row keys from the Bigtable table.
This dataset is most often used in conjunction with
`tf.contrib.data.parallel_interleave` to construct a set of ranges for
scanning in parallel.
Returns:
A `tf.data.Dataset` returning string row keys.
"""
return _BigtableSampleKeysDataset(self)
def scan_prefix(self, prefix, probability=None, columns=None, **kwargs):
"""Retrieves row (including values) from the Bigtable service.
Rows with row-key prefixed by `prefix` will be retrieved.
Specifying the columns to retrieve for each row is done by either using
kwargs or in the columns parameter. To retrieve values of the columns "c1",
and "c2" from the column family "cfa", and the value of the column "c3"
from column family "cfb", the following datasets (`ds1`, and `ds2`) are
equivalent:
```
table = # ...
ds1 = table.scan_prefix("row_prefix", columns=[("cfa", "c1"),
("cfa", "c2"),
("cfb", "c3")])
ds2 = table.scan_prefix("row_prefix", cfa=["c1", "c2"], cfb="c3")
```
Note: only the latest value of a cell will be retrieved.
Args:
prefix: The prefix all row keys must match to be retrieved for prefix-
based scans.
probability: (Optional.) A float between 0 (exclusive) and 1 (inclusive).
A non-1 value indicates to probabilistically sample rows with the
provided probability.
columns: The columns to read. Note: most commonly, they are expressed as
kwargs. Use the columns value if you are using column families that are
reserved. The value of columns and kwargs are merged. Columns is a list
of tuples of strings ("column_family", "column_qualifier").
**kwargs: The column families and columns to read. Keys are treated as
column_families, and values can be either lists of strings, or strings
that are treated as the column qualifier (column name).
Returns:
A `tf.data.Dataset` returning the row keys and the cell contents.
Raises:
ValueError: If the configured probability is unexpected.
"""
probability = _normalize_probability(probability)
normalized = _normalize_columns(columns, kwargs)
return _BigtableScanDataset(self, prefix, "", "", normalized, probability)
def scan_range(self, start, end, probability=None, columns=None, **kwargs):
"""Retrieves rows (including values) from the Bigtable service.
Rows with row-keys between `start` and `end` will be retrieved.
Specifying the columns to retrieve for each row is done by either using
kwargs or in the columns parameter. To retrieve values of the columns "c1",
and "c2" from the column family "cfa", and the value of the column "c3"
from column family "cfb", the following datasets (`ds1`, and `ds2`) are
equivalent:
```
table = # ...
ds1 = table.scan_range("row_start", "row_end", columns=[("cfa", "c1"),
("cfa", "c2"),
("cfb", "c3")])
ds2 = table.scan_range("row_start", "row_end", cfa=["c1", "c2"], cfb="c3")
```
Note: only the latest value of a cell will be retrieved.
Args:
start: The start of the range when scanning by range.
end: (Optional.) The end of the range when scanning by range.
probability: (Optional.) A float between 0 (exclusive) and 1 (inclusive).
A non-1 value indicates to probabilistically sample rows with the
provided probability.
columns: The columns to read. Note: most commonly, they are expressed as
kwargs. Use the columns value if you are using column families that are
reserved. The value of columns and kwargs are merged. Columns is a list
of tuples of strings ("column_family", "column_qualifier").
**kwargs: The column families and columns to read. Keys are treated as
column_families, and values can be either lists of strings, or strings
that are treated as the column qualifier (column name).
Returns:
A `tf.data.Dataset` returning the row keys and the cell contents.
Raises:
ValueError: If the configured probability is unexpected.
"""
probability = _normalize_probability(probability)
normalized = _normalize_columns(columns, kwargs)
return _BigtableScanDataset(self, "", start, end, normalized, probability)
def parallel_scan_prefix(self,
prefix,
num_parallel_scans=None,
probability=None,
columns=None,
**kwargs):
"""Retrieves row (including values) from the Bigtable service at high speed.
Rows with row-key prefixed by `prefix` will be retrieved. This method is
similar to `scan_prefix`, but by contrast performs multiple sub-scans in
parallel in order to achieve higher performance.
Note: The dataset produced by this method is not deterministic!
Specifying the columns to retrieve for each row is done by either using
kwargs or in the columns parameter. To retrieve values of the columns "c1",
and "c2" from the column family "cfa", and the value of the column "c3"
from column family "cfb", the following datasets (`ds1`, and `ds2`) are
equivalent:
```
table = # ...
ds1 = table.parallel_scan_prefix("row_prefix", columns=[("cfa", "c1"),
("cfa", "c2"),
("cfb", "c3")])
ds2 = table.parallel_scan_prefix("row_prefix", cfa=["c1", "c2"], cfb="c3")
```
Note: only the latest value of a cell will be retrieved.
Args:
prefix: The prefix all row keys must match to be retrieved for prefix-
based scans.
num_parallel_scans: (Optional.) The number of concurrent scans against the
Cloud Bigtable instance.
probability: (Optional.) A float between 0 (exclusive) and 1 (inclusive).
A non-1 value indicates to probabilistically sample rows with the
provided probability.
columns: The columns to read. Note: most commonly, they are expressed as
kwargs. Use the columns value if you are using column families that are
reserved. The value of columns and kwargs are merged. Columns is a list
of tuples of strings ("column_family", "column_qualifier").
**kwargs: The column families and columns to read. Keys are treated as
column_families, and values can be either lists of strings, or strings
that are treated as the column qualifier (column name).
Returns:
A `tf.data.Dataset` returning the row keys and the cell contents.
Raises:
ValueError: If the configured probability is unexpected.
"""
probability = _normalize_probability(probability)
normalized = _normalize_columns(columns, kwargs)
ds = _BigtableSampleKeyPairsDataset(self, prefix, "", "")
return self._make_parallel_scan_dataset(ds, num_parallel_scans, probability,
normalized)
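# Usage sketch (hypothetical) exercising the optional arguments that the
# docstring example above leaves at their defaults:
#
#   ds = table.parallel_scan_prefix(
#       "row_prefix",
#       num_parallel_scans=8,   # up to 8 concurrent sub-scans
#       probability=0.1,        # keep roughly 10% of rows
#       cfa=["c1", "c2"])
#   # Elements are (row_key, cfa:c1, cfa:c2) tf.string tuples, in
#   # non-deterministic order.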
def parallel_scan_range(self,
start,
end,
num_parallel_scans=None,
probability=None,
columns=None,
**kwargs):
"""Retrieves rows (including values) from the Bigtable service.
Rows with row-keys between `start` and `end` will be retrieved. This method
is similar to `scan_range`, but by contrast performs multiple sub-scans in
parallel in order to achieve higher performance.
Note: The dataset produced by this method is not deterministic!
Specifying the columns to retrieve for each row is done by either using
kwargs or in the columns parameter. To retrieve values of the columns "c1",
and "c2" from the column family "cfa", and the value of the column "c3"
from column family "cfb", the following datasets (`ds1`, and `ds2`) are
equivalent:
```
table = # ...
ds1 = table.parallel_scan_range("row_start",
"row_end",
columns=[("cfa", "c1"),
("cfa", "c2"),
("cfb", "c3")])
ds2 = table.parallel_scan_range("row_start", "row_end",
cfa=["c1", "c2"], cfb="c3")
```
Note: only the latest value of a cell will be retrieved.
Args:
start: The start of the range when scanning by range.
end: (Optional.) The end of the range when scanning by range.
num_parallel_scans: (Optional.) The number of concurrent scans against the
Cloud Bigtable instance.
probability: (Optional.) A float between 0 (exclusive) and 1 (inclusive).
A non-1 value indicates to probabilistically sample rows with the
provided probability.
columns: The columns to read. Note: most commonly, they are expressed as
kwargs. Use the columns value if you are using column families that are
reserved. The value of columns and kwargs are merged. Columns is a list
of tuples of strings ("column_family", "column_qualifier").
**kwargs: The column families and columns to read. Keys are treated as
column_families, and values can be either lists of strings, or strings
that are treated as the column qualifier (column name).
Returns:
A `tf.data.Dataset` returning the row keys and the cell contents.
Raises:
ValueError: If the configured probability is unexpected.
"""
probability = _normalize_probability(probability)
normalized = _normalize_columns(columns, kwargs)
ds = _BigtableSampleKeyPairsDataset(self, "", start, end)
return self._make_parallel_scan_dataset(ds, num_parallel_scans, probability,
normalized)
def write(self, dataset, column_families, columns, timestamp=None):
"""Writes a dataset to the table.
Args:
dataset: A `tf.data.Dataset` to be written to this table. It must produce
a list of number-of-columns+1 elements, all of which must be strings.
The first value will be used as the row key, and subsequent values will
be used as cell values for the corresponding columns from the
corresponding column_families and columns entries.
column_families: A `tf.Tensor` of `tf.string`s corresponding to the
column family names to store the dataset's elements into.
columns: A `tf.Tensor` of `tf.string`s corresponding to the column names
to store the dataset's elements into.
timestamp: (Optional.) An int64 timestamp to write all the values at.
Leave as None to use server-provided timestamps.
Returns:
A `tf.Operation` that can be run to perform the write.
Raises:
ValueError: If there are unexpected or incompatible types, or if the
number of columns and column_families does not match the output of
`dataset`.
"""
if timestamp is None:
timestamp = -1 # Bigtable server provided timestamp.
for tensor_type in nest.flatten(dataset.output_types):
if tensor_type != dtypes.string:
raise ValueError("Not all elements of the dataset were `tf.string`")
for shape in nest.flatten(dataset.output_shapes):
if not shape.is_compatible_with(tensor_shape.scalar()):
raise ValueError("Not all elements of the dataset were scalars")
if len(column_families) != len(columns):
raise ValueError("len(column_families) != len(columns)")
if len(nest.flatten(dataset.output_types)) != len(columns) + 1:
raise ValueError("A column name must be specified for every component of "
"the dataset elements. (e.g.: len(columns) != "
"len(dataset.output_types))")
return gen_bigtable_ops.dataset_to_bigtable(
self._resource,
dataset._as_variant_tensor(), # pylint: disable=protected-access
column_families,
columns,
timestamp)
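# Usage sketch (hypothetical; the row keys and values below are made up):
#
#   rows = tf.data.Dataset.from_tensor_slices(
#       (["row1", "row2"],    # row keys
#        ["v1-a", "v2-a"],    # values written to cfa:c1
#        ["v1-b", "v2-b"]))   # values written to cfb:c2
#   write_op = table.write(rows,
#                          column_families=["cfa", "cfb"],
#                          columns=["c1", "c2"])
#   # In graph mode, run `write_op` in a session to apply the mutations.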
def _make_parallel_scan_dataset(self, ds, num_parallel_scans,
normalized_probability, normalized_columns):
"""Builds a parallel dataset from a given range.
Args:
ds: A `_BigtableSampleKeyPairsDataset` returning ranges of keys to use.
num_parallel_scans: The number of concurrent parallel scans to use.
normalized_probability: A number between 0 and 1 for the keep probability.
normalized_columns: The column families and column qualifiers to retrieve.
Returns:
A `tf.data.Dataset` representing the result of the parallel scan.
"""
if num_parallel_scans is None:
num_parallel_scans = 50
ds = ds.shuffle(buffer_size=10000) # TODO(saeta): Make configurable.
def _interleave_fn(start, end):
return _BigtableScanDataset(
self,
prefix="",
start=start,
end=end,
normalized=normalized_columns,
probability=normalized_probability)
# Note prefetch_input_elements must be set in order to avoid rpc timeouts.
ds = ds.apply(
interleave_ops.parallel_interleave(
_interleave_fn,
cycle_length=num_parallel_scans,
sloppy=True,
prefetch_input_elements=1))
return ds
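# Rough equivalence (hypothetical): the helper above is what backs the public
# parallel_scan_* methods, e.g.
#
#   ds = table.parallel_scan_range("a", "z", num_parallel_scans=4, cfa="c1")
#
# which samples key ranges, shuffles them, and interleaves one sub-scan per
# (start, end) pair with sloppy ordering.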
def _normalize_probability(probability):
if probability is None:
probability = 1.0
if isinstance(probability, float) and (probability <= 0.0 or
probability > 1.0):
raise ValueError("probability must be in the range (0, 1].")
return probability
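# Illustrative behaviour (not exhaustive):
#
#   _normalize_probability(None)   # -> 1.0, i.e. keep every row
#   _normalize_probability(0.25)   # -> 0.25
#   _normalize_probability(0.0)    # raises ValueError: must be in (0, 1]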
def _normalize_columns(columns, provided_kwargs):
"""Converts arguments (columns, and kwargs dict) to C++ representation.
Args:
columns: a data structure containing the column families and qualifiers to
retrieve. Valid types include (1) None, (2) a list of tuples, (3) a tuple of
strings.
provided_kwargs: a dictionary containing the column families and qualifiers
to retrieve.
Returns:
A list of pairs of column family+qualifier to retrieve.
Raises:
ValueError: If there are no cells to retrieve or the columns are in an
incorrect format.
"""
normalized = columns
if normalized is None:
normalized = []
if isinstance(normalized, tuple):
if len(normalized) == 2:
normalized = [normalized]
else:
raise ValueError("columns was a tuple of inappropriate length")
for key, value in iteritems(provided_kwargs):
if key == "name":
continue
if isinstance(value, string_types):
normalized.append((key, value))
continue
for col in value:
normalized.append((key, col))
if not normalized:
raise ValueError("At least one column + column family must be specified.")
return normalized
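# Illustrative behaviour (not exhaustive):
#
#   _normalize_columns([("cfa", "c1")], {"cfb": ["c2", "c3"]})
#   # -> [("cfa", "c1"), ("cfb", "c2"), ("cfb", "c3")]
#   _normalize_columns(None, {})
#   # raises ValueError: at least one column + column family is required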
class _BigtableKeyDataset(dataset_ops.Dataset):
"""_BigtableKeyDataset is an abstract class representing the keys of a table.
"""
def __init__(self, table):
"""Constructs a _BigtableKeyDataset.
Args:
table: a Bigtable class.
"""
super(_BigtableKeyDataset, self).__init__()
self._table = table
@property
def output_classes(self):
return ops.Tensor
@property
def output_shapes(self):
return tensor_shape.TensorShape([])
@property
def output_types(self):
return dtypes.string
class _BigtablePrefixKeyDataset(_BigtableKeyDataset):
"""_BigtablePrefixKeyDataset represents looking up keys by prefix.
"""
def __init__(self, table, prefix):
super(_BigtablePrefixKeyDataset, self).__init__(table)
self._prefix = prefix
def _as_variant_tensor(self):
return gen_bigtable_ops.bigtable_prefix_key_dataset(
table=self._table._resource, # pylint: disable=protected-access
prefix=self._prefix)
class _BigtableRangeKeyDataset(_BigtableKeyDataset):
"""_BigtableRangeKeyDataset represents looking up keys by range.
"""
def __init__(self, table, start, end):
super(_BigtableRangeKeyDataset, self).__init__(table)
self._start = start
self._end = end
def _as_variant_tensor(self):
return gen_bigtable_ops.bigtable_range_key_dataset(
table=self._table._resource, # pylint: disable=protected-access
start_key=self._start,
end_key=self._end)
class _BigtableSampleKeysDataset(_BigtableKeyDataset):
"""_BigtableSampleKeysDataset represents a sampling of row keys.
"""
# TODO(saeta): Expose the data size offsets into the keys.
def __init__(self, table):
super(_BigtableSampleKeysDataset, self).__init__(table)
def _as_variant_tensor(self):
return gen_bigtable_ops.bigtable_sample_keys_dataset(
table=self._table._resource) # pylint: disable=protected-access
class _BigtableLookupDataset(dataset_ops.Dataset):
"""_BigtableLookupDataset represents a dataset that retrieves values for keys.
"""
def __init__(self, dataset, table, normalized):
self._num_outputs = len(normalized) + 1 # 1 for row key
self._dataset = dataset
self._table = table
self._normalized = normalized
self._column_families = [i[0] for i in normalized]
self._columns = [i[1] for i in normalized]
@property
def output_classes(self):
return tuple([ops.Tensor] * self._num_outputs)
@property
def output_shapes(self):
return tuple([tensor_shape.TensorShape([])] * self._num_outputs)
@property
def output_types(self):
return tuple([dtypes.string] * self._num_outputs)
def _as_variant_tensor(self):
# pylint: disable=protected-access
return gen_bigtable_ops.bigtable_lookup_dataset(
keys_dataset=self._dataset._as_variant_tensor(),
table=self._table._resource,
column_families=self._column_families,
columns=self._columns)
class _BigtableScanDataset(dataset_ops.Dataset):
"""_BigtableScanDataset represents a dataset that retrieves keys and values.
"""
def __init__(self, table, prefix, start, end, normalized, probability):
self._table = table
self._prefix = prefix
self._start = start
self._end = end
self._column_families = [i[0] for i in normalized]
self._columns = [i[1] for i in normalized]
self._probability = probability
self._num_outputs = len(normalized) + 1 # 1 for row key
@property
def output_classes(self):
return tuple([ops.Tensor] * self._num_outputs)
@property
def output_shapes(self):
return tuple([tensor_shape.TensorShape([])] * self._num_outputs)
@property
def output_types(self):
return tuple([dtypes.string] * self._num_outputs)
def _as_variant_tensor(self):
return gen_bigtable_ops.bigtable_scan_dataset(
table=self._table._resource, # pylint: disable=protected-access
prefix=self._prefix,
start_key=self._start,
end_key=self._end,
column_families=self._column_families,
columns=self._columns,
probability=self._probability)
class _BigtableSampleKeyPairsDataset(dataset_ops.Dataset):
"""_BigtableSampleKeyPairsDataset returns key pairs from a Bigtable table.
"""
def __init__(self, table, prefix, start, end):
self._table = table
self._prefix = prefix
self._start = start
self._end = end
@property
def output_classes(self):
return (ops.Tensor, ops.Tensor)
@property
def output_shapes(self):
return (tensor_shape.TensorShape([]), tensor_shape.TensorShape([]))
@property
def output_types(self):
return (dtypes.string, dtypes.string)
def _as_variant_tensor(self):
# pylint: disable=protected-access
return gen_bigtable_ops.bigtable_sample_key_pairs_dataset(
table=self._table._resource,
prefix=self._prefix,
start_key=self._start,
end_key=self._end)
| apache-2.0 | -6,786,309,422,389,752,000 | 37.176944 | 80 | 0.65934 | false |
yanchen036/tensorflow | tensorflow/python/kernel_tests/check_ops_test.py | 2 | 53746 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.check_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.platform import test
class AssertProperIterableTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
def test_single_tensor_raises(self):
tensor = constant_op.constant(1)
with self.assertRaisesRegexp(TypeError, "proper"):
check_ops.assert_proper_iterable(tensor)
@test_util.run_in_graph_and_eager_modes()
def test_single_sparse_tensor_raises(self):
ten = sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
with self.assertRaisesRegexp(TypeError, "proper"):
check_ops.assert_proper_iterable(ten)
@test_util.run_in_graph_and_eager_modes()
def test_single_ndarray_raises(self):
array = np.array([1, 2, 3])
with self.assertRaisesRegexp(TypeError, "proper"):
check_ops.assert_proper_iterable(array)
@test_util.run_in_graph_and_eager_modes()
def test_single_string_raises(self):
mystr = "hello"
with self.assertRaisesRegexp(TypeError, "proper"):
check_ops.assert_proper_iterable(mystr)
@test_util.run_in_graph_and_eager_modes()
def test_non_iterable_object_raises(self):
non_iterable = 1234
with self.assertRaisesRegexp(TypeError, "to be iterable"):
check_ops.assert_proper_iterable(non_iterable)
@test_util.run_in_graph_and_eager_modes()
def test_list_does_not_raise(self):
list_of_stuff = [
constant_op.constant([11, 22]), constant_op.constant([1, 2])
]
check_ops.assert_proper_iterable(list_of_stuff)
@test_util.run_in_graph_and_eager_modes()
def test_generator_does_not_raise(self):
generator_of_stuff = (constant_op.constant([11, 22]), constant_op.constant(
[1, 2]))
check_ops.assert_proper_iterable(generator_of_stuff)
class AssertEqualTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_equal(self):
small = constant_op.constant([1, 2], name="small")
with ops.control_dependencies([check_ops.assert_equal(small, small)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_scalar_comparison(self):
const_true = constant_op.constant(True, name="true")
const_false = constant_op.constant(False, name="false")
with self.assertRaisesRegexp(errors.InvalidArgumentError, "fail"):
check_ops.assert_equal(const_true, const_false, message="fail")
def test_returns_none_with_eager(self):
with context.eager_mode():
small = constant_op.constant([1, 2], name="small")
x = check_ops.assert_equal(small, small)
assert x is None
@test_util.run_in_graph_and_eager_modes()
def test_raises_when_greater(self):
# Static check
static_small = constant_op.constant([1, 2], name="small")
static_big = constant_op.constant([3, 4], name="big")
with self.assertRaisesRegexp(errors.InvalidArgumentError, "fail"):
check_ops.assert_equal(static_big, static_small, message="fail")
def test_raises_when_greater_dynamic(self):
with self.test_session():
small = array_ops.placeholder(dtypes.int32, name="small")
big = array_ops.placeholder(dtypes.int32, name="big")
with ops.control_dependencies(
[check_ops.assert_equal(big, small, message="fail")]):
out = array_ops.identity(small)
with self.assertRaisesOpError("fail.*big.*small"):
out.eval(feed_dict={small: [1, 2], big: [3, 4]})
def test_error_message_eager(self):
expected_error_msg_full = r"""big does not equal small
Condition x == y did not hold.
Indices of first 3 different values:
\[\[0 0\]
\[1 1\]
\[2 0\]\]
Corresponding x values:
\[2 3 6\]
Corresponding y values:
\[20 30 60\]
First 6 elements of x:
\[2 2 3 3 6 6\]
First 6 elements of y:
\[20 2 3 30 60 6\]
"""
expected_error_msg_default = r"""big does not equal small
Condition x == y did not hold.
Indices of first 3 different values:
\[\[0 0\]
\[1 1\]
\[2 0\]\]
Corresponding x values:
\[2 3 6\]
Corresponding y values:
\[20 30 60\]
First 3 elements of x:
\[2 2 3\]
First 3 elements of y:
\[20 2 3\]
"""
expected_error_msg_short = r"""big does not equal small
Condition x == y did not hold.
Indices of first 2 different values:
\[\[0 0\]
\[1 1\]\]
Corresponding x values:
\[2 3\]
Corresponding y values:
\[20 30\]
First 2 elements of x:
\[2 2\]
First 2 elements of y:
\[20 2\]
"""
with context.eager_mode():
big = constant_op.constant([[2, 2], [3, 3], [6, 6]])
small = constant_op.constant([[20, 2], [3, 30], [60, 6]])
with self.assertRaisesRegexp(errors.InvalidArgumentError,
expected_error_msg_full):
check_ops.assert_equal(big, small, message="big does not equal small",
summarize=10)
with self.assertRaisesRegexp(errors.InvalidArgumentError,
expected_error_msg_default):
check_ops.assert_equal(big, small, message="big does not equal small")
with self.assertRaisesRegexp(errors.InvalidArgumentError,
expected_error_msg_short):
check_ops.assert_equal(big, small, message="big does not equal small",
summarize=2)
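# The three expected messages above differ only in how many mismatching
# elements are reported; `summarize` controls that count. A user-side sketch
# (hypothetical tensors):
#
#   check_ops.assert_equal(big, small,
#                          message="big does not equal small",
#                          summarize=2)   # report at most 2 offending values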
@test_util.run_in_graph_and_eager_modes()
def test_raises_when_less(self):
# Static check
static_small = constant_op.constant([3, 1], name="small")
static_big = constant_op.constant([4, 2], name="big")
with self.assertRaisesRegexp(errors.InvalidArgumentError, "fail"):
check_ops.assert_equal(static_big, static_small, message="fail")
def test_raises_when_less_dynamic(self):
with self.test_session():
small = array_ops.placeholder(dtypes.int32, name="small")
big = array_ops.placeholder(dtypes.int32, name="big")
with ops.control_dependencies([check_ops.assert_equal(small, big)]):
out = array_ops.identity(small)
with self.assertRaisesOpError("small.*big"):
out.eval(feed_dict={small: [3, 1], big: [4, 2]})
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_equal_and_broadcastable_shapes(self):
small = constant_op.constant([[1, 2], [1, 2]], name="small")
small_2 = constant_op.constant([1, 2], name="small_2")
with ops.control_dependencies([check_ops.assert_equal(small, small_2)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_raises_when_equal_but_non_broadcastable_shapes(self):
small = constant_op.constant([1, 1, 1], name="small")
small_2 = constant_op.constant([1, 1], name="small_2")
# The exception in eager and non-eager mode is different because
# eager mode relies on shape check done as part of the C++ op, while
# graph mode does shape checks when creating the `Operation` instance.
with self.assertRaisesRegexp(
(errors.InvalidArgumentError, ValueError),
(r"Incompatible shapes: \[3\] vs. \[2\]|"
r"Dimensions must be equal, but are 3 and 2")):
with ops.control_dependencies([check_ops.assert_equal(small, small_2)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_raises_when_not_equal_and_broadcastable_shapes(self):
cond = constant_op.constant([True, False], name="small")
with self.assertRaisesRegexp(errors.InvalidArgumentError, "fail"):
check_ops.assert_equal(cond, False, message="fail")
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_both_empty(self):
larry = constant_op.constant([])
curly = constant_op.constant([])
with ops.control_dependencies([check_ops.assert_equal(larry, curly)]):
out = array_ops.identity(larry)
self.evaluate(out)
class AssertNoneEqualTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_not_equal(self):
small = constant_op.constant([1, 2], name="small")
big = constant_op.constant([10, 20], name="small")
with ops.control_dependencies(
[check_ops.assert_none_equal(big, small)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_raises_when_equal(self):
small = constant_op.constant([3, 1], name="small")
with self.assertRaisesOpError("x != y did not hold"):
with ops.control_dependencies(
[check_ops.assert_none_equal(small, small)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_not_equal_and_broadcastable_shapes(self):
small = constant_op.constant([1, 2], name="small")
big = constant_op.constant([3], name="big")
with ops.control_dependencies(
[check_ops.assert_none_equal(small, big)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_raises_when_not_equal_but_non_broadcastable_shapes(self):
with self.test_session():
small = constant_op.constant([1, 1, 1], name="small")
big = constant_op.constant([10, 10], name="big")
# The exception in eager and non-eager mode is different because
# eager mode relies on shape check done as part of the C++ op, while
# graph mode does shape checks when creating the `Operation` instance.
with self.assertRaisesRegexp(
(ValueError, errors.InvalidArgumentError),
(r"Incompatible shapes: \[3\] vs. \[2\]|"
r"Dimensions must be equal, but are 3 and 2")):
with ops.control_dependencies(
[check_ops.assert_none_equal(small, big)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_both_empty(self):
with self.test_session():
larry = constant_op.constant([])
curly = constant_op.constant([])
with ops.control_dependencies(
[check_ops.assert_none_equal(larry, curly)]):
out = array_ops.identity(larry)
self.evaluate(out)
def test_returns_none_with_eager(self):
with context.eager_mode():
t1 = constant_op.constant([1, 2])
t2 = constant_op.constant([3, 4])
x = check_ops.assert_none_equal(t1, t2)
assert x is None
class AssertAllCloseTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_equal(self):
x = constant_op.constant(1., name="x")
y = constant_op.constant(1., name="y")
with ops.control_dependencies(
[check_ops.assert_near(x, y, message="failure message")]):
out = array_ops.identity(x)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_close_enough_32_bit_due_to_default_rtol(self):
eps = np.finfo(np.float32).eps
# Default rtol/atol is 10*eps
x = constant_op.constant(1., name="x")
y = constant_op.constant(1. + 2 * eps, name="y", dtype=np.float32)
with ops.control_dependencies(
[check_ops.assert_near(x, y, atol=0., message="failure message")]):
out = array_ops.identity(x)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_close_enough_32_bit_due_to_default_atol(self):
eps = np.finfo(np.float32).eps
# Default rtol/atol is 10*eps
x = constant_op.constant(0., name="x")
y = constant_op.constant(0. + 2 * eps, name="y", dtype=np.float32)
with ops.control_dependencies(
[check_ops.assert_near(x, y, rtol=0., message="failure message")]):
out = array_ops.identity(x)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_close_enough_64_bit_due_to_default_rtol(self):
eps = np.finfo(np.float64).eps
# Default rtol/atol is 10*eps
x = constant_op.constant(1., name="x", dtype=np.float64)
y = constant_op.constant(1. + 2 * eps, name="y", dtype=np.float64)
with ops.control_dependencies(
[check_ops.assert_near(x, y, atol=0., message="failure message")]):
out = array_ops.identity(x)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_close_enough_64_bit_due_to_default_atol(self):
eps = np.finfo(np.float64).eps
# Default rtol/atol is 10*eps
x = constant_op.constant(0., name="x", dtype=np.float64)
y = constant_op.constant(0. + 2 * eps, name="y", dtype=np.float64)
with ops.control_dependencies(
[check_ops.assert_near(x, y, rtol=0., message="failure message")]):
out = array_ops.identity(x)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_close_enough_due_to_custom_rtol(self):
x = constant_op.constant(1., name="x")
y = constant_op.constant(1.1, name="y")
with ops.control_dependencies(
[check_ops.assert_near(x, y, atol=0., rtol=0.5,
message="failure message")]):
out = array_ops.identity(x)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_close_enough_due_to_custom_atol(self):
x = constant_op.constant(0., name="x")
y = constant_op.constant(0.1, name="y", dtype=np.float32)
with ops.control_dependencies(
[check_ops.assert_near(x, y, atol=0.5, rtol=0.,
message="failure message")]):
out = array_ops.identity(x)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_both_empty(self):
larry = constant_op.constant([])
curly = constant_op.constant([])
with ops.control_dependencies([check_ops.assert_near(larry, curly)]):
out = array_ops.identity(larry)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_raises_when_atol_violated(self):
x = constant_op.constant(10., name="x")
y = constant_op.constant(10.2, name="y")
with self.assertRaisesOpError("x and y not equal to tolerance"):
with ops.control_dependencies(
[check_ops.assert_near(x, y, atol=0.1,
message="failure message")]):
out = array_ops.identity(x)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_raises_when_default_rtol_violated(self):
x = constant_op.constant(0.1, name="x")
y = constant_op.constant(0.0, name="y")
with self.assertRaisesOpError("x and y not equal to tolerance"):
with ops.control_dependencies(
[check_ops.assert_near(x, y, message="failure message")]):
out = array_ops.identity(x)
self.evaluate(out)
def test_returns_none_with_eager(self):
with context.eager_mode():
t1 = constant_op.constant([1., 2.])
t2 = constant_op.constant([1., 2.])
x = check_ops.assert_near(t1, t2)
assert x is None
class AssertLessTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
def test_raises_when_equal(self):
small = constant_op.constant([1, 2], name="small")
with self.assertRaisesOpError("failure message.*\n*.* x < y did not hold"):
with ops.control_dependencies(
[check_ops.assert_less(
small, small, message="failure message")]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_raises_when_greater(self):
small = constant_op.constant([1, 2], name="small")
big = constant_op.constant([3, 4], name="big")
with self.assertRaisesOpError("x < y did not hold"):
with ops.control_dependencies([check_ops.assert_less(big, small)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_less(self):
small = constant_op.constant([3, 1], name="small")
big = constant_op.constant([4, 2], name="big")
with ops.control_dependencies([check_ops.assert_less(small, big)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_less_and_broadcastable_shapes(self):
small = constant_op.constant([1], name="small")
big = constant_op.constant([3, 2], name="big")
with ops.control_dependencies([check_ops.assert_less(small, big)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_raises_when_less_but_non_broadcastable_shapes(self):
small = constant_op.constant([1, 1, 1], name="small")
big = constant_op.constant([3, 2], name="big")
# The exception in eager and non-eager mode is different because
# eager mode relies on shape check done as part of the C++ op, while
# graph mode does shape checks when creating the `Operation` instance.
with self.assertRaisesRegexp(
(ValueError, errors.InvalidArgumentError),
(r"Incompatible shapes: \[3\] vs. \[2\]|"
"Dimensions must be equal, but are 3 and 2")):
with ops.control_dependencies([check_ops.assert_less(small, big)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_both_empty(self):
larry = constant_op.constant([])
curly = constant_op.constant([])
with ops.control_dependencies([check_ops.assert_less(larry, curly)]):
out = array_ops.identity(larry)
self.evaluate(out)
def test_returns_none_with_eager(self):
with context.eager_mode():
t1 = constant_op.constant([1, 2])
t2 = constant_op.constant([3, 4])
x = check_ops.assert_less(t1, t2)
assert x is None
class AssertLessEqualTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_equal(self):
small = constant_op.constant([1, 2], name="small")
with ops.control_dependencies(
[check_ops.assert_less_equal(small, small)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_raises_when_greater(self):
small = constant_op.constant([1, 2], name="small")
big = constant_op.constant([3, 4], name="big")
with self.assertRaisesOpError("fail"):
with ops.control_dependencies(
[check_ops.assert_less_equal(
big, small, message="fail")]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_less_equal(self):
small = constant_op.constant([1, 2], name="small")
big = constant_op.constant([3, 2], name="big")
with ops.control_dependencies([check_ops.assert_less_equal(small, big)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_less_equal_and_broadcastable_shapes(self):
small = constant_op.constant([1], name="small")
big = constant_op.constant([3, 1], name="big")
with ops.control_dependencies([check_ops.assert_less_equal(small, big)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_raises_when_less_equal_but_non_broadcastable_shapes(self):
small = constant_op.constant([3, 1], name="small")
big = constant_op.constant([1, 1, 1], name="big")
# The exception in eager and non-eager mode is different because
# eager mode relies on shape check done as part of the C++ op, while
# graph mode does shape checks when creating the `Operation` instance.
with self.assertRaisesRegexp(
(errors.InvalidArgumentError, ValueError),
(r"Incompatible shapes: \[2\] vs. \[3\]|"
r"Dimensions must be equal, but are 2 and 3")):
with ops.control_dependencies(
[check_ops.assert_less_equal(small, big)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_both_empty(self):
larry = constant_op.constant([])
curly = constant_op.constant([])
with ops.control_dependencies(
[check_ops.assert_less_equal(larry, curly)]):
out = array_ops.identity(larry)
self.evaluate(out)
class AssertGreaterTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
def test_raises_when_equal(self):
small = constant_op.constant([1, 2], name="small")
with self.assertRaisesOpError("fail"):
with ops.control_dependencies(
[check_ops.assert_greater(
small, small, message="fail")]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_raises_when_less(self):
small = constant_op.constant([1, 2], name="small")
big = constant_op.constant([3, 4], name="big")
with self.assertRaisesOpError("x > y did not hold"):
with ops.control_dependencies([check_ops.assert_greater(small, big)]):
out = array_ops.identity(big)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_greater(self):
small = constant_op.constant([3, 1], name="small")
big = constant_op.constant([4, 2], name="big")
with ops.control_dependencies([check_ops.assert_greater(big, small)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_greater_and_broadcastable_shapes(self):
small = constant_op.constant([1], name="small")
big = constant_op.constant([3, 2], name="big")
with ops.control_dependencies([check_ops.assert_greater(big, small)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_raises_when_greater_but_non_broadcastable_shapes(self):
small = constant_op.constant([1, 1, 1], name="small")
big = constant_op.constant([3, 2], name="big")
# The exception in eager and non-eager mode is different because
# eager mode relies on shape check done as part of the C++ op, while
# graph mode does shape checks when creating the `Operation` instance.
with self.assertRaisesRegexp(
(errors.InvalidArgumentError, ValueError),
(r"Incompatible shapes: \[2\] vs. \[3\]|"
r"Dimensions must be equal, but are 2 and 3")):
with ops.control_dependencies([check_ops.assert_greater(big, small)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_both_empty(self):
larry = constant_op.constant([])
curly = constant_op.constant([])
with ops.control_dependencies([check_ops.assert_greater(larry, curly)]):
out = array_ops.identity(larry)
self.evaluate(out)
class AssertGreaterEqualTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_equal(self):
small = constant_op.constant([1, 2], name="small")
with ops.control_dependencies(
[check_ops.assert_greater_equal(small, small)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_raises_when_less(self):
small = constant_op.constant([1, 2], name="small")
big = constant_op.constant([3, 4], name="big")
with self.assertRaisesOpError("fail"):
with ops.control_dependencies(
[check_ops.assert_greater_equal(
small, big, message="fail")]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_greater_equal(self):
small = constant_op.constant([1, 2], name="small")
big = constant_op.constant([3, 2], name="big")
with ops.control_dependencies(
[check_ops.assert_greater_equal(big, small)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_greater_equal_and_broadcastable_shapes(self):
small = constant_op.constant([1], name="small")
big = constant_op.constant([3, 1], name="big")
with ops.control_dependencies(
[check_ops.assert_greater_equal(big, small)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_raises_when_greater_equal_but_non_broadcastable_shapes(self):
small = constant_op.constant([1, 1, 1], name="small")
big = constant_op.constant([3, 1], name="big")
# The exception in eager and non-eager mode is different because
# eager mode relies on shape check done as part of the C++ op, while
# graph mode does shape checks when creating the `Operation` instance.
with self.assertRaisesRegexp(
(errors.InvalidArgumentError, ValueError),
(r"Incompatible shapes: \[2\] vs. \[3\]|"
r"Dimensions must be equal, but are 2 and 3")):
with ops.control_dependencies(
[check_ops.assert_greater_equal(big, small)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_both_empty(self):
larry = constant_op.constant([])
curly = constant_op.constant([])
with ops.control_dependencies(
[check_ops.assert_greater_equal(larry, curly)]):
out = array_ops.identity(larry)
self.evaluate(out)
class AssertNegativeTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_negative(self):
frank = constant_op.constant([-1, -2], name="frank")
with ops.control_dependencies([check_ops.assert_negative(frank)]):
out = array_ops.identity(frank)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_raises_when_positive(self):
doug = constant_op.constant([1, 2], name="doug")
with self.assertRaisesOpError("fail"):
with ops.control_dependencies(
[check_ops.assert_negative(
doug, message="fail")]):
out = array_ops.identity(doug)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_raises_when_zero(self):
claire = constant_op.constant([0], name="claire")
with self.assertRaisesOpError("x < 0 did not hold"):
with ops.control_dependencies([check_ops.assert_negative(claire)]):
out = array_ops.identity(claire)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_empty_tensor_doesnt_raise(self):
# A tensor is negative when it satisfies:
# For every element x_i in x, x_i < 0
# and an empty tensor has no elements, so this is trivially satisfied.
# This is standard set theory.
empty = constant_op.constant([], name="empty")
with ops.control_dependencies([check_ops.assert_negative(empty)]):
out = array_ops.identity(empty)
self.evaluate(out)
class AssertPositiveTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
def test_raises_when_negative(self):
freddie = constant_op.constant([-1, -2], name="freddie")
with self.assertRaisesOpError("fail"):
with ops.control_dependencies(
[check_ops.assert_positive(
freddie, message="fail")]):
out = array_ops.identity(freddie)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_positive(self):
remmy = constant_op.constant([1, 2], name="remmy")
with ops.control_dependencies([check_ops.assert_positive(remmy)]):
out = array_ops.identity(remmy)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_raises_when_zero(self):
meechum = constant_op.constant([0], name="meechum")
with self.assertRaisesOpError("x > 0 did not hold"):
with ops.control_dependencies([check_ops.assert_positive(meechum)]):
out = array_ops.identity(meechum)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_empty_tensor_doesnt_raise(self):
# A tensor is positive when it satisfies:
# For every element x_i in x, x_i > 0
# and an empty tensor has no elements, so this is trivially satisfied.
# This is standard set theory.
empty = constant_op.constant([], name="empty")
with ops.control_dependencies([check_ops.assert_positive(empty)]):
out = array_ops.identity(empty)
self.evaluate(out)
class AssertRankTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
def test_rank_zero_tensor_raises_if_rank_too_small_static_rank(self):
tensor = constant_op.constant(1, name="my_tensor")
desired_rank = 1
with self.assertRaisesRegexp(ValueError,
"fail.*must have rank 1"):
with ops.control_dependencies(
[check_ops.assert_rank(
tensor, desired_rank, message="fail")]):
self.evaluate(array_ops.identity(tensor))
def test_rank_zero_tensor_raises_if_rank_too_small_dynamic_rank(self):
with self.test_session():
tensor = array_ops.placeholder(dtypes.float32, name="my_tensor")
desired_rank = 1
with ops.control_dependencies(
[check_ops.assert_rank(
tensor, desired_rank, message="fail")]):
with self.assertRaisesOpError("fail.*my_tensor.*rank"):
array_ops.identity(tensor).eval(feed_dict={tensor: 0})
@test_util.run_in_graph_and_eager_modes()
def test_rank_zero_tensor_doesnt_raise_if_rank_just_right_static_rank(self):
tensor = constant_op.constant(1, name="my_tensor")
desired_rank = 0
with ops.control_dependencies(
[check_ops.assert_rank(tensor, desired_rank)]):
self.evaluate(array_ops.identity(tensor))
def test_rank_zero_tensor_doesnt_raise_if_rank_just_right_dynamic_rank(self):
with self.test_session():
tensor = array_ops.placeholder(dtypes.float32, name="my_tensor")
desired_rank = 0
with ops.control_dependencies(
[check_ops.assert_rank(tensor, desired_rank)]):
array_ops.identity(tensor).eval(feed_dict={tensor: 0})
@test_util.run_in_graph_and_eager_modes()
def test_rank_one_tensor_raises_if_rank_too_large_static_rank(self):
tensor = constant_op.constant([1, 2], name="my_tensor")
desired_rank = 0
with self.assertRaisesRegexp(ValueError, "rank"):
with ops.control_dependencies(
[check_ops.assert_rank(tensor, desired_rank)]):
self.evaluate(array_ops.identity(tensor))
def test_rank_one_tensor_raises_if_rank_too_large_dynamic_rank(self):
with self.test_session():
tensor = array_ops.placeholder(dtypes.float32, name="my_tensor")
desired_rank = 0
with ops.control_dependencies(
[check_ops.assert_rank(tensor, desired_rank)]):
with self.assertRaisesOpError("my_tensor.*rank"):
array_ops.identity(tensor).eval(feed_dict={tensor: [1, 2]})
@test_util.run_in_graph_and_eager_modes()
def test_rank_one_tensor_doesnt_raise_if_rank_just_right_static_rank(self):
tensor = constant_op.constant([1, 2], name="my_tensor")
desired_rank = 1
with ops.control_dependencies(
[check_ops.assert_rank(tensor, desired_rank)]):
self.evaluate(array_ops.identity(tensor))
def test_rank_one_tensor_doesnt_raise_if_rank_just_right_dynamic_rank(self):
with self.test_session():
tensor = array_ops.placeholder(dtypes.float32, name="my_tensor")
desired_rank = 1
with ops.control_dependencies(
[check_ops.assert_rank(tensor, desired_rank)]):
array_ops.identity(tensor).eval(feed_dict={tensor: [1, 2]})
@test_util.run_in_graph_and_eager_modes()
def test_rank_one_tensor_raises_if_rank_too_small_static_rank(self):
tensor = constant_op.constant([1, 2], name="my_tensor")
desired_rank = 2
with self.assertRaisesRegexp(ValueError, "rank"):
with ops.control_dependencies(
[check_ops.assert_rank(tensor, desired_rank)]):
self.evaluate(array_ops.identity(tensor))
def test_rank_one_tensor_raises_if_rank_too_small_dynamic_rank(self):
with self.test_session():
tensor = array_ops.placeholder(dtypes.float32, name="my_tensor")
desired_rank = 2
with ops.control_dependencies(
[check_ops.assert_rank(tensor, desired_rank)]):
with self.assertRaisesOpError("my_tensor.*rank"):
array_ops.identity(tensor).eval(feed_dict={tensor: [1, 2]})
@test_util.run_in_graph_and_eager_modes()
def test_raises_if_rank_is_not_scalar_static(self):
tensor = constant_op.constant([1, 2], name="my_tensor")
with self.assertRaisesRegexp(ValueError, "Rank must be a scalar"):
check_ops.assert_rank(tensor, np.array([], dtype=np.int32))
def test_raises_if_rank_is_not_scalar_dynamic(self):
with self.test_session():
tensor = constant_op.constant(
[1, 2], dtype=dtypes.float32, name="my_tensor")
rank_tensor = array_ops.placeholder(dtypes.int32, name="rank_tensor")
with self.assertRaisesOpError("Rank must be a scalar"):
with ops.control_dependencies(
[check_ops.assert_rank(tensor, rank_tensor)]):
array_ops.identity(tensor).eval(feed_dict={rank_tensor: [1, 2]})
@test_util.run_in_graph_and_eager_modes()
def test_raises_if_rank_is_not_integer_static(self):
tensor = constant_op.constant([1, 2], name="my_tensor")
with self.assertRaisesRegexp(TypeError,
"must be of type <dtype: 'int32'>"):
check_ops.assert_rank(tensor, .5)
def test_raises_if_rank_is_not_integer_dynamic(self):
with self.test_session():
tensor = constant_op.constant(
[1, 2], dtype=dtypes.float32, name="my_tensor")
rank_tensor = array_ops.placeholder(dtypes.float32, name="rank_tensor")
with self.assertRaisesRegexp(TypeError,
"must be of type <dtype: 'int32'>"):
with ops.control_dependencies(
[check_ops.assert_rank(tensor, rank_tensor)]):
array_ops.identity(tensor).eval(feed_dict={rank_tensor: .5})
class AssertRankInTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
def test_rank_zero_tensor_raises_if_rank_mismatch_static_rank(self):
tensor_rank0 = constant_op.constant(42, name="my_tensor")
with self.assertRaisesRegexp(
ValueError, "fail.*must have rank.*in.*1.*2"):
with ops.control_dependencies([
check_ops.assert_rank_in(tensor_rank0, (1, 2), message="fail")]):
self.evaluate(array_ops.identity(tensor_rank0))
def test_rank_zero_tensor_raises_if_rank_mismatch_dynamic_rank(self):
with self.test_session():
tensor_rank0 = array_ops.placeholder(dtypes.float32, name="my_tensor")
with ops.control_dependencies([
check_ops.assert_rank_in(tensor_rank0, (1, 2), message="fail")]):
with self.assertRaisesOpError("fail.*my_tensor.*rank"):
array_ops.identity(tensor_rank0).eval(feed_dict={tensor_rank0: 42.0})
@test_util.run_in_graph_and_eager_modes()
def test_rank_zero_tensor_doesnt_raise_if_rank_matches_static_rank(self):
tensor_rank0 = constant_op.constant(42, name="my_tensor")
for desired_ranks in ((0, 1, 2), (1, 0, 2), (1, 2, 0)):
with ops.control_dependencies([
check_ops.assert_rank_in(tensor_rank0, desired_ranks)]):
self.evaluate(array_ops.identity(tensor_rank0))
def test_rank_zero_tensor_doesnt_raise_if_rank_matches_dynamic_rank(self):
with self.test_session():
tensor_rank0 = array_ops.placeholder(dtypes.float32, name="my_tensor")
for desired_ranks in ((0, 1, 2), (1, 0, 2), (1, 2, 0)):
with ops.control_dependencies([
check_ops.assert_rank_in(tensor_rank0, desired_ranks)]):
array_ops.identity(tensor_rank0).eval(feed_dict={tensor_rank0: 42.0})
@test_util.run_in_graph_and_eager_modes()
def test_rank_one_tensor_doesnt_raise_if_rank_matches_static_rank(self):
tensor_rank1 = constant_op.constant([42, 43], name="my_tensor")
for desired_ranks in ((0, 1, 2), (1, 0, 2), (1, 2, 0)):
with ops.control_dependencies([
check_ops.assert_rank_in(tensor_rank1, desired_ranks)]):
self.evaluate(array_ops.identity(tensor_rank1))
def test_rank_one_tensor_doesnt_raise_if_rank_matches_dynamic_rank(self):
with self.test_session():
tensor_rank1 = array_ops.placeholder(dtypes.float32, name="my_tensor")
for desired_ranks in ((0, 1, 2), (1, 0, 2), (1, 2, 0)):
with ops.control_dependencies([
check_ops.assert_rank_in(tensor_rank1, desired_ranks)]):
array_ops.identity(tensor_rank1).eval(feed_dict={
tensor_rank1: (42.0, 43.0)
})
@test_util.run_in_graph_and_eager_modes()
def test_rank_one_tensor_raises_if_rank_mismatches_static_rank(self):
tensor_rank1 = constant_op.constant((42, 43), name="my_tensor")
with self.assertRaisesRegexp(ValueError, "rank"):
with ops.control_dependencies([
check_ops.assert_rank_in(tensor_rank1, (0, 2))]):
self.evaluate(array_ops.identity(tensor_rank1))
def test_rank_one_tensor_raises_if_rank_mismatches_dynamic_rank(self):
with self.test_session():
tensor_rank1 = array_ops.placeholder(dtypes.float32, name="my_tensor")
with ops.control_dependencies([
check_ops.assert_rank_in(tensor_rank1, (0, 2))]):
with self.assertRaisesOpError("my_tensor.*rank"):
array_ops.identity(tensor_rank1).eval(feed_dict={
tensor_rank1: (42.0, 43.0)
})
@test_util.run_in_graph_and_eager_modes()
def test_raises_if_rank_is_not_scalar_static(self):
tensor = constant_op.constant((42, 43), name="my_tensor")
desired_ranks = (
np.array(1, dtype=np.int32),
np.array((2, 1), dtype=np.int32))
with self.assertRaisesRegexp(ValueError, "Rank must be a scalar"):
check_ops.assert_rank_in(tensor, desired_ranks)
def test_raises_if_rank_is_not_scalar_dynamic(self):
with self.test_session():
tensor = constant_op.constant(
(42, 43), dtype=dtypes.float32, name="my_tensor")
desired_ranks = (
array_ops.placeholder(dtypes.int32, name="rank0_tensor"),
array_ops.placeholder(dtypes.int32, name="rank1_tensor"))
with self.assertRaisesOpError("Rank must be a scalar"):
with ops.control_dependencies(
(check_ops.assert_rank_in(tensor, desired_ranks),)):
array_ops.identity(tensor).eval(feed_dict={
desired_ranks[0]: 1,
desired_ranks[1]: [2, 1],
})
@test_util.run_in_graph_and_eager_modes()
def test_raises_if_rank_is_not_integer_static(self):
tensor = constant_op.constant((42, 43), name="my_tensor")
with self.assertRaisesRegexp(TypeError,
"must be of type <dtype: 'int32'>"):
check_ops.assert_rank_in(tensor, (1, .5,))
def test_raises_if_rank_is_not_integer_dynamic(self):
with self.test_session():
tensor = constant_op.constant(
(42, 43), dtype=dtypes.float32, name="my_tensor")
rank_tensor = array_ops.placeholder(dtypes.float32, name="rank_tensor")
with self.assertRaisesRegexp(TypeError,
"must be of type <dtype: 'int32'>"):
with ops.control_dependencies(
[check_ops.assert_rank_in(tensor, (1, rank_tensor))]):
array_ops.identity(tensor).eval(feed_dict={rank_tensor: .5})
class AssertRankAtLeastTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
def test_rank_zero_tensor_raises_if_rank_too_small_static_rank(self):
tensor = constant_op.constant(1, name="my_tensor")
desired_rank = 1
with self.assertRaisesRegexp(ValueError, "rank at least 1"):
with ops.control_dependencies(
[check_ops.assert_rank_at_least(tensor, desired_rank)]):
self.evaluate(array_ops.identity(tensor))
def test_rank_zero_tensor_raises_if_rank_too_small_dynamic_rank(self):
with self.test_session():
tensor = array_ops.placeholder(dtypes.float32, name="my_tensor")
desired_rank = 1
with ops.control_dependencies(
[check_ops.assert_rank_at_least(tensor, desired_rank)]):
with self.assertRaisesOpError("my_tensor.*rank"):
array_ops.identity(tensor).eval(feed_dict={tensor: 0})
@test_util.run_in_graph_and_eager_modes()
def test_rank_zero_tensor_doesnt_raise_if_rank_just_right_static_rank(self):
tensor = constant_op.constant(1, name="my_tensor")
desired_rank = 0
with ops.control_dependencies(
[check_ops.assert_rank_at_least(tensor, desired_rank)]):
self.evaluate(array_ops.identity(tensor))
def test_rank_zero_tensor_doesnt_raise_if_rank_just_right_dynamic_rank(self):
with self.test_session():
tensor = array_ops.placeholder(dtypes.float32, name="my_tensor")
desired_rank = 0
with ops.control_dependencies(
[check_ops.assert_rank_at_least(tensor, desired_rank)]):
array_ops.identity(tensor).eval(feed_dict={tensor: 0})
@test_util.run_in_graph_and_eager_modes()
def test_rank_one_tensor_doesnt_raise_if_rank_too_large_static_rank(self):
tensor = constant_op.constant([1, 2], name="my_tensor")
desired_rank = 0
with ops.control_dependencies(
[check_ops.assert_rank_at_least(tensor, desired_rank)]):
self.evaluate(array_ops.identity(tensor))
def test_rank_one_tensor_doesnt_raise_if_rank_too_large_dynamic_rank(self):
with self.test_session():
tensor = array_ops.placeholder(dtypes.float32, name="my_tensor")
desired_rank = 0
with ops.control_dependencies(
[check_ops.assert_rank_at_least(tensor, desired_rank)]):
array_ops.identity(tensor).eval(feed_dict={tensor: [1, 2]})
@test_util.run_in_graph_and_eager_modes()
def test_rank_one_tensor_doesnt_raise_if_rank_just_right_static_rank(self):
tensor = constant_op.constant([1, 2], name="my_tensor")
desired_rank = 1
with ops.control_dependencies(
[check_ops.assert_rank_at_least(tensor, desired_rank)]):
self.evaluate(array_ops.identity(tensor))
def test_rank_one_tensor_doesnt_raise_if_rank_just_right_dynamic_rank(self):
with self.test_session():
tensor = array_ops.placeholder(dtypes.float32, name="my_tensor")
desired_rank = 1
with ops.control_dependencies(
[check_ops.assert_rank_at_least(tensor, desired_rank)]):
array_ops.identity(tensor).eval(feed_dict={tensor: [1, 2]})
@test_util.run_in_graph_and_eager_modes()
def test_rank_one_tensor_raises_if_rank_too_small_static_rank(self):
tensor = constant_op.constant([1, 2], name="my_tensor")
desired_rank = 2
with self.assertRaisesRegexp(ValueError, "rank at least 2"):
with ops.control_dependencies(
[check_ops.assert_rank_at_least(tensor, desired_rank)]):
self.evaluate(array_ops.identity(tensor))
def test_rank_one_tensor_raises_if_rank_too_small_dynamic_rank(self):
with self.test_session():
tensor = array_ops.placeholder(dtypes.float32, name="my_tensor")
desired_rank = 2
with ops.control_dependencies(
[check_ops.assert_rank_at_least(tensor, desired_rank)]):
with self.assertRaisesOpError("my_tensor.*rank"):
array_ops.identity(tensor).eval(feed_dict={tensor: [1, 2]})
class AssertNonNegativeTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
def test_raises_when_negative(self):
zoe = constant_op.constant([-1, -2], name="zoe")
with self.assertRaisesOpError("x >= 0 did not hold"):
with ops.control_dependencies([check_ops.assert_non_negative(zoe)]):
out = array_ops.identity(zoe)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_zero_and_positive(self):
lucas = constant_op.constant([0, 2], name="lucas")
with ops.control_dependencies([check_ops.assert_non_negative(lucas)]):
out = array_ops.identity(lucas)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_empty_tensor_doesnt_raise(self):
# A tensor is non-negative when it satisfies:
# For every element x_i in x, x_i >= 0
# and an empty tensor has no elements, so this is trivially satisfied.
# This is standard set theory.
empty = constant_op.constant([], name="empty")
with ops.control_dependencies([check_ops.assert_non_negative(empty)]):
out = array_ops.identity(empty)
self.evaluate(out)
class AssertNonPositiveTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_zero_and_negative(self):
tom = constant_op.constant([0, -2], name="tom")
with ops.control_dependencies([check_ops.assert_non_positive(tom)]):
out = array_ops.identity(tom)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_raises_when_positive(self):
rachel = constant_op.constant([0, 2], name="rachel")
with self.assertRaisesOpError("x <= 0 did not hold"):
with ops.control_dependencies([check_ops.assert_non_positive(rachel)]):
out = array_ops.identity(rachel)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_empty_tensor_doesnt_raise(self):
# A tensor is non-positive when it satisfies:
# For every element x_i in x, x_i <= 0
# and an empty tensor has no elements, so this is trivially satisfied.
# This is standard set theory.
empty = constant_op.constant([], name="empty")
with ops.control_dependencies([check_ops.assert_non_positive(empty)]):
out = array_ops.identity(empty)
self.evaluate(out)
class AssertIntegerTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_integer(self):
integers = constant_op.constant([1, 2], name="integers")
with ops.control_dependencies([check_ops.assert_integer(integers)]):
out = array_ops.identity(integers)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_raises_when_float(self):
floats = constant_op.constant([1.0, 2.0], name="floats")
with self.assertRaisesRegexp(TypeError, "Expected.*integer"):
check_ops.assert_integer(floats)
class AssertTypeTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
def test_doesnt_raise_when_correct_type(self):
integers = constant_op.constant([1, 2], dtype=dtypes.int64)
with ops.control_dependencies([
check_ops.assert_type(integers, dtypes.int64)]):
out = array_ops.identity(integers)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes()
def test_raises_when_wrong_type(self):
floats = constant_op.constant([1.0, 2.0], dtype=dtypes.float16)
with self.assertRaisesRegexp(TypeError, "must be of type.*float32"):
check_ops.assert_type(floats, dtypes.float32)
class IsStrictlyIncreasingTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
def test_constant_tensor_is_not_strictly_increasing(self):
self.assertFalse(self.evaluate(check_ops.is_strictly_increasing([1, 1, 1])))
@test_util.run_in_graph_and_eager_modes()
def test_decreasing_tensor_is_not_strictly_increasing(self):
self.assertFalse(self.evaluate(
check_ops.is_strictly_increasing([1, 0, -1])))
@test_util.run_in_graph_and_eager_modes()
def test_2d_decreasing_tensor_is_not_strictly_increasing(self):
self.assertFalse(
self.evaluate(check_ops.is_strictly_increasing([[1, 3], [2, 4]])))
@test_util.run_in_graph_and_eager_modes()
def test_increasing_tensor_is_increasing(self):
self.assertTrue(self.evaluate(check_ops.is_strictly_increasing([1, 2, 3])))
@test_util.run_in_graph_and_eager_modes()
def test_increasing_rank_two_tensor(self):
self.assertTrue(
self.evaluate(check_ops.is_strictly_increasing([[-1, 2], [3, 4]])))
@test_util.run_in_graph_and_eager_modes()
def test_tensor_with_one_element_is_strictly_increasing(self):
self.assertTrue(self.evaluate(check_ops.is_strictly_increasing([1])))
@test_util.run_in_graph_and_eager_modes()
def test_empty_tensor_is_strictly_increasing(self):
self.assertTrue(self.evaluate(check_ops.is_strictly_increasing([])))
class IsNonDecreasingTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
def test_constant_tensor_is_non_decreasing(self):
self.assertTrue(self.evaluate(check_ops.is_non_decreasing([1, 1, 1])))
@test_util.run_in_graph_and_eager_modes()
def test_decreasing_tensor_is_not_non_decreasing(self):
self.assertFalse(self.evaluate(check_ops.is_non_decreasing([3, 2, 1])))
@test_util.run_in_graph_and_eager_modes()
def test_2d_decreasing_tensor_is_not_non_decreasing(self):
self.assertFalse(self.evaluate(
check_ops.is_non_decreasing([[1, 3], [2, 4]])))
@test_util.run_in_graph_and_eager_modes()
def test_increasing_rank_one_tensor_is_non_decreasing(self):
self.assertTrue(self.evaluate(check_ops.is_non_decreasing([1, 2, 3])))
@test_util.run_in_graph_and_eager_modes()
def test_increasing_rank_two_tensor(self):
self.assertTrue(self.evaluate(
check_ops.is_non_decreasing([[-1, 2], [3, 3]])))
@test_util.run_in_graph_and_eager_modes()
def test_tensor_with_one_element_is_non_decreasing(self):
self.assertTrue(self.evaluate(check_ops.is_non_decreasing([1])))
@test_util.run_in_graph_and_eager_modes()
def test_empty_tensor_is_non_decreasing(self):
self.assertTrue(self.evaluate(check_ops.is_non_decreasing([])))
class FloatDTypeTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
def test_assert_same_float_dtype(self):
self.assertIs(dtypes.float32,
check_ops.assert_same_float_dtype(None, None))
self.assertIs(dtypes.float32, check_ops.assert_same_float_dtype([], None))
self.assertIs(dtypes.float32,
check_ops.assert_same_float_dtype([], dtypes.float32))
self.assertIs(dtypes.float32,
check_ops.assert_same_float_dtype(None, dtypes.float32))
self.assertIs(dtypes.float32,
check_ops.assert_same_float_dtype([None, None], None))
self.assertIs(
dtypes.float32,
check_ops.assert_same_float_dtype([None, None], dtypes.float32))
const_float = constant_op.constant(3.0, dtype=dtypes.float32)
self.assertIs(
dtypes.float32,
check_ops.assert_same_float_dtype([const_float], dtypes.float32))
self.assertRaises(ValueError, check_ops.assert_same_float_dtype,
[const_float], dtypes.int32)
sparse_float = sparse_tensor.SparseTensor(
constant_op.constant([[111], [232]], dtypes.int64),
constant_op.constant([23.4, -43.2], dtypes.float32),
constant_op.constant([500], dtypes.int64))
self.assertIs(dtypes.float32,
check_ops.assert_same_float_dtype([sparse_float],
dtypes.float32))
self.assertRaises(ValueError, check_ops.assert_same_float_dtype,
[sparse_float], dtypes.int32)
self.assertRaises(ValueError, check_ops.assert_same_float_dtype,
[const_float, None, sparse_float], dtypes.float64)
self.assertIs(dtypes.float32,
check_ops.assert_same_float_dtype(
[const_float, sparse_float]))
self.assertIs(dtypes.float32,
check_ops.assert_same_float_dtype(
[const_float, sparse_float], dtypes.float32))
const_int = constant_op.constant(3, dtype=dtypes.int32)
self.assertRaises(ValueError, check_ops.assert_same_float_dtype,
[sparse_float, const_int])
self.assertRaises(ValueError, check_ops.assert_same_float_dtype,
[sparse_float, const_int], dtypes.int32)
self.assertRaises(ValueError, check_ops.assert_same_float_dtype,
[sparse_float, const_int], dtypes.float32)
self.assertRaises(ValueError, check_ops.assert_same_float_dtype,
[const_int])
class AssertScalarTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
def test_assert_scalar(self):
check_ops.assert_scalar(constant_op.constant(3))
check_ops.assert_scalar(constant_op.constant("foo"))
check_ops.assert_scalar(3)
check_ops.assert_scalar("foo")
with self.assertRaisesRegexp(ValueError, "Expected scalar"):
check_ops.assert_scalar(constant_op.constant([3, 4]))
if __name__ == "__main__":
test.main()
| apache-2.0 | -4,060,775,781,880,462,000 | 40.343077 | 80 | 0.66418 | false |
andalexo/bgv | bgvDataDisp/guiqt.py | 1 | 8538 | """
The PyQt4 GUI classes for the bgvdata package.
"""
from __future__ import print_function, division
import logging
from os.path import splitext
try:
from PyQt4.QtGui import QMainWindow, QDockWidget
from PyQt4 import QtCore
except ImportError:
from PyQt5.QtWidgets import QMainWindow, QDockWidget
from PyQt5 import QtCore
from pyqttoolbox.auxwidgets import StatusBar, ToolBar, MenuBar, PlayStop
from pyqttoolbox.auxwidgets import TabbedWidgets, TreeFileExplorer
from pyqttoolbox.pgplotting_old import BufferProvider
from pyqttoolbox.pgplotting import ScrollArea, PgDockArea
from pyqttoolbox.threadtools import Worker
from wpap_bgv import BgvDataFileCtrl, TriggerDataCtrl
# CONSTANTS
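# BGV_LOGDB_DICT maps display labels to logging-database variable names (beam,
# vacuum, radiation-monitor, BSRT and BGV temperature signals, by the look of the
# identifiers); it is only used below as the default variable set (def_vars)
# handed to the optional LogDB widget.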
BGV_LOGDB_DICT = {'BEAM ENERGY': ('HX:ENG',),
'BEAM INTENSITY': ('LHC.BCTDC.A6R4.B1:BEAM_INTENSITY',
'LHC.BCTDC.A6R4.B2:BEAM_INTENSITY'),
'BUNCH INTENSITY': ('LHC.BCTFR.A6R4.B1:BUNCH_INTENSITY',
'LHC.BCTFR.A6R4.B2:BUNCH_INTENSITY'),
'FILLED BUCKETS': ('LHC.BQM.B1:FILLED_BUCKETS',
'LHC.BQM.B2:FILLED_BUCKETS'),
'VACUUM PRESSURE': ('VGI.439.7L4.R.PR', 'VGI.483.7L4.B2.PR',
                                      'VGI.147.7L4.R.PR', 'VGI.147.7L4.B.PR',
                                      'VGI.141.6L4.B.PR', 'VGI.163.6L4.R.PR'),
'RADMON - TID1': ('SIMA.7L4.4LM19S:TID1_INT',),
'RADMON - SEU': ('SIMA.7L4.4LM19S:SEU_COUNTS_INT', ),
'BSRT SIGMA': ('LHC.BSRT.5L4.B2:FIT_SIGMA_H',
'LHC.BSRT.5L4.B2:FIT_SIGMA_V'),
'BGV TEMPS': ('BGVDA.A7L4.B2:NOVASINA_DEWP_MANIFOLD',
'BGVDA.A7L4.B2:NOVASINA_DEWP_TENT',
'BGVDA.A7L4.B2:NOVASINA_TEMP_MANIFOLD',
'BGVDA.A7L4.B2:NOVASINA_TEMP_TENT',
'BGVDA.A7L4.B2:TEMP_CHM_CONE',
'BGVDA.A7L4.B2:TEMP_CHM_WIN_AL',
'BGVDA.A7L4.B2:TEMP_CHM_WIN_ST',
'BGVDA.A7L4.B2:TEMP_DET_FTI')
}
ISO_FMT = '%Y-%m-%d %H:%M:%S'
logger = logging.getLogger(__name__)
class BGVDataGUI(QMainWindow):
"""
Description.
"""
def __init__(self, **kwargs):
super(BGVDataGUI, self).__init__()
self.setWindowTitle('BGV Event Display')
#######################################################################
# Lists for filling widgets
#######################################################################
tab_dict = {} # For the tab widget {title: widget}
tb_widgets_list = [] # For the Toolbar
tb_actions_list = []
#######################################################################
# Basic Widgets - Status Bar, Menu Bar, Scroll Area, File Explorer
#######################################################################
self.status = StatusBar(reset_str='Ready')
menu = MenuBar()
self.toolbar = ToolBar()
self.sa = ScrollArea(drops=False)
self.da = PgDockArea()
self.sa.setWidget(self.da)
self.tfe = TreeFileExplorer()
# self.tfe.setMinimumWidth(500)
self.tfe.openFile.connect(self.data_file_open)
self.d_tree = QDockWidget('File Explorer')
self.d_tree.setWidget(self.tfe)
values = [v*0.5 for v in range(5)]
self.playstop = PlayStop(delay=True, values=values)
# TODO: check where to connect the signals
tb_widgets_list.append(self.playstop)
tb_widgets_list.append('s')
#######################################################################
# Providers & Algorithms
#######################################################################
# Setting the providers to the event box
self.bgv_ctrl = BgvDataFileCtrl(dock_area=self.da,
flow_ctrl=self.playstop,
tfe=self.tfe)
tab_dict['Evnt Ctrl'] = self.bgv_ctrl
self.trg_ctrl = TriggerDataCtrl(dock_area=self.da)
tab_dict['Trg Ctrl'] = self.trg_ctrl
# Setting the Logging Database
self.logdb_wdt = None
try:
from wpap_logdb import LogDBWidget
self.logdb_wdt = LogDBWidget(def_vars=BGV_LOGDB_DICT,
editable=True, dock_area=self.da)
self.d_logdb_wdt = QDockWidget('LogDB')
self.d_logdb_wdt.setWidget(self.logdb_wdt)
self.logdb_wdt.clicked_add() # just init it with a dummy var
except (ImportError, TypeError) as e:
logger.warning('LogDB controller could not be loaded.\n\t%s' % e)
self.logdb_wdt = None
#######################################################################
# Setting the tabs
#######################################################################
self.tabs = TabbedWidgets(tab_dict.values(),
tab_titles=tab_dict.keys())
self.d_tabs = QDockWidget('Control Tabs')
self.d_tabs.setWidget(self.tabs)
# Setting the ToolBar
tb_actions_list.append(self.d_tabs.toggleViewAction())
tb_actions_list.append(self.d_tree.toggleViewAction())
if self.logdb_wdt is not None:
tb_actions_list.append(self.d_logdb_wdt.toggleViewAction())
self.toolbar = ToolBar(actions=tb_actions_list,
widgets=tb_widgets_list, seq='wa')
# Layout
self.setStatusBar(self.status)
self.setMenuBar(menu)
self.addToolBar(QtCore.Qt.TopToolBarArea, self.toolbar)
self.addDockWidget(QtCore.Qt.LeftDockWidgetArea, self.d_tree)
self.addDockWidget(QtCore.Qt.TopDockWidgetArea, self.d_tabs)
if self.logdb_wdt is not None:
self.addDockWidget(QtCore.Qt.RightDockWidgetArea, self.d_logdb_wdt)
self.d_logdb_wdt.close() # keep it closed at init
self.setCentralWidget(self.sa)
##################################################
# TreeFileExplorer - Open File #
##################################################
def data_file_open(self):
ext = splitext(self.tfe.path)[1]
logger.debug('data_file_open: %s / ext:%s' % (self.tfe.path, ext))
self.status.showMessageDelay('File set: %s' % self.tfe.path)
if ext == '.csv':
thread = Worker(target=self.algo_seq_csv)
thread.start()
elif ext == '.h5':
thread = Worker(target=self.bgv_ctrl.load_algo)
thread.start()
elif ext == '.tdat':
logger.debug('Trigger data file to open')
thread = Worker(target=self.trg_ctrl.trg_retriever.execute,
args=(self.tfe.path,))
thread.start()
elif ext == '.bdat':
logger.debug('Bunch data file to open')
thread = Worker(target=self.trg_ctrl.bch_retriever.execute,
args=(self.tfe.path,))
thread.start()
elif ext == '.rdat':
logger.debug('RadMon file to open')
elif ext == '.mdf':
logger.debug('MDF file to open')
else:
logger.warning('Unrecognized file extension: [%s]' % ext)
##################################################
# Checking prerequisites #
##################################################
    # TODO: To be removed - this is better placed inside the providers class.
# Give prerequisites as keyword, then when enabled, enable them too.
def data_prov_change(self, label, index, state):
logger.debug('data_prov_change: lbl:%s, indx:%d, state:%s'
% (label, index, state))
label = str(label)
if state:
bp = BufferProvider(self.da, title=label)
self.data_providers[label] = bp
self.data_providers[label].setChecked(state)
else:
try:
self.data_providers[label].setChecked(state)
del self.data_providers[label]
except KeyError:
logger.warning('KeyError should not happen')
logger.debug('self.data_providers: %s' % self.data_providers)
| mit | 2,357,605,885,022,094,300 | 42.340102 | 79 | 0.501757 | false |
huongttlan/statsmodels | statsmodels/stats/tests/test_inter_rater.py | 34 | 11513 | # -*- coding: utf-8 -*-
"""
Created on Mon Dec 10 09:18:14 2012
Author: Josef Perktold
"""
import numpy as np
from numpy.testing import assert_almost_equal, assert_equal
from statsmodels.stats.inter_rater import (fleiss_kappa, cohens_kappa,
to_table, aggregate_raters)
class Holder(object):
pass
table0 = np.asarray('''\
1 0 0 0 0 14 1.000
2 0 2 6 4 2 0.253
3 0 0 3 5 6 0.308
4 0 3 9 2 0 0.440
5 2 2 8 1 1 0.330
6 7 7 0 0 0 0.462
7 3 2 6 3 0 0.242
8 2 5 3 2 2 0.176
9 6 5 2 1 0 0.286
10 0 2 2 3 7 0.286'''.split(), float).reshape(10,-1)
table1 = table0[:, 1:-1]
table10 = [[0, 4, 1],
[0, 8, 0],
[0, 1, 5]]
#Fleiss 1971, Fleiss has only the transformed table
diagnoses = np.array( [[4, 4, 4, 4, 4, 4],
[2, 2, 2, 5, 5, 5],
[2, 3, 3, 3, 3, 5],
[5, 5, 5, 5, 5, 5],
[2, 2, 2, 4, 4, 4],
[1, 1, 3, 3, 3, 3],
[3, 3, 3, 3, 5, 5],
[1, 1, 3, 3, 3, 4],
[1, 1, 4, 4, 4, 4],
[5, 5, 5, 5, 5, 5],
[1, 4, 4, 4, 4, 4],
[1, 2, 4, 4, 4, 4],
[2, 2, 2, 3, 3, 3],
[1, 4, 4, 4, 4, 4],
[2, 2, 4, 4, 4, 5],
[3, 3, 3, 3, 3, 5],
[1, 1, 1, 4, 5, 5],
[1, 1, 1, 1, 1, 2],
[2, 2, 4, 4, 4, 4],
[1, 3, 3, 5, 5, 5],
[5, 5, 5, 5, 5, 5],
[2, 4, 4, 4, 4, 4],
[2, 2, 4, 5, 5, 5],
[1, 1, 4, 4, 4, 4],
[1, 4, 4, 4, 4, 5],
[2, 2, 2, 2, 2, 4],
[1, 1, 1, 1, 5, 5],
[2, 2, 4, 4, 4, 4],
[1, 3, 3, 3, 3, 3],
[5, 5, 5, 5, 5, 5]])
diagnoses_rownames = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', ]
diagnoses_colnames = ['rater1', 'rater2', 'rater3', 'rater4', 'rater5', 'rater6', ]
def test_fleiss_kappa():
#currently only example from Wikipedia page
kappa_wp = 0.210
assert_almost_equal(fleiss_kappa(table1), kappa_wp, decimal=3)
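# Illustrative sketch (hypothetical helper, not part of the statsmodels API): the
# Wikipedia value above can be reproduced directly from the defining formula, where
# `table` is the subjects-by-categories count matrix and every row sums to the
# number of raters.
def _fleiss_kappa_by_hand(table):
    table = np.asarray(table, float)
    n_sub = table.shape[0]
    n_rat = table[0].sum()                      # raters per subject
    p_cat = table.sum(0) / (n_sub * n_rat)      # category proportions p_j
    p_sub = ((table**2).sum(1) - n_rat) / (n_rat * (n_rat - 1))  # per-subject agreement P_i
    p_bar, p_exp = p_sub.mean(), (p_cat**2).sum()
    return (p_bar - p_exp) / (1 - p_exp)        # ~0.210 for table1 above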
class CheckCohens(object):
def test_results(self):
res = self.res
res2 = self.res2
res_ = [res.kappa, res.std_kappa, res.kappa_low, res.kappa_upp, res.std_kappa0,
res.z_value, res.pvalue_one_sided, res.pvalue_two_sided]
assert_almost_equal(res_, res2, decimal=4)
assert_equal(str(res), self.res_string)
class UnweightedCohens(CheckCohens):
#comparison to printout of a SAS example
def __init__(self):
#temporary: res instance is at last position
self.res = cohens_kappa(table10)
res10_sas = [0.4842, 0.1380, 0.2137, 0.7547]
res10_sash0 = [0.1484, 3.2626, 0.0006, 0.0011] #for test H0:kappa=0
self.res2 = res10_sas + res10_sash0 #concatenate
self.res_string = '''\
Simple Kappa Coefficient
--------------------------------
Kappa 0.4842
ASE 0.1380
95% Lower Conf Limit 0.2137
95% Upper Conf Limit 0.7547
Test of H0: Simple Kappa = 0
ASE under H0 0.1484
Z 3.2626
One-sided Pr > Z 0.0006
Two-sided Pr > |Z| 0.0011''' + '\n'
def test_option(self):
kappa = cohens_kappa(table10, return_results=False)
assert_almost_equal(kappa, self.res2[0], decimal=4)
class TestWeightedCohens(CheckCohens):
#comparison to printout of a SAS example
def __init__(self):
#temporary: res instance is at last position
self.res = cohens_kappa(table10, weights=[0, 1, 2])
res10w_sas = [0.4701, 0.1457, 0.1845, 0.7558]
res10w_sash0 = [0.1426, 3.2971, 0.0005, 0.0010] #for test H0:kappa=0
self.res2 = res10w_sas + res10w_sash0 #concatenate
self.res_string = '''\
Weighted Kappa Coefficient
--------------------------------
Kappa 0.4701
ASE 0.1457
95% Lower Conf Limit 0.1845
95% Upper Conf Limit 0.7558
Test of H0: Weighted Kappa = 0
ASE under H0 0.1426
Z 3.2971
One-sided Pr > Z 0.0005
Two-sided Pr > |Z| 0.0010''' + '\n'
def test_option(self):
kappa = cohens_kappa(table10, weights=[0, 1, 2], return_results=False)
assert_almost_equal(kappa, self.res2[0], decimal=4)
def test_cohenskappa_weights():
#some tests for equivalent results with different options
np.random.seed(9743678)
table = np.random.randint(0, 10, size=(5,5)) + 5*np.eye(5)
#example aggregation, 2 groups of levels
mat = np.array([[1,1,1, 0,0],[0,0,0,1,1]])
table_agg = np.dot(np.dot(mat, table), mat.T)
res1 = cohens_kappa(table, weights=np.arange(5) > 2, wt='linear')
res2 = cohens_kappa(table_agg, weights=np.arange(2), wt='linear')
assert_almost_equal(res1.kappa, res2.kappa, decimal=14)
assert_almost_equal(res1.var_kappa, res2.var_kappa, decimal=14)
#equivalence toeplitz with linear for special cases
res1 = cohens_kappa(table, weights=2*np.arange(5), wt='linear')
res2 = cohens_kappa(table, weights=2*np.arange(5), wt='toeplitz')
res3 = cohens_kappa(table, weights=res1.weights[0], wt='toeplitz')
#2-Dim weights
res4 = cohens_kappa(table, weights=res1.weights)
assert_almost_equal(res1.kappa, res2.kappa, decimal=14)
assert_almost_equal(res1.var_kappa, res2.var_kappa, decimal=14)
assert_almost_equal(res1.kappa, res3.kappa, decimal=14)
assert_almost_equal(res1.var_kappa, res3.var_kappa, decimal=14)
assert_almost_equal(res1.kappa, res4.kappa, decimal=14)
assert_almost_equal(res1.var_kappa, res4.var_kappa, decimal=14)
#equivalence toeplitz with quadratic for special cases
res1 = cohens_kappa(table, weights=5*np.arange(5)**2, wt='toeplitz')
res2 = cohens_kappa(table, weights=5*np.arange(5), wt='quadratic')
assert_almost_equal(res1.kappa, res2.kappa, decimal=14)
assert_almost_equal(res1.var_kappa, res2.var_kappa, decimal=14)
anxiety = np.array([
3, 3, 3, 4, 5, 5, 2, 3, 5, 2, 2, 6, 1, 5, 2, 2, 1, 2, 4, 3, 3, 6, 4,
6, 2, 4, 2, 4, 3, 3, 2, 3, 3, 3, 2, 2, 1, 3, 3, 4, 2, 1, 4, 4, 3, 2,
1, 6, 1, 1, 1, 2, 3, 3, 1, 1, 3, 3, 2, 2
]).reshape(20,3, order='F')
anxiety_rownames = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', ]
anxiety_colnames = ['rater1', 'rater2', 'rater3', ]
def test_cohens_kappa_irr():
ck_w3 = Holder()
ck_w4 = Holder()
#>r = kappa2(anxiety[,1:2], c(0,0,0,1,1,1))
#> cat_items(r, pref="ck_w3.")
ck_w3.method = "Cohen's Kappa for 2 Raters (Weights: 0,0,0,1,1,1)"
ck_w3.irr_name = 'Kappa'
ck_w3.value = 0.1891892
ck_w3.stat_name = 'z'
ck_w3.statistic = 0.5079002
ck_w3.p_value = 0.6115233
#> r = kappa2(anxiety[,1:2], c(0,0,1,1,2,2))
#> cat_items(r, pref="ck_w4.")
ck_w4.method = "Cohen's Kappa for 2 Raters (Weights: 0,0,1,1,2,2)"
ck_w4.irr_name = 'Kappa'
ck_w4.value = 0.2820513
ck_w4.stat_name = 'z'
ck_w4.statistic = 1.257410
ck_w4.p_value = 0.2086053
ck_w1 = Holder()
ck_w2 = Holder()
ck_w3 = Holder()
ck_w4 = Holder()
#> r = kappa2(anxiety[,2:3])
#> cat_items(r, pref="ck_w1.")
ck_w1.method = "Cohen's Kappa for 2 Raters (Weights: unweighted)"
ck_w1.irr_name = 'Kappa'
ck_w1.value = -0.006289308
ck_w1.stat_name = 'z'
ck_w1.statistic = -0.0604067
ck_w1.p_value = 0.9518317
#> r = kappa2(anxiety[,2:3], "equal")
#> cat_items(r, pref="ck_w2.")
ck_w2.method = "Cohen's Kappa for 2 Raters (Weights: equal)"
ck_w2.irr_name = 'Kappa'
ck_w2.value = 0.1459075
ck_w2.stat_name = 'z'
ck_w2.statistic = 1.282472
ck_w2.p_value = 0.1996772
#> r = kappa2(anxiety[,2:3], "squared")
#> cat_items(r, pref="ck_w3.")
ck_w3.method = "Cohen's Kappa for 2 Raters (Weights: squared)"
ck_w3.irr_name = 'Kappa'
ck_w3.value = 0.2520325
ck_w3.stat_name = 'z'
ck_w3.statistic = 1.437451
ck_w3.p_value = 0.1505898
#> r = kappa2(anxiety[,2:3], c(0,0,1,1,2))
#> cat_items(r, pref="ck_w4.")
ck_w4.method = "Cohen's Kappa for 2 Raters (Weights: 0,0,1,1,2)"
ck_w4.irr_name = 'Kappa'
ck_w4.value = 0.2391304
ck_w4.stat_name = 'z'
ck_w4.statistic = 1.223734
ck_w4.p_value = 0.2210526
all_cases = [(ck_w1, None, None),
(ck_w2, None, 'linear'),
(ck_w2, np.arange(5), None),
(ck_w2, np.arange(5), 'toeplitz'),
(ck_w3, None, 'quadratic'),
(ck_w3, np.arange(5)**2, 'toeplitz'),
(ck_w3, 4*np.arange(5)**2, 'toeplitz'),
(ck_w4, [0,0,1,1,2], 'toeplitz')]
#Note R:irr drops the missing category level 4 and uses the reduced matrix
r = np.histogramdd(anxiety[:,1:], ([1, 2, 3, 4, 6, 7], [1, 2, 3, 4, 6, 7]))
for res2, w, wt in all_cases:
msg = repr(w) + repr(wt)
res1 = cohens_kappa(r[0], weights=w, wt=wt)
assert_almost_equal(res1.kappa, res2.value, decimal=6, err_msg=msg)
assert_almost_equal(res1.z_value, res2.statistic, decimal=5, err_msg=msg)
assert_almost_equal(res1.pvalue_two_sided, res2.p_value, decimal=6, err_msg=msg)
def test_fleiss_kappa_irr():
fleiss = Holder()
#> r = kappam.fleiss(diagnoses)
#> cat_items(r, pref="fleiss.")
fleiss.method = "Fleiss' Kappa for m Raters"
fleiss.irr_name = 'Kappa'
fleiss.value = 0.4302445
fleiss.stat_name = 'z'
fleiss.statistic = 17.65183
fleiss.p_value = 0
data_ = aggregate_raters(diagnoses)[0]
res1_kappa = fleiss_kappa(data_)
assert_almost_equal(res1_kappa, fleiss.value, decimal=7)
def test_to_table():
data = diagnoses
res1 = to_table(data[:,:2]-1, 5)
res0 = np.asarray([[(data[:,:2]-1 == [i,j]).all(1).sum()
for j in range(5)]
for i in range(5)] )
assert_equal(res1[0], res0)
res2 = to_table(data[:,:2])
assert_equal(res2[0], res0)
bins = [0.5, 1.5, 2.5, 3.5, 4.5, 5.5]
res3 = to_table(data[:,:2], bins)
assert_equal(res3[0], res0)
#more than 2 columns
res4 = to_table(data[:,:3]-1, bins=[-0.5, 0.5, 1.5, 2.5, 3.5, 4.5])
res5 = to_table(data[:,:3]-1, bins=5)
assert_equal(res4[0].sum(-1), res0)
assert_equal(res5[0].sum(-1), res0)
def test_aggregate_raters():
data = diagnoses
resf = aggregate_raters(data)
colsum = np.array([26, 26, 30, 55, 43])
assert_equal(resf[0].sum(0), colsum)
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x'#, '--pdb-failures'
], exit=False)
| bsd-3-clause | -4,133,153,892,323,069,400 | 34.643963 | 194 | 0.505429 | false |
t0in4/django | tests/admin_checks/models.py | 281 | 1836 | """
Tests of ModelAdmin system checks logic.
"""
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
class Album(models.Model):
title = models.CharField(max_length=150)
@python_2_unicode_compatible
class Song(models.Model):
title = models.CharField(max_length=150)
album = models.ForeignKey(Album, models.CASCADE)
original_release = models.DateField(editable=False)
class Meta:
ordering = ('title',)
def __str__(self):
return self.title
def readonly_method_on_model(self):
# does nothing
pass
class TwoAlbumFKAndAnE(models.Model):
album1 = models.ForeignKey(Album, models.CASCADE, related_name="album1_set")
album2 = models.ForeignKey(Album, models.CASCADE, related_name="album2_set")
e = models.CharField(max_length=1)
class Author(models.Model):
name = models.CharField(max_length=100)
class Book(models.Model):
name = models.CharField(max_length=100)
subtitle = models.CharField(max_length=100)
price = models.FloatField()
authors = models.ManyToManyField(Author, through='AuthorsBooks')
class AuthorsBooks(models.Model):
author = models.ForeignKey(Author, models.CASCADE)
book = models.ForeignKey(Book, models.CASCADE)
featured = models.BooleanField()
class State(models.Model):
name = models.CharField(max_length=15)
class City(models.Model):
state = models.ForeignKey(State, models.CASCADE)
class Influence(models.Model):
name = models.TextField()
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
| bsd-3-clause | -1,933,984,206,519,987,700 | 26 | 80 | 0.726035 | false |
meziti/bigbullions-test | p2pool/bitcoin/worker_interface.py | 230 | 5901 | from __future__ import division
import StringIO
import json
import random
import sys
from twisted.internet import defer
import p2pool
from p2pool.bitcoin import data as bitcoin_data, getwork
from p2pool.util import expiring_dict, jsonrpc, pack, variable
class _Provider(object):
def __init__(self, parent, long_poll):
self.parent = parent
self.long_poll = long_poll
def rpc_getwork(self, request, data=None):
return self.parent._getwork(request, data, long_poll=self.long_poll)
class _GETableServer(jsonrpc.HTTPServer):
def __init__(self, provider, render_get_func):
jsonrpc.HTTPServer.__init__(self, provider)
self.render_GET = render_get_func
class WorkerBridge(object):
def __init__(self):
self.new_work_event = variable.Event()
def preprocess_request(self, request):
return request, # *args to self.compute
def get_work(self, request):
raise NotImplementedError()
class WorkerInterface(object):
def __init__(self, worker_bridge):
self.worker_bridge = worker_bridge
self.worker_views = {}
self.merkle_root_to_handler = expiring_dict.ExpiringDict(300)
def attach_to(self, res, get_handler=None):
res.putChild('', _GETableServer(_Provider(self, long_poll=False), get_handler))
def repost(request):
request.content = StringIO.StringIO(json.dumps(dict(id=0, method='getwork')))
return s.render_POST(request)
s = _GETableServer(_Provider(self, long_poll=True), repost)
res.putChild('long-polling', s)
@defer.inlineCallbacks
def _getwork(self, request, data, long_poll):
request.setHeader('X-Long-Polling', '/long-polling')
request.setHeader('X-Roll-NTime', 'expire=100')
request.setHeader('X-Is-P2Pool', 'true')
if request.getHeader('Host') is not None:
request.setHeader('X-Stratum', 'stratum+tcp://' + request.getHeader('Host'))
if data is not None:
header = getwork.decode_data(data)
if header['merkle_root'] not in self.merkle_root_to_handler:
print >>sys.stderr, '''Couldn't link returned work's merkle root with its handler. This should only happen if this process was recently restarted!'''
defer.returnValue(False)
defer.returnValue(self.merkle_root_to_handler[header['merkle_root']](header, request.getUser() if request.getUser() is not None else '', '\0'*self.worker_bridge.COINBASE_NONCE_LENGTH))
if p2pool.DEBUG:
id = random.randrange(1000, 10000)
print 'POLL %i START is_long_poll=%r user_agent=%r user=%r' % (id, long_poll, request.getHeader('User-Agent'), request.getUser())
if long_poll:
request_id = request.getClientIP(), request.getHeader('Authorization')
if self.worker_views.get(request_id, self.worker_bridge.new_work_event.times) != self.worker_bridge.new_work_event.times:
if p2pool.DEBUG:
print 'POLL %i PUSH' % (id,)
else:
if p2pool.DEBUG:
print 'POLL %i WAITING' % (id,)
yield self.worker_bridge.new_work_event.get_deferred()
self.worker_views[request_id] = self.worker_bridge.new_work_event.times
x, handler = self.worker_bridge.get_work(*self.worker_bridge.preprocess_request(request.getUser() if request.getUser() is not None else ''))
res = getwork.BlockAttempt(
version=x['version'],
previous_block=x['previous_block'],
merkle_root=bitcoin_data.check_merkle_link(bitcoin_data.hash256(x['coinb1'] + '\0'*self.worker_bridge.COINBASE_NONCE_LENGTH + x['coinb2']), x['merkle_link']),
timestamp=x['timestamp'],
bits=x['bits'],
share_target=x['share_target'],
)
assert res.merkle_root not in self.merkle_root_to_handler
self.merkle_root_to_handler[res.merkle_root] = handler
if p2pool.DEBUG:
print 'POLL %i END identifier=%i' % (id, self.worker_bridge.new_work_event.times)
extra_params = {}
if request.getHeader('User-Agent') == 'Jephis PIC Miner':
# ASICMINER BE Blades apparently have a buffer overflow bug and
# can't handle much extra in the getwork response
extra_params = {}
else:
extra_params = dict(identifier=str(self.worker_bridge.new_work_event.times), submitold=True)
defer.returnValue(res.getwork(**extra_params))
class CachingWorkerBridge(object):
def __init__(self, inner):
self._inner = inner
self.net = self._inner.net
self.COINBASE_NONCE_LENGTH = (inner.COINBASE_NONCE_LENGTH+1)//2
self.new_work_event = inner.new_work_event
self.preprocess_request = inner.preprocess_request
self._my_bits = (self._inner.COINBASE_NONCE_LENGTH - self.COINBASE_NONCE_LENGTH)*8
self._cache = {}
self._times = None
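    # Caching strategy, roughly: one piece of upstream work is cached per
    # preprocessed request key and reused until the next new-work event.  Each
    # hand-out consumes one value of the bridge's own slice of the coinbase nonce
    # (self._my_bits bits, packed onto the end of coinb1), so the same cached work
    # yields distinct coinbases while callers keep COINBASE_NONCE_LENGTH bytes of
    # nonce space to themselves.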
def get_work(self, *args):
if self._times != self.new_work_event.times:
self._cache = {}
self._times = self.new_work_event.times
if args not in self._cache:
x, handler = self._inner.get_work(*args)
self._cache[args] = x, handler, 0
x, handler, nonce = self._cache.pop(args)
res = (
dict(x, coinb1=x['coinb1'] + pack.IntType(self._my_bits).pack(nonce)),
lambda header, user, coinbase_nonce: handler(header, user, pack.IntType(self._my_bits).pack(nonce) + coinbase_nonce),
)
if nonce + 1 != 2**self._my_bits:
self._cache[args] = x, handler, nonce + 1
return res
| gpl-3.0 | -7,274,254,705,382,987,000 | 40.556338 | 196 | 0.604813 | false |
polynomial/nixops | nixops/resources/gce_image.py | 3 | 3281 | # -*- coding: utf-8 -*-
# Automatic provisioning of GCE Images.
import os
import libcloud.common.google
from nixops.util import attr_property
from nixops.gce_common import ResourceDefinition, ResourceState
class GCEImageDefinition(ResourceDefinition):
"""Definition of a GCE Image"""
@classmethod
def get_type(cls):
return "gce-image"
@classmethod
def get_resource_type(cls):
return "gceImages"
def __init__(self, xml):
ResourceDefinition.__init__(self, xml)
self.image_name = self.get_option_value(xml, 'name', str)
self.copy_option(xml, 'sourceUri', str)
self.copy_option(xml, 'description', str, optional = True)
def show_type(self):
return self.get_type()
class GCEImageState(ResourceState):
"""State of a GCE Image"""
image_name = attr_property("gce.name", None)
source_uri = attr_property("gce.sourceUri", None)
description = attr_property("gce.description", None)
@classmethod
def get_type(cls):
return "gce-image"
def __init__(self, depl, name, id):
ResourceState.__init__(self, depl, name, id)
def show_type(self):
return super(GCEImageState, self).show_type()
@property
def resource_id(self):
return self.image_name
nix_name = "gceImages"
@property
def full_name(self):
return "GCE image '{0}'".format(self.image_name)
def image(self):
img = self.connect().ex_get_image(self.image_name)
if img:
img.destroy = img.delete
return img
defn_properties = [ 'description', 'source_uri' ]
def create(self, defn, check, allow_reboot, allow_recreate):
self.no_property_change(defn, 'source_uri')
self.no_property_change(defn, 'description')
self.no_project_change(defn)
self.copy_credentials(defn)
self.image_name = defn.image_name
if check:
image = self.image()
if image:
if self.state == self.UP:
self.handle_changed_property('description', image.extra['description'], can_fix = False)
else:
self.warn_not_supposed_to_exist(valuable_data = True)
self.confirm_destroy(image, self.full_name)
else:
self.warn_missing_resource()
if self.state != self.UP:
self.log("creating {0}...".format(self.full_name))
try:
image = self.connect().ex_copy_image(defn.image_name, defn.source_uri,
description = defn.description)
except libcloud.common.google.ResourceExistsError:
raise Exception("tried creating an image that already exists; "
"please run 'deploy --check' to fix this")
self.state = self.UP
self.copy_properties(defn)
def destroy(self, wipe=False):
if self.state == self.UP:
image = self.image()
if image:
return self.confirm_destroy(image, self.full_name, abort = False)
else:
self.warn("tried to destroy {0} which didn't exist".format(self.full_name))
return True
| lgpl-3.0 | -2,458,562,005,144,659,500 | 29.95283 | 108 | 0.583359 | false |
sergio-incaser/odoo | openerp/service/__init__.py | 380 | 1613 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2013 OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import common
import db
import model
import report
import wsgi_server
import server
#.apidoc title: RPC Services
""" Classes of this module implement the network protocols that the
OpenERP server uses to communicate with remote clients.
Some classes are mostly utilities, whose API need not be visible to
the average user/developer. Study them only if you are about to
implement an extension to the network protocols, or need to debug some
low-level behavior on the wire.
"""
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 3,975,492,726,096,804,000 | 37.404762 | 78 | 0.66088 | false |
MihaiMoldovanu/ansible | test/units/modules/cloud/amazon/test_kinesis_stream.py | 28 | 9780 | import pytest
import unittest
boto3 = pytest.importorskip("boto3")
botocore = pytest.importorskip("botocore")
import ansible.modules.cloud.amazon.kinesis_stream as kinesis_stream
aws_region = 'us-west-2'
class AnsibleKinesisStreamFunctions(unittest.TestCase):
def test_convert_to_lower(self):
example = {
'HasMoreShards': True,
'RetentionPeriodHours': 24,
'StreamName': 'test',
'StreamARN': 'arn:aws:kinesis:east-side:123456789:stream/test',
'StreamStatus': 'ACTIVE'
}
converted_example = kinesis_stream.convert_to_lower(example)
keys = list(converted_example.keys())
keys.sort()
for i in range(len(keys)):
if i == 0:
self.assertEqual(keys[i], 'has_more_shards')
if i == 1:
self.assertEqual(keys[i], 'retention_period_hours')
if i == 2:
self.assertEqual(keys[i], 'stream_arn')
if i == 3:
self.assertEqual(keys[i], 'stream_name')
if i == 4:
self.assertEqual(keys[i], 'stream_status')
def test_make_tags_in_aws_format(self):
example = {
'env': 'development'
}
should_return = [
{
'Key': 'env',
'Value': 'development'
}
]
aws_tags = kinesis_stream.make_tags_in_aws_format(example)
self.assertEqual(aws_tags, should_return)
def test_make_tags_in_proper_format(self):
example = [
{
'Key': 'env',
'Value': 'development'
},
{
'Key': 'service',
'Value': 'web'
}
]
should_return = {
'env': 'development',
'service': 'web'
}
proper_tags = kinesis_stream.make_tags_in_proper_format(example)
self.assertEqual(proper_tags, should_return)
def test_recreate_tags_from_list(self):
example = [('environment', 'development'), ('service', 'web')]
should_return = [
{
'Key': 'environment',
'Value': 'development'
},
{
'Key': 'service',
'Value': 'web'
}
]
aws_tags = kinesis_stream.recreate_tags_from_list(example)
self.assertEqual(aws_tags, should_return)
def test_get_tags(self):
client = boto3.client('kinesis', region_name=aws_region)
success, err_msg, tags = kinesis_stream.get_tags(client, 'test', check_mode=True)
self.assertTrue(success)
should_return = [
{
'Key': 'DryRunMode',
'Value': 'true'
}
]
self.assertEqual(tags, should_return)
def test_find_stream(self):
client = boto3.client('kinesis', region_name=aws_region)
success, err_msg, stream = (
kinesis_stream.find_stream(client, 'test', check_mode=True)
)
should_return = {
'OpenShardsCount': 5,
'ClosedShardsCount': 0,
'ShardsCount': 5,
'HasMoreShards': True,
'RetentionPeriodHours': 24,
'StreamName': 'test',
'StreamARN': 'arn:aws:kinesis:east-side:123456789:stream/test',
'StreamStatus': 'ACTIVE'
}
self.assertTrue(success)
self.assertEqual(stream, should_return)
def test_wait_for_status(self):
client = boto3.client('kinesis', region_name=aws_region)
success, err_msg, stream = (
kinesis_stream.wait_for_status(
client, 'test', 'ACTIVE', check_mode=True
)
)
should_return = {
'OpenShardsCount': 5,
'ClosedShardsCount': 0,
'ShardsCount': 5,
'HasMoreShards': True,
'RetentionPeriodHours': 24,
'StreamName': 'test',
'StreamARN': 'arn:aws:kinesis:east-side:123456789:stream/test',
'StreamStatus': 'ACTIVE'
}
self.assertTrue(success)
self.assertEqual(stream, should_return)
def test_tags_action_create(self):
client = boto3.client('kinesis', region_name=aws_region)
tags = {
'env': 'development',
'service': 'web'
}
success, err_msg = (
kinesis_stream.tags_action(
client, 'test', tags, 'create', check_mode=True
)
)
self.assertTrue(success)
def test_tags_action_delete(self):
client = boto3.client('kinesis', region_name=aws_region)
tags = {
'env': 'development',
'service': 'web'
}
success, err_msg = (
kinesis_stream.tags_action(
client, 'test', tags, 'delete', check_mode=True
)
)
self.assertTrue(success)
def test_tags_action_invalid(self):
client = boto3.client('kinesis', region_name=aws_region)
tags = {
'env': 'development',
'service': 'web'
}
success, err_msg = (
kinesis_stream.tags_action(
client, 'test', tags, 'append', check_mode=True
)
)
self.assertFalse(success)
def test_update_tags(self):
client = boto3.client('kinesis', region_name=aws_region)
tags = {
'env': 'development',
'service': 'web'
}
success, changed, err_msg = (
kinesis_stream.update_tags(
client, 'test', tags, check_mode=True
)
)
self.assertTrue(success)
def test_stream_action_create(self):
client = boto3.client('kinesis', region_name=aws_region)
success, err_msg = (
kinesis_stream.stream_action(
client, 'test', 10, 'create', check_mode=True
)
)
self.assertTrue(success)
def test_stream_action_delete(self):
client = boto3.client('kinesis', region_name=aws_region)
success, err_msg = (
kinesis_stream.stream_action(
client, 'test', 10, 'delete', check_mode=True
)
)
self.assertTrue(success)
def test_stream_action_invalid(self):
client = boto3.client('kinesis', region_name=aws_region)
success, err_msg = (
kinesis_stream.stream_action(
client, 'test', 10, 'append', check_mode=True
)
)
self.assertFalse(success)
def test_retention_action_increase(self):
client = boto3.client('kinesis', region_name=aws_region)
success, err_msg = (
kinesis_stream.retention_action(
client, 'test', 48, 'increase', check_mode=True
)
)
self.assertTrue(success)
def test_retention_action_decrease(self):
client = boto3.client('kinesis', region_name=aws_region)
success, err_msg = (
kinesis_stream.retention_action(
client, 'test', 24, 'decrease', check_mode=True
)
)
self.assertTrue(success)
def test_retention_action_invalid(self):
client = boto3.client('kinesis', region_name=aws_region)
success, err_msg = (
kinesis_stream.retention_action(
client, 'test', 24, 'create', check_mode=True
)
)
self.assertFalse(success)
def test_update_shard_count(self):
client = boto3.client('kinesis', region_name=aws_region)
success, err_msg = (
kinesis_stream.update_shard_count(
client, 'test', 5, check_mode=True
)
)
self.assertTrue(success)
def test_update(self):
client = boto3.client('kinesis', region_name=aws_region)
current_stream = {
'OpenShardsCount': 5,
'ClosedShardsCount': 0,
'ShardsCount': 1,
'HasMoreShards': True,
'RetentionPeriodHours': 24,
'StreamName': 'test',
'StreamARN': 'arn:aws:kinesis:east-side:123456789:stream/test',
'StreamStatus': 'ACTIVE'
}
tags = {
'env': 'development',
'service': 'web'
}
success, changed, err_msg = (
kinesis_stream.update(
client, current_stream, 'test', number_of_shards=2, retention_period=48,
tags=tags, check_mode=True
)
)
self.assertTrue(success)
self.assertTrue(changed)
self.assertEqual(err_msg, 'Kinesis Stream test updated successfully.')
def test_create_stream(self):
client = boto3.client('kinesis', region_name=aws_region)
tags = {
'env': 'development',
'service': 'web'
}
success, changed, err_msg, results = (
kinesis_stream.create_stream(
client, 'test', number_of_shards=10, retention_period=48,
tags=tags, check_mode=True
)
)
should_return = {
'open_shards_count': 5,
'closed_shards_count': 0,
'shards_count': 5,
'has_more_shards': True,
'retention_period_hours': 24,
'stream_name': 'test',
'stream_arn': 'arn:aws:kinesis:east-side:123456789:stream/test',
'stream_status': 'ACTIVE',
'tags': tags,
}
self.assertTrue(success)
self.assertTrue(changed)
self.assertEqual(results, should_return)
self.assertEqual(err_msg, 'Kinesis Stream test updated successfully.')
| gpl-3.0 | 6,306,916,094,429,408,000 | 31.6 | 89 | 0.520757 | false |
sciurus/python_koans | python3/runner/sensei.py | 59 | 9937 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import re
import sys
import os
import glob
from . import helper
from .mockable_test_result import MockableTestResult
from runner import path_to_enlightenment
from libs.colorama import init, Fore, Style
init() # init colorama
class Sensei(MockableTestResult):
def __init__(self, stream):
unittest.TestResult.__init__(self)
self.stream = stream
self.prevTestClassName = None
self.tests = path_to_enlightenment.koans()
self.pass_count = 0
self.lesson_pass_count = 0
self.all_lessons = None
def startTest(self, test):
MockableTestResult.startTest(self, test)
if helper.cls_name(test) != self.prevTestClassName:
self.prevTestClassName = helper.cls_name(test)
if not self.failures:
self.stream.writeln()
self.stream.writeln("{0}{1}Thinking {2}".format(
Fore.RESET, Style.NORMAL, helper.cls_name(test)))
if helper.cls_name(test) not in ['AboutAsserts', 'AboutExtraCredit']:
self.lesson_pass_count += 1
def addSuccess(self, test):
if self.passesCount():
MockableTestResult.addSuccess(self, test)
self.stream.writeln( \
" {0}{1}{2} has expanded your awareness.{3}{4}" \
.format(Fore.GREEN, Style.BRIGHT, test._testMethodName, \
Fore.RESET, Style.NORMAL))
self.pass_count += 1
def addError(self, test, err):
# Having 1 list for errors and 1 list for failures would mess with
# the error sequence
self.addFailure(test, err)
def passesCount(self):
return not (self.failures and helper.cls_name(self.failures[0][0]) != self.prevTestClassName)
def addFailure(self, test, err):
MockableTestResult.addFailure(self, test, err)
def sortFailures(self, testClassName):
table = list()
for test, err in self.failures:
if helper.cls_name(test) == testClassName:
m = re.search("(?<= line )\d+" ,err)
if m:
tup = (int(m.group(0)), test, err)
table.append(tup)
if table:
return sorted(table)
else:
return None
def firstFailure(self):
if not self.failures: return None
table = self.sortFailures(helper.cls_name(self.failures[0][0]))
if table:
return (table[0][1], table[0][2])
else:
return None
def learn(self):
self.errorReport()
self.stream.writeln("")
self.stream.writeln("")
self.stream.writeln(self.report_progress())
if self.failures:
self.stream.writeln(self.report_remaining())
self.stream.writeln("")
self.stream.writeln(self.say_something_zenlike())
if self.failures: sys.exit(-1)
self.stream.writeln(
"\n{0}**************************************************" \
.format(Fore.RESET))
self.stream.writeln("\n{0}That was the last one, well done!" \
.format(Fore.MAGENTA))
self.stream.writeln(
"\nIf you want more, take a look at about_extra_credit_task.py{0}{1}" \
.format(Fore.RESET, Style.NORMAL))
def errorReport(self):
problem = self.firstFailure()
if not problem: return
test, err = problem
self.stream.writeln(" {0}{1}{2} has damaged your "
"karma.".format(Fore.RED, Style.BRIGHT, test._testMethodName))
self.stream.writeln("\n{0}{1}You have not yet reached enlightenment ..." \
.format(Fore.RESET, Style.NORMAL))
self.stream.writeln("{0}{1}{2}".format(Fore.RED, \
Style.BRIGHT, self.scrapeAssertionError(err)))
self.stream.writeln("")
self.stream.writeln("{0}{1}Please meditate on the following code:" \
.format(Fore.RESET, Style.NORMAL))
self.stream.writeln("{0}{1}{2}{3}{4}".format(Fore.YELLOW, Style.BRIGHT, \
self.scrapeInterestingStackDump(err), Fore.RESET, Style.NORMAL))
def scrapeAssertionError(self, err):
if not err: return ""
error_text = ""
count = 0
for line in err.splitlines():
m = re.search("^[^^ ].*$",line)
if m and m.group(0):
count+=1
if count>1:
error_text += (" " + line.strip()).rstrip() + '\n'
return error_text.strip('\n')
def scrapeInterestingStackDump(self, err):
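        # Reduce the traceback to the frames under the koans/ directory: collect
        # each "File ..." line plus the source line that follows it, drop the
        # rest, then colour-highlight the koan filename and line number.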
if not err:
return ""
lines = err.splitlines()
sep = '@@@@@SEP@@@@@'
stack_text = ""
for line in lines:
m = re.search("^ File .*$",line)
if m and m.group(0):
stack_text += '\n' + line
m = re.search("^ \w(\w)+.*$",line)
if m and m.group(0):
stack_text += sep + line
lines = stack_text.splitlines()
stack_text = ""
for line in lines:
m = re.search("^.*[/\\\\]koans[/\\\\].*$",line)
if m and m.group(0):
stack_text += line + '\n'
stack_text = stack_text.replace(sep, '\n').strip('\n')
stack_text = re.sub(r'(about_\w+.py)',
r"{0}\1{1}".format(Fore.BLUE, Fore.YELLOW), stack_text)
stack_text = re.sub(r'(line \d+)',
r"{0}\1{1}".format(Fore.BLUE, Fore.YELLOW), stack_text)
return stack_text
def report_progress(self):
return "You have completed {0} koans and " \
"{1} lessons.".format(
self.pass_count,
self.lesson_pass_count)
def report_remaining(self):
koans_remaining = self.total_koans() - self.pass_count
lessons_remaining = self.total_lessons() - self.lesson_pass_count
return "You are now {0} koans and {1} lessons away from " \
"reaching enlightenment.".format(
koans_remaining,
lessons_remaining)
# Hat's tip to Tim Peters for the zen statements from The 'Zen
# of Python' (http://www.python.org/dev/peps/pep-0020/)
#
# Also a hat's tip to Ara T. Howard for the zen statements from his
# metakoans Ruby Quiz (http://rubyquiz.com/quiz67.html) and
# Edgecase's later permutation in the Ruby Koans
def say_something_zenlike(self):
if self.failures:
turn = self.pass_count % 37
zenness = "";
if turn == 0:
zenness = "Beautiful is better than ugly."
elif turn == 1 or turn == 2:
zenness = "Explicit is better than implicit."
elif turn == 3 or turn == 4:
zenness = "Simple is better than complex."
elif turn == 5 or turn == 6:
zenness = "Complex is better than complicated."
elif turn == 7 or turn == 8:
zenness = "Flat is better than nested."
elif turn == 9 or turn == 10:
zenness = "Sparse is better than dense."
elif turn == 11 or turn == 12:
zenness = "Readability counts."
elif turn == 13 or turn == 14:
zenness = "Special cases aren't special enough to " \
"break the rules."
elif turn == 15 or turn == 16:
zenness = "Although practicality beats purity."
elif turn == 17 or turn == 18:
zenness = "Errors should never pass silently."
elif turn == 19 or turn == 20:
zenness = "Unless explicitly silenced."
elif turn == 21 or turn == 22:
zenness = "In the face of ambiguity, refuse the " \
"temptation to guess."
elif turn == 23 or turn == 24:
zenness = "There should be one-- and preferably only " \
"one --obvious way to do it."
elif turn == 25 or turn == 26:
zenness = "Although that way may not be obvious at " \
"first unless you're Dutch."
elif turn == 27 or turn == 28:
zenness = "Now is better than never."
elif turn == 29 or turn == 30:
zenness = "Although never is often better than right " \
"now."
elif turn == 31 or turn == 32:
zenness = "If the implementation is hard to explain, " \
"it's a bad idea."
elif turn == 33 or turn == 34:
zenness = "If the implementation is easy to explain, " \
"it may be a good idea."
else:
zenness = "Namespaces are one honking great idea -- " \
"let's do more of those!"
return "{0}{1}{2}{3}".format(Fore.CYAN, zenness, Fore.RESET, Style.NORMAL);
else:
return "{0}Nobody ever expects the Spanish Inquisition." \
.format(Fore.CYAN)
# Hopefully this will never ever happen!
return "The temple is collapsing! Run!!!"
def total_lessons(self):
all_lessons = self.filter_all_lessons()
if all_lessons:
return len(all_lessons)
else:
return 0
def total_koans(self):
return self.tests.countTestCases()
def filter_all_lessons(self):
cur_dir = os.path.split(os.path.realpath(__file__))[0]
if not self.all_lessons:
self.all_lessons = glob.glob('{0}/../koans/about*.py'.format(cur_dir))
self.all_lessons = list(filter(lambda filename:
"about_extra_credit" not in filename,
self.all_lessons))
return self.all_lessons
| mit | -5,399,442,842,129,708,000 | 36.217228 | 101 | 0.529838 | false |
tqchen/tvm | tests/python/topi/python/test_topi_group_conv2d.py | 1 | 11141 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Example code to do group convolution."""
import numpy as np
import tvm
from tvm import te
from tvm import autotvm
from tvm.autotvm.task.space import FallbackConfigEntity
from tvm import topi
import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
from tvm.topi.util import get_const_tuple
from common import Int8Fallback
import tvm.testing
_group_conv2d_nchw_implement = {
"generic": (topi.nn.group_conv2d_nchw, topi.generic.schedule_group_conv2d_nchw),
"gpu": (topi.cuda.group_conv2d_nchw, topi.cuda.schedule_group_conv2d_nchw),
}
def verify_group_conv2d_nchw(
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation,
groups,
add_bias=False,
add_relu=False,
):
print(
"Workload: (%d, %d, %d, %d, %d, %d, %d, %d, %d)"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation, groups)
)
in_height = in_width = in_size
A = te.placeholder((batch, in_channel, in_height, in_width), name="A")
W = te.placeholder((num_filter, in_channel // groups, kernel, kernel), name="W")
bias = te.placeholder((num_filter, 1, 1), name="bias")
a_shape = get_const_tuple(A.shape)
w_shape = get_const_tuple(W.shape)
bias_shape = get_const_tuple(bias.shape)
dtype = A.dtype
@memoize("topi.tests.test_topi_group_conv2d.verify_group_conv2d_nchw")
def get_ref_data():
a_np = np.random.uniform(size=a_shape).astype(dtype)
w_np = np.random.uniform(size=w_shape).astype(dtype)
b_np = np.random.uniform(size=bias_shape).astype(dtype)
dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
c_np = tvm.topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding, groups).astype(
dtype
)
if add_bias:
b_np = np.random.uniform(size=bias_shape).astype(dtype)
c_np += b_np
if add_relu:
c_np = np.maximum(c_np, 0)
return a_np, w_np, b_np, c_np
a_np, w_np, b_np, c_np = get_ref_data()
def check_device(device):
ctx = tvm.context(device, 0)
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
with tvm.target.Target(device):
fcompute, fschedule = tvm.topi.testing.dispatch(device, _group_conv2d_nchw_implement)
C = fcompute(A, W, stride, padding, dilation, groups, dtype)
if add_bias:
C = topi.add(C, bias)
if add_relu:
C = topi.nn.relu(C)
s = fschedule([C])
a = tvm.nd.array(a_np, ctx)
w = tvm.nd.array(w_np, ctx)
b = tvm.nd.array(b_np, ctx)
c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), ctx)
if add_bias:
func = tvm.build(
s,
[A, W, bias, C],
device,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d_%d"
% (
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation,
groups,
),
)
func(a, w, b, c)
else:
func = tvm.build(
s,
[A, W, C],
device,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d_%d"
% (
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation,
groups,
),
)
func(a, w, c)
tvm.testing.assert_allclose(c.asnumpy(), c_np, rtol=1e-5)
for device in ["llvm", "cuda"]:
check_device(device)
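# In the int8 NCHWc layout exercised below, output channels are packed innermost in
# blocks of this size; the reference result and the bias placeholder are shaped
# accordingly in verify_group_conv2d_NCHWc_int8.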
oc_block_factor = 4
def verify_group_conv2d_NCHWc_int8(
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation,
groups,
add_bias=False,
add_relu=False,
):
print(
"Workload: (%d, %d, %d, %d, %d, %d, %d, %d, %d)"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation, groups)
)
in_height = in_width = in_size
A = te.placeholder((batch, in_channel, in_height, in_width), name="A", dtype="int8")
W = te.placeholder((num_filter, in_channel // groups, kernel, kernel), name="W", dtype="int8")
bias = te.placeholder(
(num_filter // oc_block_factor, 1, 1, oc_block_factor), name="bias", dtype="int8"
)
a_shape = get_const_tuple(A.shape)
w_shape = get_const_tuple(W.shape)
bias_shape = get_const_tuple(bias.shape)
dtype = A.dtype
@memoize("topi.tests.test_topi_group_conv2d.verify_group_conv2d_NCHWc_int8")
def get_ref_data():
a_np = np.random.randint(low=-128, high=127, size=a_shape).astype(dtype)
w_np = np.random.randint(low=-128, high=128, size=w_shape).astype(dtype)
b_np = np.random.uniform(size=bias_shape).astype(dtype)
dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
c_np = tvm.topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding, groups).astype(
dtype
)
# convert to NCHWc
_, _, out_height, out_width = c_np.shape
c_np = c_np.reshape(
(batch, num_filter // oc_block_factor, oc_block_factor, out_height, out_width)
).transpose(0, 1, 3, 4, 2)
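        # c_np now has shape (batch, num_filter // oc_block_factor, out_height,
        # out_width, oc_block_factor), matching the NCHWc output of the CUDA
        # int8 schedule being tested.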
if add_bias:
b_np = np.random.uniform(size=bias_shape).astype(dtype)
c_np += b_np
if add_relu:
c_np = np.maximum(c_np, 0)
return a_np, w_np, b_np, c_np
a_np, w_np, b_np, c_np = get_ref_data()
def check_device(device):
ctx = tvm.context(device, 0)
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
return
if device == "cuda" and not tvm.contrib.nvcc.have_int8(ctx.compute_version):
print("Skip because int8 intrinsics are not available")
return
print("Running on target: %s" % device)
with tvm.target.Target(device):
C = topi.cuda.group_conv2d_NCHWc_int8(A, W, stride, padding, dilation, groups, dtype)
if add_bias:
C = topi.add(C, bias)
if add_relu:
C = topi.nn.relu(C)
s = topi.cuda.schedule_group_conv2d_NCHWc_int8([C])
a = tvm.nd.array(a_np, ctx)
w = tvm.nd.array(w_np, ctx)
b = tvm.nd.array(b_np, ctx)
c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), ctx)
if add_bias:
func = tvm.build(
s,
[A, W, bias, C],
device,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d_%d"
% (
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation,
groups,
),
)
func(a, w, b, c)
else:
func = tvm.build(
s,
[A, W, C],
device,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d_%d"
% (
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation,
groups,
),
)
func(a, w, c)
tvm.testing.assert_allclose(c.asnumpy(), c_np, rtol=1e-5)
for device in ["cuda"]:
check_device(device)
@tvm.testing.uses_gpu
def test_group_conv2d_nchw():
# ResNeXt-50 workload
verify_group_conv2d_nchw(1, 128, 56, 128, 3, 1, 1, 1, 32)
verify_group_conv2d_nchw(1, 256, 56, 256, 3, 2, 1, 1, 32)
verify_group_conv2d_nchw(1, 256, 28, 256, 3, 1, 1, 1, 32)
verify_group_conv2d_nchw(1, 512, 28, 512, 3, 2, 1, 1, 32)
verify_group_conv2d_nchw(1, 512, 14, 512, 3, 1, 1, 1, 32)
verify_group_conv2d_nchw(1, 1024, 14, 1024, 3, 2, 1, 1, 32)
verify_group_conv2d_nchw(1, 1024, 7, 1024, 3, 1, 1, 1, 32)
# bias, relu
verify_group_conv2d_nchw(1, 128, 56, 128, 3, 1, 1, 1, 32, add_relu=True)
verify_group_conv2d_nchw(1, 128, 56, 128, 3, 1, 1, 1, 32, add_bias=True)
verify_group_conv2d_nchw(1, 128, 56, 128, 3, 1, 1, 1, 32, add_relu=True, add_bias=True)
# dilation
verify_group_conv2d_nchw(1, 128, 56, 128, 3, 1, 1, 2, 32)
# batch size
verify_group_conv2d_nchw(2, 128, 56, 128, 3, 1, 1, 1, 32)
verify_group_conv2d_nchw(9, 128, 56, 128, 3, 1, 1, 1, 32)
@tvm.testing.requires_cuda
def test_group_conv2d_NCHWc_int8():
with Int8Fallback():
# ResNeXt-50 workload
verify_group_conv2d_NCHWc_int8(1, 128, 56, 128, 3, 1, 1, 1, 32)
verify_group_conv2d_NCHWc_int8(1, 256, 56, 256, 3, 2, 1, 1, 32)
verify_group_conv2d_NCHWc_int8(1, 256, 28, 256, 3, 1, 1, 1, 32)
verify_group_conv2d_NCHWc_int8(1, 512, 28, 512, 3, 2, 1, 1, 32)
verify_group_conv2d_NCHWc_int8(1, 512, 14, 512, 3, 1, 1, 1, 32)
verify_group_conv2d_NCHWc_int8(1, 1024, 14, 1024, 3, 2, 1, 1, 32)
verify_group_conv2d_NCHWc_int8(1, 1024, 7, 1024, 3, 1, 1, 1, 32)
# bias, relu
verify_group_conv2d_NCHWc_int8(1, 128, 56, 128, 3, 1, 1, 1, 32, add_relu=True)
verify_group_conv2d_NCHWc_int8(1, 128, 56, 128, 3, 1, 1, 1, 32, add_bias=True)
verify_group_conv2d_NCHWc_int8(
1, 128, 56, 128, 3, 1, 1, 1, 32, add_relu=True, add_bias=True
)
# dilation
verify_group_conv2d_NCHWc_int8(1, 128, 56, 128, 3, 1, 1, 2, 32)
# batch size
verify_group_conv2d_NCHWc_int8(2, 128, 56, 128, 3, 1, 1, 1, 32)
verify_group_conv2d_NCHWc_int8(9, 128, 56, 128, 3, 1, 1, 1, 32)
if __name__ == "__main__":
test_group_conv2d_nchw()
test_group_conv2d_NCHWc_int8()
| apache-2.0 | 8,500,505,208,735,133,000 | 32.760606 | 98 | 0.532986 | false |
landism/pants | tests/python/pants_test/binaries/test_binary_util.py | 8 | 8219 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import re
import mock
from pants.binaries.binary_util import BinaryUtil
from pants.net.http.fetcher import Fetcher
from pants.util.contextutil import temporary_dir
from pants.util.dirutil import safe_open
from pants_test.base_test import BaseTest
class BinaryUtilTest(BaseTest):
"""Tests binary_util's pants_support_baseurls handling."""
class MapFetcher(object):
"""Class which pretends to be a pants.net.http.Fetcher, but is actually a dictionary."""
def __init__(self, read_map):
self._map = read_map
def download(self, url, path_or_fd=None, **kwargs):
if not url in self._map:
raise IOError("404: Virtual URL '{}' does not exist.".format(url))
if not path_or_fd:
raise AssertionError("Expected path_or_fd to be set")
path_or_fd.write(self._map[url])
return path_or_fd
def keys(self):
return self._map.keys()
def values(self):
return self._map.values()
def __getitem__(self, key):
return self._map[key] # Vanilla internal map access (without lambda shenanigans).
@classmethod
def _fake_base(cls, name):
return 'fake-url-{name}'.format(name=name)
@classmethod
def _fake_url(cls, binaries, base, binary_key):
binary_util = BinaryUtil([], 0, '/tmp')
supportdir, version, name = binaries[binary_key]
binary = binary_util._select_binary_base_path(supportdir, version, binary_key)
return '{base}/{binary}'.format(base=base, binary=binary)
def test_timeout(self):
fetcher = mock.create_autospec(Fetcher, spec_set=True)
binary_util = BinaryUtil(baseurls=['http://binaries.example.com'],
timeout_secs=42,
bootstrapdir='/tmp')
self.assertFalse(fetcher.download.called)
with binary_util._select_binary_stream('a-binary', 'a-binary/v1.2/a-binary', fetcher=fetcher):
fetcher.download.assert_called_once_with('http://binaries.example.com/a-binary/v1.2/a-binary',
listener=mock.ANY,
path_or_fd=mock.ANY,
timeout_secs=42)
def test_nobases(self):
"""Tests exception handling if build support urls are improperly specified."""
binary_util = BinaryUtil(baseurls=[], timeout_secs=30, bootstrapdir='/tmp')
with self.assertRaises(binary_util.NoBaseUrlsError):
binary_path = binary_util._select_binary_base_path(supportdir='bin/protobuf',
version='2.4.1',
name='protoc')
with binary_util._select_binary_stream(name='protoc', binary_path=binary_path):
self.fail('Expected acquisition of the stream to raise.')
def test_support_url_multi(self):
"""Tests to make sure existing base urls function as expected."""
with temporary_dir() as invalid_local_files, temporary_dir() as valid_local_files:
binary_util = BinaryUtil(
baseurls=[
'BLATANTLY INVALID URL',
'https://dl.bintray.com/pantsbuild/bin/reasonably-invalid-url',
invalid_local_files,
valid_local_files,
'https://dl.bintray.com/pantsbuild/bin/another-invalid-url',
],
timeout_secs=30,
bootstrapdir='/tmp')
binary_path = binary_util._select_binary_base_path(supportdir='bin/protobuf',
version='2.4.1',
name='protoc')
contents = b'proof'
with safe_open(os.path.join(valid_local_files, binary_path), 'wb') as fp:
fp.write(contents)
with binary_util._select_binary_stream(name='protoc', binary_path=binary_path) as stream:
self.assertEqual(contents, stream())
def test_support_url_fallback(self):
"""Tests fallback behavior with multiple support baseurls.
    Mocks up some dummy baseurls and then swaps out the URL reader to make sure some urls are accessed
and others are not.
"""
fake_base, fake_url = self._fake_base, self._fake_url
bases = [fake_base('apple'), fake_base('orange'), fake_base('banana')]
binary_util = BinaryUtil(bases, 30, '/tmp')
binaries = {t[2]: t for t in (('bin/protobuf', '2.4.1', 'protoc'),
('bin/ivy', '4.3.7', 'ivy'),
('bin/bash', '4.4.3', 'bash'))}
fetcher = self.MapFetcher({
fake_url(binaries, bases[0], 'protoc'): 'SEEN PROTOC',
fake_url(binaries, bases[0], 'ivy'): 'SEEN IVY',
fake_url(binaries, bases[1], 'bash'): 'SEEN BASH',
fake_url(binaries, bases[1], 'protoc'): 'UNSEEN PROTOC 1',
fake_url(binaries, bases[2], 'protoc'): 'UNSEEN PROTOC 2',
fake_url(binaries, bases[2], 'ivy'): 'UNSEEN IVY 2',
})
unseen = [item for item in fetcher.values() if item.startswith('SEEN ')]
for supportdir, version, name in binaries.values():
binary_path = binary_util._select_binary_base_path(supportdir=supportdir,
version=version,
name=name)
with binary_util._select_binary_stream(name=name,
binary_path=binary_path,
fetcher=fetcher) as stream:
result = stream()
self.assertEqual(result, 'SEEN ' + name.upper())
unseen.remove(result)
self.assertEqual(0, len(unseen)) # Make sure we've seen all the SEENs.
def test_select_binary_base_path_linux(self):
binary_util = BinaryUtil([], 0, '/tmp')
def uname_func():
return "linux", "dontcare1", "dontcare2", "dontcare3", "amd64"
self.assertEquals("supportdir/linux/x86_64/name/version",
binary_util._select_binary_base_path("supportdir", "name", "version",
uname_func=uname_func))
def test_select_binary_base_path_darwin(self):
binary_util = BinaryUtil([], 0, '/tmp')
def uname_func():
return "darwin", "dontcare1", "14.9", "dontcare2", "dontcare3",
self.assertEquals("supportdir/mac/10.10/name/version",
binary_util._select_binary_base_path("supportdir", "name", "version",
uname_func=uname_func))
def test_select_binary_base_path_missing_os(self):
binary_util = BinaryUtil([], 0, '/tmp')
def uname_func():
return "vms", "dontcare1", "999.9", "dontcare2", "VAX9"
with self.assertRaisesRegexp(BinaryUtil.MissingMachineInfo,
r'Pants has no binaries for vms'):
binary_util._select_binary_base_path("supportdir", "name", "version", uname_func=uname_func)
def test_select_binary_base_path_missing_version(self):
binary_util = BinaryUtil([], 0, '/tmp')
def uname_func():
return "darwin", "dontcare1", "999.9", "dontcare2", "x86_64"
os_id = ('darwin', '999')
with self.assertRaisesRegexp(BinaryUtil.MissingMachineInfo,
r'Update --binaries-path-by-id to find binaries for '
r'{}'.format(re.escape(repr(os_id)))):
binary_util._select_binary_base_path("supportdir", "name", "version", uname_func=uname_func)
def test_select_binary_base_path_override(self):
binary_util = BinaryUtil([], 0, '/tmp',
{('darwin', '100'): ['skynet', '42']})
def uname_func():
return "darwin", "dontcare1", "100.99", "dontcare2", "t1000"
self.assertEquals("supportdir/skynet/42/name/version",
binary_util._select_binary_base_path("supportdir", "name", "version",
uname_func=uname_func))
| apache-2.0 | 7,448,616,268,781,142,000 | 42.031414 | 100 | 0.583283 | false |
goodwinnk/intellij-community | python/helpers/py2only/docutils/languages/nl.py | 200 | 1865 | # $Id: nl.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: Martijn Pieters <[email protected]>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Dutch-language mappings for language-dependent features of Docutils.
"""
__docformat__ = 'reStructuredText'
labels = {
# fixed: language-dependent
'author': 'Auteur',
'authors': 'Auteurs',
'organization': 'Organisatie',
'address': 'Adres',
'contact': 'Contact',
'version': 'Versie',
'revision': 'Revisie',
'status': 'Status',
'date': 'Datum',
'copyright': 'Copyright',
'dedication': 'Toewijding',
'abstract': 'Samenvatting',
'attention': 'Attentie!',
'caution': 'Let op!',
'danger': '!GEVAAR!',
'error': 'Fout',
'hint': 'Hint',
'important': 'Belangrijk',
'note': 'Opmerking',
'tip': 'Tip',
'warning': 'Waarschuwing',
'contents': 'Inhoud'}
"""Mapping of node class name to label text."""
bibliographic_fields = {
# language-dependent: fixed
'auteur': 'author',
'auteurs': 'authors',
'organisatie': 'organization',
'adres': 'address',
'contact': 'contact',
'versie': 'version',
'revisie': 'revision',
'status': 'status',
'datum': 'date',
'copyright': 'copyright',
'toewijding': 'dedication',
'samenvatting': 'abstract'}
"""Dutch (lowcased) to canonical name mapping for bibliographic fields."""
author_separators = [';', ',']
"""List of separator strings for the 'Authors' bibliographic field. Tried in
order."""
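
# --- Editor's note (illustrative only, not part of the docutils module) ---
# Docutils looks the translations up by key, so e.g. labels['note'] yields
# 'Opmerking' and bibliographic_fields['auteur'] maps back to 'author'.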
| apache-2.0 | 8,914,560,059,603,353,000 | 30.083333 | 76 | 0.612869 | false |
percipient/raven-python | raven/contrib/bottle/utils.py | 25 | 1045 | """
raven.contrib.bottle.utils
~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import logging
from raven.utils.compat import _urlparse
from raven.utils.wsgi import get_headers, get_environ
logger = logging.getLogger(__name__)
def get_data_from_request(request):
urlparts = _urlparse.urlsplit(request.url)
try:
form_dict = request.forms.dict
# we only are about the most recent one
formdata = dict([(k, form_dict[k][-1]) for k in form_dict])
except Exception:
formdata = {}
data = {
'request': {
'url': '%s://%s%s' % (urlparts.scheme, urlparts.netloc, urlparts.path),
'query_string': urlparts.query,
'method': request.method,
'data': formdata,
'headers': dict(get_headers(request.environ)),
'env': dict(get_environ(request.environ)),
}
}
return data
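
# --- Illustrative usage (editor's sketch, not part of the original module) ---
# get_data_from_request() only builds the Sentry-style payload dict; forwarding
# it to a configured Raven client is left to the caller.  The handler below is
# hypothetical and only shows where the helper is typically invoked.
def _example_bottle_error_handler(request):
    # `request` is expected to be a bottle.Request instance
    data = get_data_from_request(request)
    logger.error('captured request data for %s', data['request']['url'])
    return data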
| bsd-3-clause | 8,132,754,021,142,023,000 | 25.794872 | 83 | 0.600957 | false |
sauloal/pycluster | pypy-1.9_64/lib-python/2.7/plat-mac/lib-scriptpackages/Explorer/Microsoft_Internet_Explorer.py | 82 | 3140 | """Suite Microsoft Internet Explorer Suite: Events defined by Internet Explorer
Level 1, version 1
Generated from /Applications/Internet Explorer.app
AETE/AEUT resource version 1/0, language 0, script 0
"""
import aetools
import MacOS
_code = 'MSIE'
class Microsoft_Internet_Explorer_Events:
def GetSource(self, _object=None, _attributes={}, **_arguments):
"""GetSource: Get the HTML source of a browser window
Required argument: Window Identifier of window from which to get the source. No value means get the source from the frontmost window.
Keyword argument _attributes: AppleEvent attribute dictionary
Returns: undocumented, typecode 'TEXT'
"""
_code = 'MSIE'
_subcode = 'SORC'
if _arguments: raise TypeError, 'No optional args expected'
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
def PrintBrowserWindow(self, _object=None, _attributes={}, **_arguments):
"""PrintBrowserWindow: Print contents of browser window (HTML)
Required argument: Window Identifier of the window to print. No value means print the frontmost browser window.
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'misc'
_subcode = 'pWND'
if _arguments: raise TypeError, 'No optional args expected'
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
_argmap_do_script = {
'window' : 'WIND',
}
def do_script(self, _object, _attributes={}, **_arguments):
"""do script: Execute script commands
Required argument: JavaScript text to execute
Keyword argument window: optional Window Identifier (as supplied by the ListWindows event) specifying context in which to execute the script
Keyword argument _attributes: AppleEvent attribute dictionary
Returns: Return value
"""
_code = 'misc'
_subcode = 'dosc'
aetools.keysubst(_arguments, self._argmap_do_script)
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
#
# Indices of types declared in this module
#
_classdeclarations = {
}
_propdeclarations = {
}
_compdeclarations = {
}
_enumdeclarations = {
}
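
# --- Editor's note (hypothetical usage, not part of the generated suite) ---
# aetools-generated suites like this one are normally mixed into an application
# class together with aetools.TalkTo, after which the events defined above can
# be sent directly.  The names below are illustrative only:
#
#   class InternetExplorer(Microsoft_Internet_Explorer_Events, aetools.TalkTo):
#       _signature = 'MSIE'
#
#   ie = InternetExplorer()
#   page_source = ie.GetSource()
#   ie.do_script('window.alert("hello")')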
| mit | -5,842,879,973,335,420,000 | 31.708333 | 148 | 0.627707 | false |
ujenmr/ansible | lib/ansible/modules/cloud/digital_ocean/digital_ocean_certificate_facts.py | 29 | 3228 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Ansible Project
# Copyright: (c) 2018, Abhijeet Kasurde <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: digital_ocean_certificate_facts
short_description: Gather facts about DigitalOcean certificates
description:
- This module can be used to gather facts about DigitalOcean provided certificates.
author: "Abhijeet Kasurde (@Akasurde)"
version_added: "2.6"
options:
certificate_id:
description:
- Certificate ID that can be used to identify and reference a certificate.
required: false
requirements:
- "python >= 2.6"
extends_documentation_fragment: digital_ocean.documentation
'''
EXAMPLES = '''
- name: Gather facts about all certificates
digital_ocean_certificate_facts:
oauth_token: "{{ oauth_token }}"
- name: Gather facts about certificate with given id
digital_ocean_certificate_facts:
oauth_token: "{{ oauth_token }}"
certificate_id: "892071a0-bb95-49bc-8021-3afd67a210bf"
- name: Get not after facts about certificate
digital_ocean_certificate_facts:
register: resp_out
- set_fact:
not_after_date: "{{ item.not_after }}"
loop: "{{ resp_out.data|json_query(name) }}"
vars:
name: "[?name=='web-cert-01']"
- debug: var=not_after_date
'''
RETURN = '''
data:
description: DigitalOcean certificate facts
returned: success
type: list
sample: [
{
"id": "892071a0-bb95-49bc-8021-3afd67a210bf",
"name": "web-cert-01",
"not_after": "2017-02-22T00:23:00Z",
"sha1_fingerprint": "dfcc9f57d86bf58e321c2c6c31c7a971be244ac7",
"created_at": "2017-02-08T16:02:37Z"
},
]
'''
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.digital_ocean import DigitalOceanHelper
from ansible.module_utils._text import to_native
def core(module):
certificate_id = module.params.get('certificate_id', None)
rest = DigitalOceanHelper(module)
base_url = 'certificates?'
if certificate_id is not None:
response = rest.get("%s/%s" % (base_url, certificate_id))
status_code = response.status_code
if status_code != 200:
module.fail_json(msg="Failed to retrieve certificates for DigitalOcean")
resp_json = response.json
certificate = resp_json['certificate']
else:
certificate = rest.get_paginated_data(base_url=base_url, data_key_name='certificates')
module.exit_json(changed=False, data=certificate)
def main():
argument_spec = DigitalOceanHelper.digital_ocean_argument_spec()
argument_spec.update(
certificate_id=dict(type='str', required=False),
)
module = AnsibleModule(argument_spec=argument_spec)
try:
core(module)
except Exception as e:
module.fail_json(msg=to_native(e), exception=format_exc())
if __name__ == '__main__':
main()
| gpl-3.0 | 2,237,879,689,643,854,800 | 27.069565 | 94 | 0.67596 | false |
kajgan/e2 | lib/python/Plugins/SystemPlugins/VideoEnhancement/VideoEnhancement.py | 44 | 11758 | from os import path as os_path
from Components.config import config, ConfigSubsection, ConfigSlider, ConfigSelection, ConfigBoolean, ConfigNothing, NoSave
# VideoEnhancement is the interface to the picture-enhancement entries under /proc/stb/vmpeg/0.
class VideoEnhancement:
firstRun = True
def __init__(self):
self.last_modes_preferred = [ ]
self.createConfig()
def createConfig(self, *args):
config.pep = ConfigSubsection()
config.pep.configsteps = NoSave(ConfigSelection(choices=[1, 5, 10, 25], default = 1))
if os_path.exists("/proc/stb/vmpeg/0/pep_contrast"):
def setContrast(config):
myval = int(config.value * 256)
try:
print "--> setting contrast to: %0.8X" % myval
f = open("/proc/stb/vmpeg/0/pep_contrast", "w")
f.write("%0.8X" % myval)
f.close()
except IOError:
print "couldn't write pep_contrast."
if not VideoEnhancement.firstRun:
self.setConfiguredValues()
config.pep.contrast = ConfigSlider(default=128, limits=(0,256))
config.pep.contrast.addNotifier(setContrast)
else:
config.pep.contrast = NoSave(ConfigNothing())
if os_path.exists("/proc/stb/vmpeg/0/pep_saturation"):
def setSaturation(config):
myval = int(config.value * 256)
try:
print "--> setting saturation to: %0.8X" % myval
f = open("/proc/stb/vmpeg/0/pep_saturation", "w")
f.write("%0.8X" % myval)
f.close()
except IOError:
					print "couldn't write pep_saturation."
if not VideoEnhancement.firstRun:
self.setConfiguredValues()
config.pep.saturation = ConfigSlider(default=128, limits=(0,256))
config.pep.saturation.addNotifier(setSaturation)
else:
config.pep.saturation = NoSave(ConfigNothing())
if os_path.exists("/proc/stb/vmpeg/0/pep_hue"):
def setHue(config):
myval = int(config.value * 256)
try:
print "--> setting hue to: %0.8X" % myval
f = open("/proc/stb/vmpeg/0/pep_hue", "w")
f.write("%0.8X" % myval)
f.close()
except IOError:
print "couldn't write pep_hue."
if not VideoEnhancement.firstRun:
self.setConfiguredValues()
config.pep.hue = ConfigSlider(default=128, limits=(0,256))
config.pep.hue.addNotifier(setHue)
else:
config.pep.hue = NoSave(ConfigNothing())
if os_path.exists("/proc/stb/vmpeg/0/pep_brightness"):
def setBrightness(config):
myval = int(config.value * 256)
try:
print "--> setting brightness to: %0.8X" % myval
f = open("/proc/stb/vmpeg/0/pep_brightness", "w")
f.write("%0.8X" % myval)
f.close()
except IOError:
print "couldn't write pep_brightness."
if not VideoEnhancement.firstRun:
self.setConfiguredValues()
config.pep.brightness = ConfigSlider(default=128, limits=(0,256))
config.pep.brightness.addNotifier(setBrightness)
else:
config.pep.brightness = NoSave(ConfigNothing())
if os_path.exists("/proc/stb/vmpeg/0/pep_block_noise_reduction"):
def setBlock_noise_reduction(config):
myval = int(config.value)
try:
print "--> setting block_noise_reduction to: %0.8X" % myval
f = open("/proc/stb/vmpeg/0/pep_block_noise_reduction", "w")
f.write("%0.8X" % myval)
f.close()
except IOError:
print "couldn't write pep_block_noise_reduction."
if not VideoEnhancement.firstRun:
self.setConfiguredValues()
config.pep.block_noise_reduction = ConfigSlider(default=0, limits=(0,5))
config.pep.block_noise_reduction.addNotifier(setBlock_noise_reduction)
else:
config.pep.block_noise_reduction = NoSave(ConfigNothing())
if os_path.exists("/proc/stb/vmpeg/0/pep_mosquito_noise_reduction"):
def setMosquito_noise_reduction(config):
myval = int(config.value)
try:
print "--> setting mosquito_noise_reduction to: %0.8X" % myval
f = open("/proc/stb/vmpeg/0/pep_mosquito_noise_reduction", "w")
f.write("%0.8X" % myval)
f.close()
except IOError:
print "couldn't write pep_mosquito_noise_reduction."
if not VideoEnhancement.firstRun:
self.setConfiguredValues()
config.pep.mosquito_noise_reduction = ConfigSlider(default=0, limits=(0,5))
config.pep.mosquito_noise_reduction.addNotifier(setMosquito_noise_reduction)
else:
config.pep.mosquito_noise_reduction = NoSave(ConfigNothing())
if os_path.exists("/proc/stb/vmpeg/0/pep_digital_contour_removal"):
def setDigital_contour_removal(config):
myval = int(config.value)
try:
print "--> setting digital_contour_removal to: %0.8X" % myval
f = open("/proc/stb/vmpeg/0/pep_digital_contour_removal", "w")
f.write("%0.8X" % myval)
f.close()
except IOError:
print "couldn't write pep_digital_contour_removal."
if not VideoEnhancement.firstRun:
self.setConfiguredValues()
config.pep.digital_contour_removal = ConfigSlider(default=0, limits=(0,5))
config.pep.digital_contour_removal.addNotifier(setDigital_contour_removal)
else:
config.pep.digital_contour_removal = NoSave(ConfigNothing())
if os_path.exists("/proc/stb/vmpeg/0/pep_split"):
def setSplitMode(config):
try:
print "--> setting splitmode to:",str(config.value)
f = open("/proc/stb/vmpeg/0/pep_split", "w")
f.write(str(config.value))
f.close()
except IOError:
print "couldn't write pep_split."
if not VideoEnhancement.firstRun:
self.setConfiguredValues()
config.pep.split = ConfigSelection(choices={
"off": _("Off"),
"left": _("Left"),
"right": _("Right")},
default = "off")
config.pep.split.addNotifier(setSplitMode)
else:
config.pep.split = NoSave(ConfigNothing())
if os_path.exists("/proc/stb/vmpeg/0/pep_sharpness"):
def setSharpness(config):
myval = int(config.value * 256)
try:
print "--> setting sharpness to: %0.8X" % myval
f = open("/proc/stb/vmpeg/0/pep_sharpness", "w")
f.write("%0.8X" % myval)
f.close()
except IOError:
print "couldn't write pep_sharpness."
if not VideoEnhancement.firstRun:
self.setConfiguredValues()
config.pep.sharpness = ConfigSlider(default=0, limits=(0,256))
config.pep.sharpness.addNotifier(setSharpness)
else:
config.pep.sharpness = NoSave(ConfigNothing())
if os_path.exists("/proc/stb/vmpeg/0/pep_auto_flesh"):
def setAutoflesh(config):
myval = int(config.value)
try:
print "--> setting auto_flesh to: %0.8X" % myval
f = open("/proc/stb/vmpeg/0/pep_auto_flesh", "w")
f.write("%0.8X" % myval)
f.close()
except IOError:
print "couldn't write pep_auto_flesh."
if not VideoEnhancement.firstRun:
self.setConfiguredValues()
config.pep.auto_flesh = ConfigSlider(default=0, limits=(0,4))
config.pep.auto_flesh.addNotifier(setAutoflesh)
else:
config.pep.auto_flesh = NoSave(ConfigNothing())
if os_path.exists("/proc/stb/vmpeg/0/pep_green_boost"):
def setGreenboost(config):
myval = int(config.value)
try:
print "--> setting green_boost to: %0.8X" % myval
f = open("/proc/stb/vmpeg/0/pep_green_boost", "w")
f.write("%0.8X" % myval)
f.close()
except IOError:
print "couldn't write pep_green_boost."
if not VideoEnhancement.firstRun:
self.setConfiguredValues()
config.pep.green_boost = ConfigSlider(default=0, limits=(0,4))
config.pep.green_boost.addNotifier(setGreenboost)
else:
config.pep.green_boost = NoSave(ConfigNothing())
if os_path.exists("/proc/stb/vmpeg/0/pep_blue_boost"):
def setBlueboost(config):
myval = int(config.value)
try:
print "--> setting blue_boost to: %0.8X" % myval
f = open("/proc/stb/vmpeg/0/pep_blue_boost", "w")
f.write("%0.8X" % myval)
f.close()
except IOError:
print "couldn't write pep_blue_boost."
if not VideoEnhancement.firstRun:
self.setConfiguredValues()
config.pep.blue_boost = ConfigSlider(default=0, limits=(0,4))
config.pep.blue_boost.addNotifier(setBlueboost)
else:
config.pep.blue_boost = NoSave(ConfigNothing())
if os_path.exists("/proc/stb/vmpeg/0/pep_dynamic_contrast"):
def setDynamic_contrast(config):
myval = int(config.value)
try:
print "--> setting dynamic_contrast to: %0.8X" % myval
f = open("/proc/stb/vmpeg/0/pep_dynamic_contrast", "w")
f.write("%0.8X" % myval)
f.close()
except IOError:
print "couldn't write pep_dynamic_contrast."
if not VideoEnhancement.firstRun:
self.setConfiguredValues()
config.pep.dynamic_contrast = ConfigSlider(default=0, limits=(0,256))
config.pep.dynamic_contrast.addNotifier(setDynamic_contrast)
else:
config.pep.dynamic_contrast = NoSave(ConfigNothing())
try:
x = config.av.scaler_sharpness.value
except KeyError:
if os_path.exists("/proc/stb/vmpeg/0/pep_scaler_sharpness"):
def setScaler_sharpness(config):
myval = int(config.value)
try:
print "--> setting scaler_sharpness to: %0.8X" % myval
f = open("/proc/stb/vmpeg/0/pep_scaler_sharpness", "w")
f.write("%0.8X" % myval)
f.close()
except IOError:
print "couldn't write pep_scaler_sharpness."
if not VideoEnhancement.firstRun:
self.setConfiguredValues()
config.av.scaler_sharpness = ConfigSlider(default=13, limits=(0,26))
config.av.scaler_sharpness.addNotifier(setScaler_sharpness)
else:
config.av.scaler_sharpness = NoSave(ConfigNothing())
if os_path.exists("/proc/stb/video/hdmi_colorspace") and os_path.exists("/proc/stb/video/hdmi_colorspace_choices"):
def setColour_space(config):
myval = config.value
try:
					print "--> setting color_space to:", myval
f = open("/proc/stb/video/hdmi_colorspace", "w")
f.write(myval)
f.close()
except IOError:
					print "couldn't write color_space."
if not VideoEnhancement.firstRun:
self.setConfiguredValues()
file = open("/proc/stb/video/hdmi_colorspace_choices", "r")
modes = file.readline().split()
file.close()
config.pep.color_space = ConfigSelection(modes, modes[0])
config.pep.color_space.addNotifier(setColour_space)
else:
config.pep.color_space = NoSave(ConfigNothing())
if os_path.exists("/proc/stb/vmpeg/0/pep_scaler_vertical_dejagging"):
def setScaler_vertical_dejagging(configElement):
myval = configElement.value and "enable" or "disable"
try:
print "--> setting scaler_vertical_dejagging to: %s" % myval
open("/proc/stb/vmpeg/0/pep_scaler_vertical_dejagging", "w").write(myval)
except IOError:
print "couldn't write pep_scaler_vertical_dejagging."
if not VideoEnhancement.firstRun:
self.setConfiguredValues()
config.pep.scaler_vertical_dejagging = ConfigBoolean(default=False, descriptions = {False: _("Disabled"), True: _("Enabled")} )
config.pep.scaler_vertical_dejagging.addNotifier(setScaler_vertical_dejagging)
else:
config.pep.scaler_vertical_dejagging = NoSave(ConfigNothing())
if os_path.exists("/proc/stb/vmpeg/0/smooth"):
def setSmooth(configElement):
myval = configElement.value and "enable" or "disable"
try:
print "--> setting smooth to: %s" % myval
open("/proc/stb/vmpeg/0/smooth", "w").write(myval)
except IOError:
print "couldn't write smooth."
if not VideoEnhancement.firstRun:
self.setConfiguredValues()
config.pep.smooth = ConfigBoolean(default=False, descriptions = {False: _("Disabled"), True: _("Enabled")} )
config.pep.smooth.addNotifier(setSmooth)
else:
config.pep.smooth = NoSave(ConfigNothing())
if VideoEnhancement.firstRun:
self.setConfiguredValues()
VideoEnhancement.firstRun = False
def setConfiguredValues(self):
try:
print "--> applying pep values"
f = open("/proc/stb/vmpeg/0/pep_apply", "w")
f.write("1")
f.close()
except IOError:
print "couldn't apply pep values."
VideoEnhancement()
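
# --- Editor's note (illustrative sketch, not part of the plugin) ---
# Every setter in createConfig() follows the same convention: scale the config
# value, render it as an 8-digit hexadecimal string and write it to the
# matching /proc/stb/vmpeg/0 entry, e.g. for contrast:
#
#   myval = int(config.pep.contrast.value * 256)
#   open("/proc/stb/vmpeg/0/pep_contrast", "w").write("%0.8X" % myval)
#
# followed by setConfiguredValues(), which writes "1" to pep_apply so the
# driver picks up the new values.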
| gpl-2.0 | 6,331,951,586,189,336,000 | 31.661111 | 130 | 0.674094 | false |
oudalab/phyllo | phyllo/extractors/anselmDB.py | 1 | 5827 | import sqlite3
import urllib
import re
from urllib.request import urlopen
from bs4 import BeautifulSoup
from phyllo.phyllo_logger import logger
import nltk
from itertools import cycle
nltk.download('punkt')
from nltk import sent_tokenize
anselmSOUP=""
idx = -1
cha_array=[]
suburl = []
verse = []
def parseRes2(soup, title, url, c, author, date, collectiontitle):
chapter = '-'
if url=="http://www.thelatinlibrary.com/anselmepistula.html":
getp = soup.find_all('p')[:-1]
i=len(getp)
for p in getp:
# make sure it's not a paragraph without the main text
try:
if p['class'][0].lower() in ['border', 'pagehead', 'shortborder', 'smallboarder', 'margin',
'internal_navigation']: # these are not part of the main t
continue
except:
pass
num = len(getp) - (i - 1)
if p.findAll('br'):
sentn=p.get_text()
num=1
c.execute("INSERT INTO texts VALUES (?,?,?,?,?,?,?, ?, ?, ?, ?)",
(None, collectiontitle, title, 'Latin', author, date, chapter,
num, sentn.strip(), url, 'prose'))
i=0
else:
i=i+1
ptext = p.string
chapter = str(i) # App. not associated with any chapter
# the first element is an empty string.
ptext = ptext[3:]
num=0
for sentn in sent_tokenize(ptext):
num=num+1
if sentn.strip() == 'men.': # textual fix
sentn = "Amen."
chapter = '-'
c.execute("INSERT INTO texts VALUES (?,?,?,?,?,?,?, ?, ?, ?, ?)",
(None, collectiontitle, title, 'Latin', author, date, chapter,
num, sentn.strip(), url, 'prose'))
else:
getp = soup.find_all('p')[:-1]
geturl=soup.find_all('a', href=True)
global idx
j = 0
#print(getp)
for u in geturl:
if u.get('href') != 'index.html' and u.get('href') != 'classics.html' and u.get('href') != 'christian.html':
suburl.append('http://www.thelatinlibrary.com/anselmproslogion.html'+u.get('href'))
suburl[13]='http://www.thelatinlibrary.com/anselmproslogion.html#capxiii'
suburl[23]='http://www.thelatinlibrary.com/anselmproslogion.html#capxxiii'
suburl.insert(14, 'http://www.thelatinlibrary.com/anselmproslogion.html#capxiv')
suburl.insert(24, 'http://www.thelatinlibrary.com/anselmproslogion.html#capxxiii')
i = len(getp)
for ch in soup.findAll('b'):
chap = ch.string
cha_array.append(''.join([i for i in chap if not i.isdigit()]))
for p in getp:
# make sure it's not a paragraph without the main text
try:
                if p['class'][0].lower() in ['border', 'pagehead', 'shortborder', 'smallboarder', 'margin',
'internal_navigation']: # these are not part of the main text
continue
except:
pass
if p.string == None:
idx = (idx + 1) % len(suburl)
chapter = cha_array[idx]
nurl = suburl[idx]
if p.string:
j=j+1
num=j
sentn = str(p.string)
c.execute("INSERT INTO texts VALUES (?,?,?,?,?,?,?, ?, ?, ?, ?)",
(None, collectiontitle, title, 'Latin', author, date, chapter,
num, sentn, nurl, 'prose'))
def main():
# get proper URLs
siteURL = 'http://www.thelatinlibrary.com'
anselmURL = 'http://www.thelatinlibrary.com/anselm.html'
anselmOPEN = urllib.request.urlopen(anselmURL)
anselmSOUP = BeautifulSoup(anselmOPEN, 'html5lib')
textsURL = []
for a in anselmSOUP.find_all('a', href=True):
link = a['href']
textsURL.append("{}/{}".format(siteURL, link))
# remove some unnecessary urls
while ("http://www.thelatinlibrary.com/index.html" in textsURL):
textsURL.remove("http://www.thelatinlibrary.com/index.html")
textsURL.remove("http://www.thelatinlibrary.com/classics.html")
textsURL.remove("http://www.thelatinlibrary.com/christian.html")
logger.info("\n".join(textsURL))
author = anselmSOUP.title.string
author = author.strip()
collectiontitle = anselmSOUP.span.contents[0].strip()
date = anselmSOUP.span.contents[0].strip().replace('(', '').replace(')', '').replace(u"\u2013", '-')
title = []
for link in anselmSOUP.findAll('a'):
if (link.get('href') and link.get('href') != 'index.html' and link.get('href') != 'classics.html' and link.get('href') != 'christian.html'):
title.append(link.string)
i=0
with sqlite3.connect('texts.db') as db:
c = db.cursor()
c.execute(
'CREATE TABLE IF NOT EXISTS texts (id INTEGER PRIMARY KEY, title TEXT, book TEXT,'
' language TEXT, author TEXT, date TEXT, chapter TEXT, verse TEXT, passage TEXT,'
' link TEXT, documentType TEXT)')
c.execute("DELETE FROM texts WHERE author = 'Anselm'")
for u in textsURL:
uOpen = urllib.request.urlopen(u)
gestSoup = BeautifulSoup(uOpen, 'html5lib')
parseRes2(gestSoup, title[i], u, c, author, date, collectiontitle)
i=i+1
if __name__ == '__main__':
main()
| apache-2.0 | -278,816,513,929,801,020 | 36.589404 | 148 | 0.519478 | false |
binghongcha08/pyQMD | GWP/2D/1.0.1/comp.py | 29 | 1292 | ##!/usr/bin/python
import numpy as np
import pylab as plt
data = np.genfromtxt(fname='t100/wf.dat')
data1 = np.genfromtxt(fname='t300/wf.dat')
data2 = np.genfromtxt(fname='t500/wf.dat')
data3 = np.genfromtxt(fname='t600/wf.dat')
data00 = np.genfromtxt('../spo_1d/t100')
data01 = np.genfromtxt('../spo_1d/t300')
data02 = np.genfromtxt('../spo_1d/t500')
data03 = np.genfromtxt('../spo_1d/t600')
plt.subplot(2,2,1)
plt.xlim(0.5,2.5)
plt.title('t = 100 a.u.')
plt.plot(data[:,0],data[:,1],'r--',linewidth=2,label='LQF')
plt.plot(data00[:,0],data00[:,1],'k-',linewidth=2, label='Exact')
plt.xlabel('x')
plt.ylabel('$\psi^*\psi$')
plt.subplot(2,2,2)
plt.title('t = 300 a.u.')
plt.xlim(0.5,2.5)
plt.plot(data1[:,0],data1[:,1],'r--',linewidth=2)
plt.plot(data01[:,0],data01[:,1],'k-',linewidth=2)
plt.xlabel('x')
plt.ylabel('$\psi^*\psi$')
plt.subplot(2,2,3)
plt.title('t = 500 a.u.')
plt.xlim(0.5,2.5)
plt.plot(data2[:,0],data2[:,1],'r--',linewidth=2)
plt.plot(data02[:,0],data02[:,1],'k-',linewidth=2)
plt.xlabel('x')
plt.ylabel('$\psi^*\psi$')
plt.subplot(2,2,4)
plt.title('t = 600 a.u.')
plt.xlim(0.5,2.5)
plt.plot(data3[:,0],data3[:,1],'r--',linewidth=2)
plt.plot(data03[:,0],data03[:,1],'k-',linewidth=2)
plt.xlabel('x')
plt.ylabel('$\psi^*\psi$')
plt.savefig('wft.pdf')
plt.show()
| gpl-3.0 | 7,853,780,900,382,726,000 | 23.846154 | 65 | 0.629257 | false |
SOKP/external_chromium_org | content/test/gpu/gpu_tests/webgl_conformance.py | 26 | 5126 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import optparse
import os
import sys
import webgl_conformance_expectations
from telemetry import benchmark as benchmark_module
from telemetry.core import util
from telemetry.page import page_set
from telemetry.page import page as page_module
from telemetry.page import page_test
conformance_path = os.path.join(
util.GetChromiumSrcDir(),
'third_party', 'webgl', 'src', 'sdk', 'tests')
conformance_harness_script = r"""
var testHarness = {};
testHarness._allTestSucceeded = true;
testHarness._messages = '';
testHarness._failures = 0;
testHarness._finished = false;
testHarness._originalLog = window.console.log;
testHarness.log = function(msg) {
testHarness._messages += msg + "\n";
testHarness._originalLog.apply(window.console, [msg]);
}
testHarness.reportResults = function(url, success, msg) {
testHarness._allTestSucceeded = testHarness._allTestSucceeded && !!success;
if(!success) {
testHarness._failures++;
if(msg) {
testHarness.log(msg);
}
}
};
testHarness.notifyFinished = function(url) {
testHarness._finished = true;
};
testHarness.navigateToPage = function(src) {
var testFrame = document.getElementById("test-frame");
testFrame.src = src;
};
window.webglTestHarness = testHarness;
window.parent.webglTestHarness = testHarness;
window.console.log = testHarness.log;
window.onerror = function(message, url, line) {
testHarness.reportResults(null, false, message);
testHarness.notifyFinished(null);
};
"""
def _DidWebGLTestSucceed(tab):
return tab.EvaluateJavaScript('webglTestHarness._allTestSucceeded')
def _WebGLTestMessages(tab):
return tab.EvaluateJavaScript('webglTestHarness._messages')
class WebglConformanceValidator(page_test.PageTest):
def __init__(self):
super(WebglConformanceValidator, self).__init__(attempts=1, max_failures=10)
def ValidateAndMeasurePage(self, page, tab, results):
if not _DidWebGLTestSucceed(tab):
raise page_test.Failure(_WebGLTestMessages(tab))
def CustomizeBrowserOptions(self, options):
options.AppendExtraBrowserArgs([
'--disable-gesture-requirement-for-media-playback',
'--disable-domain-blocking-for-3d-apis',
'--disable-gpu-process-crash-limit'
])
class WebglConformancePage(page_module.Page):
def __init__(self, page_set, test):
super(WebglConformancePage, self).__init__(
url='file://' + test, page_set=page_set, base_dir=page_set.base_dir,
name=('WebglConformance.%s' %
test.replace('/', '_').replace('-', '_').
replace('\\', '_').rpartition('.')[0].replace('.', '_')))
self.script_to_evaluate_on_commit = conformance_harness_script
def RunNavigateSteps(self, action_runner):
action_runner.NavigateToPage(self)
action_runner.WaitForJavaScriptCondition(
'webglTestHarness._finished', timeout_in_seconds=180)
class WebglConformance(benchmark_module.Benchmark):
"""Conformance with Khronos WebGL Conformance Tests"""
test = WebglConformanceValidator
@classmethod
def AddTestCommandLineArgs(cls, group):
group.add_option('--webgl-conformance-version',
help='Version of the WebGL conformance tests to run.',
default='1.0.3')
def CreatePageSet(self, options):
tests = self._ParseTests('00_test_list.txt',
options.webgl_conformance_version)
ps = page_set.PageSet(
user_agent_type='desktop',
serving_dirs=[''],
file_path=conformance_path)
for test in tests:
ps.AddPage(WebglConformancePage(ps, test))
return ps
def CreateExpectations(self, page_set):
return webgl_conformance_expectations.WebGLConformanceExpectations()
@staticmethod
def _ParseTests(path, version=None):
test_paths = []
current_dir = os.path.dirname(path)
full_path = os.path.normpath(os.path.join(conformance_path, path))
if not os.path.exists(full_path):
raise Exception('The WebGL conformance test path specified ' +
'does not exist: ' + full_path)
with open(full_path, 'r') as f:
for line in f:
line = line.strip()
if not line:
continue
if line.startswith('//') or line.startswith('#'):
continue
line_tokens = line.split(' ')
i = 0
min_version = None
while i < len(line_tokens):
token = line_tokens[i]
if token == '--min-version':
i += 1
min_version = line_tokens[i]
i += 1
if version and min_version and version < min_version:
continue
test_name = line_tokens[-1]
if '.txt' in test_name:
include_path = os.path.join(current_dir, test_name)
test_paths += WebglConformance._ParseTests(
include_path, version)
else:
test = os.path.join(current_dir, test_name)
test_paths.append(test)
return test_paths
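
# --- Editor's note (illustrative, not taken from the real conformance suite) ---
# _ParseTests() walks files in the 00_test_list.txt format: blank lines and
# lines starting with '//' or '#' are skipped, a '--min-version X' token gates
# the entry on the requested conformance version, entries ending in '.txt' are
# included recursively, and anything else is treated as a test page, e.g.:
#
#   // comments are ignored
#   --min-version 1.0.2 conformance/attribs/gl-enable-vertex-attrib.html
#   conformance/buffers/00_test_list.txt
#   conformance/rendering/culling.html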
| bsd-3-clause | -7,109,474,740,519,521,000 | 29.331361 | 80 | 0.665821 | false |
TinyOS-Camp/DDEA-DEV | Archive/[14_10_03] Data_Collection_Sample/DB access sample code/vtt/sampling_density_VTT.py | 1 | 6262 | import os
import sys
import json
from datetime import datetime
import time
import math
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import pylab as pl
import pickle
######
### Configurations
######
UUID_FILE = 'finland_ids.csv'
#DATA_FOLDER = 'VTT_week/'
DATA_FOLDER = 'data_year/'
DATA_EXT = '.csv'
SCRIPT_DIR = os.path.dirname(__file__)
def saveObjectBinary(obj, filename):
with open(filename, "wb") as output:
pickle.dump(obj, output, pickle.HIGHEST_PROTOCOL)
def loadObjectBinary(filename):
with open(filename, "rb") as input:
obj = pickle.load(input)
return obj
def group_uuids(uuid_list):
sensors_metadata = []
for uuid in uuid_list:
metadata_filepath = os.path.join(SCRIPT_DIR, 'metadata/meta_' + uuid + '.dat')
### open metadata file ###
with open(str(metadata_filepath)) as f:
#metadata = f.read().strip()
#sensors_metadata.append(metadata)
sensor_metadata = json.load(f)
sensors_metadata.append((uuid, sensor_metadata[0]['Path']))
sensors_metadata.sort(key=lambda tup: tup[1])
#print sensors_metadata
return sensors_metadata
### delta_t in ms ; max_sr in ms ###
### start_time = "2013/11/01-00:00:00"
### end_time = "2013/11/07-23:59:59"
def load_uuid_list():
uuid_list = []
uuid_filepath = os.path.join(SCRIPT_DIR, UUID_FILE)
temp_uuid_list = open(uuid_filepath).readlines()
for line in temp_uuid_list:
tokens = line.strip().split(',')
if len(tokens) == 0:
continue
uuid_list.append(tokens[0].strip())
return uuid_list
def print_readings(uuid):
sensor_filepath = os.path.join(SCRIPT_DIR, 'readings/' + uuid + '.dat')
sensors_readings = []
with open(str(sensor_filepath)) as f:
# sensors_metadata.append(f.read())
json_readings = json.load(f)
sensors_readings = json_readings[0]['Readings']
if len(sensors_readings) == 0:
return
for pair in sensors_readings:
if pair[1] is None:
continue
ts = pair[0]
readable_ts = datetime.fromtimestamp(int(ts) / 1000).strftime('%Y-%m-%d %H:%M:%S')
reading = pair[1]
print str(ts), str(readable_ts), reading
def compute_sampling_density(uuid, start_time, end_time, delta_t, max_sr):
### for testing ###
#start_time = "2013/11/01-00:00:00"
#end_time = "2013/11/07-23:59:59"
start_ts = int(time.mktime(datetime.strptime(start_time, "%Y/%m/%d-%H:%M:%S").timetuple()) * 1000)
end_ts = int(time.mktime(datetime.strptime(end_time, "%Y/%m/%d-%H:%M:%S").timetuple()) * 1000)
if (end_ts - start_ts) * 1.0 / delta_t == int ( math.floor((end_ts - start_ts) / delta_t)):
num_intervals = int ( (end_ts - start_ts) / delta_t) + 1
else:
num_intervals = int(math.ceil((end_ts - start_ts) * 1.0 / delta_t))
sampling_density = [0] * num_intervals
###### open reading of uuid - BERKELEY SDH BUILDING ######
# sensor_filepath = os.path.join(SCRIPT_DIR, 'readings/' + uuid + '.dat')
# with open(str(sensor_filepath)) as f:
# # sensors_metadata.append(f.read())
# json_readings = json.load(f)
# sensors_readings = json_readings[0]['Readings']
# if len(sensors_readings) == 0:
# return sampling_density
###### open reading of uuid - VTT FINLAND ######
sensor_filepath = os.path.join(SCRIPT_DIR, DATA_FOLDER + uuid + DATA_EXT)
lines = open(str(sensor_filepath)).readlines()
sensors_readings = []
for line in lines:
pair = []
if line == "":
continue
tokens = line.strip().split(',')
if len(tokens) < 2:
continue
#[curr_date, curr_time] = tokens[0].split(' ')
#print curr_date.strip() + '-' + curr_time.strip()
ts = int(time.mktime(datetime.strptime(tokens[0].strip(), "%Y-%m-%d %H:%M:%S").timetuple()) * 1000)
reading = float(tokens[1].strip())
pair.append(ts)
pair.append(reading)
#print tokens[0].strip(), str(ts), str(reading)
# sensors_metadata.append(f.read())
###for pair in sensors_readings:
curr_ts = int(pair[0])
#reading = float(pair[1])
if curr_ts < start_ts:
continue
if curr_ts > end_ts:
break
if pair[1] is None:
continue
curr_reading_index = int( (curr_ts - start_ts) / delta_t)
sampling_density[curr_reading_index] = sampling_density[curr_reading_index] + 1
### compute density
max_num_samples = delta_t / max_sr
for i in range(0, num_intervals):
sampling_density[i] = sampling_density[i] * 1.0 / max_num_samples
return sampling_density
def compute_sampling_density_matrix(start_time, end_time, delta_t, max_sr):
uuid_list = load_uuid_list()
uuid_list = uuid_list[0:1000]
sampling_density_matrix = []
for uuid in uuid_list:
sampling_density = compute_sampling_density(uuid, start_time, end_time, delta_t, max_sr)
if len(sampling_density) == 0:
continue
sampling_density_matrix.append(sampling_density)
return sampling_density_matrix
def visualize_density_matrix(sampling_density_matrix):
plt.imshow(sampling_density_matrix, interpolation="nearest", cmap=pl.cm.spectral)
pl.savefig('density.png', bbox_inches='tight')
######
### Example
######
#uuid = "GW1.HA1_AS_TE_AH_FM"
start_time = "2013/11/01-00:00:00"
end_time = "2013/11/07-23:59:59"
max_sr = 300000 ### ms; 300000 ms = 5 mins
delta_t = 1200000 ### ms ; 20 mins
sys_argv = sys.argv
if len(sys_argv) == 5:
start_time = sys_argv[1]
end_time = sys_argv[2]
delta_t = int(sys_argv[3])
max_sr = int(sys_argv[4])
### compute sampling density matrix and visualize
sampling_density_matrix = np.asarray(compute_sampling_density_matrix(start_time, end_time, delta_t, max_sr))
visualize_density_matrix(sampling_density_matrix)
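
### Editor's note (worked example, not part of the original script):
### with delta_t = 1200000 ms (20 min) and max_sr = 300000 ms (5 min), each
### interval can hold at most delta_t / max_sr = 4 samples, so an interval
### containing 2 readings gets a sampling density of 2 / 4 = 0.5.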
| gpl-2.0 | -1,401,599,265,538,142,000 | 29.696078 | 108 | 0.583041 | false |
jobiols/odoomrp-wip | mrp_byproduct_operations/__openerp__.py | 27 | 1453 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# Daniel Campos ([email protected]) Date: 29/09/2014
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
{
"name": "MRP byproduct Operations",
"version": "1.0",
"description": """
This module allows setting, on the BoM, the operation in which the secondary
products will be produced.
""",
"author": "OdooMRP team,"
"AvanzOSC,"
"Serv. Tecnol. Avanzados - Pedro M. Baeza",
'website': "http://www.odoomrp.com",
"depends": ['mrp_byproduct', 'mrp_operations_extension'],
"category": "Manufacturing",
"data": ['views/mrp_bom_view.xml',
],
"installable": True
}
| agpl-3.0 | -6,540,752,538,776,238,000 | 37.236842 | 79 | 0.585685 | false |
echristophe/lic | src/RectanglePacker.py | 5 | 11617 | """This library is free software; you can redistribute it and/or
modify it under the terms of the IBM Common Public License as
published by the IBM Corporation; either version 1.0 of the
License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
IBM Common Public License for more details.
You should have received a copy of the IBM Common Public
License along with this library
"""
from bisect import bisect_left
class OutOfSpaceError(Exception): pass
class Point(object):
def __init__(self, x, y):
self.x = x
self.y = y
def __cmp__(self, other):
"""Compares the starting position of height slices"""
return self.x - other.x
class RectanglePacker(object):
"""Base class for rectangle packing algorithms
By uniting all rectangle packers under this common base class, you can
easily switch between different algorithms to find the most efficient or
performant one for a given job.
An almost exhaustive list of packing algorithms can be found here:
http://www.csc.liv.ac.uk/~epa/surveyhtml.html"""
def __init__(self, packingAreaWidth, packingAreaHeight):
"""Initializes a new rectangle packer
packingAreaWidth: Maximum width of the packing area
packingAreaHeight: Maximum height of the packing area"""
self.packingAreaWidth = packingAreaWidth
self.packingAreaHeight = packingAreaHeight
def Pack(self, rectangleWidth, rectangleHeight):
"""Allocates space for a rectangle in the packing area
rectangleWidth: Width of the rectangle to allocate
rectangleHeight: Height of the rectangle to allocate
Returns the location at which the rectangle has been placed"""
return self.TryPack(rectangleWidth, rectangleHeight)
def TryPack(self, rectangleWidth, rectangleHeight):
"""Tries to allocate space for a rectangle in the packing area
rectangleWidth: Width of the rectangle to allocate
rectangleHeight: Height of the rectangle to allocate
        Returns a Point instance if space for the rectangle could be
        allocated, otherwise returns None"""
raise NotImplementedError
class CygonRectanglePacker(RectanglePacker):
"""
Packer using a custom algorithm by Markus 'Cygon' Ewald
Algorithm conceived by Markus Ewald (cygon at nuclex dot org), though
I'm quite sure I'm not the first one to come up with it :)
The algorithm always places rectangles as low as possible in the packing
area. So, for any new rectangle that is to be added, the packer has to
determine the X coordinate at which the rectangle can have the lowest
overall height without intersecting any other rectangles.
To quickly discover these locations, the packer uses a sophisticated
data structure that stores the upper silhouette of the packing area. When
    a new rectangle needs to be added, only the silhouette edges need to be
    analyzed to find the position where the rectangle would achieve the
    lowest placement."""
def __init__(self, packingAreaWidth, packingAreaHeight):
"""Initializes a new rectangle packer
packingAreaWidth: Maximum width of the packing area
packingAreaHeight: Maximum height of the packing area"""
RectanglePacker.__init__(self, packingAreaWidth, packingAreaHeight)
# Stores the height silhouette of the rectangles
self.heightSlices = []
# At the beginning, the packing area is a single slice of height 0
self.heightSlices.append(Point(0,0))
def TryPack(self, rectangleWidth, rectangleHeight):
"""Tries to allocate space for a rectangle in the packing area
rectangleWidth: Width of the rectangle to allocate
rectangleHeight: Height of the rectangle to allocate
        Returns a Point instance if space for the rectangle could be
        allocated, otherwise returns None"""
# If the rectangle is larger than the packing area in any dimension,
# it will never fit!
if rectangleWidth > self.packingAreaWidth or rectangleHeight > self.packingAreaHeight:
return None
# Determine the placement for the new rectangle
placement = self.tryFindBestPlacement(rectangleWidth, rectangleHeight)
# If a place for the rectangle could be found, update the height slice
# table to mark the region of the rectangle as being taken.
if placement:
self.integrateRectangle(placement.x, rectangleWidth, placement.y + rectangleHeight)
return placement
def tryFindBestPlacement(self, rectangleWidth, rectangleHeight):
"""Finds the best position for a rectangle of the given dimensions
rectangleWidth: Width of the rectangle to find a position for
rectangleHeight: Height of the rectangle to find a position for
Returns a Point instance if a valid placement for the rectangle could
be found, otherwise returns None"""
# Slice index, vertical position and score of the best placement we
# could find
bestSliceIndex = -1 # Slice index where the best placement was found
bestSliceY = 0 # Y position of the best placement found
# lower == better!
bestScore = self.packingAreaWidth * self.packingAreaHeight
# This is the counter for the currently checked position. The search
# works by skipping from slice to slice, determining the suitability
# of the location for the placement of the rectangle.
leftSliceIndex = 0
# Determine the slice in which the right end of the rectangle is located
rightSliceIndex = bisect_left(self.heightSlices, Point(rectangleWidth, 0))
if rightSliceIndex < 0:
rightSliceIndex = ~rightSliceIndex
while rightSliceIndex <= len(self.heightSlices):
# Determine the highest slice within the slices covered by the
# rectangle at its current placement. We cannot put the rectangle
# any lower than this without overlapping the other rectangles.
highest = self.heightSlices[leftSliceIndex].y
for index in xrange(leftSliceIndex + 1, rightSliceIndex):
if self.heightSlices[index].y > highest:
highest = self.heightSlices[index].y
# Only process this position if it doesn't leave the packing area
if highest + rectangleHeight < self.packingAreaHeight:
score = highest
if score < bestScore:
bestSliceIndex = leftSliceIndex
bestSliceY = highest
bestScore = score
# Advance the starting slice to the next slice start
leftSliceIndex += 1
if leftSliceIndex >= len(self.heightSlices):
break
# Advance the ending slice until we're on the proper slice again,
# given the new starting position of the rectangle.
rightRectangleEnd = self.heightSlices[leftSliceIndex].x + rectangleWidth
while rightSliceIndex <= len(self.heightSlices):
if rightSliceIndex == len(self.heightSlices):
rightSliceStart = self.packingAreaWidth
else:
rightSliceStart = self.heightSlices[rightSliceIndex].x
# Is this the slice we're looking for?
if rightSliceStart > rightRectangleEnd:
break
rightSliceIndex += 1
# If we crossed the end of the slice array, the rectangle's right
# end has left the packing area, and thus, our search ends.
if rightSliceIndex > len(self.heightSlices):
break
# Return the best placement we found for this rectangle. If the
# rectangle didn't fit anywhere, the slice index will still have its
# initialization value of -1 and we can report that no placement
# could be found.
if bestSliceIndex == -1:
return None
else:
return Point(self.heightSlices[bestSliceIndex].x, bestSliceY)
def integrateRectangle(self, left, width, bottom):
"""Integrates a new rectangle into the height slice table
left: Position of the rectangle's left side
width: Width of the rectangle
bottom: Position of the rectangle's lower side"""
# Find the first slice that is touched by the rectangle
startSlice = bisect_left(self.heightSlices, Point(left, 0))
# Did we score a direct hit on an existing slice start?
if startSlice >= 0:
# We scored a direct hit, so we can replace the slice we have hit
firstSliceOriginalHeight = self.heightSlices[startSlice].y
self.heightSlices[startSlice] = Point(left, bottom)
else: # No direct hit, slice starts inside another slice
# Add a new slice after the slice in which we start
startSlice = ~startSlice
firstSliceOriginalHeight = self.heightSlices[startSlice - 1].y
self.heightSlices.insert(startSlice, Point(left, bottom))
right = left + width
startSlice += 1
# Special case, the rectangle started on the last slice, so we cannot
# use the start slice + 1 for the binary search and the possibly
# already modified start slice height now only remains in our temporary
# firstSliceOriginalHeight variable
if startSlice >= len(self.heightSlices):
# If the slice ends within the last slice (usual case, unless it
# has the exact same width the packing area has), add another slice
# to return to the original height at the end of the rectangle.
if right < self.packingAreaWidth:
self.heightSlices.append(Point(right, firstSliceOriginalHeight))
else: # The rectangle doesn't start on the last slice
endSlice = bisect_left(self.heightSlices, Point(right,0), \
startSlice, len(self.heightSlices))
# Another direct hit on the final slice's end?
if endSlice > 0:
del self.heightSlices[startSlice:endSlice]
else: # No direct hit, rectangle ends inside another slice
# Make index from negative bisect_left() result
endSlice = ~endSlice
# Find out to which height we need to return at the right end of
# the rectangle
if endSlice == startSlice:
returnHeight = firstSliceOriginalHeight
else:
returnHeight = self.heightSlices[endSlice - 1].y
# Remove all slices covered by the rectangle and begin a new
# slice at its end to return back to the height of the slice on
# which the rectangle ends.
del self.heightSlices[startSlice:endSlice]
if right < self.packingAreaWidth:
self.heightSlices.insert(startSlice, Point(right, returnHeight))
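
# --- Illustrative usage (editor's sketch, not part of the original library) ---
# The packer keeps only the upper silhouette of everything placed so far, so
# each TryPack() call returns the lowest available position for the rectangle,
# or None once the packing area cannot accommodate it.
if __name__ == '__main__':
    packer = CygonRectanglePacker(128, 128)
    for width, height in [(32, 32), (64, 16), (32, 48)]:
        placement = packer.TryPack(width, height)
        if placement is None:
            print('no room for a %dx%d rectangle' % (width, height))
        else:
            print('%dx%d placed at (%d, %d)' % (width, height, placement.x, placement.y))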
| gpl-3.0 | 530,481,519,200,735,170 | 44.282869 | 95 | 0.648188 | false |
dhelbegor/omelette | project_name/settings/base.py | 1 | 3526 | """
Django settings for {{ project_name }} project.
Generated by 'django-admin startproject' using Django {{ django_version }}.
For more information on this file, see
https://docs.djangoproject.com/en/{{ docs_version }}/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/{{ docs_version }}/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '{{ secret_key }}'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = []
# Application definition
DEFAULT_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
THIRD_PARTY_APPS = [
]
LOCAL_APPS = [
'apps.core',
]
INSTALLED_APPS = DEFAULT_APPS + THIRD_PARTY_APPS + LOCAL_APPS
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = '{{ project_name }}.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = '{{ project_name }}.wsgi.application'
# Database
# https://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'database.db'),
}
}
# Password validation
# https://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/{{ docs_version }}/topics/i18n/
LANGUAGE_CODE = 'pt-br'
TIME_ZONE = 'America/Sao_Paulo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/{{ docs_version }}/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
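
# --- Editor's note (hypothetical example, not part of this template) ---
# Deployment-specific settings are usually kept in a sibling module that
# imports this base module and overrides the values that differ, e.g. a
# settings/production.py along the lines of:
#
#   from .base import *  # noqa
#
#   ALLOWED_HOSTS = ['example.com']
#   SECRET_KEY = os.environ['DJANGO_SECRET_KEY']
#   DATABASES['default']['NAME'] = '/var/lib/{{ project_name }}/database.db'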
| mit | -1,956,583,953,818,279,400 | 25.511278 | 93 | 0.681225 | false |
dahlstrom-g/intellij-community | python/testData/refactoring/move/optimizeImportsAfterMoveInvalidatesMembersToBeMoved/after/src/src.py | 22 | 2358 | # -*- coding: utf-8 -*-
# (c) 2017 Tuomas Airaksinen
#
# This file is part of Serviceform.
#
# Serviceform is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Serviceform is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Serviceform. If not, see <http://www.gnu.org/licenses/>.
import datetime
import string
import logging
from enum import Enum
from typing import Tuple, Set, Optional, Sequence, Iterator, Iterable, TYPE_CHECKING
from colorful.fields import RGBColorField
from django.conf import settings
from django.contrib.contenttypes.fields import GenericRelation
from django.db import models
from django.db.models import Prefetch
from django.template.loader import render_to_string
from django.urls import reverse
from django.utils import timezone
from django.utils.functional import cached_property
from django.utils.html import format_html
from django.utils.translation import ugettext_lazy as _
from guardian.shortcuts import get_users_with_perms
from select2 import fields as select2_fields
from serviceform.tasks.models import Task
from .. import emails, utils
from ..utils import ColorStr
from .mixins import CopyMixin
from .people import Participant, ResponsibilityPerson
from .email import EmailTemplate
from .participation import QuestionAnswer
if TYPE_CHECKING:
from .participation import ParticipationActivity, ParticipationActivityChoice
local_tz = timezone.get_default_timezone()
logger = logging.getLogger(__name__)
def imported_symbols_anchor():
print(RGBColorField, settings, GenericRelation, Prefetch, render_to_string, reverse, format_html,
get_users_with_perms, select2_fields, Task, emails, CopyMixin, Participant, ResponsibilityPerson,
EmailTemplate, QuestionAnswer, ParticipationActivity, ParticipationActivityChoice, datetime, Enum, string,
Tuple, Set, Optional, Sequence, Iterator, Iterable, _, cached_property, models, utils, ColorStr)
| apache-2.0 | -4,469,407,654,177,266,000 | 38.966102 | 116 | 0.790925 | false |
globau/servo | tests/wpt/css-tests/tools/html5lib/html5lib/tests/test_treewalkers.py | 429 | 13692 | from __future__ import absolute_import, division, unicode_literals
import os
import sys
import unittest
import warnings
from difflib import unified_diff
try:
unittest.TestCase.assertEqual
except AttributeError:
unittest.TestCase.assertEqual = unittest.TestCase.assertEquals
from .support import get_data_files, TestData, convertExpected
from html5lib import html5parser, treewalkers, treebuilders, constants
def PullDOMAdapter(node):
from xml.dom import Node
from xml.dom.pulldom import START_ELEMENT, END_ELEMENT, COMMENT, CHARACTERS
if node.nodeType in (Node.DOCUMENT_NODE, Node.DOCUMENT_FRAGMENT_NODE):
for childNode in node.childNodes:
for event in PullDOMAdapter(childNode):
yield event
elif node.nodeType == Node.DOCUMENT_TYPE_NODE:
raise NotImplementedError("DOCTYPE nodes are not supported by PullDOM")
elif node.nodeType == Node.COMMENT_NODE:
yield COMMENT, node
elif node.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
yield CHARACTERS, node
elif node.nodeType == Node.ELEMENT_NODE:
yield START_ELEMENT, node
for childNode in node.childNodes:
for event in PullDOMAdapter(childNode):
yield event
yield END_ELEMENT, node
else:
raise NotImplementedError("Node type not supported: " + str(node.nodeType))
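# Tree implementations exercised by these tests: each entry provides a tree
# builder and a tree walker, plus an optional adapter applied to the built
# document before walking it.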
treeTypes = {
"DOM": {"builder": treebuilders.getTreeBuilder("dom"),
"walker": treewalkers.getTreeWalker("dom")},
"PullDOM": {"builder": treebuilders.getTreeBuilder("dom"),
"adapter": PullDOMAdapter,
"walker": treewalkers.getTreeWalker("pulldom")},
}
# Try whatever etree implementations are available, from a list that is
# "supposed" to work
try:
import xml.etree.ElementTree as ElementTree
except ImportError:
pass
else:
treeTypes['ElementTree'] = \
{"builder": treebuilders.getTreeBuilder("etree", ElementTree),
"walker": treewalkers.getTreeWalker("etree", ElementTree)}
try:
import xml.etree.cElementTree as ElementTree
except ImportError:
pass
else:
treeTypes['cElementTree'] = \
{"builder": treebuilders.getTreeBuilder("etree", ElementTree),
"walker": treewalkers.getTreeWalker("etree", ElementTree)}
try:
import lxml.etree as ElementTree # flake8: noqa
except ImportError:
pass
else:
treeTypes['lxml_native'] = \
{"builder": treebuilders.getTreeBuilder("lxml"),
"walker": treewalkers.getTreeWalker("lxml")}
try:
from genshi.core import QName, Attrs
from genshi.core import START, END, TEXT, COMMENT, DOCTYPE
except ImportError:
pass
else:
def GenshiAdapter(tree):
text = None
for token in treewalkers.getTreeWalker("dom")(tree):
type = token["type"]
if type in ("Characters", "SpaceCharacters"):
if text is None:
text = token["data"]
else:
text += token["data"]
elif text is not None:
yield TEXT, text, (None, -1, -1)
text = None
if type in ("StartTag", "EmptyTag"):
if token["namespace"]:
name = "{%s}%s" % (token["namespace"], token["name"])
else:
name = token["name"]
attrs = Attrs([(QName("{%s}%s" % attr if attr[0] is not None else attr[1]), value)
for attr, value in token["data"].items()])
yield (START, (QName(name), attrs), (None, -1, -1))
if type == "EmptyTag":
type = "EndTag"
if type == "EndTag":
if token["namespace"]:
name = "{%s}%s" % (token["namespace"], token["name"])
else:
name = token["name"]
yield END, QName(name), (None, -1, -1)
elif type == "Comment":
yield COMMENT, token["data"], (None, -1, -1)
elif type == "Doctype":
yield DOCTYPE, (token["name"], token["publicId"],
token["systemId"]), (None, -1, -1)
else:
pass # FIXME: What to do?
if text is not None:
yield TEXT, text, (None, -1, -1)
treeTypes["genshi"] = \
{"builder": treebuilders.getTreeBuilder("dom"),
"adapter": GenshiAdapter,
"walker": treewalkers.getTreeWalker("genshi")}
def concatenateCharacterTokens(tokens):
charactersToken = None
for token in tokens:
type = token["type"]
if type in ("Characters", "SpaceCharacters"):
if charactersToken is None:
charactersToken = {"type": "Characters", "data": token["data"]}
else:
charactersToken["data"] += token["data"]
else:
if charactersToken is not None:
yield charactersToken
charactersToken = None
yield token
if charactersToken is not None:
yield charactersToken
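# Serialize a walker token stream into the indented text format used by the
# tree-construction expectation files.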
def convertTokens(tokens):
output = []
indent = 0
for token in concatenateCharacterTokens(tokens):
type = token["type"]
if type in ("StartTag", "EmptyTag"):
if (token["namespace"] and
token["namespace"] != constants.namespaces["html"]):
if token["namespace"] in constants.prefixes:
name = constants.prefixes[token["namespace"]]
else:
name = token["namespace"]
name += " " + token["name"]
else:
name = token["name"]
output.append("%s<%s>" % (" " * indent, name))
indent += 2
attrs = token["data"]
if attrs:
# TODO: Remove this if statement, attrs should always exist
for (namespace, name), value in sorted(attrs.items()):
if namespace:
if namespace in constants.prefixes:
outputname = constants.prefixes[namespace]
else:
outputname = namespace
outputname += " " + name
else:
outputname = name
output.append("%s%s=\"%s\"" % (" " * indent, outputname, value))
if type == "EmptyTag":
indent -= 2
elif type == "EndTag":
indent -= 2
elif type == "Comment":
output.append("%s<!-- %s -->" % (" " * indent, token["data"]))
elif type == "Doctype":
if token["name"]:
if token["publicId"]:
output.append("""%s<!DOCTYPE %s "%s" "%s">""" %
(" " * indent, token["name"],
token["publicId"],
token["systemId"] and token["systemId"] or ""))
elif token["systemId"]:
output.append("""%s<!DOCTYPE %s "" "%s">""" %
(" " * indent, token["name"],
token["systemId"]))
else:
output.append("%s<!DOCTYPE %s>" % (" " * indent,
token["name"]))
else:
output.append("%s<!DOCTYPE >" % (" " * indent,))
elif type in ("Characters", "SpaceCharacters"):
output.append("%s\"%s\"" % (" " * indent, token["data"]))
else:
pass # TODO: what to do with errors?
return "\n".join(output)
import re
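# Matches a run of consecutive attribute lines (same indentation) so they can
# be sorted into a canonical order before comparison.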
attrlist = re.compile(r"^(\s+)\w+=.*(\n\1\w+=.*)+", re.M)
def sortattrs(x):
lines = x.group(0).split("\n")
lines.sort()
return "\n".join(lines)
class TokenTestCase(unittest.TestCase):
def test_all_tokens(self):
expected = [
{'data': {}, 'type': 'StartTag', 'namespace': 'http://www.w3.org/1999/xhtml', 'name': 'html'},
{'data': {}, 'type': 'StartTag', 'namespace': 'http://www.w3.org/1999/xhtml', 'name': 'head'},
{'data': {}, 'type': 'EndTag', 'namespace': 'http://www.w3.org/1999/xhtml', 'name': 'head'},
{'data': {}, 'type': 'StartTag', 'namespace': 'http://www.w3.org/1999/xhtml', 'name': 'body'},
{'data': 'a', 'type': 'Characters'},
{'data': {}, 'type': 'StartTag', 'namespace': 'http://www.w3.org/1999/xhtml', 'name': 'div'},
{'data': 'b', 'type': 'Characters'},
{'data': {}, 'type': 'EndTag', 'namespace': 'http://www.w3.org/1999/xhtml', 'name': 'div'},
{'data': 'c', 'type': 'Characters'},
{'data': {}, 'type': 'EndTag', 'namespace': 'http://www.w3.org/1999/xhtml', 'name': 'body'},
{'data': {}, 'type': 'EndTag', 'namespace': 'http://www.w3.org/1999/xhtml', 'name': 'html'}
]
for treeName, treeCls in treeTypes.items():
p = html5parser.HTMLParser(tree=treeCls["builder"])
document = p.parse("<html><head></head><body>a<div>b</div>c</body></html>")
document = treeCls.get("adapter", lambda x: x)(document)
output = treeCls["walker"](document)
for expectedToken, outputToken in zip(expected, output):
self.assertEqual(expectedToken, outputToken)
def runTreewalkerTest(innerHTML, input, expected, errors, treeClass):
warnings.resetwarnings()
warnings.simplefilter("error")
try:
p = html5parser.HTMLParser(tree=treeClass["builder"])
if innerHTML:
document = p.parseFragment(input, innerHTML)
else:
document = p.parse(input)
except constants.DataLossWarning:
# Ignore testcases we know we don't pass
return
document = treeClass.get("adapter", lambda x: x)(document)
try:
output = convertTokens(treeClass["walker"](document))
output = attrlist.sub(sortattrs, output)
expected = attrlist.sub(sortattrs, convertExpected(expected))
diff = "".join(unified_diff([line + "\n" for line in expected.splitlines()],
[line + "\n" for line in output.splitlines()],
"Expected", "Received"))
assert expected == output, "\n".join([
"", "Input:", input,
"", "Expected:", expected,
"", "Received:", output,
"", "Diff:", diff,
])
except NotImplementedError:
pass # Amnesty for those that confess...
def test_treewalker():
sys.stdout.write('Testing tree walkers ' + " ".join(list(treeTypes.keys())) + "\n")
for treeName, treeCls in treeTypes.items():
files = get_data_files('tree-construction')
for filename in files:
testName = os.path.basename(filename).replace(".dat", "")
if testName in ("template",):
continue
tests = TestData(filename, "data")
for index, test in enumerate(tests):
(input, errors,
innerHTML, expected) = [test[key] for key in ("data", "errors",
"document-fragment",
"document")]
errors = errors.split("\n")
yield runTreewalkerTest, innerHTML, input, expected, errors, treeCls
def set_attribute_on_first_child(docfrag, name, value, treeName):
"""naively sets an attribute on the first child of the document
fragment passed in"""
setter = {'ElementTree': lambda d: d[0].set,
'DOM': lambda d: d.firstChild.setAttribute}
setter['cElementTree'] = setter['ElementTree']
try:
setter.get(treeName, setter['DOM'])(docfrag)(name, value)
except AttributeError:
setter['ElementTree'](docfrag)(name, value)
def runTreewalkerEditTest(intext, expected, attrs_to_add, tree):
"""tests what happens when we add attributes to the intext"""
treeName, treeClass = tree
parser = html5parser.HTMLParser(tree=treeClass["builder"])
document = parser.parseFragment(intext)
for nom, val in attrs_to_add:
set_attribute_on_first_child(document, nom, val, treeName)
document = treeClass.get("adapter", lambda x: x)(document)
output = convertTokens(treeClass["walker"](document))
output = attrlist.sub(sortattrs, output)
    if output not in expected:
raise AssertionError("TreewalkerEditTest: %s\nExpected:\n%s\nReceived:\n%s" % (treeName, expected, output))
def test_treewalker_six_mix():
"""Str/Unicode mix. If str attrs added to tree"""
# On Python 2.x string literals are of type str. Unless, like this
# file, the programmer imports unicode_literals from __future__.
# In that case, string literals become objects of type unicode.
# This test simulates a Py2 user, modifying attributes on a document
# fragment but not using the u'' syntax nor importing unicode_literals
sm_tests = [
('<a href="http://example.com">Example</a>',
[(str('class'), str('test123'))],
'<a>\n class="test123"\n href="http://example.com"\n "Example"'),
('<link href="http://example.com/cow">',
[(str('rel'), str('alternate'))],
'<link>\n href="http://example.com/cow"\n rel="alternate"\n "Example"')
]
for tree in treeTypes.items():
for intext, attrs, expected in sm_tests:
yield runTreewalkerEditTest, intext, expected, attrs, tree
| mpl-2.0 | -5,193,673,637,854,518,000 | 37.787535 | 115 | 0.545428 | false |
lsinfo/odoo | addons/calendar/contacts.py | 389 | 1414 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2011 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class calendar_contacts(osv.osv):
_name = 'calendar.contacts'
_columns = {
'user_id': fields.many2one('res.users','Me'),
'partner_id': fields.many2one('res.partner','Employee',required=True, domain=[]),
'active':fields.boolean('active'),
}
_defaults = {
'user_id': lambda self, cr, uid, ctx: uid,
'active' : True,
} | agpl-3.0 | -2,749,465,891,768,291,000 | 39.428571 | 89 | 0.586987 | false |
TeamExodus/external_chromium_org | tools/memory_inspector/memory_inspector/data/serialization.py | 89 | 4465 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This module handles the JSON de/serialization of the core classes.
This is needed for both long term storage (e.g., loading/storing traces to local
files) and for short term data exchange (AJAX with the HTML UI).
The rationale of these serializers is to store data in a format that is both
efficient (i.e., avoid storing redundant information) and intelligible (i.e.,
flatten the class hierarchy, keeping only the meaningful bits).
"""
import json
from memory_inspector.classification import results
from memory_inspector.core import backends
from memory_inspector.core import memory_map
from memory_inspector.core import native_heap
from memory_inspector.core import stacktrace
from memory_inspector.core import symbol
class Encoder(json.JSONEncoder):
def default(self, obj): # pylint: disable=E0202
if isinstance(obj, memory_map.Map):
return [entry.__dict__ for entry in obj.entries]
if isinstance(obj, symbol.Symbols):
return obj.symbols
if isinstance(obj, (symbol.Symbol, symbol.SourceInfo)):
return obj.__dict__
if isinstance(obj, native_heap.NativeHeap):
# Just keep the list of (distinct) stack frames from the index. Encoding
# it as a JSON dictionary would be redundant.
return {'stack_frames': obj.stack_frames.values(),
'allocations': obj.allocations}
if isinstance(obj, native_heap.Allocation):
return obj.__dict__
if isinstance(obj, stacktrace.Stacktrace):
# Keep just absolute addrs of stack frames. The full frame details will be
# kept in (and rebuilt from) |native_heap.NativeHeap.stack_frames|. See
# NativeHeapDecoder below.
return [frame.address for frame in obj.frames]
if isinstance(obj, stacktrace.Frame):
# Strip out the symbol information from stack frames. Symbols are stored
# (and will be loaded) separately. Rationale: different heap snapshots can
# share the same symbol db. Serializing the symbol information for each
# stack frame for each heap snapshot is a waste.
return {'address': obj.address,
'exec_file_rel_path': obj.exec_file_rel_path,
'offset': obj.offset}
if isinstance(obj, (backends.DeviceStats, backends.ProcessStats)):
return obj.__dict__
if isinstance(obj, results.AggreatedResults):
return {'keys': obj.keys, 'buckets': obj.total}
if isinstance(obj, results.Bucket):
return {obj.rule.name : {'values': obj.values, 'children': obj.children}}
return json.JSONEncoder.default(self, obj)
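# The decoders below rebuild the core model objects from the flattened JSON
# produced by Encoder.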
class MmapDecoder(json.JSONDecoder):
def decode(self, json_str): # pylint: disable=W0221
d = super(MmapDecoder, self).decode(json_str)
mmap = memory_map.Map()
for entry_dict in d:
entry = memory_map.MapEntry(**entry_dict)
mmap.Add(entry)
return mmap
class SymbolsDecoder(json.JSONDecoder):
def decode(self, json_str): # pylint: disable=W0221
d = super(SymbolsDecoder, self).decode(json_str)
symbols = symbol.Symbols()
for sym_key, sym_dict in d.iteritems():
sym = symbol.Symbol(sym_dict['name'])
for source_info in sym_dict['source_info']:
sym.AddSourceLineInfo(**source_info)
symbols.symbols[sym_key] = sym
return symbols
class NativeHeapDecoder(json.JSONDecoder):
def decode(self, json_str): # pylint: disable=W0221
d = super(NativeHeapDecoder, self).decode(json_str)
nh = native_heap.NativeHeap()
# First load and rebuild the stack_frame index.
for frame_dict in d['stack_frames']:
frame = nh.GetStackFrame(frame_dict['address'])
frame.SetExecFileInfo(frame_dict['exec_file_rel_path'],
frame_dict['offset'])
# Then load backtraces (reusing stack frames from the index above).
for alloc_dict in d['allocations']:
stack_trace = stacktrace.Stacktrace()
for absolute_addr in alloc_dict['stack_trace']:
stack_trace.Add(nh.GetStackFrame(absolute_addr))
nh.Add(native_heap.Allocation(start=alloc_dict['start'],
size=alloc_dict['size'],
stack_trace=stack_trace,
flags=alloc_dict['flags'],
resident_size=alloc_dict['resident_size']))
return nh | bsd-3-clause | -3,681,701,840,853,906,000 | 38.522124 | 80 | 0.677268 | false |
ToonTownInfiniteRepo/ToontownInfinite | Panda3D-1.9.0/python/Lib/distutils/command/check.py | 98 | 5557 | """distutils.command.check
Implements the Distutils 'check' command.
"""
__revision__ = "$Id$"
from distutils.core import Command
from distutils.dist import PKG_INFO_ENCODING
from distutils.errors import DistutilsSetupError
try:
# docutils is installed
from docutils.utils import Reporter
from docutils.parsers.rst import Parser
from docutils import frontend
from docutils import nodes
from StringIO import StringIO
class SilentReporter(Reporter):
def __init__(self, source, report_level, halt_level, stream=None,
debug=0, encoding='ascii', error_handler='replace'):
self.messages = []
Reporter.__init__(self, source, report_level, halt_level, stream,
debug, encoding, error_handler)
def system_message(self, level, message, *children, **kwargs):
self.messages.append((level, message, children, kwargs))
return nodes.system_message(message, level=level,
type=self.levels[level],
*children, **kwargs)
HAS_DOCUTILS = True
except ImportError:
# docutils is not installed
HAS_DOCUTILS = False
class check(Command):
"""This command checks the meta-data of the package.
"""
description = ("perform some checks on the package")
user_options = [('metadata', 'm', 'Verify meta-data'),
('restructuredtext', 'r',
('Checks if long string meta-data syntax '
'are reStructuredText-compliant')),
('strict', 's',
'Will exit with an error if a check fails')]
boolean_options = ['metadata', 'restructuredtext', 'strict']
def initialize_options(self):
"""Sets default values for options."""
self.restructuredtext = 0
self.metadata = 1
self.strict = 0
self._warnings = 0
def finalize_options(self):
pass
def warn(self, msg):
"""Counts the number of warnings that occurs."""
self._warnings += 1
return Command.warn(self, msg)
def run(self):
"""Runs the command."""
# perform the various tests
if self.metadata:
self.check_metadata()
if self.restructuredtext:
if HAS_DOCUTILS:
self.check_restructuredtext()
elif self.strict:
raise DistutilsSetupError('The docutils package is needed.')
# let's raise an error in strict mode, if we have at least
# one warning
if self.strict and self._warnings > 0:
raise DistutilsSetupError('Please correct your package.')
def check_metadata(self):
"""Ensures that all required elements of meta-data are supplied.
name, version, URL, (author and author_email) or
        (maintainer and maintainer_email).
Warns if any are missing.
"""
metadata = self.distribution.metadata
missing = []
for attr in ('name', 'version', 'url'):
if not (hasattr(metadata, attr) and getattr(metadata, attr)):
missing.append(attr)
if missing:
self.warn("missing required meta-data: %s" % ', '.join(missing))
if metadata.author:
if not metadata.author_email:
self.warn("missing meta-data: if 'author' supplied, " +
"'author_email' must be supplied too")
elif metadata.maintainer:
if not metadata.maintainer_email:
self.warn("missing meta-data: if 'maintainer' supplied, " +
"'maintainer_email' must be supplied too")
else:
self.warn("missing meta-data: either (author and author_email) " +
"or (maintainer and maintainer_email) " +
"must be supplied")
def check_restructuredtext(self):
"""Checks if the long string fields are reST-compliant."""
data = self.distribution.get_long_description()
if not isinstance(data, unicode):
data = data.decode(PKG_INFO_ENCODING)
for warning in self._check_rst_data(data):
line = warning[-1].get('line')
if line is None:
warning = warning[1]
else:
warning = '%s (line %s)' % (warning[1], line)
self.warn(warning)
def _check_rst_data(self, data):
"""Returns warnings when the provided data doesn't compile."""
source_path = StringIO()
parser = Parser()
settings = frontend.OptionParser().get_default_values()
settings.tab_width = 4
settings.pep_references = None
settings.rfc_references = None
reporter = SilentReporter(source_path,
settings.report_level,
settings.halt_level,
stream=settings.warning_stream,
debug=settings.debug,
encoding=settings.error_encoding,
error_handler=settings.error_encoding_error_handler)
document = nodes.document(settings, reporter, source=source_path)
document.note_source(source_path, -1)
try:
parser.parse(data, document)
except AttributeError:
reporter.messages.append((-1, 'Could not finish the parsing.',
'', {}))
return reporter.messages
| mit | -5,648,363,922,742,182,000 | 36.295302 | 78 | 0.567572 | false |
thaim/ansible | lib/ansible/modules/network/fortios/fortios_system_replacemsg_icap.py | 13 | 10063 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_system_replacemsg_icap
short_description: Replacement messages in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
      user to set and modify the system_replacemsg feature and icap category.
      Examples include all parameters and values, which need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.9"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
state:
description:
- Indicates whether to create or remove the object.
type: str
required: true
choices:
- present
- absent
system_replacemsg_icap:
description:
- Replacement messages.
default: null
type: dict
suboptions:
buffer:
description:
- Message string.
type: str
format:
description:
- Format flag.
type: str
choices:
- none
- text
- html
- wml
header:
description:
- Header flag.
type: str
choices:
- none
- http
- 8bit
msg_type:
description:
- Message type.
type: str
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Replacement messages.
fortios_system_replacemsg_icap:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
system_replacemsg_icap:
buffer: "<your_own_value>"
format: "none"
header: "none"
msg_type: "<your_own_value>"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
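# Keep only the options that were actually supplied and that map to fields of
# the FortiOS replacement-message endpoint.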
def filter_system_replacemsg_icap_data(json):
option_list = ['buffer', 'format', 'header',
'msg_type']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
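# Ansible argument names use underscores, while the FortiOS API expects hyphens.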
def underscore_to_hyphen(data):
if isinstance(data, list):
for elem in data:
elem = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
def system_replacemsg_icap(data, fos):
vdom = data['vdom']
state = data['state']
system_replacemsg_icap_data = data['system_replacemsg_icap']
filtered_data = underscore_to_hyphen(filter_system_replacemsg_icap_data(system_replacemsg_icap_data))
if state == "present":
return fos.set('system.replacemsg',
'icap',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('system.replacemsg',
'icap',
mkey=filtered_data['msg-type'],
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_system_replacemsg(data, fos):
if data['system_replacemsg_icap']:
resp = system_replacemsg_icap(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"state": {"required": True, "type": "str",
"choices": ["present", "absent"]},
"system_replacemsg_icap": {
"required": False, "type": "dict", "default": None,
"options": {
"buffer": {"required": False, "type": "str"},
"format": {"required": False, "type": "str",
"choices": ["none", "text", "html",
"wml"]},
"header": {"required": False, "type": "str",
"choices": ["none", "http", "8bit"]},
"msg_type": {"required": False, "type": "str"}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_system_replacemsg(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_system_replacemsg(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| mit | -1,622,123,057,668,313,900 | 28.684366 | 105 | 0.584219 | false |
mydongistiny/external_chromium_org_third_party_WebKit | Tools/Scripts/webkitpy/layout_tests/port/browser_test_driver.py | 21 | 4523 | # Copyright (C) 2014 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the Google name nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from webkitpy.layout_tests.port import driver
import time
import shutil
class BrowserTestDriver(driver.Driver):
"""Object for running print preview test(s) using browser_tests."""
def __init__(self, port, worker_number, pixel_tests, no_timeout=False):
"""Invokes the constructor of driver.Driver."""
super(BrowserTestDriver, self).__init__(port, worker_number, pixel_tests, no_timeout)
def start(self, pixel_tests, per_test_args, deadline):
"""Same as Driver.start() however, it has an extra step. It waits for
a path to a file to be used for stdin to be printed by the browser test.
If a path is found by the deadline test test will open the file and
assign it to the stdin of the process that is owned by this driver's
server process.
"""
# FIXME(ivandavid): Need to handle case where the layout test doesn't
# get a file name.
new_cmd_line = self.cmd_line(pixel_tests, per_test_args)
if not self._server_process or new_cmd_line != self._current_cmd_line:
self._start(pixel_tests, per_test_args)
self._run_post_start_tasks()
self._open_stdin_path(deadline)
# Gets the path of the directory that the file for stdin communication is
# in. Since the browser test cannot clean it up, the layout test framework
# will. Everything the browser test uses is stored in the same directory as
# the stdin file, so deleting that directory recursively will remove all the
# other temp data, like the printed pdf. This function assumes the correct
# file path is sent. It won't delete files with only one component to avoid
# accidentally deleting files like /tmp.
def _open_stdin_path(self, deadline, test=False):
# FIXME(ivandavid): Come up with a way to test & see what happens when
# the file can't be opened.
path, found = self._read_stdin_path(deadline)
if found:
            if not test:
self._server_process._proc.stdin = open(path, 'wb', 0)
def _read_stdin_path(self, deadline):
# return (stdin_path, bool)
block = self._read_block(deadline)
if block.stdin_path:
return (block.stdin_path, True)
return (None, False)
def cmd_line(self, pixel_tests, per_test_args):
"""Command line arguments to run the browser test."""
cmd = self._command_wrapper(self._port.get_option('wrapper'))
cmd.append(self._port._path_to_driver())
cmd.append('--gtest_filter=PrintPreviewPdfGeneratedBrowserTest.MANUAL_LayoutTestDriver')
cmd.append('--run-manual')
cmd.append('--single_process')
cmd.extend(per_test_args)
cmd.extend(self._port.get_option('additional_drt_flag', []))
return cmd
def stop(self):
if self._server_process:
self._server_process.write('QUIT')
super(BrowserTestDriver, self).stop(self._port.driver_stop_timeout())
| bsd-3-clause | -2,603,503,911,096,511,000 | 48.703297 | 96 | 0.696662 | false |
amyliu345/zulip | zerver/views/events_register.py | 10 | 2542 | from __future__ import absolute_import
from django.http import HttpRequest, HttpResponse
from typing import Text
from typing import Iterable, Optional, Sequence
from zerver.lib.actions import do_events_register
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.validator import check_string, check_list, check_bool
from zerver.models import UserProfile
def _default_all_public_streams(user_profile, all_public_streams):
# type: (UserProfile, Optional[bool]) -> bool
if all_public_streams is not None:
return all_public_streams
else:
return user_profile.default_all_public_streams
def _default_narrow(user_profile, narrow):
# type: (UserProfile, Iterable[Sequence[Text]]) -> Iterable[Sequence[Text]]
default_stream = user_profile.default_events_register_stream
if not narrow and user_profile.default_events_register_stream is not None:
narrow = [['stream', default_stream.name]]
return narrow
# Does not need to be authenticated because it's called from rest_dispatch
@has_request_variables
def api_events_register(request, user_profile,
apply_markdown=REQ(default=False, validator=check_bool),
all_public_streams=REQ(default=None, validator=check_bool)):
# type: (HttpRequest, UserProfile, bool, Optional[bool]) -> HttpResponse
return events_register_backend(request, user_profile,
apply_markdown=apply_markdown,
all_public_streams=all_public_streams)
@has_request_variables
def events_register_backend(request, user_profile, apply_markdown=True,
all_public_streams=None,
event_types=REQ(validator=check_list(check_string), default=None),
narrow=REQ(validator=check_list(check_list(check_string, length=2)), default=[]),
queue_lifespan_secs=REQ(converter=int, default=0)):
# type: (HttpRequest, UserProfile, bool, Optional[bool], Optional[Iterable[str]], Iterable[Sequence[Text]], int) -> HttpResponse
all_public_streams = _default_all_public_streams(user_profile, all_public_streams)
narrow = _default_narrow(user_profile, narrow)
ret = do_events_register(user_profile, request.client, apply_markdown,
event_types, queue_lifespan_secs, all_public_streams,
narrow=narrow)
return json_success(ret)
| apache-2.0 | -6,723,230,971,731,152,000 | 48.843137 | 132 | 0.675846 | false |