repo_name (string, 5–100 chars) | ref (string, 12–67 chars) | path (string, 4–244 chars) | copies (string, 1–8 chars) | content (string, 0–1.05M chars, nullable)
---|---|---|---|---
epssy/hue
|
refs/heads/master
|
desktop/core/ext-py/Django-1.6.10/tests/admin_widgets/urls.py
|
150
|
from __future__ import absolute_import
from django.conf.urls import patterns, include
from . import widgetadmin
urlpatterns = patterns('',
(r'^', include(widgetadmin.site.urls)),
)
|
FrankBian/kuma
|
refs/heads/master
|
vendor/packages/sqlparse/sqlparse/filters.py
|
6
|
# -*- coding: utf-8 -*-
import re
from sqlparse.engine import grouping
from sqlparse import tokens as T
from sqlparse import sql
class Filter(object):
def process(self, *args):
raise NotImplementedError
class TokenFilter(Filter):
def process(self, stack, stream):
raise NotImplementedError
# FIXME: Should be removed
def rstrip(stream):
buff = []
for token in stream:
if token.is_whitespace() and '\n' in token.value:
# assuming there's only one \n in value
before, rest = token.value.split('\n', 1)
token.value = '\n%s' % rest
buff = []
yield token
elif token.is_whitespace():
buff.append(token)
elif token.is_group():
token.tokens = list(rstrip(token.tokens))
# process group and look if it starts with a nl
if token.tokens and token.tokens[0].is_whitespace():
before, rest = token.tokens[0].value.split('\n', 1)
token.tokens[0].value = '\n%s' % rest
buff = []
while buff:
yield buff.pop(0)
yield token
else:
while buff:
yield buff.pop(0)
yield token
# --------------------------
# token process
class _CaseFilter(TokenFilter):
ttype = None
def __init__(self, case=None):
if case is None:
case = 'upper'
assert case in ['lower', 'upper', 'capitalize']
self.convert = getattr(unicode, case)
def process(self, stack, stream):
for ttype, value in stream:
if ttype in self.ttype:
value = self.convert(value)
yield ttype, value
class KeywordCaseFilter(_CaseFilter):
ttype = T.Keyword
class IdentifierCaseFilter(_CaseFilter):
ttype = (T.Name, T.String.Symbol)
# ----------------------
# statement process
class StripCommentsFilter(Filter):
def _process(self, tlist):
idx = 0
clss = set([x.__class__ for x in tlist.tokens])
while grouping.Comment in clss:
token = tlist.token_next_by_instance(0, grouping.Comment)
tidx = tlist.token_index(token)
prev = tlist.token_prev(tidx, False)
next_ = tlist.token_next(tidx, False)
# Replace with whitespace if prev and next exist and neither is
# whitespace. This doesn't apply if prev or next is a parenthesis.
if (prev is not None and next_ is not None
and not prev.is_whitespace() and not next_.is_whitespace()
and not (prev.match(T.Punctuation, '(')
or next_.match(T.Punctuation, ')'))):
tlist.tokens[tidx] = grouping.Token(T.Whitespace, ' ')
else:
tlist.tokens.pop(tidx)
clss = set([x.__class__ for x in tlist.tokens])
def process(self, stack, stmt):
[self.process(stack, sgroup) for sgroup in stmt.get_sublists()]
self._process(stmt)
class StripWhitespaceFilter(Filter):
def _stripws(self, tlist):
func_name = '_stripws_%s' % tlist.__class__.__name__.lower()
func = getattr(self, func_name, self._stripws_default)
func(tlist)
def _stripws_default(self, tlist):
last_was_ws = False
for token in tlist.tokens:
if token.is_whitespace():
if last_was_ws:
token.value = ''
else:
token.value = ' '
last_was_ws = token.is_whitespace()
def _stripws_parenthesis(self, tlist):
if tlist.tokens[1].is_whitespace():
tlist.tokens.pop(1)
if tlist.tokens[-2].is_whitespace():
tlist.tokens.pop(-2)
self._stripws_default(tlist)
def process(self, stack, stmt):
[self.process(stack, sgroup) for sgroup in stmt.get_sublists()]
self._stripws(stmt)
if stmt.tokens[-1].is_whitespace():
stmt.tokens.pop(-1)
class ReindentFilter(Filter):
def __init__(self, width=2, char=' ', line_width=None):
self.width = width
self.char = char
self.indent = 0
self.offset = 0
self.line_width = line_width
self._curr_stmt = None
self._last_stmt = None
def _get_offset(self, token):
all_ = list(self._curr_stmt.flatten())
idx = all_.index(token)
raw = ''.join(unicode(x) for x in all_[:idx+1])
line = raw.splitlines()[-1]
# Now take current offset into account and return relative offset.
full_offset = len(line)-(len(self.char*(self.width*self.indent)))
return full_offset - self.offset
def nl(self):
# TODO: newline character should be configurable
ws = '\n'+(self.char*((self.indent*self.width)+self.offset))
return grouping.Token(T.Whitespace, ws)
def _split_kwds(self, tlist):
split_words = ('FROM', 'JOIN$', 'AND', 'OR',
'GROUP', 'ORDER', 'UNION', 'VALUES',
'SET')
idx = 0
token = tlist.token_next_match(idx, T.Keyword, split_words,
regex=True)
while token:
prev = tlist.token_prev(tlist.token_index(token), False)
offset = 1
if prev and prev.is_whitespace():
tlist.tokens.pop(tlist.token_index(prev))
offset += 1
if (prev
and isinstance(prev, sql.Comment)
and (str(prev).endswith('\n')
or str(prev).endswith('\r'))):
nl = tlist.token_next(token)
else:
nl = self.nl()
tlist.insert_before(token, nl)
token = tlist.token_next_match(tlist.token_index(nl)+offset,
T.Keyword, split_words, regex=True)
def _split_statements(self, tlist):
idx = 0
token = tlist.token_next_by_type(idx, (T.Keyword.DDL, T.Keyword.DML))
while token:
prev = tlist.token_prev(tlist.token_index(token), False)
if prev and prev.is_whitespace():
tlist.tokens.pop(tlist.token_index(prev))
# only break if it's not the first token
if prev:
nl = self.nl()
tlist.insert_before(token, nl)
token = tlist.token_next_by_type(tlist.token_index(token)+1,
(T.Keyword.DDL, T.Keyword.DML))
def _process(self, tlist):
func_name = '_process_%s' % tlist.__class__.__name__.lower()
func = getattr(self, func_name, self._process_default)
func(tlist)
def _process_where(self, tlist):
token = tlist.token_next_match(0, T.Keyword, 'WHERE')
tlist.insert_before(token, self.nl())
self.indent += 1
self._process_default(tlist)
self.indent -= 1
def _process_parenthesis(self, tlist):
first = tlist.token_next(0)
indented = False
if first and first.ttype in (T.Keyword.DML, T.Keyword.DDL):
self.indent += 1
tlist.tokens.insert(0, self.nl())
indented = True
num_offset = self._get_offset(tlist.token_next_match(0,
T.Punctuation, '('))
self.offset += num_offset
self._process_default(tlist, stmts=not indented)
if indented:
self.indent -= 1
self.offset -= num_offset
def _process_identifierlist(self, tlist):
identifiers = tlist.get_identifiers()
if len(identifiers) > 1:
first = list(identifiers[0].flatten())[0]
num_offset = self._get_offset(first)-len(first.value)
self.offset += num_offset
for token in identifiers[1:]:
tlist.insert_before(token, self.nl())
self.offset -= num_offset
self._process_default(tlist)
def _process_case(self, tlist):
cases = tlist.get_cases()
is_first = True
num_offset = None
case = tlist.tokens[0]
outer_offset = self._get_offset(case)-len(case.value)
self.offset += outer_offset
for cond, value in tlist.get_cases():
if is_first:
is_first = False
num_offset = self._get_offset(cond[0])-len(cond[0].value)
self.offset += num_offset
continue
if cond is None:
token = value[0]
else:
token = cond[0]
tlist.insert_before(token, self.nl())
# Line breaks on group level are done. Now let's add an offset of
# 5 (=length of "when", "then", "else") and process subgroups.
self.offset += 5
self._process_default(tlist)
self.offset -= 5
if num_offset is not None:
self.offset -= num_offset
end = tlist.token_next_match(0, T.Keyword, 'END')
tlist.insert_before(end, self.nl())
self.offset -= outer_offset
def _process_default(self, tlist, stmts=True, kwds=True):
if stmts:
self._split_statements(tlist)
if kwds:
self._split_kwds(tlist)
[self._process(sgroup) for sgroup in tlist.get_sublists()]
def process(self, stack, stmt):
if isinstance(stmt, grouping.Statement):
self._curr_stmt = stmt
self._process(stmt)
if isinstance(stmt, grouping.Statement):
if self._last_stmt is not None:
if self._last_stmt.to_unicode().endswith('\n'):
nl = '\n'
else:
nl = '\n\n'
stmt.tokens.insert(0,
grouping.Token(T.Whitespace, nl))
if self._last_stmt != stmt:
self._last_stmt = stmt
# FIXME: Doesn't work ;)
class RightMarginFilter(Filter):
keep_together = (
# grouping.TypeCast, grouping.Identifier, grouping.Alias,
)
def __init__(self, width=79):
self.width = width
self.line = ''
def _process(self, stack, group, stream):
for token in stream:
if token.is_whitespace() and '\n' in token.value:
if token.value.endswith('\n'):
self.line = ''
else:
self.line = token.value.splitlines()[-1]
elif (token.is_group()
and not token.__class__ in self.keep_together):
token.tokens = self._process(stack, token, token.tokens)
else:
val = token.to_unicode()
if len(self.line) + len(val) > self.width:
match = re.search('^ +', self.line)
if match is not None:
indent = match.group()
else:
indent = ''
yield grouping.Token(T.Whitespace, '\n%s' % indent)
self.line = indent
self.line += val
yield token
def process(self, stack, group):
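# NOTE: the early return below deliberately disables this filter
# (see the FIXME above).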
return
group.tokens = self._process(stack, group, group.tokens)
# ---------------------------
# postprocess
class SerializerUnicode(Filter):
def process(self, stack, stmt):
raw = stmt.to_unicode()
add_nl = raw.endswith('\n')
res = '\n'.join(line.rstrip() for line in raw.splitlines())
if add_nl:
res += '\n'
return res
class OutputPythonFilter(Filter):
def __init__(self, varname='sql'):
self.varname = varname
self.cnt = 0
def _process(self, stream, varname, count, has_nl):
if count > 1:
yield grouping.Token(T.Whitespace, '\n')
yield grouping.Token(T.Name, varname)
yield grouping.Token(T.Whitespace, ' ')
yield grouping.Token(T.Operator, '=')
yield grouping.Token(T.Whitespace, ' ')
if has_nl:
yield grouping.Token(T.Operator, '(')
yield grouping.Token(T.Text, "'")
cnt = 0
for token in stream:
cnt += 1
if token.is_whitespace() and '\n' in token.value:
if cnt == 1:
continue
after_lb = token.value.split('\n', 1)[1]
yield grouping.Token(T.Text, " '")
yield grouping.Token(T.Whitespace, '\n')
for i in range(len(varname)+4):
yield grouping.Token(T.Whitespace, ' ')
yield grouping.Token(T.Text, "'")
if after_lb: # it's the indentation
yield grouping.Token(T.Whitespace, after_lb)
continue
elif token.value and "'" in token.value:
token.value = token.value.replace("'", "\\'")
yield grouping.Token(T.Text, token.value or '')
yield grouping.Token(T.Text, "'")
if has_nl:
yield grouping.Token(T.Operator, ')')
def process(self, stack, stmt):
self.cnt += 1
if self.cnt > 1:
varname = '%s%d' % (self.varname, self.cnt)
else:
varname = self.varname
has_nl = len(stmt.to_unicode().strip().splitlines()) > 1
stmt.tokens = self._process(stmt.tokens, varname, self.cnt, has_nl)
return stmt
class OutputPHPFilter(Filter):
def __init__(self, varname='sql'):
self.varname = '$%s' % varname
self.count = 0
def _process(self, stream, varname):
if self.count > 1:
yield grouping.Token(T.Whitespace, '\n')
yield grouping.Token(T.Name, varname)
yield grouping.Token(T.Whitespace, ' ')
yield grouping.Token(T.Operator, '=')
yield grouping.Token(T.Whitespace, ' ')
yield grouping.Token(T.Text, '"')
cnt = 0
for token in stream:
if token.is_whitespace() and '\n' in token.value:
# cnt += 1
# if cnt == 1:
# continue
after_lb = token.value.split('\n', 1)[1]
yield grouping.Token(T.Text, ' "')
yield grouping.Token(T.Operator, ';')
yield grouping.Token(T.Whitespace, '\n')
yield grouping.Token(T.Name, varname)
yield grouping.Token(T.Whitespace, ' ')
yield grouping.Token(T.Punctuation, '.')
yield grouping.Token(T.Operator, '=')
yield grouping.Token(T.Whitespace, ' ')
yield grouping.Token(T.Text, '"')
if after_lb:
yield grouping.Token(T.Text, after_lb)
continue
elif '"' in token.value:
token.value = token.value.replace('"', '\\"')
yield grouping.Token(T.Text, token.value)
yield grouping.Token(T.Text, '"')
yield grouping.Token(T.Punctuation, ';')
def process(self, stack, stmt):
self.count += 1
if self.count > 1:
varname = '%s%d' % (self.varname, self.count)
else:
varname = self.varname
stmt.tokens = tuple(self._process(stmt.tokens, varname))
return stmt
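# Hedged usage sketch (assumes the sqlparse 0.x engine API this module was
# written against; FilterStack and its preprocess/stmtprocess/postprocess
# lists are assumptions from that vintage):
#
#     from sqlparse.engine import FilterStack
#     stack = FilterStack()
#     stack.preprocess.append(KeywordCaseFilter(case='upper'))
#     stack.stmtprocess.append(ReindentFilter())
#     stack.postprocess.append(SerializerUnicode())
#     print ''.join(stack.run('select * from foo where bar = 1'))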
|
Jayflux/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/tools/third_party/pytest/_pytest/assertion/rewrite.py
|
14
|
"""Rewrite assertion AST to produce nice error messages"""
from __future__ import absolute_import, division, print_function
import ast
import _ast
import errno
import itertools
import imp
import marshal
import os
import re
import six
import struct
import sys
import types
import py
from _pytest.assertion import util
# pytest caches rewritten pycs in __pycache__.
if hasattr(imp, "get_tag"):
PYTEST_TAG = imp.get_tag() + "-PYTEST"
else:
if hasattr(sys, "pypy_version_info"):
impl = "pypy"
elif sys.platform == "java":
impl = "jython"
else:
impl = "cpython"
ver = sys.version_info
PYTEST_TAG = "%s-%s%s-PYTEST" % (impl, ver[0], ver[1])
del ver, impl
PYC_EXT = ".py" + (__debug__ and "c" or "o")
PYC_TAIL = "." + PYTEST_TAG + PYC_EXT
ASCII_IS_DEFAULT_ENCODING = sys.version_info[0] < 3
if sys.version_info >= (3, 5):
ast_Call = ast.Call
else:
def ast_Call(a, b, c):
return ast.Call(a, b, c, None, None)
class AssertionRewritingHook(object):
"""PEP302 Import hook which rewrites asserts."""
def __init__(self, config):
self.config = config
self.fnpats = config.getini("python_files")
self.session = None
self.modules = {}
self._rewritten_names = set()
self._register_with_pkg_resources()
self._must_rewrite = set()
def set_session(self, session):
self.session = session
def find_module(self, name, path=None):
state = self.config._assertstate
state.trace("find_module called for: %s" % name)
names = name.rsplit(".", 1)
lastname = names[-1]
pth = None
if path is not None:
# Starting with Python 3.3, path is a _NamespacePath(), which
# causes problems if not converted to list.
path = list(path)
if len(path) == 1:
pth = path[0]
if pth is None:
try:
fd, fn, desc = imp.find_module(lastname, path)
except ImportError:
return None
if fd is not None:
fd.close()
tp = desc[2]
if tp == imp.PY_COMPILED:
if hasattr(imp, "source_from_cache"):
try:
fn = imp.source_from_cache(fn)
except ValueError:
# Python 3 doesn't like orphaned but still-importable
# .pyc files.
fn = fn[:-1]
else:
fn = fn[:-1]
elif tp != imp.PY_SOURCE:
# Don't know what this is.
return None
else:
fn = os.path.join(pth, name.rpartition(".")[2] + ".py")
fn_pypath = py.path.local(fn)
if not self._should_rewrite(name, fn_pypath, state):
return None
self._rewritten_names.add(name)
# The requested module looks like a test file, so rewrite it. This is
# the most magical part of the process: load the source, rewrite the
# asserts, and load the rewritten source. We also cache the rewritten
# module code in a special pyc. We must be aware of the possibility of
# concurrent pytest processes rewriting and loading pycs. To avoid
# tricky race conditions, we maintain the following invariant: The
# cached pyc is always a complete, valid pyc. Operations on it must be
# atomic. POSIX's atomic rename comes in handy.
write = not sys.dont_write_bytecode
cache_dir = os.path.join(fn_pypath.dirname, "__pycache__")
if write:
try:
os.mkdir(cache_dir)
except OSError:
e = sys.exc_info()[1].errno
if e == errno.EEXIST:
# Either the __pycache__ directory already exists (the
# common case) or it's blocked by a non-dir node. In the
# latter case, we'll ignore it in _write_pyc.
pass
elif e in [errno.ENOENT, errno.ENOTDIR]:
# One of the path components was not a directory, likely
# because we're in a zip file.
write = False
elif e in [errno.EACCES, errno.EROFS, errno.EPERM]:
state.trace("read only directory: %r" % fn_pypath.dirname)
write = False
else:
raise
cache_name = fn_pypath.basename[:-3] + PYC_TAIL
pyc = os.path.join(cache_dir, cache_name)
# Notice that even if we're in a read-only directory, I'm going
# to check for a cached pyc. This may not be optimal...
co = _read_pyc(fn_pypath, pyc, state.trace)
if co is None:
state.trace("rewriting %r" % (fn,))
source_stat, co = _rewrite_test(self.config, fn_pypath)
if co is None:
# Probably a SyntaxError in the test.
return None
if write:
_make_rewritten_pyc(state, source_stat, pyc, co)
else:
state.trace("found cached rewritten pyc for %r" % (fn,))
self.modules[name] = co, pyc
return self
def _should_rewrite(self, name, fn_pypath, state):
# always rewrite conftest files
fn = str(fn_pypath)
if fn_pypath.basename == 'conftest.py':
state.trace("rewriting conftest file: %r" % (fn,))
return True
if self.session is not None:
if self.session.isinitpath(fn):
state.trace("matched test file (was specified on cmdline): %r" %
(fn,))
return True
# modules not passed explicitly on the command line are only
# rewritten if they match the naming convention for test files
for pat in self.fnpats:
if fn_pypath.fnmatch(pat):
state.trace("matched test file %r" % (fn,))
return True
for marked in self._must_rewrite:
if name == marked or name.startswith(marked + '.'):
state.trace("matched marked file %r (from %r)" % (name, marked))
return True
return False
def mark_rewrite(self, *names):
"""Mark import names as needing to be rewritten.
The named module or package as well as any nested modules will
be rewritten on import.
"""
already_imported = set(names).intersection(set(sys.modules))
if already_imported:
for name in already_imported:
if name not in self._rewritten_names:
self._warn_already_imported(name)
self._must_rewrite.update(names)
def _warn_already_imported(self, name):
self.config.warn(
'P1',
'Module already imported so cannot be rewritten: %s' % name)
def load_module(self, name):
# If there is an existing module object named 'fullname' in
# sys.modules, the loader must use that existing module. (Otherwise,
# the reload() builtin will not work correctly.)
if name in sys.modules:
return sys.modules[name]
co, pyc = self.modules.pop(name)
# I wish I could just call imp.load_compiled here, but __file__ has to
# be set properly. In Python 3.2+, this all would be handled correctly
# by load_compiled.
mod = sys.modules[name] = imp.new_module(name)
try:
mod.__file__ = co.co_filename
# Normally, this attribute is 3.2+.
mod.__cached__ = pyc
mod.__loader__ = self
py.builtin.exec_(co, mod.__dict__)
except: # noqa
if name in sys.modules:
del sys.modules[name]
raise
return sys.modules[name]
def is_package(self, name):
try:
fd, fn, desc = imp.find_module(name)
except ImportError:
return False
if fd is not None:
fd.close()
tp = desc[2]
return tp == imp.PKG_DIRECTORY
@classmethod
def _register_with_pkg_resources(cls):
"""
Ensure package resources can be loaded from this loader. May be called
multiple times, as the operation is idempotent.
"""
try:
import pkg_resources
# access an attribute in case a deferred importer is present
pkg_resources.__name__
except ImportError:
return
# Since pytest tests are always located in the file system, the
# DefaultProvider is appropriate.
pkg_resources.register_loader_type(cls, pkg_resources.DefaultProvider)
def get_data(self, pathname):
"""Optional PEP302 get_data API.
"""
with open(pathname, 'rb') as f:
return f.read()
def _write_pyc(state, co, source_stat, pyc):
# Technically, we don't have to have the same pyc format as
# (C)Python, since these "pycs" should never be seen by builtin
# import. However, there's little reason to deviate, and I hope
# sometime to be able to use imp.load_compiled to load them. (See
# the comment in load_module above.)
try:
fp = open(pyc, "wb")
except IOError:
err = sys.exc_info()[1].errno
state.trace("error writing pyc file at %s: errno=%s" % (pyc, err))
# we ignore any failure to write the cache file
# there are many reasons, permission-denied, __pycache__ being a
# file etc.
return False
try:
fp.write(imp.get_magic())
mtime = int(source_stat.mtime)
size = source_stat.size & 0xFFFFFFFF
fp.write(struct.pack("<ll", mtime, size))
marshal.dump(co, fp)
finally:
fp.close()
return True
RN = "\r\n".encode("utf-8")
N = "\n".encode("utf-8")
cookie_re = re.compile(r"^[ \t\f]*#.*coding[:=][ \t]*[-\w.]+")
BOM_UTF8 = '\xef\xbb\xbf'
def _rewrite_test(config, fn):
"""Try to read and rewrite *fn* and return the code object."""
state = config._assertstate
try:
stat = fn.stat()
source = fn.read("rb")
except EnvironmentError:
return None, None
if ASCII_IS_DEFAULT_ENCODING:
# ASCII is the default encoding in Python 2. Without a coding
# declaration, Python 2 will complain about any bytes in the file
# outside the ASCII range. Sadly, this behavior does not extend to
# compile() or ast.parse(), which prefer to interpret the bytes as
# latin-1. (At least they properly handle explicit coding cookies.) To
# preserve this error behavior, we could force ast.parse() to use ASCII
# as the encoding by inserting a coding cookie. Unfortunately, that
# messes up line numbers. Thus, we have to check ourselves if anything
# is outside the ASCII range in the case no encoding is explicitly
# declared. For more context, see issue #269. Yay for Python 3 which
# gets this right.
end1 = source.find("\n")
end2 = source.find("\n", end1 + 1)
if (not source.startswith(BOM_UTF8) and
cookie_re.match(source[0:end1]) is None and
cookie_re.match(source[end1 + 1:end2]) is None):
if hasattr(state, "_indecode"):
# encodings imported us again, so don't rewrite.
return None, None
state._indecode = True
try:
try:
source.decode("ascii")
except UnicodeDecodeError:
# Let it fail in real import.
return None, None
finally:
del state._indecode
try:
tree = ast.parse(source)
except SyntaxError:
# Let this pop up again in the real import.
state.trace("failed to parse: %r" % (fn,))
return None, None
rewrite_asserts(tree, fn, config)
try:
co = compile(tree, fn.strpath, "exec", dont_inherit=True)
except SyntaxError:
# It's possible that this error is from some bug in the
# assertion rewriting, but I don't know of a fast way to tell.
state.trace("failed to compile: %r" % (fn,))
return None, None
return stat, co
def _make_rewritten_pyc(state, source_stat, pyc, co):
"""Try to dump rewritten code to *pyc*."""
if sys.platform.startswith("win"):
# Windows grants exclusive access to open files and doesn't have atomic
# rename, so just write into the final file.
_write_pyc(state, co, source_stat, pyc)
else:
# When not on windows, assume rename is atomic. Dump the code object
# into a file specific to this process and atomically replace it.
proc_pyc = pyc + "." + str(os.getpid())
if _write_pyc(state, co, source_stat, proc_pyc):
os.rename(proc_pyc, pyc)
def _read_pyc(source, pyc, trace=lambda x: None):
"""Possibly read a pytest pyc containing rewritten code.
Return rewritten code if successful or None if not.
"""
try:
fp = open(pyc, "rb")
except IOError:
return None
with fp:
try:
mtime = int(source.mtime())
size = source.size()
data = fp.read(12)
except EnvironmentError as e:
trace('_read_pyc(%s): EnvironmentError %s' % (source, e))
return None
# Check for invalid or out of date pyc file.
if (len(data) != 12 or data[:4] != imp.get_magic() or
struct.unpack("<ll", data[4:]) != (mtime, size)):
trace('_read_pyc(%s): invalid or out of date pyc' % source)
return None
try:
co = marshal.load(fp)
except Exception as e:
trace('_read_pyc(%s): marshal.load error %s' % (source, e))
return None
if not isinstance(co, types.CodeType):
trace('_read_pyc(%s): not a code object' % source)
return None
return co
def rewrite_asserts(mod, module_path=None, config=None):
"""Rewrite the assert statements in mod."""
AssertionRewriter(module_path, config).run(mod)
def _saferepr(obj):
"""Get a safe repr of an object for assertion error messages.
The assertion formatting (util.format_explanation()) requires
newlines to be escaped since they are a special character for it.
Normally assertion.util.format_explanation() does this but for a
custom repr it is possible to contain one of the special escape
sequences, especially '\n{' and '\n}' are likely to be present in
JSON reprs.
"""
repr = py.io.saferepr(obj)
if isinstance(repr, six.text_type):
t = six.text_type
else:
t = six.binary_type
return repr.replace(t("\n"), t("\\n"))
from _pytest.assertion.util import format_explanation as _format_explanation # noqa
def _format_assertmsg(obj):
"""Format the custom assertion message given.
For strings this simply replaces newlines with '\n~' so that
util.format_explanation() will preserve them instead of escaping
newlines. For other objects py.io.saferepr() is used first.
"""
# reprlib appears to have a bug which means that if a string
# contains a newline it gets escaped, however if an object has a
# .__repr__() which contains newlines it does not get escaped.
# However in either case we want to preserve the newline.
if isinstance(obj, six.text_type) or isinstance(obj, six.binary_type):
s = obj
is_repr = False
else:
s = py.io.saferepr(obj)
is_repr = True
if isinstance(s, six.text_type):
t = six.text_type
else:
t = six.binary_type
s = s.replace(t("\n"), t("\n~")).replace(t("%"), t("%%"))
if is_repr:
s = s.replace(t("\\n"), t("\n~"))
return s
def _should_repr_global_name(obj):
return not hasattr(obj, "__name__") and not callable(obj)
def _format_boolop(explanations, is_or):
explanation = "(" + (is_or and " or " or " and ").join(explanations) + ")"
if isinstance(explanation, six.text_type):
t = six.text_type
else:
t = six.binary_type
return explanation.replace(t('%'), t('%%'))
def _call_reprcompare(ops, results, expls, each_obj):
for i, res, expl in zip(range(len(ops)), results, expls):
try:
done = not res
except Exception:
done = True
if done:
break
if util._reprcompare is not None:
custom = util._reprcompare(ops[i], each_obj[i], each_obj[i + 1])
if custom is not None:
return custom
return expl
unary_map = {
ast.Not: "not %s",
ast.Invert: "~%s",
ast.USub: "-%s",
ast.UAdd: "+%s"
}
binop_map = {
ast.BitOr: "|",
ast.BitXor: "^",
ast.BitAnd: "&",
ast.LShift: "<<",
ast.RShift: ">>",
ast.Add: "+",
ast.Sub: "-",
ast.Mult: "*",
ast.Div: "/",
ast.FloorDiv: "//",
ast.Mod: "%%", # escaped for string formatting
ast.Eq: "==",
ast.NotEq: "!=",
ast.Lt: "<",
ast.LtE: "<=",
ast.Gt: ">",
ast.GtE: ">=",
ast.Pow: "**",
ast.Is: "is",
ast.IsNot: "is not",
ast.In: "in",
ast.NotIn: "not in"
}
# Python 3.5+ compatibility
try:
binop_map[ast.MatMult] = "@"
except AttributeError:
pass
# Python 3.4+ compatibility
if hasattr(ast, "NameConstant"):
_NameConstant = ast.NameConstant
else:
def _NameConstant(c):
return ast.Name(str(c), ast.Load())
def set_location(node, lineno, col_offset):
"""Set node location information recursively."""
def _fix(node, lineno, col_offset):
if "lineno" in node._attributes:
node.lineno = lineno
if "col_offset" in node._attributes:
node.col_offset = col_offset
for child in ast.iter_child_nodes(node):
_fix(child, lineno, col_offset)
_fix(node, lineno, col_offset)
return node
class AssertionRewriter(ast.NodeVisitor):
"""Assertion rewriting implementation.
The main entrypoint is to call .run() with an ast.Module instance,
this will then find all the assert statements and rewrite them to
provide intermediate values and a detailed assertion error. See
http://pybites.blogspot.be/2011/07/behind-scenes-of-pytests-new-assertion.html
for an overview of how this works.
The entry point here is .run() which will iterate over all the
statements in an ast.Module and for each ast.Assert statement it
finds call .visit() with it. Then .visit_Assert() takes over and
is responsible for creating new ast statements to replace the
original assert statement: it rewrites the test of an assertion
to provide intermediate values and replace it with an if statement
which raises an assertion error with a detailed explanation in
case the expression is false.
For this .visit_Assert() uses the visitor pattern to visit all the
AST nodes of the ast.Assert.test field, each visit call returning
an AST node and the corresponding explanation string. During this
state is kept in several instance attributes:
:statements: All the AST statements which will replace the assert
statement.
:variables: This is populated by .variable() with each variable
used by the statements so that they can all be set to None at
the end of the statements.
:variable_counter: Counter to create new unique variables needed
by statements. Variables are created using .variable() and
have the form of "@py_assert0".
:on_failure: The AST statements which will be executed if the
assertion test fails. This is the code which will construct
the failure message and raises the AssertionError.
:explanation_specifiers: A dict filled by .explanation_param()
with %-formatting placeholders and their corresponding
expressions to use in the building of an assertion message.
This is used by .pop_format_context() to build a message.
:stack: A stack of the explanation_specifiers dicts maintained by
.push_format_context() and .pop_format_context() which allows
building another %-formatted string while one is already being built.
This state is reset on every new assert statement visited and used
by the other visitors.
"""
def __init__(self, module_path, config):
super(AssertionRewriter, self).__init__()
self.module_path = module_path
self.config = config
def run(self, mod):
"""Find all assert statements in *mod* and rewrite them."""
if not mod.body:
# Nothing to do.
return
# Insert some special imports at the top of the module but after any
# docstrings and __future__ imports.
aliases = [ast.alias(py.builtin.builtins.__name__, "@py_builtins"),
ast.alias("_pytest.assertion.rewrite", "@pytest_ar")]
doc = getattr(mod, "docstring", None)
expect_docstring = doc is None
if doc is not None and self.is_rewrite_disabled(doc):
return
pos = 0
lineno = 1
for item in mod.body:
if (expect_docstring and isinstance(item, ast.Expr) and
isinstance(item.value, ast.Str)):
doc = item.value.s
if self.is_rewrite_disabled(doc):
return
expect_docstring = False
elif (not isinstance(item, ast.ImportFrom) or item.level > 0 or
item.module != "__future__"):
lineno = item.lineno
break
pos += 1
else:
lineno = item.lineno
imports = [ast.Import([alias], lineno=lineno, col_offset=0)
for alias in aliases]
mod.body[pos:pos] = imports
# Collect asserts.
nodes = [mod]
while nodes:
node = nodes.pop()
for name, field in ast.iter_fields(node):
if isinstance(field, list):
new = []
for i, child in enumerate(field):
if isinstance(child, ast.Assert):
# Transform assert.
new.extend(self.visit(child))
else:
new.append(child)
if isinstance(child, ast.AST):
nodes.append(child)
setattr(node, name, new)
elif (isinstance(field, ast.AST) and
# Don't recurse into expressions as they can't contain
# asserts.
not isinstance(field, ast.expr)):
nodes.append(field)
def is_rewrite_disabled(self, docstring):
return "PYTEST_DONT_REWRITE" in docstring
def variable(self):
"""Get a new variable."""
# Use a character invalid in python identifiers to avoid clashing.
name = "@py_assert" + str(next(self.variable_counter))
self.variables.append(name)
return name
def assign(self, expr):
"""Give *expr* a name."""
name = self.variable()
self.statements.append(ast.Assign([ast.Name(name, ast.Store())], expr))
return ast.Name(name, ast.Load())
def display(self, expr):
"""Call py.io.saferepr on the expression."""
return self.helper("saferepr", expr)
def helper(self, name, *args):
"""Call a helper in this module."""
py_name = ast.Name("@pytest_ar", ast.Load())
attr = ast.Attribute(py_name, "_" + name, ast.Load())
return ast_Call(attr, list(args), [])
def builtin(self, name):
"""Return the builtin called *name*."""
builtin_name = ast.Name("@py_builtins", ast.Load())
return ast.Attribute(builtin_name, name, ast.Load())
def explanation_param(self, expr):
"""Return a new named %-formatting placeholder for expr.
This creates a %-formatting placeholder for expr in the
current formatting context, e.g. ``%(py0)s``. The placeholder
and expr are placed in the current format context so that it
can be used on the next call to .pop_format_context().
"""
specifier = "py" + str(next(self.variable_counter))
self.explanation_specifiers[specifier] = expr
return "%(" + specifier + ")s"
def push_format_context(self):
"""Create a new formatting context.
The format context is used for when an explanation wants to
have a variable value formatted in the assertion message. In
this case the value required can be added using
.explanation_param(). Finally .pop_format_context() is used
to format a string of %-formatted values as added by
.explanation_param().
"""
self.explanation_specifiers = {}
self.stack.append(self.explanation_specifiers)
def pop_format_context(self, expl_expr):
"""Format the %-formatted string with current format context.
The expl_expr should be an ast.Str instance constructed from
the %-placeholders created by .explanation_param(). This will
add the required code to format said string to .on_failure and
return the ast.Name instance of the formatted string.
"""
current = self.stack.pop()
if self.stack:
self.explanation_specifiers = self.stack[-1]
keys = [ast.Str(key) for key in current.keys()]
format_dict = ast.Dict(keys, list(current.values()))
form = ast.BinOp(expl_expr, ast.Mod(), format_dict)
name = "@py_format" + str(next(self.variable_counter))
self.on_failure.append(ast.Assign([ast.Name(name, ast.Store())], form))
return ast.Name(name, ast.Load())
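# Hedged illustration of the three methods above: after push_format_context(),
# explanation_param(expr) returns a placeholder like "%(py0)s" and records
# {"py0": expr}; pop_format_context(ast.Str("%(py0)s == 1")) then appends
#     @py_format1 = "%(py0)s == 1" % {"py0": expr}
# to .on_failure and returns the ast.Name for @py_format1.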
def generic_visit(self, node):
"""Handle expressions we don't have custom code for."""
assert isinstance(node, ast.expr)
res = self.assign(node)
return res, self.explanation_param(self.display(res))
def visit_Assert(self, assert_):
"""Return the AST statements to replace the ast.Assert instance.
This rewrites the test of an assertion to provide
intermediate values and replace it with an if statement which
raises an assertion error with a detailed explanation in case
the expression is false.
"""
if isinstance(assert_.test, ast.Tuple) and self.config is not None:
fslocation = (self.module_path, assert_.lineno)
self.config.warn('R1', 'assertion is always true, perhaps '
'remove parentheses?', fslocation=fslocation)
self.statements = []
self.variables = []
self.variable_counter = itertools.count()
self.stack = []
self.on_failure = []
self.push_format_context()
# Rewrite assert into a bunch of statements.
top_condition, explanation = self.visit(assert_.test)
# Create failure message.
body = self.on_failure
negation = ast.UnaryOp(ast.Not(), top_condition)
self.statements.append(ast.If(negation, body, []))
if assert_.msg:
assertmsg = self.helper('format_assertmsg', assert_.msg)
explanation = "\n>assert " + explanation
else:
assertmsg = ast.Str("")
explanation = "assert " + explanation
template = ast.BinOp(assertmsg, ast.Add(), ast.Str(explanation))
msg = self.pop_format_context(template)
fmt = self.helper("format_explanation", msg)
err_name = ast.Name("AssertionError", ast.Load())
exc = ast_Call(err_name, [fmt], [])
if sys.version_info[0] >= 3:
raise_ = ast.Raise(exc, None)
else:
raise_ = ast.Raise(exc, None, None)
body.append(raise_)
# Clear temporary variables by setting them to None.
if self.variables:
variables = [ast.Name(name, ast.Store())
for name in self.variables]
clear = ast.Assign(variables, _NameConstant(None))
self.statements.append(clear)
# Fix line numbers.
for stmt in self.statements:
set_location(stmt, assert_.lineno, assert_.col_offset)
return self.statements
def visit_Name(self, name):
# Display the repr of the name if it's a local variable or
# _should_repr_global_name() thinks it's acceptable.
locs = ast_Call(self.builtin("locals"), [], [])
inlocs = ast.Compare(ast.Str(name.id), [ast.In()], [locs])
dorepr = self.helper("should_repr_global_name", name)
test = ast.BoolOp(ast.Or(), [inlocs, dorepr])
expr = ast.IfExp(test, self.display(name), ast.Str(name.id))
return name, self.explanation_param(expr)
def visit_BoolOp(self, boolop):
res_var = self.variable()
expl_list = self.assign(ast.List([], ast.Load()))
app = ast.Attribute(expl_list, "append", ast.Load())
is_or = int(isinstance(boolop.op, ast.Or))
body = save = self.statements
fail_save = self.on_failure
levels = len(boolop.values) - 1
self.push_format_context()
# Process each operand, short-circuiting if needed.
for i, v in enumerate(boolop.values):
if i:
fail_inner = []
# cond is set in a prior loop iteration below
self.on_failure.append(ast.If(cond, fail_inner, [])) # noqa
self.on_failure = fail_inner
self.push_format_context()
res, expl = self.visit(v)
body.append(ast.Assign([ast.Name(res_var, ast.Store())], res))
expl_format = self.pop_format_context(ast.Str(expl))
call = ast_Call(app, [expl_format], [])
self.on_failure.append(ast.Expr(call))
if i < levels:
cond = res
if is_or:
cond = ast.UnaryOp(ast.Not(), cond)
inner = []
self.statements.append(ast.If(cond, inner, []))
self.statements = body = inner
self.statements = save
self.on_failure = fail_save
expl_template = self.helper("format_boolop", expl_list, ast.Num(is_or))
expl = self.pop_format_context(expl_template)
return ast.Name(res_var, ast.Load()), self.explanation_param(expl)
def visit_UnaryOp(self, unary):
pattern = unary_map[unary.op.__class__]
operand_res, operand_expl = self.visit(unary.operand)
res = self.assign(ast.UnaryOp(unary.op, operand_res))
return res, pattern % (operand_expl,)
def visit_BinOp(self, binop):
symbol = binop_map[binop.op.__class__]
left_expr, left_expl = self.visit(binop.left)
right_expr, right_expl = self.visit(binop.right)
explanation = "(%s %s %s)" % (left_expl, symbol, right_expl)
res = self.assign(ast.BinOp(left_expr, binop.op, right_expr))
return res, explanation
def visit_Call_35(self, call):
"""
visit `ast.Call` nodes on Python 3.5 and after
"""
new_func, func_expl = self.visit(call.func)
arg_expls = []
new_args = []
new_kwargs = []
for arg in call.args:
res, expl = self.visit(arg)
arg_expls.append(expl)
new_args.append(res)
for keyword in call.keywords:
res, expl = self.visit(keyword.value)
new_kwargs.append(ast.keyword(keyword.arg, res))
if keyword.arg:
arg_expls.append(keyword.arg + "=" + expl)
else: # **args have `arg` keywords with an .arg of None
arg_expls.append("**" + expl)
expl = "%s(%s)" % (func_expl, ', '.join(arg_expls))
new_call = ast.Call(new_func, new_args, new_kwargs)
res = self.assign(new_call)
res_expl = self.explanation_param(self.display(res))
outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl)
return res, outer_expl
def visit_Starred(self, starred):
# From Python 3.5, a Starred node can appear in a function call
res, expl = self.visit(starred.value)
return starred, '*' + expl
def visit_Call_legacy(self, call):
"""
visit `ast.Call` nodes on Python 3.4 and below
"""
new_func, func_expl = self.visit(call.func)
arg_expls = []
new_args = []
new_kwargs = []
new_star = new_kwarg = None
for arg in call.args:
res, expl = self.visit(arg)
new_args.append(res)
arg_expls.append(expl)
for keyword in call.keywords:
res, expl = self.visit(keyword.value)
new_kwargs.append(ast.keyword(keyword.arg, res))
arg_expls.append(keyword.arg + "=" + expl)
if call.starargs:
new_star, expl = self.visit(call.starargs)
arg_expls.append("*" + expl)
if call.kwargs:
new_kwarg, expl = self.visit(call.kwargs)
arg_expls.append("**" + expl)
expl = "%s(%s)" % (func_expl, ', '.join(arg_expls))
new_call = ast.Call(new_func, new_args, new_kwargs,
new_star, new_kwarg)
res = self.assign(new_call)
res_expl = self.explanation_param(self.display(res))
outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl)
return res, outer_expl
# The ast.Call signature changed in 3.5, so conditionally choose which
# method is named visit_Call depending on the Python version.
if sys.version_info >= (3, 5):
visit_Call = visit_Call_35
else:
visit_Call = visit_Call_legacy
def visit_Attribute(self, attr):
if not isinstance(attr.ctx, ast.Load):
return self.generic_visit(attr)
value, value_expl = self.visit(attr.value)
res = self.assign(ast.Attribute(value, attr.attr, ast.Load()))
res_expl = self.explanation_param(self.display(res))
pat = "%s\n{%s = %s.%s\n}"
expl = pat % (res_expl, res_expl, value_expl, attr.attr)
return res, expl
def visit_Compare(self, comp):
self.push_format_context()
left_res, left_expl = self.visit(comp.left)
if isinstance(comp.left, (_ast.Compare, _ast.BoolOp)):
left_expl = "({0})".format(left_expl)
res_variables = [self.variable() for i in range(len(comp.ops))]
load_names = [ast.Name(v, ast.Load()) for v in res_variables]
store_names = [ast.Name(v, ast.Store()) for v in res_variables]
it = zip(range(len(comp.ops)), comp.ops, comp.comparators)
expls = []
syms = []
results = [left_res]
for i, op, next_operand in it:
next_res, next_expl = self.visit(next_operand)
if isinstance(next_operand, (_ast.Compare, _ast.BoolOp)):
next_expl = "({0})".format(next_expl)
results.append(next_res)
sym = binop_map[op.__class__]
syms.append(ast.Str(sym))
expl = "%s %s %s" % (left_expl, sym, next_expl)
expls.append(ast.Str(expl))
res_expr = ast.Compare(left_res, [op], [next_res])
self.statements.append(ast.Assign([store_names[i]], res_expr))
left_res, left_expl = next_res, next_expl
# Use pytest.assertion.util._reprcompare if that's available.
expl_call = self.helper("call_reprcompare",
ast.Tuple(syms, ast.Load()),
ast.Tuple(load_names, ast.Load()),
ast.Tuple(expls, ast.Load()),
ast.Tuple(results, ast.Load()))
if len(comp.ops) > 1:
res = ast.BoolOp(ast.And(), load_names)
else:
res = load_names[0]
return res, self.explanation_param(self.pop_format_context(expl_call))
|
tmikov/jscomp
|
refs/heads/develop
|
runtime/deps/gyp/test/win/gyptest-link-embed-manifest.py
|
244
|
#!/usr/bin/env python
# Copyright (c) 2013 Yandex LLC. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure manifests are embedded in binaries properly. Handling of
AdditionalManifestFiles is tested too.
"""
import TestGyp
import sys
if sys.platform == 'win32':
import pywintypes
import win32api
import winerror
RT_MANIFEST = 24
class LoadLibrary(object):
"""Context manager for loading and releasing binaries in Windows.
Yields the handle of the binary loaded."""
def __init__(self, path):
self._path = path
self._handle = None
def __enter__(self):
self._handle = win32api.LoadLibrary(self._path)
return self._handle
def __exit__(self, type, value, traceback):
win32api.FreeLibrary(self._handle)
def extract_manifest(path, resource_name):
"""Reads manifest from |path| and returns it as a string.
Returns None if there is no such manifest."""
with LoadLibrary(path) as handle:
try:
return win32api.LoadResource(handle, RT_MANIFEST, resource_name)
except pywintypes.error as error:
if error.args[0] == winerror.ERROR_RESOURCE_DATA_NOT_FOUND:
return None
else:
raise
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
CHDIR = 'linker-flags'
test.run_gyp('embed-manifest.gyp', chdir=CHDIR)
test.build('embed-manifest.gyp', test.ALL, chdir=CHDIR)
# The following binaries must contain a manifest embedded.
test.fail_test(not extract_manifest(test.built_file_path(
'test_manifest_exe.exe', chdir=CHDIR), 1))
test.fail_test(not extract_manifest(test.built_file_path(
'test_manifest_exe_inc.exe', chdir=CHDIR), 1))
test.fail_test(not extract_manifest(test.built_file_path(
'test_manifest_dll.dll', chdir=CHDIR), 2))
test.fail_test(not extract_manifest(test.built_file_path(
'test_manifest_dll_inc.dll', chdir=CHDIR), 2))
# Must contain the Win7 support GUID, but not the Vista one (from
# extra2.manifest).
test.fail_test(
'35138b9a-5d96-4fbd-8e2d-a2440225f93a' not in
extract_manifest(test.built_file_path('test_manifest_extra1.exe',
chdir=CHDIR), 1))
test.fail_test(
'e2011457-1546-43c5-a5fe-008deee3d3f0' in
extract_manifest(test.built_file_path('test_manifest_extra1.exe',
chdir=CHDIR), 1))
# Must contain both.
test.fail_test(
'35138b9a-5d96-4fbd-8e2d-a2440225f93a' not in
extract_manifest(test.built_file_path('test_manifest_extra2.exe',
chdir=CHDIR), 1))
test.fail_test(
'e2011457-1546-43c5-a5fe-008deee3d3f0' not in
extract_manifest(test.built_file_path('test_manifest_extra2.exe',
chdir=CHDIR), 1))
# Same as extra2, but using list syntax instead.
test.fail_test(
'35138b9a-5d96-4fbd-8e2d-a2440225f93a' not in
extract_manifest(test.built_file_path('test_manifest_extra_list.exe',
chdir=CHDIR), 1))
test.fail_test(
'e2011457-1546-43c5-a5fe-008deee3d3f0' not in
extract_manifest(test.built_file_path('test_manifest_extra_list.exe',
chdir=CHDIR), 1))
# Test that incremental linking doesn't force manifest embedding.
test.fail_test(extract_manifest(test.built_file_path(
'test_manifest_exe_inc_no_embed.exe', chdir=CHDIR), 1))
test.pass_test()
|
sy0302/lammps_qtb
|
refs/heads/master
|
tools/moltemplate/src/nbody_alternate_symmetry/gaff_imp.py
|
19
|
from nbody_graph_search import Ugraph
# This file defines how improper interactions are generated in AMBER (GAFF).
# To use it, add "(gaff_imp.py)" to the name of the "Data Impropers By Type"
# section, and make sure this file is located in the "common" directory.
# For example:
# write_once("Data Impropers By Type (gaff_imp.py)") {
# ...
# }
# To find 4-body "improper" interactions,
# (by default, most of the time), we would use this subgraph:
# 0
# * 1st bond connects atoms 2 and 0
# | => 2nd bond connects atoms 2 and 1
# _.*._ 3rd bond connects atoms 2 and 3
# *' 2 `*
# 1 3
#
# In AMBER/GAFF, the central atom is the third atom ("2").
# http://archive.ambermd.org/201307/0519.html
# This differs from other force-fields.
# We take this detail into account in the line below:
bond_pattern = Ugraph([(2,0), (2,1), (2,3)])
# As with other force-fields, the improper-angle is the angle between the planes
# defined by the first three atoms (0,1,2) and last three atoms (1,2,3).
# (This is implemented in LAMMPS using an improper_style which requires
# that the atoms in the interaction will be listed in this order: 0,1,2,3.)
def canonical_order(match):
"""
Before defining a new interaction, we must check to see if an
interaction between these same 4 atoms has already been created
(perhaps listed in a different, but equivalent order).
If we don't check for this, we will create many unnecessary redundant
interactions (which can slow down the simulation).
To avoid this, I define a "canonical_order" function which sorts the atoms
and bonds in a way which is consistent with the symmetry of the interaction
being generated... Later the re-ordered list of atom and bond ids will be
tested against the list of atom/bond ids in the matches-found-so-far,
before it is added to the list of interactions found so far. Note that
the energy of an improper interaction is a function of the improper angle.
The improper-angle is usually defined as the angle between planes formed
by atoms 0,1,2 & 1,2,3. (Alternately, it is sometimes defined as the
angle between the 0,1,2 plane and atom 3.)
This angle does not change when swapping the OUTER pair of atoms (0 and 3)
(except for a change of sign, which does not matter since the energy functions
used are typically sign invariant. Furthermore, neither atom of the OUTER pair
is the central atom. There are 3!=6 ways of ordering the remaining 3 atoms.)
Consequently it does not make sense to define a separate 4-body improper-
interaction between atoms 0,1,2,3 AS WELL AS between 3,1,2,0.
So we sort the atoms and bonds so that the first atom always has
a lower atomID than the last atom. (Later we will check to see if we
have already defined an interaction between these 4 atoms. If not then
we create a new one.)
"""
atom0 = match[0][0]
atom1 = match[0][1]
atom2 = match[0][2]
atom3 = match[0][3]
# match[1][0:2] contains the ID numbers for the 3 bonds
bond0 = match[1][0]
bond1 = match[1][1]
bond2 = match[1][2]
if atom0 <= atom3:
#return ((atom0,atom1,atom2,atom3), (bond0, bond1, bond2))
# But this is the same thing as:
return match
else:
return ((atom3,atom1,atom2,atom0), (bond2,bond1,bond0))
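# Hedged illustration with hypothetical IDs: a match whose outer atoms are out
# of order is swapped (and its bond list reversed):
#     canonical_order((('a9', 'a2', 'a5', 'a1'), ('b1', 'b2', 'b3')))
#     -> (('a1', 'a2', 'a5', 'a9'), ('b3', 'b2', 'b1'))
# while an already-canonical match is returned unchanged.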
|
lgiordani/punch
|
refs/heads/master
|
punch/vcs_use_cases/release.py
|
1
|
from __future__ import print_function, absolute_import, division
from punch.vcs_use_cases import use_case
class VCSReleaseUseCase(use_case.VCSUseCase):
pass
|
mistio/libcloud
|
refs/heads/trunk
|
contrib/trigger_rtd_build.py
|
6
|
#!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import requests
# Old deprecated API
url = 'https://readthedocs.org/build/8284/'
r = requests.post(url)
print(r.text)
# New API (which doesn't appear to be working)
token = os.environ['RTD_TOKEN']
url = 'https://readthedocs.org/api/v2/webhook/libcloud/87656/'
r = requests.post(url, data={'token': token, 'branches': 'trunk'})
print(r.text)
|
p4datasystems/CarnotKE
|
refs/heads/master
|
jyhton/Lib/test/test_zlib_jy.py
|
23
|
"""Misc zlib tests
Made for Jython.
"""
import unittest
import zlib
from array import array
from test import test_support
class ArrayTestCase(unittest.TestCase):
def test_array(self):
self._test_array(zlib.compress, zlib.decompress)
def test_array_compressobj(self):
def compress(value):
co = zlib.compressobj()
return co.compress(value) + co.flush()
def decompress(value):
dco = zlib.decompressobj()
return dco.decompress(value) + dco.flush()
self._test_array(compress, decompress)
def _test_array(self, compress, decompress):
self.assertEqual(compress(array('c', 'jython')), compress('jython'))
intarray = array('i', range(5))
self.assertEqual(compress(intarray), compress(intarray.tostring()))
compressed = array('c', compress('jython'))
self.assertEqual('jython', decompress(compressed))
def test_main():
test_support.run_unittest(ArrayTestCase)
if __name__ == '__main__':
test_main()
|
christianurich/VIBe2UrbanSim
|
refs/heads/master
|
3rdparty/opus/src/opus_core/hierarchical_linear_utilities.py
|
2
|
# Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
from numpy import array, ones, zeros, where, compress  # array is used in run()
from opus_core.linear_utilities import linear_utilities
class hierarchical_linear_utilities(linear_utilities):
"""
Class for computing linear utilities in hierarchical models with multiple nests.
"""
def run(self, data, coefficients, resources=None):
"""
'data' is a 4D numpy array (nobservations x nequations x ncoefficients x number of nests)
and it can be created by InteractionDataset.create_logit_data(...).
'coefficients' is either a 1D array (ncoefficients + number of nests) used
for estimating, or a 3D array (nequations x nvariables x number of nests) used for simulating.
In the former case, the additional elements (number of nests) contain additional parameters
(not used in the utility computation, such as scaling parameters). In the latter case,
the additional parameters are extracted from the array. They correspond to variables called '__logsum'.
The method returns a tuple (u, mu) where u is
a 2D array of utilities (nobservations x number of elemental alternatives).
mu is an array of additional parameters from the coefficient arrays not used in the utility
computation.
The method calls its parent's run method (class linear_utilities) for each nest.
The class can be paired with the probabilities class opus_core.nl_probabilities.
"""
nobs, nalts, nvars, M = data.shape
result = zeros((nobs, nalts))
addpar = zeros(M)
if coefficients.ndim > 2:
coef_object = resources.get('specified_coefficients', None)
for nest in range(M):
d=data[:,:,:,nest]
if coefficients.ndim == 1:
coef = coefficients[0:nvars]
addpar[nest] = coefficients[nvars+nest]
elif coefficients.ndim == 3:
idx_logsum = where(array(coef_object.get_variable_names()) == '__logsum')[0]
coef = coefficients[:,:,nest]
filter = ones(coef.shape[1], dtype='bool8')
filter[idx_logsum]= False
coef = coef.compress(filter, axis=1)
d = d.compress(filter, axis=2)
addpar[nest] = coefficients[:,idx_logsum, nest].sum()
else:
raise StandardError, "Coefficients have wrong dimension."
u = linear_utilities.run(self, d, coef, resources)
result = result+u
return (result, addpar)
from opus_core.tests import opus_unittest
from numpy import array, repeat, reshape
from numpy import ma
class HierarchicalLinearUtilitiesTests(opus_unittest.OpusTestCase):
def test_hierarchical_linear_utilities_coef_1D(self):
data = array([[[[3,0], [5,0], [6,0], [5,0]], [[2,0], [1,0], [0,0], [0,0]], [[7,0], [2,0], [3,0], [5,0]]] + \
[[[0,3], [0,5], [0,6], [0,5]], [[0,2], [0,1], [0,0], [0,0]], [[0,7], [0,2], [0,3], [0,5]]],
[[[5,0], [1,0], [5,0], [2,0]], [[4,0], [7,0], [9,0], [2,0]], [[7,0], [2,0], [3,0], [5,0]]] + \
[[[0,5], [0,1], [0,5], [0,2]], [[0,4], [0,7], [0,9], [0,2]], [[0,7], [0,2], [0,1], [0,3]]]])
#data = repeat(reshape(data, list(data.shape)+[1]), repeats=2, axis=3)
coefficients = array([2.5, 1.2, 4, 9, 0, 1])
utilities, mu = hierarchical_linear_utilities().run(data, coefficients)
should_be1 = array([[ 82.5, 6.2, 76.9, 82.5, 6.2, 76.9],
[ 51.7, 72.4, 76.9, 51.7, 72.4, 50.9]])
should_be2 = (array([0, 1]))
self.assertEqual(ma.allclose(utilities, should_be1, rtol=1e-05),
True, msg = "Error in test_hierarchical_linear_utilities_2d_tree_structure (1)")
self.assertEqual(mu.size == should_be2.size,
True, msg = "Error in test_hierarchical_linear_utilities_2d_tree_structure (2)")
self.assertEqual(ma.allclose(mu, should_be2, rtol=1e-05),
True, msg = "Error in test_hierarchical_linear_utilities_2d_tree_structure (3)")
if __name__ == '__main__':
opus_unittest.main()
|
pydata/xarray
|
refs/heads/main
|
xarray/plot/plot.py
|
1
|
"""
Use this module directly:
import xarray.plot as xplt
Or use the methods on a DataArray or Dataset:
DataArray.plot._____
Dataset.plot._____
"""
import functools
import numpy as np
import pandas as pd
from .facetgrid import _easy_facetgrid
from .utils import (
_add_colorbar,
_assert_valid_xy,
_ensure_plottable,
_infer_interval_breaks,
_infer_xy_labels,
_process_cmap_cbar_kwargs,
_rescale_imshow_rgb,
_resolve_intervals_1dplot,
_resolve_intervals_2dplot,
_update_axes,
get_axis,
import_matplotlib_pyplot,
label_from_attrs,
)
def _infer_line_data(darray, x, y, hue):
ndims = len(darray.dims)
if x is not None and y is not None:
raise ValueError("Cannot specify both x and y kwargs for line plots.")
if x is not None:
_assert_valid_xy(darray, x, "x")
if y is not None:
_assert_valid_xy(darray, y, "y")
if ndims == 1:
huename = None
hueplt = None
huelabel = ""
if x is not None:
xplt = darray[x]
yplt = darray
elif y is not None:
xplt = darray
yplt = darray[y]
else: # Both x & y are None
dim = darray.dims[0]
xplt = darray[dim]
yplt = darray
else:
if x is None and y is None and hue is None:
raise ValueError("For 2D inputs, please specify either hue, x or y.")
if y is None:
if hue is not None:
_assert_valid_xy(darray, hue, "hue")
xname, huename = _infer_xy_labels(darray=darray, x=x, y=hue)
xplt = darray[xname]
if xplt.ndim > 1:
if huename in darray.dims:
otherindex = 1 if darray.dims.index(huename) == 0 else 0
otherdim = darray.dims[otherindex]
yplt = darray.transpose(otherdim, huename, transpose_coords=False)
xplt = xplt.transpose(otherdim, huename, transpose_coords=False)
else:
raise ValueError(
"For 2D inputs, hue must be a dimension"
" i.e. one of " + repr(darray.dims)
)
else:
(xdim,) = darray[xname].dims
(huedim,) = darray[huename].dims
yplt = darray.transpose(xdim, huedim)
else:
yname, huename = _infer_xy_labels(darray=darray, x=y, y=hue)
yplt = darray[yname]
if yplt.ndim > 1:
if huename in darray.dims:
otherindex = 1 if darray.dims.index(huename) == 0 else 0
otherdim = darray.dims[otherindex]
xplt = darray.transpose(otherdim, huename, transpose_coords=False)
yplt = yplt.transpose(otherdim, huename, transpose_coords=False)
else:
raise ValueError(
"For 2D inputs, hue must be a dimension"
" i.e. one of " + repr(darray.dims)
)
else:
(ydim,) = darray[yname].dims
(huedim,) = darray[huename].dims
xplt = darray.transpose(ydim, huedim)
huelabel = label_from_attrs(darray[huename])
hueplt = darray[huename]
return xplt, yplt, hueplt, huelabel
def plot(
darray,
row=None,
col=None,
col_wrap=None,
ax=None,
hue=None,
rtol=0.01,
subplot_kws=None,
**kwargs,
):
"""
Default plot of DataArray using :py:mod:`matplotlib:matplotlib.pyplot`.
Calls xarray plotting function based on the dimensions of
the squeezed DataArray.
=============== ===========================
Dimensions Plotting function
=============== ===========================
1 :py:func:`xarray.plot.line`
2 :py:func:`xarray.plot.pcolormesh`
Anything else :py:func:`xarray.plot.hist`
=============== ===========================
Parameters
----------
darray : DataArray
row : str, optional
If passed, make row faceted plots on this dimension name.
col : str, optional
If passed, make column faceted plots on this dimension name.
hue : str, optional
If passed, make faceted line plots with hue on this dimension name.
col_wrap : int, optional
Use together with ``col`` to wrap faceted plots.
ax : matplotlib axes object, optional
If ``None``, use the current axes. Not applicable when using facets.
rtol : float, optional
Relative tolerance used to determine if the indexes
are uniformly spaced. Usually a small positive number.
subplot_kws : dict, optional
Dictionary of keyword arguments for Matplotlib subplots
(see :py:meth:`matplotlib:matplotlib.figure.Figure.add_subplot`).
**kwargs : optional
Additional keyword arguments for Matplotlib.
See Also
--------
xarray.DataArray.squeeze
"""
darray = darray.squeeze().compute()
plot_dims = set(darray.dims)
plot_dims.discard(row)
plot_dims.discard(col)
plot_dims.discard(hue)
ndims = len(plot_dims)
error_msg = (
"Only 1d and 2d plots are supported for facets in xarray. "
"See the package `Seaborn` for more options."
)
if ndims in [1, 2]:
if row or col:
kwargs["subplot_kws"] = subplot_kws
kwargs["row"] = row
kwargs["col"] = col
kwargs["col_wrap"] = col_wrap
if ndims == 1:
plotfunc = line
kwargs["hue"] = hue
elif ndims == 2:
if hue:
plotfunc = line
kwargs["hue"] = hue
else:
plotfunc = pcolormesh
kwargs["subplot_kws"] = subplot_kws
else:
if row or col or hue:
raise ValueError(error_msg)
plotfunc = hist
kwargs["ax"] = ax
return plotfunc(darray, **kwargs)
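# Dispatch sketch (hypothetical data; assumes xarray and matplotlib are
# installed):
#
#     import numpy as np
#     import xarray as xr
#     da = xr.DataArray(np.random.rand(4, 6), dims=("y", "x"))
#     da.plot()            # 2 plottable dims -> xarray.plot.pcolormesh
#     da.isel(y=0).plot()  # 1 plottable dim  -> xarray.plot.line
#     da.plot(hue="y")     # 2d with hue      -> xarray.plot.line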
# This function signature should not change so that it can use
# matplotlib format strings
def line(
darray,
*args,
row=None,
col=None,
figsize=None,
aspect=None,
size=None,
ax=None,
hue=None,
x=None,
y=None,
xincrease=None,
yincrease=None,
xscale=None,
yscale=None,
xticks=None,
yticks=None,
xlim=None,
ylim=None,
add_legend=True,
_labels=True,
**kwargs,
):
"""
Line plot of DataArray values.
Wraps :py:func:`matplotlib:matplotlib.pyplot.plot`.
Parameters
----------
darray : DataArray
Either 1D or 2D. If 2D, one of ``hue``, ``x`` or ``y`` must be provided.
figsize : tuple, optional
A tuple (width, height) of the figure in inches.
Mutually exclusive with ``size`` and ``ax``.
aspect : scalar, optional
Aspect ratio of plot, so that ``aspect * size`` gives the *width* in
inches. Only used if a ``size`` is provided.
size : scalar, optional
If provided, create a new figure for the plot with the given size:
*height* (in inches) of each plot. See also: ``aspect``.
ax : matplotlib axes object, optional
        Axes on which to plot. By default, the current axes are used.
Mutually exclusive with ``size`` and ``figsize``.
hue : str, optional
Dimension or coordinate for which you want multiple lines plotted.
If plotting against a 2D coordinate, ``hue`` must be a dimension.
x, y : str, optional
Dimension, coordinate or multi-index level for *x*, *y* axis.
Only one of these may be specified.
The other will be used for values from the DataArray on which this
plot method is called.
xscale, yscale : {'linear', 'symlog', 'log', 'logit'}, optional
Specifies scaling for the *x*- and *y*-axis, respectively.
xticks, yticks : array-like, optional
Specify tick locations for *x*- and *y*-axis.
xlim, ylim : array-like, optional
Specify *x*- and *y*-axis limits.
xincrease : None, True, or False, optional
Should the values on the *x* axis be increasing from left to right?
        If ``None``, use the default for the Matplotlib function.
yincrease : None, True, or False, optional
Should the values on the *y* axis be increasing from top to bottom?
        If ``None``, use the default for the Matplotlib function.
add_legend : bool, optional
Add legend with *y* axis coordinates (2D inputs only).
*args, **kwargs : optional
Additional arguments to :py:func:`matplotlib:matplotlib.pyplot.plot`.
"""
# Handle facetgrids first
if row or col:
allargs = locals().copy()
allargs.update(allargs.pop("kwargs"))
allargs.pop("darray")
return _easy_facetgrid(darray, line, kind="line", **allargs)
ndims = len(darray.dims)
if ndims > 2:
raise ValueError(
"Line plots are for 1- or 2-dimensional DataArrays. "
"Passed DataArray has {ndims} "
"dimensions".format(ndims=ndims)
)
# The allargs dict passed to _easy_facetgrid above contains args
if args == ():
args = kwargs.pop("args", ())
else:
assert "args" not in kwargs
ax = get_axis(figsize, size, aspect, ax)
xplt, yplt, hueplt, hue_label = _infer_line_data(darray, x, y, hue)
# Remove pd.Intervals if contained in xplt.values and/or yplt.values.
xplt_val, yplt_val, x_suffix, y_suffix, kwargs = _resolve_intervals_1dplot(
xplt.values, yplt.values, kwargs
)
xlabel = label_from_attrs(xplt, extra=x_suffix)
ylabel = label_from_attrs(yplt, extra=y_suffix)
_ensure_plottable(xplt_val, yplt_val)
primitive = ax.plot(xplt_val, yplt_val, *args, **kwargs)
if _labels:
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
ax.set_title(darray._title_for_slice())
if darray.ndim == 2 and add_legend:
ax.legend(handles=primitive, labels=list(hueplt.values), title=hue_label)
# Rotate dates on xlabels
# Do this without calling autofmt_xdate so that x-axes ticks
# on other subplots (if any) are not deleted.
# https://stackoverflow.com/questions/17430105/autofmt-xdate-deletes-x-axis-labels-of-all-subplots
if np.issubdtype(xplt.dtype, np.datetime64):
for xlabels in ax.get_xticklabels():
xlabels.set_rotation(30)
xlabels.set_ha("right")
_update_axes(ax, xincrease, yincrease, xscale, yscale, xticks, yticks, xlim, ylim)
return primitive
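# Usage sketch (hypothetical data): a 2d array plotted with ``hue`` draws one
# line per coordinate value and adds a legend:
#
#     import numpy as np
#     import xarray as xr
#     da = xr.DataArray(np.random.rand(10, 3), dims=("time", "city"))
#     xr.plot.line(da, x="time", hue="city")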
def step(darray, *args, where="pre", drawstyle=None, ds=None, **kwargs):
"""
Step plot of DataArray values.
Similar to :py:func:`matplotlib:matplotlib.pyplot.step`.
Parameters
----------
where : {'pre', 'post', 'mid'}, default: 'pre'
Define where the steps should be placed:
- ``'pre'``: The y value is continued constantly to the left from
every *x* position, i.e. the interval ``(x[i-1], x[i]]`` has the
value ``y[i]``.
- ``'post'``: The y value is continued constantly to the right from
every *x* position, i.e. the interval ``[x[i], x[i+1])`` has the
value ``y[i]``.
- ``'mid'``: Steps occur half-way between the *x* positions.
Note that this parameter is ignored if one coordinate consists of
:py:class:`pandas.Interval` values, e.g. as a result of
:py:func:`xarray.Dataset.groupby_bins`. In this case, the actual
boundaries of the interval are used.
*args, **kwargs : optional
Additional arguments for :py:func:`xarray.plot.line`.
"""
if where not in {"pre", "post", "mid"}:
raise ValueError("'where' argument to step must be 'pre', 'post' or 'mid'")
if ds is not None:
if drawstyle is None:
drawstyle = ds
else:
raise TypeError("ds and drawstyle are mutually exclusive")
if drawstyle is None:
drawstyle = ""
drawstyle = "steps-" + where + drawstyle
return line(darray, *args, drawstyle=drawstyle, **kwargs)
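# step() only assembles the matplotlib drawstyle string and defers to line();
# e.g. where="mid" becomes drawstyle="steps-mid". A hypothetical call, where
# ``da1d`` is any 1d DataArray:
#
#     da1d.plot.step(where="mid")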
def hist(
darray,
figsize=None,
size=None,
aspect=None,
ax=None,
xincrease=None,
yincrease=None,
xscale=None,
yscale=None,
xticks=None,
yticks=None,
xlim=None,
ylim=None,
**kwargs,
):
"""
Histogram of DataArray.
Wraps :py:func:`matplotlib:matplotlib.pyplot.hist`.
Plots *N*-dimensional arrays by first flattening the array.
Parameters
----------
darray : DataArray
Can have any number of dimensions.
figsize : tuple, optional
A tuple (width, height) of the figure in inches.
Mutually exclusive with ``size`` and ``ax``.
aspect : scalar, optional
Aspect ratio of plot, so that ``aspect * size`` gives the *width* in
inches. Only used if a ``size`` is provided.
size : scalar, optional
If provided, create a new figure for the plot with the given size:
*height* (in inches) of each plot. See also: ``aspect``.
ax : matplotlib axes object, optional
Axes on which to plot. By default, use the current axes.
Mutually exclusive with ``size`` and ``figsize``.
**kwargs : optional
Additional keyword arguments to :py:func:`matplotlib:matplotlib.pyplot.hist`.
"""
ax = get_axis(figsize, size, aspect, ax)
no_nan = np.ravel(darray.values)
no_nan = no_nan[pd.notnull(no_nan)]
primitive = ax.hist(no_nan, **kwargs)
ax.set_title("Histogram")
ax.set_xlabel(label_from_attrs(darray))
_update_axes(ax, xincrease, yincrease, xscale, yscale, xticks, yticks, xlim, ylim)
return primitive
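# hist() flattens the array first, so any dimensionality works; a hypothetical
# call:
#
#     import numpy as np
#     import xarray as xr
#     da = xr.DataArray(np.random.randn(2, 3, 4))
#     xr.plot.hist(da, bins=20)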
# MUST run before any 2d plotting functions are defined since
# _plot2d decorator adds them as methods here.
class _PlotMethods:
"""
Enables use of xarray.plot functions as attributes on a DataArray.
For example, DataArray.plot.imshow
"""
__slots__ = ("_da",)
def __init__(self, darray):
self._da = darray
def __call__(self, **kwargs):
return plot(self._da, **kwargs)
# we can't use functools.wraps here since that also modifies the name / qualname
__doc__ = __call__.__doc__ = plot.__doc__
__call__.__wrapped__ = plot # type: ignore[attr-defined]
__call__.__annotations__ = plot.__annotations__
@functools.wraps(hist)
def hist(self, ax=None, **kwargs):
return hist(self._da, ax=ax, **kwargs)
@functools.wraps(line)
def line(self, *args, **kwargs):
return line(self._da, *args, **kwargs)
@functools.wraps(step)
def step(self, *args, **kwargs):
return step(self._da, *args, **kwargs)
def override_signature(f):
def wrapper(func):
func.__wrapped__ = f
return func
return wrapper
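# override_signature only sets ``__wrapped__``, which inspect.signature()
# follows by default; a sketch with hypothetical functions:
#
#     import inspect
#     def short(darray, x, y, **kwargs): ...
#     @override_signature(short)
#     def full(darray, x=None, y=None, figsize=None, ax=None): ...
#     inspect.signature(full)  # reports (darray, x, y, **kwargs)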
def _plot2d(plotfunc):
"""
Decorator for common 2d plotting logic
Also adds the 2d plot method to class _PlotMethods
"""
commondoc = """
Parameters
----------
darray : DataArray
Must be two-dimensional, unless creating faceted plots.
x : str, optional
Coordinate for *x* axis. If ``None``, use ``darray.dims[1]``.
y : str, optional
Coordinate for *y* axis. If ``None``, use ``darray.dims[0]``.
figsize : tuple, optional
A tuple (width, height) of the figure in inches.
Mutually exclusive with ``size`` and ``ax``.
aspect : scalar, optional
Aspect ratio of plot, so that ``aspect * size`` gives the *width* in
inches. Only used if a ``size`` is provided.
size : scalar, optional
If provided, create a new figure for the plot with the given size:
*height* (in inches) of each plot. See also: ``aspect``.
ax : matplotlib axes object, optional
Axes on which to plot. By default, use the current axes.
Mutually exclusive with ``size`` and ``figsize``.
row : string, optional
If passed, make row faceted plots on this dimension name.
col : string, optional
If passed, make column faceted plots on this dimension name.
col_wrap : int, optional
Use together with ``col`` to wrap faceted plots.
xscale, yscale : {'linear', 'symlog', 'log', 'logit'}, optional
Specifies scaling for the *x*- and *y*-axis, respectively.
xticks, yticks : array-like, optional
Specify tick locations for *x*- and *y*-axis.
xlim, ylim : array-like, optional
Specify *x*- and *y*-axis limits.
xincrease : None, True, or False, optional
Should the values on the *x* axis be increasing from left to right?
If ``None``, use the default for the Matplotlib function.
yincrease : None, True, or False, optional
Should the values on the *y* axis be increasing from top to bottom?
If ``None``, use the default for the Matplotlib function.
add_colorbar : bool, optional
Add colorbar to axes.
add_labels : bool, optional
Use xarray metadata to label axes.
norm : matplotlib.colors.Normalize, optional
If ``norm`` has ``vmin`` or ``vmax`` specified, the corresponding
kwarg must be ``None``.
vmin, vmax : float, optional
Values to anchor the colormap, otherwise they are inferred from the
data and other keyword arguments. When a diverging dataset is inferred,
setting one of these values will fix the other by symmetry around
``center``. Setting both values prevents use of a diverging colormap.
If discrete levels are provided as an explicit list, both of these
values are ignored.
cmap : matplotlib colormap name or colormap, optional
The mapping from data values to color space. If not provided, this
will be either be ``'viridis'`` (if the function infers a sequential
dataset) or ``'RdBu_r'`` (if the function infers a diverging dataset).
See :doc:`Choosing Colormaps in Matplotlib <matplotlib:tutorials/colors/colormaps>`
for more information.
If *seaborn* is installed, ``cmap`` may also be a
`seaborn color palette <https://seaborn.pydata.org/tutorial/color_palettes.html>`_.
Note: if ``cmap`` is a seaborn color palette and the plot type
is not ``'contour'`` or ``'contourf'``, ``levels`` must also be specified.
colors : str or array-like of color-like, optional
A single color or a sequence of colors. If the plot type is not ``'contour'``
or ``'contourf'``, the ``levels`` argument is required.
center : float, optional
The value at which to center the colormap. Passing this value implies
use of a diverging colormap. Setting it to ``False`` prevents use of a
diverging colormap.
robust : bool, optional
If ``True`` and ``vmin`` or ``vmax`` are absent, the colormap range is
computed with 2nd and 98th percentiles instead of the extreme values.
extend : {'neither', 'both', 'min', 'max'}, optional
How to draw arrows extending the colorbar beyond its limits. If not
provided, ``extend`` is inferred from ``vmin``, ``vmax`` and the data limits.
levels : int or array-like, optional
Split the colormap (``cmap``) into discrete color intervals. If an integer
is provided, "nice" levels are chosen based on the data range: this can
imply that the final number of levels is not exactly the expected one.
Setting ``vmin`` and/or ``vmax`` with ``levels=N`` is equivalent to
setting ``levels=np.linspace(vmin, vmax, N)``.
infer_intervals : bool, optional
Only applies to pcolormesh. If ``True``, the coordinate intervals are
passed to pcolormesh. If ``False``, the original coordinates are used
(this can be useful for certain map projections). The default is to
always infer intervals, unless the mesh is irregular and plotted on
a map projection.
subplot_kws : dict, optional
Dictionary of keyword arguments for Matplotlib subplots. Only used
for 2D and faceted plots.
(see :py:meth:`matplotlib:matplotlib.figure.Figure.add_subplot`).
cbar_ax : matplotlib axes object, optional
Axes in which to draw the colorbar.
cbar_kwargs : dict, optional
Dictionary of keyword arguments to pass to the colorbar
(see :meth:`matplotlib:matplotlib.figure.Figure.colorbar`).
**kwargs : optional
Additional keyword arguments to wrapped Matplotlib function.
Returns
-------
artist :
The same type of primitive artist that the wrapped Matplotlib
function returns.
"""
# Build on the original docstring
plotfunc.__doc__ = f"{plotfunc.__doc__}\n{commondoc}"
# plotfunc and newplotfunc have different signatures:
# - plotfunc: (x, y, z, ax, **kwargs)
# - newplotfunc: (darray, x, y, **kwargs)
# where plotfunc accepts numpy arrays, while newplotfunc accepts a DataArray
# and variable names. newplotfunc also explicitly lists most kwargs, so we
# need to shorten it
def signature(darray, x, y, **kwargs):
pass
@override_signature(signature)
@functools.wraps(plotfunc)
def newplotfunc(
darray,
x=None,
y=None,
figsize=None,
size=None,
aspect=None,
ax=None,
row=None,
col=None,
col_wrap=None,
xincrease=True,
yincrease=True,
add_colorbar=None,
add_labels=True,
vmin=None,
vmax=None,
cmap=None,
center=None,
robust=False,
extend=None,
levels=None,
infer_intervals=None,
colors=None,
subplot_kws=None,
cbar_ax=None,
cbar_kwargs=None,
xscale=None,
yscale=None,
xticks=None,
yticks=None,
xlim=None,
ylim=None,
norm=None,
**kwargs,
):
# All 2d plots in xarray share this function signature.
# Method signature below should be consistent.
# Decide on a default for the colorbar before facetgrids
if add_colorbar is None:
add_colorbar = True
if plotfunc.__name__ == "contour" or (
plotfunc.__name__ == "surface" and cmap is None
):
add_colorbar = False
imshow_rgb = plotfunc.__name__ == "imshow" and darray.ndim == (
3 + (row is not None) + (col is not None)
)
if imshow_rgb:
# Don't add a colorbar when showing an image with explicit colors
add_colorbar = False
# Matplotlib does not support normalising RGB data, so do it here.
# See eg. https://github.com/matplotlib/matplotlib/pull/10220
if robust or vmax is not None or vmin is not None:
darray = _rescale_imshow_rgb(darray, vmin, vmax, robust)
vmin, vmax, robust = None, None, False
if subplot_kws is None:
subplot_kws = dict()
if plotfunc.__name__ == "surface" and not kwargs.get("_is_facetgrid", False):
if ax is None:
# TODO: Importing Axes3D is no longer necessary in matplotlib >= 3.2.
# Remove when minimum requirement of matplotlib is 3.2:
from mpl_toolkits.mplot3d import Axes3D # type: ignore # noqa: F401
# delete so it does not end up in locals()
del Axes3D
# Need to create a "3d" Axes instance for surface plots
subplot_kws["projection"] = "3d"
# In facet grids, shared axis labels don't make sense for surface plots
sharex = False
sharey = False
# Handle facetgrids first
if row or col:
allargs = locals().copy()
del allargs["darray"]
del allargs["imshow_rgb"]
allargs.update(allargs.pop("kwargs"))
# Need the decorated plotting function
allargs["plotfunc"] = globals()[plotfunc.__name__]
return _easy_facetgrid(darray, kind="dataarray", **allargs)
plt = import_matplotlib_pyplot()
if (
plotfunc.__name__ == "surface"
and not kwargs.get("_is_facetgrid", False)
and ax is not None
):
import mpl_toolkits # type: ignore
if not isinstance(ax, mpl_toolkits.mplot3d.Axes3D):
raise ValueError(
"If ax is passed to surface(), it must be created with "
'projection="3d"'
)
rgb = kwargs.pop("rgb", None)
if rgb is not None and plotfunc.__name__ != "imshow":
raise ValueError('The "rgb" keyword is only valid for imshow()')
elif rgb is not None and not imshow_rgb:
raise ValueError(
'The "rgb" keyword is only valid for imshow()'
"with a three-dimensional array (per facet)"
)
xlab, ylab = _infer_xy_labels(
darray=darray, x=x, y=y, imshow=imshow_rgb, rgb=rgb
)
xval = darray[xlab]
yval = darray[ylab]
if xval.ndim > 1 or yval.ndim > 1 or plotfunc.__name__ == "surface":
# Passing 2d coordinate values, need to ensure they are transposed the same
# way as darray.
# Also surface plots always need 2d coordinates
xval = xval.broadcast_like(darray)
yval = yval.broadcast_like(darray)
dims = darray.dims
else:
dims = (yval.dims[0], xval.dims[0])
# better to pass the ndarrays directly to plotting functions
xval = xval.values
yval = yval.values
# May need to transpose for correct x, y labels
# xlab may be the name of a coord, we have to check for dim names
if imshow_rgb:
# For RGB[A] images, matplotlib requires the color dimension
# to be last. In Xarray the order should be unimportant, so
# we transpose to (y, x, color) to make this work.
yx_dims = (ylab, xlab)
dims = yx_dims + tuple(d for d in darray.dims if d not in yx_dims)
if dims != darray.dims:
darray = darray.transpose(*dims, transpose_coords=True)
# Pass the data as a masked ndarray too
zval = darray.to_masked_array(copy=False)
# Replace pd.Intervals if contained in xval or yval.
xplt, xlab_extra = _resolve_intervals_2dplot(xval, plotfunc.__name__)
yplt, ylab_extra = _resolve_intervals_2dplot(yval, plotfunc.__name__)
_ensure_plottable(xplt, yplt, zval)
cmap_params, cbar_kwargs = _process_cmap_cbar_kwargs(
plotfunc,
zval.data,
**locals(),
_is_facetgrid=kwargs.pop("_is_facetgrid", False),
)
if "contour" in plotfunc.__name__:
# extend is a keyword argument only for contour and contourf, but
# passing it to the colorbar is sufficient for imshow and
# pcolormesh
kwargs["extend"] = cmap_params["extend"]
kwargs["levels"] = cmap_params["levels"]
# if colors == a single color, matplotlib draws dashed negative
# contours. we lose this feature if we pass cmap and not colors
if isinstance(colors, str):
cmap_params["cmap"] = None
kwargs["colors"] = colors
if "pcolormesh" == plotfunc.__name__:
kwargs["infer_intervals"] = infer_intervals
if "imshow" == plotfunc.__name__ and isinstance(aspect, str):
# forbid usage of mpl strings
raise ValueError("plt.imshow's `aspect` kwarg is not available in xarray")
ax = get_axis(figsize, size, aspect, ax, **subplot_kws)
primitive = plotfunc(
xplt,
yplt,
zval,
ax=ax,
cmap=cmap_params["cmap"],
vmin=cmap_params["vmin"],
vmax=cmap_params["vmax"],
norm=cmap_params["norm"],
**kwargs,
)
# Label the plot with metadata
if add_labels:
ax.set_xlabel(label_from_attrs(darray[xlab], xlab_extra))
ax.set_ylabel(label_from_attrs(darray[ylab], ylab_extra))
ax.set_title(darray._title_for_slice())
if plotfunc.__name__ == "surface":
ax.set_zlabel(label_from_attrs(darray))
if add_colorbar:
if add_labels and "label" not in cbar_kwargs:
cbar_kwargs["label"] = label_from_attrs(darray)
cbar = _add_colorbar(primitive, ax, cbar_ax, cbar_kwargs, cmap_params)
elif cbar_ax is not None or cbar_kwargs:
# inform the user about keywords which aren't used
raise ValueError(
"cbar_ax and cbar_kwargs can't be used with add_colorbar=False."
)
# origin kwarg overrides yincrease
if "origin" in kwargs:
yincrease = None
_update_axes(
ax, xincrease, yincrease, xscale, yscale, xticks, yticks, xlim, ylim
)
# Rotate dates on xlabels
# Do this without calling autofmt_xdate so that x-axes ticks
# on other subplots (if any) are not deleted.
# https://stackoverflow.com/questions/17430105/autofmt-xdate-deletes-x-axis-labels-of-all-subplots
if np.issubdtype(xplt.dtype, np.datetime64):
for xlabels in ax.get_xticklabels():
xlabels.set_rotation(30)
xlabels.set_ha("right")
return primitive
# For use as DataArray.plot.plotmethod
@functools.wraps(newplotfunc)
def plotmethod(
_PlotMethods_obj,
x=None,
y=None,
figsize=None,
size=None,
aspect=None,
ax=None,
row=None,
col=None,
col_wrap=None,
xincrease=True,
yincrease=True,
add_colorbar=None,
add_labels=True,
vmin=None,
vmax=None,
cmap=None,
colors=None,
center=None,
robust=False,
extend=None,
levels=None,
infer_intervals=None,
subplot_kws=None,
cbar_ax=None,
cbar_kwargs=None,
xscale=None,
yscale=None,
xticks=None,
yticks=None,
xlim=None,
ylim=None,
norm=None,
**kwargs,
):
"""
The method should have the same signature as the function.
        This just makes the method work on _PlotMethods objects,
and passes all the other arguments straight through.
"""
allargs = locals()
allargs["darray"] = _PlotMethods_obj._da
allargs.update(kwargs)
for arg in ["_PlotMethods_obj", "newplotfunc", "kwargs"]:
del allargs[arg]
return newplotfunc(**allargs)
# Add to class _PlotMethods
setattr(_PlotMethods, plotmethod.__name__, plotmethod)
return newplotfunc
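# Each function decorated with @_plot2d below is usable two ways: as a
# module-level function taking a DataArray, or as a method added onto
# _PlotMethods; e.g. (hypothetical 2d ``da``):
#
#     xr.plot.pcolormesh(da)
#     da.plot.pcolormesh()   # equivalent accessor form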
@_plot2d
def imshow(x, y, z, ax, **kwargs):
"""
Image plot of 2D DataArray.
Wraps :py:func:`matplotlib:matplotlib.pyplot.imshow`.
While other plot methods require the DataArray to be strictly
two-dimensional, ``imshow`` also accepts a 3D array where some
dimension can be interpreted as RGB or RGBA color channels and
allows this dimension to be specified via the kwarg ``rgb=``.
Unlike :py:func:`matplotlib:matplotlib.pyplot.imshow`, which ignores ``vmin``/``vmax``
for RGB(A) data,
xarray *will* use ``vmin`` and ``vmax`` for RGB(A) data
by applying a single scaling factor and offset to all bands.
Passing ``robust=True`` infers ``vmin`` and ``vmax``
:ref:`in the usual way <robust-plotting>`.
.. note::
This function needs uniformly spaced coordinates to
properly label the axes. Call :py:meth:`DataArray.plot` to check.
The pixels are centered on the coordinates. For example, if the coordinate
value is 3.2, then the pixels for those coordinates will be centered on 3.2.
"""
if x.ndim != 1 or y.ndim != 1:
raise ValueError(
"imshow requires 1D coordinates, try using pcolormesh or contour(f)"
)
def _center_pixels(x):
"""Center the pixels on the coordinates."""
if np.issubdtype(x.dtype, str):
            # When using strings as inputs, imshow converts them to
            # integers. Choose extent values which put the indices
            # in the center of the pixels:
return 0 - 0.5, len(x) - 0.5
try:
# Center the pixels assuming uniform spacing:
xstep = 0.5 * (x[1] - x[0])
except IndexError:
# Arbitrary default value, similar to matplotlib behaviour:
xstep = 0.1
return x[0] - xstep, x[-1] + xstep
# Center the pixels:
left, right = _center_pixels(x)
top, bottom = _center_pixels(y)
defaults = {"origin": "upper", "interpolation": "nearest"}
if not hasattr(ax, "projection"):
# not for cartopy geoaxes
defaults["aspect"] = "auto"
# Allow user to override these defaults
defaults.update(kwargs)
if defaults["origin"] == "upper":
defaults["extent"] = [left, right, bottom, top]
else:
defaults["extent"] = [left, right, top, bottom]
if z.ndim == 3:
# matplotlib imshow uses black for missing data, but Xarray makes
# missing data transparent. We therefore add an alpha channel if
# there isn't one, and set it to transparent where data is masked.
if z.shape[-1] == 3:
alpha = np.ma.ones(z.shape[:2] + (1,), dtype=z.dtype)
if np.issubdtype(z.dtype, np.integer):
alpha *= 255
z = np.ma.concatenate((z, alpha), axis=2)
else:
z = z.copy()
z[np.any(z.mask, axis=-1), -1] = 0
primitive = ax.imshow(z, **defaults)
# If x or y are strings the ticklabels have been replaced with
# integer indices. Replace them back to strings:
for axis, v in [("x", x), ("y", y)]:
if np.issubdtype(v.dtype, str):
getattr(ax, f"set_{axis}ticks")(np.arange(len(v)))
getattr(ax, f"set_{axis}ticklabels")(v)
return primitive
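# RGB sketch (hypothetical data): a dimension of size 3 or 4 can be named via
# ``rgb=`` and is interpreted as color channels:
#
#     import numpy as np
#     import xarray as xr
#     img = xr.DataArray(np.random.rand(64, 64, 3), dims=("y", "x", "band"))
#     img.plot.imshow(rgb="band")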
@_plot2d
def contour(x, y, z, ax, **kwargs):
"""
Contour plot of 2D DataArray.
Wraps :py:func:`matplotlib:matplotlib.pyplot.contour`.
"""
primitive = ax.contour(x, y, z, **kwargs)
return primitive
@_plot2d
def contourf(x, y, z, ax, **kwargs):
"""
Filled contour plot of 2D DataArray.
Wraps :py:func:`matplotlib:matplotlib.pyplot.contourf`.
"""
primitive = ax.contourf(x, y, z, **kwargs)
return primitive
@_plot2d
def pcolormesh(x, y, z, ax, infer_intervals=None, **kwargs):
"""
Pseudocolor plot of 2D DataArray.
Wraps :py:func:`matplotlib:matplotlib.pyplot.pcolormesh`.
"""
# decide on a default for infer_intervals (GH781)
x = np.asarray(x)
if infer_intervals is None:
if hasattr(ax, "projection"):
if len(x.shape) == 1:
infer_intervals = True
else:
infer_intervals = False
else:
infer_intervals = True
if (
infer_intervals
and not np.issubdtype(x.dtype, str)
and (
(np.shape(x)[0] == np.shape(z)[1])
or ((x.ndim > 1) and (np.shape(x)[1] == np.shape(z)[1]))
)
):
if len(x.shape) == 1:
x = _infer_interval_breaks(x, check_monotonic=True)
else:
# we have to infer the intervals on both axes
x = _infer_interval_breaks(x, axis=1)
x = _infer_interval_breaks(x, axis=0)
if (
infer_intervals
and not np.issubdtype(y.dtype, str)
and (np.shape(y)[0] == np.shape(z)[0])
):
if len(y.shape) == 1:
y = _infer_interval_breaks(y, check_monotonic=True)
else:
# we have to infer the intervals on both axes
y = _infer_interval_breaks(y, axis=1)
y = _infer_interval_breaks(y, axis=0)
primitive = ax.pcolormesh(x, y, z, **kwargs)
# by default, pcolormesh picks "round" values for bounds
# this results in ugly looking plots with lots of surrounding whitespace
if not hasattr(ax, "projection") and x.ndim == 1 and y.ndim == 1:
# not a cartopy geoaxis
ax.set_xlim(x[0], x[-1])
ax.set_ylim(y[0], y[-1])
return primitive
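# _infer_interval_breaks turns N cell centers into N+1 edges so that each
# pcolormesh cell is centered on its coordinate; conceptually:
#
#     centers = np.array([0.0, 1.0, 2.0])
#     # breaks -> array([-0.5, 0.5, 1.5, 2.5])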
@_plot2d
def surface(x, y, z, ax, **kwargs):
"""
Surface plot of 2D DataArray.
Wraps :py:meth:`matplotlib:mpl_toolkits.mplot3d.axes3d.Axes3D.plot_surface`.
"""
primitive = ax.plot_surface(x, y, z, **kwargs)
return primitive
|
geodrinx/gearthview
|
refs/heads/master
|
ext-libs/twisted/words/xish/xmlstream.py
|
49
|
# -*- test-case-name: twisted.words.test.test_xmlstream -*-
#
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
XML Stream processing.
An XML Stream is defined as a connection over which two XML documents are
exchanged during the lifetime of the connection, one for each direction. The
unit of interaction is a direct child element of the root element (stanza).
The most prominent use of XML Streams is Jabber, but this module is generically
usable. See Twisted Words for Jabber specific protocol support.
Maintainer: Ralph Meijer
"""
from twisted.python import failure
from twisted.internet import protocol
from twisted.words.xish import domish, utility
STREAM_CONNECTED_EVENT = intern("//event/stream/connected")
STREAM_START_EVENT = intern("//event/stream/start")
STREAM_END_EVENT = intern("//event/stream/end")
STREAM_ERROR_EVENT = intern("//event/stream/error")
class XmlStream(protocol.Protocol, utility.EventDispatcher):
""" Generic Streaming XML protocol handler.
This protocol handler will parse incoming data as XML and dispatch events
accordingly. Incoming stanzas can be handled by registering observers using
XPath-like expressions that are matched against each stanza. See
L{utility.EventDispatcher} for details.
"""
def __init__(self):
utility.EventDispatcher.__init__(self)
self.stream = None
self.rawDataOutFn = None
self.rawDataInFn = None
def _initializeStream(self):
""" Sets up XML Parser. """
self.stream = domish.elementStream()
self.stream.DocumentStartEvent = self.onDocumentStart
self.stream.ElementEvent = self.onElement
self.stream.DocumentEndEvent = self.onDocumentEnd
### --------------------------------------------------------------
###
### Protocol events
###
### --------------------------------------------------------------
def connectionMade(self):
""" Called when a connection is made.
Sets up the XML parser and dispatches the L{STREAM_CONNECTED_EVENT}
event indicating the connection has been established.
"""
self._initializeStream()
self.dispatch(self, STREAM_CONNECTED_EVENT)
def dataReceived(self, data):
""" Called whenever data is received.
Passes the data to the XML parser. This can result in calls to the
DOM handlers. If a parse error occurs, the L{STREAM_ERROR_EVENT} event
is called to allow for cleanup actions, followed by dropping the
connection.
"""
try:
if self.rawDataInFn:
self.rawDataInFn(data)
self.stream.parse(data)
except domish.ParserError:
self.dispatch(failure.Failure(), STREAM_ERROR_EVENT)
self.transport.loseConnection()
def connectionLost(self, reason):
""" Called when the connection is shut down.
Dispatches the L{STREAM_END_EVENT}.
"""
self.dispatch(reason, STREAM_END_EVENT)
self.stream = None
### --------------------------------------------------------------
###
### DOM events
###
### --------------------------------------------------------------
def onDocumentStart(self, rootElement):
""" Called whenever the start tag of a root element has been received.
Dispatches the L{STREAM_START_EVENT}.
"""
self.dispatch(self, STREAM_START_EVENT)
def onElement(self, element):
""" Called whenever a direct child element of the root element has
been received.
Dispatches the received element.
"""
self.dispatch(element)
def onDocumentEnd(self):
""" Called whenever the end tag of the root element has been received.
Closes the connection. This causes C{connectionLost} being called.
"""
self.transport.loseConnection()
def setDispatchFn(self, fn):
""" Set another function to handle elements. """
self.stream.ElementEvent = fn
def resetDispatchFn(self):
""" Set the default function (C{onElement}) to handle elements. """
self.stream.ElementEvent = self.onElement
def send(self, obj):
""" Send data over the stream.
        Sends the given C{obj} over the connection. C{obj} may be an instance
        of L{domish.Element}, C{unicode} or C{str}. The first two will be
properly serialized and/or encoded. C{str} objects must be in UTF-8
encoding.
Note: because it is easy to make mistakes in maintaining a properly
encoded C{str} object, it is advised to use C{unicode} objects
everywhere when dealing with XML Streams.
@param obj: Object to be sent over the stream.
        @type obj: L{domish.Element}, C{unicode} or C{str}
"""
if domish.IElement.providedBy(obj):
obj = obj.toXml()
if isinstance(obj, unicode):
obj = obj.encode('utf-8')
if self.rawDataOutFn:
self.rawDataOutFn(obj)
self.transport.write(obj)
class BootstrapMixin(object):
"""
XmlStream factory mixin to install bootstrap event observers.
This mixin is for factories providing
L{IProtocolFactory<twisted.internet.interfaces.IProtocolFactory>} to make
sure bootstrap event observers are set up on protocols, before incoming
data is processed. Such protocols typically derive from
L{utility.EventDispatcher}, like L{XmlStream}.
You can set up bootstrap event observers using C{addBootstrap}. The
C{event} and C{fn} parameters correspond with the C{event} and
C{observerfn} arguments to L{utility.EventDispatcher.addObserver}.
@since: 8.2.
@ivar bootstraps: The list of registered bootstrap event observers.
    @type bootstraps: C{list}
"""
def __init__(self):
self.bootstraps = []
def installBootstraps(self, dispatcher):
"""
Install registered bootstrap observers.
@param dispatcher: Event dispatcher to add the observers to.
@type dispatcher: L{utility.EventDispatcher}
"""
for event, fn in self.bootstraps:
dispatcher.addObserver(event, fn)
def addBootstrap(self, event, fn):
"""
Add a bootstrap event handler.
@param event: The event to register an observer for.
@type event: C{str} or L{xpath.XPathQuery}
@param fn: The observer callable to be registered.
"""
self.bootstraps.append((event, fn))
def removeBootstrap(self, event, fn):
"""
Remove a bootstrap event handler.
@param event: The event the observer is registered for.
@type event: C{str} or L{xpath.XPathQuery}
@param fn: The registered observer callable.
"""
self.bootstraps.remove((event, fn))
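# Usage sketch (hypothetical observer; the factory class is defined below):
#
#     def onStart(xs):
#         print "stream started"
#     factory = XmlStreamFactory()
#     factory.addBootstrap(STREAM_START_EVENT, onStart)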
class XmlStreamFactoryMixin(BootstrapMixin):
"""
XmlStream factory mixin that takes care of event handlers.
All positional and keyword arguments passed to create this factory are
passed on as-is to the protocol.
@ivar args: Positional arguments passed to the protocol upon instantiation.
@type args: C{tuple}.
@ivar kwargs: Keyword arguments passed to the protocol upon instantiation.
@type kwargs: C{dict}.
"""
def __init__(self, *args, **kwargs):
BootstrapMixin.__init__(self)
self.args = args
self.kwargs = kwargs
def buildProtocol(self, addr):
"""
Create an instance of XmlStream.
The returned instance will have bootstrap event observers registered
and will proceed to handle input on an incoming connection.
"""
xs = self.protocol(*self.args, **self.kwargs)
xs.factory = self
self.installBootstraps(xs)
return xs
class XmlStreamFactory(XmlStreamFactoryMixin,
protocol.ReconnectingClientFactory):
"""
Factory for XmlStream protocol objects as a reconnection client.
"""
protocol = XmlStream
def buildProtocol(self, addr):
"""
Create a protocol instance.
Overrides L{XmlStreamFactoryMixin.buildProtocol} to work with
        a L{ReconnectingClientFactory}. As this is called upon a connection
        being established, we reset the delay for reconnection attempts for
        when the connection is lost again.
"""
self.resetDelay()
return XmlStreamFactoryMixin.buildProtocol(self, addr)
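# Wiring sketch (host and port are placeholders):
#
#     from twisted.internet import reactor
#     factory = XmlStreamFactory()
#     reactor.connectTCP("xmpp.example.org", 5222, factory)
#     reactor.run()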
|
svisser/cookiecutter
|
refs/heads/master
|
cookiecutter/config.py
|
3
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
cookiecutter.config
-------------------
Global configuration handling
"""
from __future__ import unicode_literals
import copy
import os
import io
import yaml
from .exceptions import ConfigDoesNotExistException
from .exceptions import InvalidConfiguration
DEFAULT_CONFIG = {
'cookiecutters_dir': os.path.expanduser('~/.cookiecutters/'),
'default_context': {}
}
def get_config(config_path):
"""
Retrieve the config from the specified path, returning it as a config dict.
"""
if not os.path.exists(config_path):
raise ConfigDoesNotExistException
print("config_path is {0}".format(config_path))
with io.open(config_path, encoding="utf-8") as file_handle:
try:
yaml_dict = yaml.safe_load(file_handle)
except yaml.scanner.ScannerError:
raise InvalidConfiguration(
"%s is no a valid YAML file" % config_path)
config_dict = copy.copy(DEFAULT_CONFIG)
config_dict.update(yaml_dict)
return config_dict
def get_user_config():
"""
Retrieve config from the user's ~/.cookiecutterrc, if it exists.
Otherwise, return None.
"""
# TODO: test on windows...
USER_CONFIG_PATH = os.path.expanduser('~/.cookiecutterrc')
if os.path.exists(USER_CONFIG_PATH):
return get_config(USER_CONFIG_PATH)
return copy.copy(DEFAULT_CONFIG)
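# Usage sketch: get_user_config() falls back to a copy of DEFAULT_CONFIG when
# no ~/.cookiecutterrc exists, so callers can index the result directly:
#
#     config = get_user_config()
#     template_dir = config['cookiecutters_dir']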
|
fxfitz/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/cumulus/_cl_license.py
|
33
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Cumulus Networks <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cl_license
version_added: "2.1"
author: "Cumulus Networks (@CumulusNetworks)"
short_description: Install licenses for Cumulus Linux
deprecated:
why: The M(nclu) module is designed to be easier to use for individuals who are new to Cumulus Linux by exposing the NCLU interface in an automatable way.
removed_in: "2.5"
alternative: Use M(nclu) instead.
description:
- Installs a Cumulus Linux license. The module reports no change of status
when a license is installed.
      For more details, go to the Cumulus Linux License Documentation at
      U(http://docs.cumulusnetworks.com) and the Licensing KB Site at
U(https://support.cumulusnetworks.com/hc/en-us/sections/200507688)
notes:
- To activate a license for the FIRST time, the switchd service must be
restarted. This action is disruptive. The license renewal process occurs
via the Cumulus Networks Customer Portal -
U(http://customers.cumulusnetworks.com).
- A non-EULA license is REQUIRED for automation. Manually install the
license on a test switch, using the command "cl-license -i <license_file>"
to confirm the license is a Non-EULA license.
See EXAMPLES, for the proper way to issue this notify action.
options:
src:
description:
- The full path to the license. Can be local path or HTTP URL.
required: true
force:
description:
- Force installation of a license. Typically not needed.
It is recommended to manually run this command via the ansible
command. A reload of switchd is not required. Running the force
option in a playbook will break the idempotent state machine of
the module and cause the switchd notification to kick in all the
time, causing a disruption.
choices:
- yes
- no
'''
EXAMPLES = '''
# Example playbook using the cl_license module to manage licenses on Cumulus Linux
- hosts: all
tasks:
- name: install license using http url
cl_license:
src: http://10.1.1.1/license.txt
notify: restart switchd
    - name: Triggers switchd to be restarted right away, before the play or
        role is over. This is the desired behaviour
meta: flush_handlers
- name: Configure interfaces
template:
src: interfaces.j2
dest: /etc/network/interfaces
notify: restart networking
handlers:
- name: restart switchd
service:
name: switchd
state: restarted
- name: restart networking
service:
name: networking
state: reloaded
# Force all switches to accept a new license. Typically not needed
# ansible -m cl_license -a "src='http://10.1.1.1/new_lic' force=yes" -u root all
'''
RETURN = '''
changed:
description: whether the interface was changed
returned: changed
type: bool
sample: True
msg:
description: human-readable report of success or failure
returned: always
type: string
sample: "interface bond0 config updated"
'''
from ansible.module_utils.common.removed import removed_module
if __name__ == '__main__':
removed_module()
|
dhxkgozj/DirEngine
|
refs/heads/master
|
lib/capstone/bindings/python/capstone/arm64.py
|
9
|
# Capstone Python bindings, by Nguyen Anh Quynh <[email protected]>
import ctypes
from . import copy_ctypes_list
from .arm64_const import *
# define the API
class Arm64OpMem(ctypes.Structure):
_fields_ = (
('base', ctypes.c_uint),
('index', ctypes.c_uint),
('disp', ctypes.c_int32),
)
class Arm64OpShift(ctypes.Structure):
_fields_ = (
('type', ctypes.c_uint),
('value', ctypes.c_uint),
)
class Arm64OpValue(ctypes.Union):
_fields_ = (
('reg', ctypes.c_uint),
('imm', ctypes.c_int64),
('fp', ctypes.c_double),
('mem', Arm64OpMem),
('pstate', ctypes.c_int),
('sys', ctypes.c_uint),
('prefetch', ctypes.c_int),
('barrier', ctypes.c_int),
)
class Arm64Op(ctypes.Structure):
_fields_ = (
('vector_index', ctypes.c_int),
('vas', ctypes.c_int),
('vess', ctypes.c_int),
('shift', Arm64OpShift),
('ext', ctypes.c_uint),
('type', ctypes.c_uint),
('value', Arm64OpValue),
)
@property
def imm(self):
return self.value.imm
@property
def reg(self):
return self.value.reg
@property
def fp(self):
return self.value.fp
@property
def mem(self):
return self.value.mem
@property
def pstate(self):
return self.value.pstate
@property
def sys(self):
return self.value.sys
@property
def prefetch(self):
return self.value.prefetch
@property
def barrier(self):
return self.value.barrier
class CsArm64(ctypes.Structure):
_fields_ = (
('cc', ctypes.c_uint),
('update_flags', ctypes.c_bool),
('writeback', ctypes.c_bool),
('op_count', ctypes.c_uint8),
('operands', Arm64Op * 8),
)
def get_arch_info(a):
return (a.cc, a.update_flags, a.writeback, copy_ctypes_list(a.operands[:a.op_count]))
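# Access sketch (assumes capstone is installed and detail mode is enabled;
# the bytes shown are a placeholder AArch64 instruction):
#
#     from capstone import Cs, CS_ARCH_ARM64, CS_MODE_ARM
#     md = Cs(CS_ARCH_ARM64, CS_MODE_ARM)
#     md.detail = True
#     for insn in md.disasm(b"\x00\x04\x00\x91", 0x1000):
#         ops = insn.operands  # list of Arm64Op as defined above
#         print(insn.mnemonic, insn.op_str, len(ops))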
|
Cinntax/home-assistant
|
refs/heads/dev
|
homeassistant/components/recollect_waste/__init__.py
|
27
|
"""The recollect_waste component."""
|
kmonsoor/python-for-android
|
refs/heads/master
|
python3-alpha/python3-src/Doc/includes/sqlite3/simple_tableprinter.py
|
96
|
import sqlite3
FIELD_MAX_WIDTH = 20
TABLE_NAME = 'people'
SELECT = 'select * from %s order by age, name_last' % TABLE_NAME
con = sqlite3.connect("mydb")
cur = con.cursor()
cur.execute(SELECT)
# Print a header.
for fieldDesc in cur.description:
print(fieldDesc[0].ljust(FIELD_MAX_WIDTH), end=' ')
print() # Finish the header with a newline.
print('-' * 78)
# For each row, print the value of each field left-justified within
# the maximum possible width of that field.
fieldIndices = range(len(cur.description))
for row in cur:
for fieldIndex in fieldIndices:
fieldValue = str(row[fieldIndex])
print(fieldValue.ljust(FIELD_MAX_WIDTH), end=' ')
print() # Finish the row with a newline.
|
7kbird/chrome
|
refs/heads/master
|
third_party/cython/src/Cython/Compiler/Visitor.py
|
90
|
# cython: infer_types=True
#
# Tree visitor and transform framework
#
import inspect
from Cython.Compiler import TypeSlots
from Cython.Compiler import Builtin
from Cython.Compiler import Nodes
from Cython.Compiler import ExprNodes
from Cython.Compiler import Errors
from Cython.Compiler import DebugFlags
import cython
class TreeVisitor(object):
"""
Base class for writing visitors for a Cython tree, contains utilities for
recursing such trees using visitors. Each node is
expected to have a child_attrs iterable containing the names of attributes
containing child nodes or lists of child nodes. Lists are not considered
part of the tree structure (i.e. contained nodes are considered direct
children of the parent node).
visit_children visits each of the children of a given node (see the visit_children
documentation). When recursing the tree using visit_children, an attribute
access_path is maintained which gives information about the current location
in the tree as a stack of tuples: (parent_node, attrname, index), representing
the node, attribute and optional list index that was taken in each step in the path to
the current node.
Example:
>>> class SampleNode(object):
... child_attrs = ["head", "body"]
... def __init__(self, value, head=None, body=None):
... self.value = value
... self.head = head
... self.body = body
... def __repr__(self): return "SampleNode(%s)" % self.value
...
>>> tree = SampleNode(0, SampleNode(1), [SampleNode(2), SampleNode(3)])
>>> class MyVisitor(TreeVisitor):
... def visit_SampleNode(self, node):
... print "in", node.value, self.access_path
... self.visitchildren(node)
... print "out", node.value
...
>>> MyVisitor().visit(tree)
in 0 []
in 1 [(SampleNode(0), 'head', None)]
out 1
in 2 [(SampleNode(0), 'body', 0)]
out 2
in 3 [(SampleNode(0), 'body', 1)]
out 3
out 0
"""
def __init__(self):
super(TreeVisitor, self).__init__()
self.dispatch_table = {}
self.access_path = []
def dump_node(self, node, indent=0):
ignored = list(node.child_attrs or []) + [u'child_attrs', u'pos',
u'gil_message', u'cpp_message',
u'subexprs']
values = []
pos = getattr(node, 'pos', None)
if pos:
source = pos[0]
if source:
import os.path
source = os.path.basename(source.get_description())
values.append(u'%s:%s:%s' % (source, pos[1], pos[2]))
attribute_names = dir(node)
attribute_names.sort()
for attr in attribute_names:
if attr in ignored:
continue
if attr.startswith(u'_') or attr.endswith(u'_'):
continue
try:
value = getattr(node, attr)
except AttributeError:
continue
if value is None or value == 0:
continue
elif isinstance(value, list):
value = u'[...]/%d' % len(value)
elif not isinstance(value, (str, unicode, long, int, float)):
continue
else:
value = repr(value)
values.append(u'%s = %s' % (attr, value))
return u'%s(%s)' % (node.__class__.__name__,
u',\n '.join(values))
def _find_node_path(self, stacktrace):
import os.path
last_traceback = stacktrace
nodes = []
while hasattr(stacktrace, 'tb_frame'):
frame = stacktrace.tb_frame
node = frame.f_locals.get(u'self')
if isinstance(node, Nodes.Node):
code = frame.f_code
method_name = code.co_name
pos = (os.path.basename(code.co_filename),
frame.f_lineno)
nodes.append((node, method_name, pos))
last_traceback = stacktrace
stacktrace = stacktrace.tb_next
return (last_traceback, nodes)
def _raise_compiler_error(self, child, e):
import sys
trace = ['']
for parent, attribute, index in self.access_path:
node = getattr(parent, attribute)
if index is None:
index = ''
else:
node = node[index]
index = u'[%d]' % index
trace.append(u'%s.%s%s = %s' % (
parent.__class__.__name__, attribute, index,
self.dump_node(node)))
stacktrace, called_nodes = self._find_node_path(sys.exc_info()[2])
last_node = child
for node, method_name, pos in called_nodes:
last_node = node
trace.append(u"File '%s', line %d, in %s: %s" % (
pos[0], pos[1], method_name, self.dump_node(node)))
raise Errors.CompilerCrash(
getattr(last_node, 'pos', None), self.__class__.__name__,
u'\n'.join(trace), e, stacktrace)
@cython.final
def find_handler(self, obj):
# to resolve, try entire hierarchy
cls = type(obj)
pattern = "visit_%s"
mro = inspect.getmro(cls)
handler_method = None
for mro_cls in mro:
handler_method = getattr(self, pattern % mro_cls.__name__, None)
if handler_method is not None:
return handler_method
print type(self), cls
if self.access_path:
print self.access_path
print self.access_path[-1][0].pos
print self.access_path[-1][0].__dict__
raise RuntimeError("Visitor %r does not accept object: %s" % (self, obj))
def visit(self, obj):
return self._visit(obj)
@cython.final
def _visit(self, obj):
try:
try:
handler_method = self.dispatch_table[type(obj)]
except KeyError:
handler_method = self.find_handler(obj)
self.dispatch_table[type(obj)] = handler_method
return handler_method(obj)
except Errors.CompileError:
raise
except Errors.AbortError:
raise
except Exception, e:
if DebugFlags.debug_no_exception_intercept:
raise
self._raise_compiler_error(obj, e)
@cython.final
def _visitchild(self, child, parent, attrname, idx):
self.access_path.append((parent, attrname, idx))
result = self._visit(child)
self.access_path.pop()
return result
def visitchildren(self, parent, attrs=None):
return self._visitchildren(parent, attrs)
@cython.final
@cython.locals(idx=int)
def _visitchildren(self, parent, attrs):
"""
Visits the children of the given parent. If parent is None, returns
immediately (returning None).
The return value is a dictionary giving the results for each
child (mapping the attribute name to either the return value
or a list of return values (in the case of multiple children
in an attribute)).
"""
if parent is None: return None
result = {}
for attr in parent.child_attrs:
if attrs is not None and attr not in attrs: continue
child = getattr(parent, attr)
if child is not None:
if type(child) is list:
childretval = [self._visitchild(x, parent, attr, idx) for idx, x in enumerate(child)]
else:
childretval = self._visitchild(child, parent, attr, None)
assert not isinstance(childretval, list), 'Cannot insert list here: %s in %r' % (attr, parent)
result[attr] = childretval
return result
class VisitorTransform(TreeVisitor):
"""
    A tree transform is a base class for visitors that want to do stream
processing of the structure (rather than attributes etc.) of a tree.
It implements __call__ to simply visit the argument node.
It requires the visitor methods to return the nodes which should take
    the place of the visited node in the result tree (which can be the same
    node, or one or more replacement nodes). Specifically, if the return
    value from a visitor method is:
- [] or None; the visited node will be removed (set to None if an attribute and
removed if in a list)
- A single node; the visited node will be replaced by the returned node.
- A list of nodes; the visited nodes will be replaced by all the nodes in the
list. This will only work if the node was already a member of a list; if it
was not, an exception will be raised. (Typically you want to ensure that you
are within a StatListNode or similar before doing this.)
"""
def visitchildren(self, parent, attrs=None):
result = self._visitchildren(parent, attrs)
for attr, newnode in result.iteritems():
if type(newnode) is not list:
setattr(parent, attr, newnode)
else:
# Flatten the list one level and remove any None
newlist = []
for x in newnode:
if x is not None:
if type(x) is list:
newlist += x
else:
newlist.append(x)
setattr(parent, attr, newlist)
return result
def recurse_to_children(self, node):
self.visitchildren(node)
return node
def __call__(self, root):
return self._visit(root)
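# A minimal transform sketch: returning [] removes the node from its
# containing list, per the contract above (the node class name is
# illustrative):
#
#     class DropPasses(VisitorTransform):
#         def visit_PassStatNode(self, node):
#             return []
#         def visit_Node(self, node):
#             self.visitchildren(node)
#             return node
#     tree = DropPasses()(tree)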
class CythonTransform(VisitorTransform):
"""
Certain common conventions and utilities for Cython transforms.
- Sets up the context of the pipeline in self.context
- Tracks directives in effect in self.current_directives
"""
def __init__(self, context):
super(CythonTransform, self).__init__()
self.context = context
def __call__(self, node):
import ModuleNode
if isinstance(node, ModuleNode.ModuleNode):
self.current_directives = node.directives
return super(CythonTransform, self).__call__(node)
def visit_CompilerDirectivesNode(self, node):
old = self.current_directives
self.current_directives = node.directives
self.visitchildren(node)
self.current_directives = old
return node
def visit_Node(self, node):
self.visitchildren(node)
return node
class ScopeTrackingTransform(CythonTransform):
# Keeps track of type of scopes
#scope_type: can be either of 'module', 'function', 'cclass', 'pyclass', 'struct'
#scope_node: the node that owns the current scope
def visit_ModuleNode(self, node):
self.scope_type = 'module'
self.scope_node = node
self.visitchildren(node)
return node
def visit_scope(self, node, scope_type):
prev = self.scope_type, self.scope_node
self.scope_type = scope_type
self.scope_node = node
self.visitchildren(node)
self.scope_type, self.scope_node = prev
return node
def visit_CClassDefNode(self, node):
return self.visit_scope(node, 'cclass')
def visit_PyClassDefNode(self, node):
return self.visit_scope(node, 'pyclass')
def visit_FuncDefNode(self, node):
return self.visit_scope(node, 'function')
def visit_CStructOrUnionDefNode(self, node):
return self.visit_scope(node, 'struct')
class EnvTransform(CythonTransform):
"""
This transformation keeps a stack of the environments.
"""
def __call__(self, root):
self.env_stack = []
self.enter_scope(root, root.scope)
return super(EnvTransform, self).__call__(root)
def current_env(self):
return self.env_stack[-1][1]
def current_scope_node(self):
return self.env_stack[-1][0]
def global_scope(self):
return self.current_env().global_scope()
def enter_scope(self, node, scope):
self.env_stack.append((node, scope))
def exit_scope(self):
self.env_stack.pop()
def visit_FuncDefNode(self, node):
self.enter_scope(node, node.local_scope)
self.visitchildren(node)
self.exit_scope()
return node
def visit_GeneratorBodyDefNode(self, node):
self.visitchildren(node)
return node
def visit_ClassDefNode(self, node):
self.enter_scope(node, node.scope)
self.visitchildren(node)
self.exit_scope()
return node
def visit_CStructOrUnionDefNode(self, node):
self.enter_scope(node, node.scope)
self.visitchildren(node)
self.exit_scope()
return node
def visit_ScopedExprNode(self, node):
if node.expr_scope:
self.enter_scope(node, node.expr_scope)
self.visitchildren(node)
self.exit_scope()
else:
self.visitchildren(node)
return node
def visit_CArgDeclNode(self, node):
# default arguments are evaluated in the outer scope
if node.default:
attrs = [ attr for attr in node.child_attrs if attr != 'default' ]
self.visitchildren(node, attrs)
self.enter_scope(node, self.current_env().outer_scope)
self.visitchildren(node, ('default',))
self.exit_scope()
else:
self.visitchildren(node)
return node
class NodeRefCleanupMixin(object):
"""
Clean up references to nodes that were replaced.
NOTE: this implementation assumes that the replacement is
done first, before hitting any further references during
normal tree traversal. This needs to be arranged by calling
"self.visitchildren()" at a proper place in the transform
and by ordering the "child_attrs" of nodes appropriately.
"""
def __init__(self, *args):
super(NodeRefCleanupMixin, self).__init__(*args)
self._replacements = {}
def visit_CloneNode(self, node):
arg = node.arg
if arg not in self._replacements:
self.visitchildren(node)
arg = node.arg
node.arg = self._replacements.get(arg, arg)
return node
def visit_ResultRefNode(self, node):
expr = node.expression
if expr is None or expr not in self._replacements:
self.visitchildren(node)
expr = node.expression
if expr is not None:
node.expression = self._replacements.get(expr, expr)
return node
def replace(self, node, replacement):
self._replacements[node] = replacement
return replacement
find_special_method_for_binary_operator = {
'<': '__lt__',
'<=': '__le__',
'==': '__eq__',
'!=': '__ne__',
'>=': '__ge__',
'>': '__gt__',
'+': '__add__',
'&': '__and__',
'/': '__truediv__',
'//': '__floordiv__',
'<<': '__lshift__',
'%': '__mod__',
'*': '__mul__',
'|': '__or__',
'**': '__pow__',
'>>': '__rshift__',
'-': '__sub__',
'^': '__xor__',
'in': '__contains__',
}.get
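# These are plain dict lookups that return None for unknown operators, e.g.
#
#     find_special_method_for_binary_operator('+')   # -> '__add__'
#     find_special_method_for_binary_operator('@')   # -> None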
find_special_method_for_unary_operator = {
'not': '__not__',
'~': '__inv__',
'-': '__neg__',
'+': '__pos__',
}.get
class MethodDispatcherTransform(EnvTransform):
"""
Base class for transformations that want to intercept on specific
builtin functions or methods of builtin types, including special
methods triggered by Python operators. Must run after declaration
analysis when entries were assigned.
Naming pattern for handler methods is as follows:
* builtin functions: _handle_(general|simple|any)_function_NAME
* builtin methods: _handle_(general|simple|any)_method_TYPENAME_METHODNAME
"""
# only visit call nodes and Python operations
def visit_GeneralCallNode(self, node):
self.visitchildren(node)
function = node.function
if not function.type.is_pyobject:
return node
arg_tuple = node.positional_args
if not isinstance(arg_tuple, ExprNodes.TupleNode):
return node
keyword_args = node.keyword_args
if keyword_args and not isinstance(keyword_args, ExprNodes.DictNode):
# can't handle **kwargs
return node
args = arg_tuple.args
return self._dispatch_to_handler(node, function, args, keyword_args)
def visit_SimpleCallNode(self, node):
self.visitchildren(node)
function = node.function
if function.type.is_pyobject:
arg_tuple = node.arg_tuple
if not isinstance(arg_tuple, ExprNodes.TupleNode):
return node
args = arg_tuple.args
else:
args = node.args
return self._dispatch_to_handler(node, function, args, None)
def visit_PrimaryCmpNode(self, node):
if node.cascade:
# not currently handled below
self.visitchildren(node)
return node
return self._visit_binop_node(node)
def visit_BinopNode(self, node):
return self._visit_binop_node(node)
def _visit_binop_node(self, node):
self.visitchildren(node)
# FIXME: could special case 'not_in'
special_method_name = find_special_method_for_binary_operator(node.operator)
if special_method_name:
operand1, operand2 = node.operand1, node.operand2
if special_method_name == '__contains__':
operand1, operand2 = operand2, operand1
obj_type = operand1.type
if obj_type.is_builtin_type:
type_name = obj_type.name
else:
type_name = "object" # safety measure
node = self._dispatch_to_method_handler(
special_method_name, None, False, type_name,
node, None, [operand1, operand2], None)
return node
def visit_UnopNode(self, node):
self.visitchildren(node)
special_method_name = find_special_method_for_unary_operator(node.operator)
if special_method_name:
operand = node.operand
obj_type = operand.type
if obj_type.is_builtin_type:
type_name = obj_type.name
else:
type_name = "object" # safety measure
node = self._dispatch_to_method_handler(
special_method_name, None, False, type_name,
node, None, [operand], None)
return node
### dispatch to specific handlers
def _find_handler(self, match_name, has_kwargs):
call_type = has_kwargs and 'general' or 'simple'
handler = getattr(self, '_handle_%s_%s' % (call_type, match_name), None)
if handler is None:
handler = getattr(self, '_handle_any_%s' % match_name, None)
return handler
def _delegate_to_assigned_value(self, node, function, arg_list, kwargs):
assignment = function.cf_state[0]
value = assignment.rhs
if value.is_name:
if not value.entry or len(value.entry.cf_assignments) > 1:
# the variable might have been reassigned => play safe
return node
elif value.is_attribute and value.obj.is_name:
if not value.obj.entry or len(value.obj.entry.cf_assignments) > 1:
# the underlying variable might have been reassigned => play safe
return node
else:
return node
return self._dispatch_to_handler(
node, value, arg_list, kwargs)
def _dispatch_to_handler(self, node, function, arg_list, kwargs):
if function.is_name:
# we only consider functions that are either builtin
# Python functions or builtins that were already replaced
# into a C function call (defined in the builtin scope)
if not function.entry:
return node
is_builtin = (
function.entry.is_builtin or
function.entry is self.current_env().builtin_scope().lookup_here(function.name))
if not is_builtin:
if function.cf_state and function.cf_state.is_single:
# we know the value of the variable
# => see if it's usable instead
return self._delegate_to_assigned_value(
node, function, arg_list, kwargs)
return node
function_handler = self._find_handler(
"function_%s" % function.name, kwargs)
if function_handler is None:
return self._handle_function(node, function.name, function, arg_list, kwargs)
if kwargs:
return function_handler(node, function, arg_list, kwargs)
else:
return function_handler(node, function, arg_list)
elif function.is_attribute and function.type.is_pyobject:
attr_name = function.attribute
self_arg = function.obj
obj_type = self_arg.type
is_unbound_method = False
if obj_type.is_builtin_type:
if (obj_type is Builtin.type_type and self_arg.is_name and
arg_list and arg_list[0].type.is_pyobject):
# calling an unbound method like 'list.append(L,x)'
# (ignoring 'type.mro()' here ...)
type_name = self_arg.name
self_arg = None
is_unbound_method = True
else:
type_name = obj_type.name
else:
type_name = "object" # safety measure
return self._dispatch_to_method_handler(
attr_name, self_arg, is_unbound_method, type_name,
node, function, arg_list, kwargs)
else:
return node
def _dispatch_to_method_handler(self, attr_name, self_arg,
is_unbound_method, type_name,
node, function, arg_list, kwargs):
method_handler = self._find_handler(
"method_%s_%s" % (type_name, attr_name), kwargs)
if method_handler is None:
if (attr_name in TypeSlots.method_name_to_slot
or attr_name == '__new__'):
method_handler = self._find_handler(
"slot%s" % attr_name, kwargs)
if method_handler is None:
return self._handle_method(
node, type_name, attr_name, function,
arg_list, is_unbound_method, kwargs)
if self_arg is not None:
arg_list = [self_arg] + list(arg_list)
if kwargs:
return method_handler(
node, function, arg_list, is_unbound_method, kwargs)
else:
return method_handler(
node, function, arg_list, is_unbound_method)
def _handle_function(self, node, function_name, function, arg_list, kwargs):
"""Fallback handler"""
return node
def _handle_method(self, node, type_name, attr_name, function,
arg_list, is_unbound_method, kwargs):
"""Fallback handler"""
return node
class RecursiveNodeReplacer(VisitorTransform):
"""
Recursively replace all occurrences of a node in a subtree by
another node.
"""
def __init__(self, orig_node, new_node):
super(RecursiveNodeReplacer, self).__init__()
self.orig_node, self.new_node = orig_node, new_node
def visit_Node(self, node):
self.visitchildren(node)
if node is self.orig_node:
return self.new_node
else:
return node
def recursively_replace_node(tree, old_node, new_node):
replace_in = RecursiveNodeReplacer(old_node, new_node)
replace_in(tree)
class NodeFinder(TreeVisitor):
"""
Find out if a node appears in a subtree.
"""
def __init__(self, node):
super(NodeFinder, self).__init__()
self.node = node
self.found = False
def visit_Node(self, node):
if self.found:
pass # short-circuit
elif node is self.node:
self.found = True
else:
self._visitchildren(node, None)
def tree_contains(tree, node):
finder = NodeFinder(node)
finder.visit(tree)
return finder.found
# Utils
def replace_node(ptr, value):
"""Replaces a node. ptr is of the form used on the access path stack
(parent, attrname, listidx|None)
"""
parent, attrname, listidx = ptr
if listidx is None:
setattr(parent, attrname, value)
else:
getattr(parent, attrname)[listidx] = value
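# Example with hypothetical attribute names: replace_node((parent, 'stats', 2),
# new_stat) performs parent.stats[2] = new_stat, while
# replace_node((parent, 'body', None), new_body) performs parent.body = new_body.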
class PrintTree(TreeVisitor):
"""Prints a representation of the tree to standard output.
Subclass and override repr_of to provide more information
about nodes. """
def __init__(self):
TreeVisitor.__init__(self)
self._indent = ""
def indent(self):
        self._indent += "  "
def unindent(self):
self._indent = self._indent[:-2]
def __call__(self, tree, phase=None):
print("Parse tree dump at phase '%s'" % phase)
self.visit(tree)
return tree
    # Don't do anything about process_list; the default gives
    # nice-looking name[idx] nodes which will visually appear
    # under the parent node, without displaying the list itself
    # in the hierarchy.
def visit_Node(self, node):
if len(self.access_path) == 0:
name = "(root)"
else:
parent, attr, idx = self.access_path[-1]
if idx is not None:
name = "%s[%d]" % (attr, idx)
else:
name = attr
print("%s- %s: %s" % (self._indent, name, self.repr_of(node)))
self.indent()
self.visitchildren(node)
self.unindent()
return node
def repr_of(self, node):
if node is None:
return "(none)"
else:
result = node.__class__.__name__
if isinstance(node, ExprNodes.NameNode):
result += "(type=%s, name=\"%s\")" % (repr(node.type), node.name)
elif isinstance(node, Nodes.DefNode):
result += "(name=\"%s\")" % node.name
elif isinstance(node, ExprNodes.ExprNode):
t = node.type
result += "(type=%s)" % repr(t)
elif node.pos:
pos = node.pos
path = pos[0].get_description()
if '/' in path:
path = path.split('/')[-1]
if '\\' in path:
path = path.split('\\')[-1]
result += "(pos=(%s:%s:%s))" % (path, pos[1], pos[2])
return result
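# Usage sketch: dump a tree at a named pipeline phase and keep processing it
# afterwards, since __call__ returns the tree unchanged (the phase name below
# is only an example):
#
#     tree = PrintTree()(tree, phase="analyse_expressions")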
if __name__ == "__main__":
import doctest
doctest.testmod()
|
nekulin/arangodb
|
refs/heads/devel
|
3rdParty/V8-4.3.61/third_party/python_26/Lib/BaseHTTPServer.py
|
59
|
"""HTTP server base class.
Note: the class in this module doesn't implement any HTTP request; see
SimpleHTTPServer for simple implementations of GET, HEAD and POST
(including CGI scripts). It does, however, optionally implement HTTP/1.1
persistent connections, as of version 0.3.
Contents:
- BaseHTTPRequestHandler: HTTP request handler base class
- test: test function
XXX To do:
- log requests even later (to capture byte count)
- log user-agent header and other interesting goodies
- send error log to separate file
"""
# See also:
#
# HTTP Working Group T. Berners-Lee
# INTERNET-DRAFT R. T. Fielding
# <draft-ietf-http-v10-spec-00.txt> H. Frystyk Nielsen
# Expires September 8, 1995 March 8, 1995
#
# URL: http://www.ics.uci.edu/pub/ietf/http/draft-ietf-http-v10-spec-00.txt
#
# and
#
# Network Working Group R. Fielding
# Request for Comments: 2616 et al
# Obsoletes: 2068 June 1999
# Category: Standards Track
#
# URL: http://www.faqs.org/rfcs/rfc2616.html
# Log files
# ---------
#
# Here's a quote from the NCSA httpd docs about log file format.
#
# | The logfile format is as follows. Each line consists of:
# |
# | host rfc931 authuser [DD/Mon/YYYY:hh:mm:ss] "request" ddd bbbb
# |
# | host: Either the DNS name or the IP number of the remote client
# | rfc931: Any information returned by identd for this person,
# | - otherwise.
# | authuser: If user sent a userid for authentication, the user name,
# | - otherwise.
# | DD: Day
# | Mon: Month (calendar name)
# | YYYY: Year
# | hh: hour (24-hour format, the machine's timezone)
# | mm: minutes
# | ss: seconds
# | request: The first line of the HTTP request as sent by the client.
# | ddd: the status code returned by the server, - if not available.
# | bbbb: the total number of bytes sent,
# | *not including the HTTP/1.0 header*, - if not available
# |
# | You can determine the name of the file accessed through request.
#
# (Actually, the latter is only true if you know the server configuration
# at the time the request was made!)
__version__ = "0.3"
__all__ = ["HTTPServer", "BaseHTTPRequestHandler"]
import sys
import time
import socket # For gethostbyaddr()
from warnings import filterwarnings, catch_warnings
with catch_warnings():
if sys.py3kwarning:
filterwarnings("ignore", ".*mimetools has been removed",
DeprecationWarning)
import mimetools
import SocketServer
# Default error message template
DEFAULT_ERROR_MESSAGE = """\
<head>
<title>Error response</title>
</head>
<body>
<h1>Error response</h1>
<p>Error code %(code)d.
<p>Message: %(message)s.
<p>Error code explanation: %(code)s = %(explain)s.
</body>
"""
DEFAULT_ERROR_CONTENT_TYPE = "text/html"
def _quote_html(html):
    return html.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;")
class HTTPServer(SocketServer.TCPServer):
allow_reuse_address = 1 # Seems to make sense in testing environment
def server_bind(self):
"""Override server_bind to store the server name."""
SocketServer.TCPServer.server_bind(self)
host, port = self.socket.getsockname()[:2]
self.server_name = socket.getfqdn(host)
self.server_port = port
class BaseHTTPRequestHandler(SocketServer.StreamRequestHandler):
"""HTTP request handler base class.
The following explanation of HTTP serves to guide you through the
code as well as to expose any misunderstandings I may have about
HTTP (so you don't need to read the code to figure out I'm wrong
:-).
HTTP (HyperText Transfer Protocol) is an extensible protocol on
top of a reliable stream transport (e.g. TCP/IP). The protocol
recognizes three parts to a request:
1. One line identifying the request type and path
2. An optional set of RFC-822-style headers
3. An optional data part
The headers and data are separated by a blank line.
The first line of the request has the form
<command> <path> <version>
where <command> is a (case-sensitive) keyword such as GET or POST,
<path> is a string containing path information for the request,
and <version> should be the string "HTTP/1.0" or "HTTP/1.1".
<path> is encoded using the URL encoding scheme (using %xx to signify
the ASCII character with hex code xx).
The specification specifies that lines are separated by CRLF but
for compatibility with the widest range of clients recommends
servers also handle LF. Similarly, whitespace in the request line
is treated sensibly (allowing multiple spaces between components
and allowing trailing whitespace).
Similarly, for output, lines ought to be separated by CRLF pairs
but most clients grok LF characters just fine.
If the first line of the request has the form
<command> <path>
(i.e. <version> is left out) then this is assumed to be an HTTP
0.9 request; this form has no optional headers and data part and
the reply consists of just the data.
The reply form of the HTTP 1.x protocol again has three parts:
1. One line giving the response code
2. An optional set of RFC-822-style headers
3. The data
Again, the headers and data are separated by a blank line.
The response code line has the form
<version> <responsecode> <responsestring>
where <version> is the protocol version ("HTTP/1.0" or "HTTP/1.1"),
<responsecode> is a 3-digit response code indicating success or
failure of the request, and <responsestring> is an optional
human-readable string explaining what the response code means.
This server parses the request and the headers, and then calls a
function specific to the request type (<command>). Specifically,
a request SPAM will be handled by a method do_SPAM(). If no
such method exists the server sends an error response to the
client. If it exists, it is called with no arguments:
do_SPAM()
Note that the request name is case sensitive (i.e. SPAM and spam
are different requests).
The various request details are stored in instance variables:
- client_address is the client IP address in the form (host,
port);
- command, path and version are the broken-down request line;
- headers is an instance of mimetools.Message (or a derived
class) containing the header information;
- rfile is a file object open for reading positioned at the
start of the optional input data part;
- wfile is a file object open for writing.
IT IS IMPORTANT TO ADHERE TO THE PROTOCOL FOR WRITING!
The first thing to be written must be the response line. Then
follow 0 or more header lines, then a blank line, and then the
actual data (if any). The meaning of the header lines depends on
the command executed by the server; in most cases, when data is
returned, there should be at least one header line of the form
Content-type: <type>/<subtype>
where <type> and <subtype> should be registered MIME types,
e.g. "text/html" or "text/plain".
"""
# The Python system version, truncated to its first component.
sys_version = "Python/" + sys.version.split()[0]
# The server software version. You may want to override this.
# The format is multiple whitespace-separated strings,
# where each string is of the form name[/version].
server_version = "BaseHTTP/" + __version__
# The default request version. This only affects responses up until
# the point where the request line is parsed, so it mainly decides what
# the client gets back when sending a malformed request line.
# Most web servers default to HTTP 0.9, i.e. don't send a status line.
default_request_version = "HTTP/0.9"
def parse_request(self):
"""Parse a request (internal).
The request should be stored in self.raw_requestline; the results
are in self.command, self.path, self.request_version and
self.headers.
Return True for success, False for failure; on failure, an
error is sent back.
"""
self.command = None # set in case of error on the first line
self.request_version = version = self.default_request_version
self.close_connection = 1
requestline = self.raw_requestline
if requestline[-2:] == '\r\n':
requestline = requestline[:-2]
elif requestline[-1:] == '\n':
requestline = requestline[:-1]
self.requestline = requestline
words = requestline.split()
if len(words) == 3:
[command, path, version] = words
if version[:5] != 'HTTP/':
self.send_error(400, "Bad request version (%r)" % version)
return False
try:
base_version_number = version.split('/', 1)[1]
version_number = base_version_number.split(".")
# RFC 2145 section 3.1 says there can be only one "." and
# - major and minor numbers MUST be treated as
# separate integers;
# - HTTP/2.4 is a lower version than HTTP/2.13, which in
# turn is lower than HTTP/12.3;
# - Leading zeros MUST be ignored by recipients.
if len(version_number) != 2:
raise ValueError
version_number = int(version_number[0]), int(version_number[1])
except (ValueError, IndexError):
self.send_error(400, "Bad request version (%r)" % version)
return False
if version_number >= (1, 1) and self.protocol_version >= "HTTP/1.1":
self.close_connection = 0
if version_number >= (2, 0):
self.send_error(505,
"Invalid HTTP Version (%s)" % base_version_number)
return False
elif len(words) == 2:
[command, path] = words
self.close_connection = 1
if command != 'GET':
self.send_error(400,
"Bad HTTP/0.9 request type (%r)" % command)
return False
elif not words:
return False
else:
self.send_error(400, "Bad request syntax (%r)" % requestline)
return False
self.command, self.path, self.request_version = command, path, version
# Examine the headers and look for a Connection directive
self.headers = self.MessageClass(self.rfile, 0)
conntype = self.headers.get('Connection', "")
if conntype.lower() == 'close':
self.close_connection = 1
elif (conntype.lower() == 'keep-alive' and
self.protocol_version >= "HTTP/1.1"):
self.close_connection = 0
return True
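    # For example, a request line of "GET /index.html HTTP/1.1" parses into
    # command='GET', path='/index.html', request_version='HTTP/1.1', and
    # leaves close_connection at 0 when protocol_version is "HTTP/1.1" and
    # no "Connection: close" header was sent.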
def handle_one_request(self):
"""Handle a single HTTP request.
You normally don't need to override this method; see the class
__doc__ string for information on how to handle specific HTTP
commands such as GET and POST.
"""
self.raw_requestline = self.rfile.readline()
if not self.raw_requestline:
self.close_connection = 1
return
if not self.parse_request(): # An error code has been sent, just exit
return
mname = 'do_' + self.command
if not hasattr(self, mname):
self.send_error(501, "Unsupported method (%r)" % self.command)
return
method = getattr(self, mname)
method()
def handle(self):
"""Handle multiple requests if necessary."""
self.close_connection = 1
self.handle_one_request()
while not self.close_connection:
self.handle_one_request()
def send_error(self, code, message=None):
"""Send and log an error reply.
Arguments are the error code, and a detailed message.
The detailed message defaults to the short entry matching the
response code.
This sends an error response (so it must be called before any
output has been generated), logs the error, and finally sends
a piece of HTML explaining the error to the user.
"""
try:
short, long = self.responses[code]
except KeyError:
short, long = '???', '???'
if message is None:
message = short
explain = long
self.log_error("code %d, message %s", code, message)
# using _quote_html to prevent Cross Site Scripting attacks (see bug #1100201)
content = (self.error_message_format %
{'code': code, 'message': _quote_html(message), 'explain': explain})
self.send_response(code, message)
self.send_header("Content-Type", self.error_content_type)
self.send_header('Connection', 'close')
self.end_headers()
if self.command != 'HEAD' and code >= 200 and code not in (204, 304):
self.wfile.write(content)
error_message_format = DEFAULT_ERROR_MESSAGE
error_content_type = DEFAULT_ERROR_CONTENT_TYPE
def send_response(self, code, message=None):
"""Send the response header and log the response code.
Also send two standard headers with the server software
version and the current date.
"""
self.log_request(code)
if message is None:
if code in self.responses:
message = self.responses[code][0]
else:
message = ''
if self.request_version != 'HTTP/0.9':
self.wfile.write("%s %d %s\r\n" %
(self.protocol_version, code, message))
# print (self.protocol_version, code, message)
self.send_header('Server', self.version_string())
self.send_header('Date', self.date_time_string())
def send_header(self, keyword, value):
"""Send a MIME header."""
if self.request_version != 'HTTP/0.9':
self.wfile.write("%s: %s\r\n" % (keyword, value))
if keyword.lower() == 'connection':
if value.lower() == 'close':
self.close_connection = 1
elif value.lower() == 'keep-alive':
self.close_connection = 0
def end_headers(self):
"""Send the blank line ending the MIME headers."""
if self.request_version != 'HTTP/0.9':
self.wfile.write("\r\n")
def log_request(self, code='-', size='-'):
"""Log an accepted request.
This is called by send_response().
"""
self.log_message('"%s" %s %s',
self.requestline, str(code), str(size))
def log_error(self, format, *args):
"""Log an error.
This is called when a request cannot be fulfilled. By
default it passes the message on to log_message().
Arguments are the same as for log_message().
XXX This should go to the separate error log.
"""
self.log_message(format, *args)
def log_message(self, format, *args):
"""Log an arbitrary message.
This is used by all other logging functions. Override
it if you have specific logging wishes.
The first argument, FORMAT, is a format string for the
message to be logged. If the format string contains
any % escapes requiring parameters, they should be
specified as subsequent arguments (it's just like
printf!).
The client host and current date/time are prefixed to
every message.
"""
sys.stderr.write("%s - - [%s] %s\n" %
(self.address_string(),
self.log_date_time_string(),
format%args))
def version_string(self):
"""Return the server software version string."""
return self.server_version + ' ' + self.sys_version
def date_time_string(self, timestamp=None):
"""Return the current date and time formatted for a message header."""
if timestamp is None:
timestamp = time.time()
year, month, day, hh, mm, ss, wd, y, z = time.gmtime(timestamp)
s = "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
self.weekdayname[wd],
day, self.monthname[month], year,
hh, mm, ss)
return s
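    # Example: date_time_string(784111777) returns the RFC 1123 form
    # "Sun, 06 Nov 1994 08:49:37 GMT" (the example date used in the HTTP RFCs).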
def log_date_time_string(self):
"""Return the current time formatted for logging."""
now = time.time()
year, month, day, hh, mm, ss, x, y, z = time.localtime(now)
s = "%02d/%3s/%04d %02d:%02d:%02d" % (
day, self.monthname[month], year, hh, mm, ss)
return s
weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
monthname = [None,
'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
def address_string(self):
"""Return the client address formatted for logging.
This version looks up the full hostname using gethostbyaddr(),
and tries to find a name that contains at least one dot.
"""
host, port = self.client_address[:2]
return socket.getfqdn(host)
# Essentially static class variables
# The version of the HTTP protocol we support.
# Set this to HTTP/1.1 to enable automatic keepalive
protocol_version = "HTTP/1.0"
# The Message-like class used to parse headers
MessageClass = mimetools.Message
# Table mapping response codes to messages; entries have the
# form {code: (shortmessage, longmessage)}.
# See RFC 2616.
responses = {
100: ('Continue', 'Request received, please continue'),
101: ('Switching Protocols',
'Switching to new protocol; obey Upgrade header'),
200: ('OK', 'Request fulfilled, document follows'),
201: ('Created', 'Document created, URL follows'),
202: ('Accepted',
'Request accepted, processing continues off-line'),
203: ('Non-Authoritative Information', 'Request fulfilled from cache'),
204: ('No Content', 'Request fulfilled, nothing follows'),
205: ('Reset Content', 'Clear input form for further input.'),
206: ('Partial Content', 'Partial content follows.'),
300: ('Multiple Choices',
'Object has several resources -- see URI list'),
301: ('Moved Permanently', 'Object moved permanently -- see URI list'),
302: ('Found', 'Object moved temporarily -- see URI list'),
303: ('See Other', 'Object moved -- see Method and URL list'),
304: ('Not Modified',
'Document has not changed since given time'),
305: ('Use Proxy',
'You must use proxy specified in Location to access this '
'resource.'),
307: ('Temporary Redirect',
'Object moved temporarily -- see URI list'),
400: ('Bad Request',
'Bad request syntax or unsupported method'),
401: ('Unauthorized',
'No permission -- see authorization schemes'),
402: ('Payment Required',
'No payment -- see charging schemes'),
403: ('Forbidden',
'Request forbidden -- authorization will not help'),
404: ('Not Found', 'Nothing matches the given URI'),
405: ('Method Not Allowed',
'Specified method is invalid for this server.'),
406: ('Not Acceptable', 'URI not available in preferred format.'),
407: ('Proxy Authentication Required', 'You must authenticate with '
'this proxy before proceeding.'),
408: ('Request Timeout', 'Request timed out; try again later.'),
409: ('Conflict', 'Request conflict.'),
410: ('Gone',
'URI no longer exists and has been permanently removed.'),
411: ('Length Required', 'Client must specify Content-Length.'),
412: ('Precondition Failed', 'Precondition in headers is false.'),
413: ('Request Entity Too Large', 'Entity is too large.'),
414: ('Request-URI Too Long', 'URI is too long.'),
415: ('Unsupported Media Type', 'Entity body in unsupported format.'),
416: ('Requested Range Not Satisfiable',
'Cannot satisfy request range.'),
417: ('Expectation Failed',
'Expect condition could not be satisfied.'),
500: ('Internal Server Error', 'Server got itself in trouble'),
501: ('Not Implemented',
'Server does not support this operation'),
502: ('Bad Gateway', 'Invalid responses from another server/proxy.'),
503: ('Service Unavailable',
'The server cannot process the request due to a high load'),
504: ('Gateway Timeout',
'The gateway server did not receive a timely response'),
505: ('HTTP Version Not Supported', 'Cannot fulfill request.'),
}
def test(HandlerClass = BaseHTTPRequestHandler,
ServerClass = HTTPServer, protocol="HTTP/1.0"):
"""Test the HTTP request handler class.
This runs an HTTP server on port 8000 (or the first command line
argument).
"""
if sys.argv[1:]:
port = int(sys.argv[1])
else:
port = 8000
server_address = ('', port)
HandlerClass.protocol_version = protocol
httpd = ServerClass(server_address, HandlerClass)
sa = httpd.socket.getsockname()
print "Serving HTTP on", sa[0], "port", sa[1], "..."
httpd.serve_forever()
if __name__ == '__main__':
test()
|
canglade/NLP
|
refs/heads/master
|
logging/cloud-client/export_test.py
|
4
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import random
import string
from gcp_devrel.testing import eventually_consistent
from google.cloud import logging
import pytest
import export
BUCKET = os.environ['CLOUD_STORAGE_BUCKET']
TEST_SINK_NAME_TMPL = 'example_sink_{}'
TEST_SINK_FILTER = 'severity>=CRITICAL'
def _random_id():
return ''.join(
random.choice(string.ascii_uppercase + string.digits)
for _ in range(6))
@pytest.yield_fixture
def example_sink():
client = logging.Client()
sink = client.sink(
TEST_SINK_NAME_TMPL.format(_random_id()),
TEST_SINK_FILTER,
'storage.googleapis.com/{bucket}'.format(bucket=BUCKET))
sink.create()
yield sink
try:
sink.delete()
    except Exception:
pass
def test_list(example_sink, capsys):
@eventually_consistent.call
def _():
export.list_sinks()
out, _ = capsys.readouterr()
assert example_sink.name in out
def test_create(capsys):
sink_name = TEST_SINK_NAME_TMPL.format(_random_id())
try:
export.create_sink(
sink_name,
BUCKET,
TEST_SINK_FILTER)
# Clean-up the temporary sink.
finally:
try:
logging.Client().sink(sink_name).delete()
        except Exception:
pass
out, _ = capsys.readouterr()
assert sink_name in out
def test_update(example_sink, capsys):
updated_filter = 'severity>=INFO'
export.update_sink(example_sink.name, updated_filter)
example_sink.reload()
assert example_sink.filter_ == updated_filter
def test_delete(example_sink, capsys):
export.delete_sink(example_sink.name)
assert not example_sink.exists()
|
mozilla/olympia
|
refs/heads/master
|
src/olympia/shelves/migrations/0003_auto_20200720_1509.py
|
6
|
# Generated by Django 2.2.14 on 2020-07-20 15:09
from django.db import migrations, models
import olympia.shelves.models
class Migration(migrations.Migration):
dependencies = [
('shelves', '0002_auto_20200716_1254'),
]
operations = [
migrations.AlterField(
model_name='shelf',
name='criteria',
field=models.CharField(help_text='e.g., ?recommended=true&sort=random&type=extension', max_length=200,),
),
migrations.AlterField(
model_name='shelf',
name='shelf_type',
field=models.CharField(choices=[('categories', 'categories'), ('collections', 'collections'), ('extension', 'extension'), ('recommendations', 'recommendations'), ('search', 'search'), ('theme', 'theme')], max_length=200, verbose_name='type'),
),
]
|
acsone/odoo
|
refs/heads/8.0
|
addons/website_forum/models/forum.py
|
233
|
# -*- coding: utf-8 -*-
from datetime import datetime
import uuid
from werkzeug.exceptions import Forbidden
import logging
import openerp
from openerp import api, tools
from openerp import SUPERUSER_ID
from openerp.addons.website.models.website import slug
from openerp.exceptions import Warning
from openerp.osv import osv, fields
from openerp.tools import html2plaintext
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
class KarmaError(Forbidden):
""" Karma-related error, used for forum and posts. """
pass
class Forum(osv.Model):
"""TDE TODO: set karma values for actions dynamic for a given forum"""
_name = 'forum.forum'
_description = 'Forums'
_inherit = ['mail.thread', 'website.seo.metadata']
def init(self, cr):
""" Add forum uuid for user email validation. """
forum_uuids = self.pool['ir.config_parameter'].search(cr, SUPERUSER_ID, [('key', '=', 'website_forum.uuid')])
if not forum_uuids:
self.pool['ir.config_parameter'].set_param(cr, SUPERUSER_ID, 'website_forum.uuid', str(uuid.uuid4()), ['base.group_system'])
_columns = {
'name': fields.char('Name', required=True, translate=True),
'faq': fields.html('Guidelines'),
'description': fields.html('Description', translate=True),
# karma generation
'karma_gen_question_new': fields.integer('Asking a question'),
'karma_gen_question_upvote': fields.integer('Question upvoted'),
'karma_gen_question_downvote': fields.integer('Question downvoted'),
'karma_gen_answer_upvote': fields.integer('Answer upvoted'),
'karma_gen_answer_downvote': fields.integer('Answer downvoted'),
'karma_gen_answer_accept': fields.integer('Accepting an answer'),
'karma_gen_answer_accepted': fields.integer('Answer accepted'),
'karma_gen_answer_flagged': fields.integer('Answer flagged'),
# karma-based actions
'karma_ask': fields.integer('Ask a question'),
'karma_answer': fields.integer('Answer a question'),
'karma_edit_own': fields.integer('Edit its own posts'),
'karma_edit_all': fields.integer('Edit all posts'),
'karma_close_own': fields.integer('Close its own posts'),
'karma_close_all': fields.integer('Close all posts'),
'karma_unlink_own': fields.integer('Delete its own posts'),
'karma_unlink_all': fields.integer('Delete all posts'),
'karma_upvote': fields.integer('Upvote'),
'karma_downvote': fields.integer('Downvote'),
'karma_answer_accept_own': fields.integer('Accept an answer on its own questions'),
'karma_answer_accept_all': fields.integer('Accept an answer to all questions'),
'karma_editor_link_files': fields.integer('Linking files (Editor)'),
'karma_editor_clickable_link': fields.integer('Clickable links (Editor)'),
'karma_comment_own': fields.integer('Comment its own posts'),
'karma_comment_all': fields.integer('Comment all posts'),
'karma_comment_convert_own': fields.integer('Convert its own answers to comments and vice versa'),
'karma_comment_convert_all': fields.integer('Convert all answers to comments and vice versa'),
'karma_comment_unlink_own': fields.integer('Unlink its own comments'),
'karma_comment_unlink_all': fields.integer('Unlink all comments'),
'karma_retag': fields.integer('Change question tags'),
'karma_flag': fields.integer('Flag a post as offensive'),
}
def _get_default_faq(self, cr, uid, context=None):
fname = openerp.modules.get_module_resource('website_forum', 'data', 'forum_default_faq.html')
with open(fname, 'r') as f:
return f.read()
_defaults = {
'description': 'This community is for professionals and enthusiasts of our products and services.',
'faq': _get_default_faq,
        'karma_gen_question_new': 0,  # kept at 0 for anti-spam protection
'karma_gen_question_upvote': 5,
'karma_gen_question_downvote': -2,
'karma_gen_answer_upvote': 10,
'karma_gen_answer_downvote': -2,
'karma_gen_answer_accept': 2,
'karma_gen_answer_accepted': 15,
'karma_gen_answer_flagged': -100,
        'karma_ask': 3,  # kept non-zero for anti-spam protection
        'karma_answer': 3,  # kept non-zero for anti-spam protection
'karma_edit_own': 1,
'karma_edit_all': 300,
'karma_close_own': 100,
'karma_close_all': 500,
'karma_unlink_own': 500,
'karma_unlink_all': 1000,
'karma_upvote': 5,
'karma_downvote': 50,
'karma_answer_accept_own': 20,
'karma_answer_accept_all': 500,
'karma_editor_link_files': 20,
'karma_editor_clickable_link': 20,
'karma_comment_own': 3,
'karma_comment_all': 5,
'karma_comment_convert_own': 50,
'karma_comment_convert_all': 500,
'karma_comment_unlink_own': 50,
'karma_comment_unlink_all': 500,
'karma_retag': 75,
'karma_flag': 500,
}
def create(self, cr, uid, values, context=None):
if context is None:
context = {}
create_context = dict(context, mail_create_nolog=True)
return super(Forum, self).create(cr, uid, values, context=create_context)
def _tag_to_write_vals(self, cr, uid, ids, tags='', context=None):
User = self.pool['res.users']
Tag = self.pool['forum.tag']
result = {}
for forum in self.browse(cr, uid, ids, context=context):
post_tags = []
existing_keep = []
for tag in filter(None, tags.split(',')):
if tag.startswith('_'): # it's a new tag
                    # check that it was not already created in the meantime, or excluded by the search limit
tag_ids = Tag.search(cr, uid, [('name', '=', tag[1:])], context=context)
if tag_ids:
existing_keep.append(int(tag_ids[0]))
else:
                    # check whether the user has the karma needed to create a new tag
user = User.browse(cr, uid, uid, context=context)
if user.exists() and user.karma >= forum.karma_retag:
post_tags.append((0, 0, {'name': tag[1:], 'forum_id': forum.id}))
else:
existing_keep.append(int(tag))
post_tags.insert(0, [6, 0, existing_keep])
result[forum.id] = post_tags
return result
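    # Example with hypothetical values: tags='_newtag,12' with sufficient
    # karma yields, per forum id, a one2many command list such as
    #     [[6, 0, [12]], (0, 0, {'name': 'newtag', 'forum_id': forum.id})]
    # i.e. keep existing tag 12 and create a new tag named 'newtag'.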
class Post(osv.Model):
_name = 'forum.post'
_description = 'Forum Post'
_inherit = ['mail.thread', 'website.seo.metadata']
_order = "is_correct DESC, vote_count DESC, write_date DESC"
def _get_user_vote(self, cr, uid, ids, field_name, arg, context):
res = dict.fromkeys(ids, 0)
vote_ids = self.pool['forum.post.vote'].search(cr, uid, [('post_id', 'in', ids), ('user_id', '=', uid)], context=context)
for vote in self.pool['forum.post.vote'].browse(cr, uid, vote_ids, context=context):
res[vote.post_id.id] = vote.vote
return res
def _get_vote_count(self, cr, uid, ids, field_name, arg, context):
res = dict.fromkeys(ids, 0)
for post in self.browse(cr, uid, ids, context=context):
for vote in post.vote_ids:
res[post.id] += int(vote.vote)
return res
def _get_post_from_vote(self, cr, uid, ids, context=None):
result = {}
for vote in self.pool['forum.post.vote'].browse(cr, uid, ids, context=context):
result[vote.post_id.id] = True
return result.keys()
def _get_user_favourite(self, cr, uid, ids, field_name, arg, context):
res = dict.fromkeys(ids, False)
for post in self.browse(cr, uid, ids, context=context):
if uid in [f.id for f in post.favourite_ids]:
res[post.id] = True
return res
def _get_favorite_count(self, cr, uid, ids, field_name, arg, context):
res = dict.fromkeys(ids, 0)
for post in self.browse(cr, uid, ids, context=context):
res[post.id] += len(post.favourite_ids)
return res
def _get_post_from_hierarchy(self, cr, uid, ids, context=None):
post_ids = set(ids)
for post in self.browse(cr, SUPERUSER_ID, ids, context=context):
if post.parent_id:
post_ids.add(post.parent_id.id)
return list(post_ids)
    def _get_child_count(self, cr, uid, ids, field_name=False, arg=None, context=None):
res = dict.fromkeys(ids, 0)
for post in self.browse(cr, uid, ids, context=context):
if post.parent_id:
res[post.parent_id.id] = len(post.parent_id.child_ids)
else:
res[post.id] = len(post.child_ids)
return res
def _get_uid_answered(self, cr, uid, ids, field_name, arg, context=None):
res = dict.fromkeys(ids, False)
for post in self.browse(cr, uid, ids, context=context):
res[post.id] = any(answer.create_uid.id == uid for answer in post.child_ids)
return res
def _get_has_validated_answer(self, cr, uid, ids, field_name, arg, context=None):
res = dict.fromkeys(ids, False)
ans_ids = self.search(cr, uid, [('parent_id', 'in', ids), ('is_correct', '=', True)], context=context)
for answer in self.browse(cr, uid, ans_ids, context=context):
res[answer.parent_id.id] = True
return res
def _is_self_reply(self, cr, uid, ids, field_name, arg, context=None):
res = dict.fromkeys(ids, False)
for post in self.browse(cr, uid, ids, context=context):
res[post.id] = post.parent_id and post.parent_id.create_uid == post.create_uid or False
return res
def _get_post_karma_rights(self, cr, uid, ids, field_name, arg, context=None):
user = self.pool['res.users'].browse(cr, uid, uid, context=context)
res = dict.fromkeys(ids, False)
for post in self.browse(cr, SUPERUSER_ID, ids, context=context):
res[post.id] = {
'karma_ask': post.forum_id.karma_ask,
'karma_answer': post.forum_id.karma_answer,
'karma_accept': post.parent_id and post.parent_id.create_uid.id == uid and post.forum_id.karma_answer_accept_own or post.forum_id.karma_answer_accept_all,
'karma_edit': post.create_uid.id == uid and post.forum_id.karma_edit_own or post.forum_id.karma_edit_all,
'karma_close': post.create_uid.id == uid and post.forum_id.karma_close_own or post.forum_id.karma_close_all,
'karma_unlink': post.create_uid.id == uid and post.forum_id.karma_unlink_own or post.forum_id.karma_unlink_all,
'karma_upvote': post.forum_id.karma_upvote,
'karma_downvote': post.forum_id.karma_downvote,
'karma_comment': post.create_uid.id == uid and post.forum_id.karma_comment_own or post.forum_id.karma_comment_all,
'karma_comment_convert': post.create_uid.id == uid and post.forum_id.karma_comment_convert_own or post.forum_id.karma_comment_convert_all,
}
res[post.id].update({
'can_ask': uid == SUPERUSER_ID or user.karma >= res[post.id]['karma_ask'],
'can_answer': uid == SUPERUSER_ID or user.karma >= res[post.id]['karma_answer'],
'can_accept': uid == SUPERUSER_ID or user.karma >= res[post.id]['karma_accept'],
'can_edit': uid == SUPERUSER_ID or user.karma >= res[post.id]['karma_edit'],
'can_close': uid == SUPERUSER_ID or user.karma >= res[post.id]['karma_close'],
'can_unlink': uid == SUPERUSER_ID or user.karma >= res[post.id]['karma_unlink'],
'can_upvote': uid == SUPERUSER_ID or user.karma >= res[post.id]['karma_upvote'],
'can_downvote': uid == SUPERUSER_ID or user.karma >= res[post.id]['karma_downvote'],
'can_comment': uid == SUPERUSER_ID or user.karma >= res[post.id]['karma_comment'],
'can_comment_convert': uid == SUPERUSER_ID or user.karma >= res[post.id]['karma_comment_convert'],
'can_view': (uid == SUPERUSER_ID or
user.karma >= res[post.id]['karma_close'] or
post.create_uid.karma > 0),
})
return res
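    # Note: the `cond and own or all` pattern in _get_post_karma_rights picks
    # the cheaper "own" karma threshold when the current user authored the
    # post, and falls back to the stricter "all" threshold otherwise.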
_columns = {
'name': fields.char('Title'),
'forum_id': fields.many2one('forum.forum', 'Forum', required=True),
'content': fields.html('Content', strip_style=True),
'tag_ids': fields.many2many('forum.tag', 'forum_tag_rel', 'forum_id', 'forum_tag_id', 'Tags'),
'state': fields.selection([('active', 'Active'), ('close', 'Close'), ('offensive', 'Offensive')], 'Status'),
'views': fields.integer('Number of Views'),
'active': fields.boolean('Active'),
'is_correct': fields.boolean('Valid Answer', help='Correct Answer or Answer on this question accepted.'),
'website_message_ids': fields.one2many(
'mail.message', 'res_id',
domain=lambda self: [
'&', ('model', '=', self._name), ('type', 'in', ['email', 'comment'])
],
string='Post Messages', help="Comments on forum post",
),
# history
'create_date': fields.datetime('Asked on', select=True, readonly=True),
'create_uid': fields.many2one('res.users', 'Created by', select=True, readonly=True),
'write_date': fields.datetime('Update on', select=True, readonly=True),
'write_uid': fields.many2one('res.users', 'Updated by', select=True, readonly=True),
# vote fields
'vote_ids': fields.one2many('forum.post.vote', 'post_id', 'Votes'),
'user_vote': fields.function(_get_user_vote, string='My Vote', type='integer'),
'vote_count': fields.function(
_get_vote_count, string="Votes", type='integer',
store={
'forum.post': (lambda self, cr, uid, ids, c={}: ids, ['vote_ids'], 10),
'forum.post.vote': (_get_post_from_vote, [], 10),
}),
# favorite fields
'favourite_ids': fields.many2many('res.users', string='Favourite'),
'user_favourite': fields.function(_get_user_favourite, string="My Favourite", type='boolean'),
'favourite_count': fields.function(
_get_favorite_count, string='Favorite Count', type='integer',
store={
'forum.post': (lambda self, cr, uid, ids, c={}: ids, ['favourite_ids'], 10),
}),
# hierarchy
'parent_id': fields.many2one('forum.post', 'Question', ondelete='cascade'),
'self_reply': fields.function(
_is_self_reply, 'Reply to own question', type='boolean',
store={
'forum.post': (lambda self, cr, uid, ids, c={}: ids, ['parent_id', 'create_uid'], 10),
}),
'child_ids': fields.one2many('forum.post', 'parent_id', 'Answers'),
'child_count': fields.function(
_get_child_count, string="Answers", type='integer',
store={
'forum.post': (_get_post_from_hierarchy, ['parent_id', 'child_ids'], 10),
}),
'uid_has_answered': fields.function(
_get_uid_answered, string='Has Answered', type='boolean',
),
'has_validated_answer': fields.function(
            _get_has_validated_answer, string='Has a Validated Answer', type='boolean',
store={
'forum.post': (_get_post_from_hierarchy, ['parent_id', 'child_ids', 'is_correct'], 10),
}
),
# closing
'closed_reason_id': fields.many2one('forum.post.reason', 'Reason'),
'closed_uid': fields.many2one('res.users', 'Closed by', select=1),
'closed_date': fields.datetime('Closed on', readonly=True),
# karma
'karma_ask': fields.function(_get_post_karma_rights, string='Karma to ask', type='integer', multi='_get_post_karma_rights'),
'karma_answer': fields.function(_get_post_karma_rights, string='Karma to answer', type='integer', multi='_get_post_karma_rights'),
'karma_accept': fields.function(_get_post_karma_rights, string='Karma to accept this answer', type='integer', multi='_get_post_karma_rights'),
'karma_edit': fields.function(_get_post_karma_rights, string='Karma to edit', type='integer', multi='_get_post_karma_rights'),
'karma_close': fields.function(_get_post_karma_rights, string='Karma to close', type='integer', multi='_get_post_karma_rights'),
'karma_unlink': fields.function(_get_post_karma_rights, string='Karma to unlink', type='integer', multi='_get_post_karma_rights'),
'karma_upvote': fields.function(_get_post_karma_rights, string='Karma to upvote', type='integer', multi='_get_post_karma_rights'),
'karma_downvote': fields.function(_get_post_karma_rights, string='Karma to downvote', type='integer', multi='_get_post_karma_rights'),
'karma_comment': fields.function(_get_post_karma_rights, string='Karma to comment', type='integer', multi='_get_post_karma_rights'),
        'karma_comment_convert': fields.function(_get_post_karma_rights, string='Karma to convert as a comment', type='integer', multi='_get_post_karma_rights'),
# access rights
'can_ask': fields.function(_get_post_karma_rights, string='Can Ask', type='boolean', multi='_get_post_karma_rights'),
'can_answer': fields.function(_get_post_karma_rights, string='Can Answer', type='boolean', multi='_get_post_karma_rights'),
'can_accept': fields.function(_get_post_karma_rights, string='Can Accept', type='boolean', multi='_get_post_karma_rights'),
'can_edit': fields.function(_get_post_karma_rights, string='Can Edit', type='boolean', multi='_get_post_karma_rights'),
'can_close': fields.function(_get_post_karma_rights, string='Can Close', type='boolean', multi='_get_post_karma_rights'),
'can_unlink': fields.function(_get_post_karma_rights, string='Can Unlink', type='boolean', multi='_get_post_karma_rights'),
'can_upvote': fields.function(_get_post_karma_rights, string='Can Upvote', type='boolean', multi='_get_post_karma_rights'),
'can_downvote': fields.function(_get_post_karma_rights, string='Can Downvote', type='boolean', multi='_get_post_karma_rights'),
'can_comment': fields.function(_get_post_karma_rights, string='Can Comment', type='boolean', multi='_get_post_karma_rights'),
'can_comment_convert': fields.function(_get_post_karma_rights, string='Can Convert to Comment', type='boolean', multi='_get_post_karma_rights'),
'can_view': fields.function(_get_post_karma_rights, string='Can View', type='boolean', multi='_get_post_karma_rights'),
}
_defaults = {
'state': 'active',
'views': 0,
'active': True,
'vote_ids': list(),
'favourite_ids': list(),
'child_ids': list(),
}
def name_get(self, cr, uid, ids, context=None):
result = []
for post in self.browse(cr, uid, ids, context=context):
if post.parent_id and not post.name:
result.append((post.id, '%s (%s)' % (post.parent_id.name, post.id)))
else:
result.append((post.id, '%s' % (post.name)))
return result
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
create_context = dict(context, mail_create_nolog=True)
post_id = super(Post, self).create(cr, uid, vals, context=create_context)
post = self.browse(cr, uid, post_id, context=context)
# karma-based access
if not post.parent_id and not post.can_ask:
raise KarmaError('Not enough karma to create a new question')
elif post.parent_id and not post.can_answer:
raise KarmaError('Not enough karma to answer to a question')
# messaging and chatter
base_url = self.pool['ir.config_parameter'].get_param(cr, uid, 'web.base.url')
if post.parent_id:
body = _(
'<p>A new answer for <i>%s</i> has been posted. <a href="%s/forum/%s/question/%s">Click here to access the post.</a></p>' %
(post.parent_id.name, base_url, slug(post.parent_id.forum_id), slug(post.parent_id))
)
self.message_post(cr, uid, post.parent_id.id, subject=_('Re: %s') % post.parent_id.name, body=body, subtype='website_forum.mt_answer_new', context=context)
else:
body = _(
'<p>A new question <i>%s</i> has been asked on %s. <a href="%s/forum/%s/question/%s">Click here to access the question.</a></p>' %
(post.name, post.forum_id.name, base_url, slug(post.forum_id), slug(post))
)
self.message_post(cr, uid, post_id, subject=post.name, body=body, subtype='website_forum.mt_question_new', context=context)
self.pool['res.users'].add_karma(cr, SUPERUSER_ID, [uid], post.forum_id.karma_gen_question_new, context=context)
return post_id
def check_mail_message_access(self, cr, uid, mids, operation, model_obj=None, context=None):
for post in self.browse(cr, uid, mids, context=context):
# Make sure only author or moderator can edit/delete messages
if operation in ('write', 'unlink') and not post.can_edit:
raise KarmaError('Not enough karma to edit a post.')
return super(Post, self).check_mail_message_access(
cr, uid, mids, operation, model_obj=model_obj, context=context)
def write(self, cr, uid, ids, vals, context=None):
posts = self.browse(cr, uid, ids, context=context)
if 'state' in vals:
if vals['state'] in ['active', 'close'] and any(not post.can_close for post in posts):
raise KarmaError('Not enough karma to close or reopen a post.')
if 'active' in vals:
if any(not post.can_unlink for post in posts):
raise KarmaError('Not enough karma to delete or reactivate a post')
if 'is_correct' in vals:
if any(not post.can_accept for post in posts):
raise KarmaError('Not enough karma to accept or refuse an answer')
# update karma except for self-acceptance
mult = 1 if vals['is_correct'] else -1
for post in self.browse(cr, uid, ids, context=context):
if vals['is_correct'] != post.is_correct and post.create_uid.id != uid:
self.pool['res.users'].add_karma(cr, SUPERUSER_ID, [post.create_uid.id], post.forum_id.karma_gen_answer_accepted * mult, context=context)
self.pool['res.users'].add_karma(cr, SUPERUSER_ID, [uid], post.forum_id.karma_gen_answer_accept * mult, context=context)
if any(key not in ['state', 'active', 'is_correct', 'closed_uid', 'closed_date', 'closed_reason_id'] for key in vals.keys()) and any(not post.can_edit for post in posts):
raise KarmaError('Not enough karma to edit a post.')
res = super(Post, self).write(cr, uid, ids, vals, context=context)
        # if the post content was modified, notify followers
if 'content' in vals or 'name' in vals:
for post in posts:
if post.parent_id:
body, subtype = _('Answer Edited'), 'website_forum.mt_answer_edit'
obj_id = post.parent_id.id
else:
body, subtype = _('Question Edited'), 'website_forum.mt_question_edit'
obj_id = post.id
self.message_post(cr, uid, obj_id, body=body, subtype=subtype, context=context)
return res
def reopen(self, cr, uid, ids, context=None):
if any(post.parent_id or post.state != 'close'
for post in self.browse(cr, uid, ids, context=context)):
return False
reason_offensive = self.pool['ir.model.data'].xmlid_to_res_id(cr, uid, 'website_forum.reason_7')
reason_spam = self.pool['ir.model.data'].xmlid_to_res_id(cr, uid, 'website_forum.reason_8')
for post in self.browse(cr, uid, ids, context=context):
if post.closed_reason_id.id in (reason_offensive, reason_spam):
_logger.info('Upvoting user <%s>, reopening spam/offensive question',
post.create_uid)
# TODO: in master, consider making this a tunable karma parameter
self.pool['res.users'].add_karma(cr, SUPERUSER_ID, [post.create_uid.id],
post.forum_id.karma_gen_question_downvote * -5,
context=context)
self.pool['forum.post'].write(cr, SUPERUSER_ID, ids, {'state': 'active'}, context=context)
def close(self, cr, uid, ids, reason_id, context=None):
if any(post.parent_id for post in self.browse(cr, uid, ids, context=context)):
return False
reason_offensive = self.pool['ir.model.data'].xmlid_to_res_id(cr, uid, 'website_forum.reason_7')
reason_spam = self.pool['ir.model.data'].xmlid_to_res_id(cr, uid, 'website_forum.reason_8')
if reason_id in (reason_offensive, reason_spam):
for post in self.browse(cr, uid, ids, context=context):
_logger.info('Downvoting user <%s> for posting spam/offensive contents',
post.create_uid)
# TODO: in master, consider making this a tunable karma parameter
self.pool['res.users'].add_karma(cr, SUPERUSER_ID, [post.create_uid.id],
post.forum_id.karma_gen_question_downvote * 5,
context=context)
self.pool['forum.post'].write(cr, uid, ids, {
'state': 'close',
'closed_uid': uid,
'closed_date': datetime.today().strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT),
'closed_reason_id': reason_id,
}, context=context)
def unlink(self, cr, uid, ids, context=None):
posts = self.browse(cr, uid, ids, context=context)
if any(not post.can_unlink for post in posts):
raise KarmaError('Not enough karma to unlink a post')
        # if unlinking an accepted answer: remove the karma it granted
for post in posts:
if post.is_correct:
self.pool['res.users'].add_karma(cr, SUPERUSER_ID, [post.create_uid.id], post.forum_id.karma_gen_answer_accepted * -1, context=context)
self.pool['res.users'].add_karma(cr, SUPERUSER_ID, [uid], post.forum_id.karma_gen_answer_accept * -1, context=context)
return super(Post, self).unlink(cr, uid, ids, context=context)
def vote(self, cr, uid, ids, upvote=True, context=None):
Vote = self.pool['forum.post.vote']
vote_ids = Vote.search(cr, uid, [('post_id', 'in', ids), ('user_id', '=', uid)], context=context)
new_vote = '1' if upvote else '-1'
voted_forum_ids = set()
if vote_ids:
for vote in Vote.browse(cr, uid, vote_ids, context=context):
if upvote:
new_vote = '0' if vote.vote == '-1' else '1'
else:
new_vote = '0' if vote.vote == '1' else '-1'
Vote.write(cr, uid, vote_ids, {'vote': new_vote}, context=context)
voted_forum_ids.add(vote.post_id.id)
        # only create new votes for posts the user has not voted on yet
        for post_id in set(ids) - voted_forum_ids:
            Vote.create(cr, uid, {'post_id': post_id, 'vote': new_vote}, context=context)
return {'vote_count': self._get_vote_count(cr, uid, ids, None, None, context=context)[ids[0]], 'user_vote': new_vote}
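    # Toggle semantics sketch (per the branches above): upvoting a post the
    # user previously downvoted cancels the vote (writes '0') rather than
    # jumping straight to '1'; posts without an existing vote get a fresh
    # forum.post.vote record with '1' or '-1'.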
def convert_answer_to_comment(self, cr, uid, id, context=None):
""" Tools to convert an answer (forum.post) to a comment (mail.message).
The original post is unlinked and a new comment is posted on the question
using the post create_uid as the comment's author. """
post = self.browse(cr, SUPERUSER_ID, id, context=context)
if not post.parent_id:
return False
# karma-based action check: use the post field that computed own/all value
if not post.can_comment_convert:
raise KarmaError('Not enough karma to convert an answer to a comment')
# post the message
question = post.parent_id
values = {
'author_id': post.create_uid.partner_id.id,
'body': html2plaintext(post.content),
'type': 'comment',
'subtype': 'mail.mt_comment',
'date': post.create_date,
}
message_id = self.pool['forum.post'].message_post(
cr, uid, question.id,
context=dict(context, mail_create_nosubscribe=True),
**values)
# unlink the original answer, using SUPERUSER_ID to avoid karma issues
self.pool['forum.post'].unlink(cr, SUPERUSER_ID, [post.id], context=context)
return message_id
def convert_comment_to_answer(self, cr, uid, message_id, default=None, context=None):
""" Tool to convert a comment (mail.message) into an answer (forum.post).
The original comment is unlinked and a new answer from the comment's author
is created. Nothing is done if the comment's author already answered the
question. """
comment = self.pool['mail.message'].browse(cr, SUPERUSER_ID, message_id, context=context)
post = self.pool['forum.post'].browse(cr, uid, comment.res_id, context=context)
user = self.pool['res.users'].browse(cr, uid, uid, context=context)
        if not comment.author_id or not comment.author_id.user_ids:  # only comments posted by users can be converted
return False
# karma-based action check: must check the message's author to know if own / all
karma_convert = comment.author_id.id == user.partner_id.id and post.forum_id.karma_comment_convert_own or post.forum_id.karma_comment_convert_all
can_convert = uid == SUPERUSER_ID or user.karma >= karma_convert
if not can_convert:
raise KarmaError('Not enough karma to convert a comment to an answer')
        # check that the message's author does not already have an answer
question = post.parent_id if post.parent_id else post
post_create_uid = comment.author_id.user_ids[0]
if any(answer.create_uid.id == post_create_uid.id for answer in question.child_ids):
return False
# create the new post
post_values = {
'forum_id': question.forum_id.id,
'content': comment.body,
'parent_id': question.id,
}
        # create as the author user so that create_uid is set correctly
new_post_id = self.pool['forum.post'].create(cr, post_create_uid.id, post_values, context=context)
# delete comment
self.pool['mail.message'].unlink(cr, SUPERUSER_ID, [comment.id], context=context)
return new_post_id
def unlink_comment(self, cr, uid, id, message_id, context=None):
comment = self.pool['mail.message'].browse(cr, SUPERUSER_ID, message_id, context=context)
post = self.pool['forum.post'].browse(cr, uid, id, context=context)
user = self.pool['res.users'].browse(cr, SUPERUSER_ID, uid, context=context)
if not comment.model == 'forum.post' or not comment.res_id == id:
return False
# karma-based action check: must check the message's author to know if own or all
karma_unlink = comment.author_id.id == user.partner_id.id and post.forum_id.karma_comment_unlink_own or post.forum_id.karma_comment_unlink_all
can_unlink = uid == SUPERUSER_ID or user.karma >= karma_unlink
if not can_unlink:
raise KarmaError('Not enough karma to unlink a comment')
return self.pool['mail.message'].unlink(cr, SUPERUSER_ID, [message_id], context=context)
def set_viewed(self, cr, uid, ids, context=None):
cr.execute("""UPDATE forum_post SET views = views+1 WHERE id IN %s""", (tuple(ids),))
return True
def _get_access_link(self, cr, uid, mail, partner, context=None):
post = self.pool['forum.post'].browse(cr, uid, mail.res_id, context=context)
res_id = post.parent_id and "%s#answer-%s" % (post.parent_id.id, post.id) or post.id
return "/forum/%s/question/%s" % (post.forum_id.id, res_id)
@api.cr_uid_ids_context
def message_post(self, cr, uid, thread_id, type='notification', subtype=None, context=None, **kwargs):
if thread_id and type == 'comment': # user comments have a restriction on karma
if isinstance(thread_id, (list, tuple)):
post_id = thread_id[0]
else:
post_id = thread_id
post = self.browse(cr, uid, post_id, context=context)
if not post.can_comment:
raise KarmaError('Not enough karma to comment')
return super(Post, self).message_post(cr, uid, thread_id, type=type, subtype=subtype, context=context, **kwargs)
class PostReason(osv.Model):
_name = "forum.post.reason"
_description = "Post Closing Reason"
_order = 'name'
_columns = {
'name': fields.char('Post Reason', required=True, translate=True),
}
class Vote(osv.Model):
_name = 'forum.post.vote'
_description = 'Vote'
_columns = {
'post_id': fields.many2one('forum.post', 'Post', ondelete='cascade', required=True),
'user_id': fields.many2one('res.users', 'User', required=True),
'vote': fields.selection([('1', '1'), ('-1', '-1'), ('0', '0')], 'Vote', required=True),
'create_date': fields.datetime('Create Date', select=True, readonly=True),
# TODO master: store these two
'forum_id': fields.related('post_id', 'forum_id', type='many2one', relation='forum.forum', string='Forum'),
'recipient_id': fields.related('post_id', 'create_uid', type='many2one', relation='res.users', string='To', help="The user receiving the vote"),
}
_defaults = {
'user_id': lambda self, cr, uid, ctx: uid,
'vote': lambda *args: '1',
}
def _get_karma_value(self, old_vote, new_vote, up_karma, down_karma):
_karma_upd = {
'-1': {'-1': 0, '0': -1 * down_karma, '1': -1 * down_karma + up_karma},
'0': {'-1': 1 * down_karma, '0': 0, '1': up_karma},
'1': {'-1': -1 * up_karma + down_karma, '0': -1 * up_karma, '1': 0}
}
return _karma_upd[old_vote][new_vote]
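    # Worked example: flipping an upvote into a downvote ('1' -> '-1')
    # removes the previously granted up_karma and applies down_karma:
    #     _get_karma_value('1', '-1', 10, -2) == -10 + -2 == -12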
def create(self, cr, uid, vals, context=None):
vote_id = super(Vote, self).create(cr, uid, vals, context=context)
vote = self.browse(cr, uid, vote_id, context=context)
# own post check
if vote.user_id.id == vote.post_id.create_uid.id:
raise Warning('Not allowed to vote for its own post')
# karma check
if vote.vote == '1' and not vote.post_id.can_upvote:
raise KarmaError('Not enough karma to upvote.')
elif vote.vote == '-1' and not vote.post_id.can_downvote:
raise KarmaError('Not enough karma to downvote.')
# karma update
if vote.post_id.parent_id:
karma_value = self._get_karma_value('0', vote.vote, vote.forum_id.karma_gen_answer_upvote, vote.forum_id.karma_gen_answer_downvote)
else:
karma_value = self._get_karma_value('0', vote.vote, vote.forum_id.karma_gen_question_upvote, vote.forum_id.karma_gen_question_downvote)
self.pool['res.users'].add_karma(cr, SUPERUSER_ID, [vote.recipient_id.id], karma_value, context=context)
return vote_id
def write(self, cr, uid, ids, values, context=None):
if 'vote' in values:
for vote in self.browse(cr, uid, ids, context=context):
# own post check
if vote.user_id.id == vote.post_id.create_uid.id:
raise Warning('Not allowed to vote for its own post')
# karma check
                if (values['vote'] == '1' or (vote.vote == '-1' and values['vote'] == '0')) and not vote.post_id.can_upvote:
                    raise KarmaError('Not enough karma to upvote.')
                elif (values['vote'] == '-1' or (vote.vote == '1' and values['vote'] == '0')) and not vote.post_id.can_downvote:
                    raise KarmaError('Not enough karma to downvote.')
# karma update
if vote.post_id.parent_id:
karma_value = self._get_karma_value(vote.vote, values['vote'], vote.forum_id.karma_gen_answer_upvote, vote.forum_id.karma_gen_answer_downvote)
else:
karma_value = self._get_karma_value(vote.vote, values['vote'], vote.forum_id.karma_gen_question_upvote, vote.forum_id.karma_gen_question_downvote)
self.pool['res.users'].add_karma(cr, SUPERUSER_ID, [vote.recipient_id.id], karma_value, context=context)
res = super(Vote, self).write(cr, uid, ids, values, context=context)
return res
class Tags(osv.Model):
_name = "forum.tag"
_description = "Tag"
_inherit = ['website.seo.metadata']
def _get_posts_count(self, cr, uid, ids, field_name, arg, context=None):
return dict((tag_id, self.pool['forum.post'].search_count(cr, uid, [('tag_ids', 'in', tag_id)], context=context)) for tag_id in ids)
def _get_tag_from_post(self, cr, uid, ids, context=None):
return list(set(
[tag.id for post in self.pool['forum.post'].browse(cr, SUPERUSER_ID, ids, context=context) for tag in post.tag_ids]
))
_columns = {
'name': fields.char('Name', required=True),
'forum_id': fields.many2one('forum.forum', 'Forum', required=True),
'post_ids': fields.many2many('forum.post', 'forum_tag_rel', 'tag_id', 'post_id', 'Posts'),
'posts_count': fields.function(
_get_posts_count, type='integer', string="Number of Posts",
store={
'forum.post': (_get_tag_from_post, ['tag_ids'], 10),
}
),
'create_uid': fields.many2one('res.users', 'Created by', readonly=True),
}
|
eamonnmag/invenio-search
|
refs/heads/master
|
docs/_ext/ultramock.py
|
164
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Hijacks `mock` to fake as many non-available modules as possible."""
import sys
import types
try:
import unittest.mock as mock
except ImportError:
import mock
# skip `_is_magic` check.
orig_is_magic = mock._is_magic
def always_false(*args, **kwargs):
return False
# avoid spec configuration for mocked classes with super classes.
# honestly this does not happen very often and is kind of a tricky case.
orig_mock_add_spec = mock.NonCallableMock._mock_add_spec
def mock_add_spec_fake(self, spec, spec_set):
orig_mock_add_spec(self, None, None)
# special MagicMock with empty docs
class MyMagicMock(mock.MagicMock):
""""""
# set up a fake class-metaclass hierarchy
class SuperMockMetaMeta(MyMagicMock):
__metaclass__ = MyMagicMock()
class SuperMockMeta(MyMagicMock):
__metaclass__ = SuperMockMetaMeta
class SuperMock(MyMagicMock):
__metaclass__ = SuperMockMeta
class MockedModule(types.ModuleType):
def __init__(self, name):
        super(MockedModule, self).__init__(name)
        self.__name__ = name
self.__file__ = self.__name__.replace('.', '/') + '.py'
sys.modules[self.__name__] = self
def __getattr__(self, key):
obj = SuperMock
setattr(self, key, obj)
return obj
# overwrite imports
orig_import = __import__
def import_mock(name, *args, **kwargs):
try:
return orig_import(name, *args, **kwargs)
except ImportError:
return MockedModule(name)
try:
    import __builtin__  # noqa: Python 2
    _import_target = '__builtin__.__import__'
except ImportError:
    _import_target = 'builtins.__import__'  # Python 3
import_patch = mock.patch(_import_target, side_effect=import_mock)
# public methods
def activate():
mock._is_magic = always_false
mock.NonCallableMock._mock_add_spec = mock_add_spec_fake
import_patch.start()
def deactivate():
import_patch.stop()
mock.NonCallableMock._mock_add_spec = orig_mock_add_spec
mock._is_magic = orig_is_magic
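# A minimal usage sketch (not part of the original module; the imported
# module name below is hypothetical):
#
#     import ultramock
#     ultramock.activate()
#     import some_unavailable_module   # yields a MockedModule instead of failing
#     ultramock.deactivate()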
|
abadger/ansible-modules-core
|
refs/heads/devel
|
cloud/openstack/os_subnets_facts.py
|
4
|
#!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
DOCUMENTATION = '''
---
module: os_subnets_facts
short_description: Retrieve facts about one or more OpenStack subnets.
version_added: "2.0"
author: "Davide Agnello (@dagnello)"
description:
- Retrieve facts about one or more subnets from OpenStack.
requirements:
- "python >= 2.6"
- "shade"
options:
   name:
description:
- Name or ID of the subnet
required: false
filters:
description:
- A dictionary of meta data to use for further filtering. Elements of
this dictionary may be additional dictionaries.
required: false
extends_documentation_fragment: openstack
'''
EXAMPLES = '''
- name: Gather facts about previously created subnets
os_subnets_facts:
auth:
auth_url: https://your_api_url.com:9000/v2.0
username: user
password: password
project_name: someproject
- name: Show openstack subnets
debug:
var: openstack_subnets
- name: Gather facts about a previously created subnet by name
os_subnets_facts:
auth:
auth_url: https://your_api_url.com:9000/v2.0
username: user
password: password
project_name: someproject
name: subnet1
- name: Show openstack subnets
debug:
var: openstack_subnets
- name: Gather facts about a previously created subnet with filter
# Note: name and filters parameters are not mutually exclusive
os_subnets_facts:
auth:
auth_url: https://your_api_url.com:9000/v2.0
username: user
password: password
project_name: someproject
filters:
tenant_id: 55e2ce24b2a245b09f181bf025724cbe
- name: Show openstack subnets
debug:
var: openstack_subnets
'''
RETURN = '''
openstack_subnets:
description: has all the openstack facts about the subnets
returned: always, but can be null
type: complex
contains:
id:
description: Unique UUID.
returned: success
type: string
name:
description: Name given to the subnet.
returned: success
type: string
network_id:
description: Network ID this subnet belongs in.
returned: success
type: string
cidr:
description: Subnet's CIDR.
returned: success
type: string
gateway_ip:
description: Subnet's gateway ip.
returned: success
type: string
enable_dhcp:
description: DHCP enable flag for this subnet.
returned: success
type: bool
ip_version:
description: IP version for this subnet.
returned: success
type: int
tenant_id:
description: Tenant id associated with this subnet.
returned: success
type: string
dns_nameservers:
description: DNS name servers for this subnet.
returned: success
type: list of strings
allocation_pools:
description: Allocation pools associated with this subnet.
returned: success
type: list of dicts
'''
def main():
argument_spec = openstack_full_argument_spec(
name=dict(required=False, default=None),
filters=dict(required=False, type='dict', default=None)
)
module = AnsibleModule(argument_spec)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
try:
cloud = shade.openstack_cloud(**module.params)
subnets = cloud.search_subnets(module.params['name'],
module.params['filters'])
module.exit_json(changed=False, ansible_facts=dict(
openstack_subnets=subnets))
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e))
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
|
viki9698/jizhanggroup
|
refs/heads/master
|
django/contrib/gis/gdal/tests/test_srs.py
|
351
|
from django.contrib.gis.gdal import SpatialReference, CoordTransform, OGRException, SRSException
from django.utils import unittest
class TestSRS:
def __init__(self, wkt, **kwargs):
self.wkt = wkt
for key, value in kwargs.items():
setattr(self, key, value)
# Some Spatial Reference examples
srlist = (TestSRS('GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],TOWGS84[0,0,0,0,0,0,0],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]]',
proj='+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs ',
epsg=4326, projected=False, geographic=True, local=False,
lin_name='unknown', ang_name='degree', lin_units=1.0, ang_units=0.0174532925199,
auth={'GEOGCS' : ('EPSG', '4326'), 'spheroid' : ('EPSG', '7030')},
attr=(('DATUM', 'WGS_1984'), (('SPHEROID', 1), '6378137'),('primem|authority', 'EPSG'),),
),
TestSRS('PROJCS["NAD83 / Texas South Central",GEOGCS["NAD83",DATUM["North_American_Datum_1983",SPHEROID["GRS 1980",6378137,298.257222101,AUTHORITY["EPSG","7019"]],AUTHORITY["EPSG","6269"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4269"]],PROJECTION["Lambert_Conformal_Conic_2SP"],PARAMETER["standard_parallel_1",30.28333333333333],PARAMETER["standard_parallel_2",28.38333333333333],PARAMETER["latitude_of_origin",27.83333333333333],PARAMETER["central_meridian",-99],PARAMETER["false_easting",600000],PARAMETER["false_northing",4000000],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AUTHORITY["EPSG","32140"]]',
proj=None, epsg=32140, projected=True, geographic=False, local=False,
lin_name='metre', ang_name='degree', lin_units=1.0, ang_units=0.0174532925199,
auth={'PROJCS' : ('EPSG', '32140'), 'spheroid' : ('EPSG', '7019'), 'unit' : ('EPSG', '9001'),},
attr=(('DATUM', 'North_American_Datum_1983'),(('SPHEROID', 2), '298.257222101'),('PROJECTION','Lambert_Conformal_Conic_2SP'),),
),
TestSRS('PROJCS["NAD_1983_StatePlane_Texas_South_Central_FIPS_4204_Feet",GEOGCS["GCS_North_American_1983",DATUM["North_American_Datum_1983",SPHEROID["GRS_1980",6378137.0,298.257222101]],PRIMEM["Greenwich",0.0],UNIT["Degree",0.0174532925199433]],PROJECTION["Lambert_Conformal_Conic_2SP"],PARAMETER["False_Easting",1968500.0],PARAMETER["False_Northing",13123333.33333333],PARAMETER["Central_Meridian",-99.0],PARAMETER["Standard_Parallel_1",28.38333333333333],PARAMETER["Standard_Parallel_2",30.28333333333334],PARAMETER["Latitude_Of_Origin",27.83333333333333],UNIT["Foot_US",0.3048006096012192]]',
proj=None, epsg=None, projected=True, geographic=False, local=False,
lin_name='Foot_US', ang_name='Degree', lin_units=0.3048006096012192, ang_units=0.0174532925199,
auth={'PROJCS' : (None, None),},
attr=(('PROJCS|GeOgCs|spheroid', 'GRS_1980'),(('projcs', 9), 'UNIT'), (('projcs', 11), None),),
),
# This is really ESRI format, not WKT -- but the import should work the same
TestSRS('LOCAL_CS["Non-Earth (Meter)",LOCAL_DATUM["Local Datum",0],UNIT["Meter",1.0],AXIS["X",EAST],AXIS["Y",NORTH]]',
esri=True, proj=None, epsg=None, projected=False, geographic=False, local=True,
lin_name='Meter', ang_name='degree', lin_units=1.0, ang_units=0.0174532925199,
attr=(('LOCAL_DATUM', 'Local Datum'), ('unit', 'Meter')),
),
)
# Well-Known Names
well_known = (TestSRS('GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],TOWGS84[0,0,0,0,0,0,0],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]]', wk='WGS84', name='WGS 84', attrs=(('GEOGCS|AUTHORITY', 1, '4326'), ('SPHEROID', 'WGS 84'))),
TestSRS('GEOGCS["WGS 72",DATUM["WGS_1972",SPHEROID["WGS 72",6378135,298.26,AUTHORITY["EPSG","7043"]],AUTHORITY["EPSG","6322"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4322"]]', wk='WGS72', name='WGS 72', attrs=(('GEOGCS|AUTHORITY', 1, '4322'), ('SPHEROID', 'WGS 72'))),
TestSRS('GEOGCS["NAD27",DATUM["North_American_Datum_1927",SPHEROID["Clarke 1866",6378206.4,294.9786982138982,AUTHORITY["EPSG","7008"]],AUTHORITY["EPSG","6267"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4267"]]', wk='NAD27', name='NAD27', attrs=(('GEOGCS|AUTHORITY', 1, '4267'), ('SPHEROID', 'Clarke 1866'))),
TestSRS('GEOGCS["NAD83",DATUM["North_American_Datum_1983",SPHEROID["GRS 1980",6378137,298.257222101,AUTHORITY["EPSG","7019"]],AUTHORITY["EPSG","6269"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4269"]]', wk='NAD83', name='NAD83', attrs=(('GEOGCS|AUTHORITY', 1, '4269'), ('SPHEROID', 'GRS 1980'))),
TestSRS('PROJCS["NZGD49 / Karamea Circuit",GEOGCS["NZGD49",DATUM["New_Zealand_Geodetic_Datum_1949",SPHEROID["International 1924",6378388,297,AUTHORITY["EPSG","7022"]],TOWGS84[59.47,-5.04,187.44,0.47,-0.1,1.024,-4.5993],AUTHORITY["EPSG","6272"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4272"]],PROJECTION["Transverse_Mercator"],PARAMETER["latitude_of_origin",-41.28991152777778],PARAMETER["central_meridian",172.1090281944444],PARAMETER["scale_factor",1],PARAMETER["false_easting",300000],PARAMETER["false_northing",700000],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AUTHORITY["EPSG","27216"]]', wk='EPSG:27216', name='NZGD49 / Karamea Circuit', attrs=(('PROJECTION','Transverse_Mercator'), ('SPHEROID', 'International 1924'))),
)
bad_srlist = ('Foobar', 'OOJCS["NAD83 / Texas South Central",GEOGCS["NAD83",DATUM["North_American_Datum_1983",SPHEROID["GRS 1980",6378137,298.257222101,AUTHORITY["EPSG","7019"]],AUTHORITY["EPSG","6269"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4269"]],PROJECTION["Lambert_Conformal_Conic_2SP"],PARAMETER["standard_parallel_1",30.28333333333333],PARAMETER["standard_parallel_2",28.38333333333333],PARAMETER["latitude_of_origin",27.83333333333333],PARAMETER["central_meridian",-99],PARAMETER["false_easting",600000],PARAMETER["false_northing",4000000],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AUTHORITY["EPSG","32140"]]',)
class SpatialRefTest(unittest.TestCase):
def test01_wkt(self):
"Testing initialization on valid OGC WKT."
for s in srlist:
srs = SpatialReference(s.wkt)
def test02_bad_wkt(self):
"Testing initialization on invalid WKT."
for bad in bad_srlist:
try:
srs = SpatialReference(bad)
srs.validate()
except (SRSException, OGRException):
pass
else:
                self.fail('Should not have initialized on bad WKT "%s"!' % bad)
def test03_get_wkt(self):
"Testing getting the WKT."
for s in srlist:
srs = SpatialReference(s.wkt)
self.assertEqual(s.wkt, srs.wkt)
def test04_proj(self):
"Test PROJ.4 import and export."
for s in srlist:
if s.proj:
srs1 = SpatialReference(s.wkt)
srs2 = SpatialReference(s.proj)
self.assertEqual(srs1.proj, srs2.proj)
def test05_epsg(self):
"Test EPSG import."
for s in srlist:
if s.epsg:
srs1 = SpatialReference(s.wkt)
srs2 = SpatialReference(s.epsg)
srs3 = SpatialReference(str(s.epsg))
srs4 = SpatialReference('EPSG:%d' % s.epsg)
for srs in (srs1, srs2, srs3, srs4):
for attr, expected in s.attr:
self.assertEqual(expected, srs[attr])
def test07_boolean_props(self):
"Testing the boolean properties."
for s in srlist:
srs = SpatialReference(s.wkt)
self.assertEqual(s.projected, srs.projected)
self.assertEqual(s.geographic, srs.geographic)
def test08_angular_linear(self):
"Testing the linear and angular units routines."
for s in srlist:
srs = SpatialReference(s.wkt)
self.assertEqual(s.ang_name, srs.angular_name)
self.assertEqual(s.lin_name, srs.linear_name)
self.assertAlmostEqual(s.ang_units, srs.angular_units, 9)
self.assertAlmostEqual(s.lin_units, srs.linear_units, 9)
def test09_authority(self):
"Testing the authority name & code routines."
for s in srlist:
if hasattr(s, 'auth'):
srs = SpatialReference(s.wkt)
for target, tup in s.auth.items():
self.assertEqual(tup[0], srs.auth_name(target))
self.assertEqual(tup[1], srs.auth_code(target))
def test10_attributes(self):
"Testing the attribute retrieval routines."
for s in srlist:
srs = SpatialReference(s.wkt)
for tup in s.attr:
att = tup[0] # Attribute to test
exp = tup[1] # Expected result
self.assertEqual(exp, srs[att])
def test11_wellknown(self):
"Testing Well Known Names of Spatial References."
for s in well_known:
srs = SpatialReference(s.wk)
self.assertEqual(s.name, srs.name)
for tup in s.attrs:
if len(tup) == 2:
key = tup[0]
exp = tup[1]
elif len(tup) == 3:
key = tup[:2]
exp = tup[2]
self.assertEqual(srs[key], exp)
def test12_coordtransform(self):
"Testing initialization of a CoordTransform."
target = SpatialReference('WGS84')
for s in srlist:
if s.proj:
ct = CoordTransform(SpatialReference(s.wkt), target)
def test13_attr_value(self):
"Testing the attr_value() method."
s1 = SpatialReference('WGS84')
self.assertRaises(TypeError, s1.__getitem__, 0)
self.assertRaises(TypeError, s1.__getitem__, ('GEOGCS', 'foo'))
self.assertEqual('WGS 84', s1['GEOGCS'])
self.assertEqual('WGS_1984', s1['DATUM'])
self.assertEqual('EPSG', s1['AUTHORITY'])
self.assertEqual(4326, int(s1['AUTHORITY', 1]))
self.assertEqual(None, s1['FOOBAR'])
def suite():
s = unittest.TestSuite()
s.addTest(unittest.makeSuite(SpatialRefTest))
return s
def run(verbosity=2):
unittest.TextTestRunner(verbosity=verbosity).run(suite())
|
eahneahn/free
|
refs/heads/master
|
lib/python2.7/site-packages/pygments/scanner.py
|
365
|
# -*- coding: utf-8 -*-
"""
pygments.scanner
~~~~~~~~~~~~~~~~
This library implements a regex based scanner. Some languages
like Pascal are easy to parse but have some keywords that
depend on the context. Because of this it's impossible to lex
that just by using a regular expression lexer like the
`RegexLexer`.
Have a look at the `DelphiLexer` to get an idea of how to use
this scanner.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
class EndOfText(RuntimeError):
"""
Raise if end of text is reached and the user
tried to call a match function.
"""
class Scanner(object):
"""
Simple scanner
All method patterns are regular expression strings (not
compiled expressions!)
"""
def __init__(self, text, flags=0):
"""
:param text: The text which should be scanned
:param flags: default regular expression flags
"""
self.data = text
self.data_length = len(text)
self.start_pos = 0
self.pos = 0
self.flags = flags
self.last = None
self.match = None
self._re_cache = {}
def eos(self):
"""`True` if the scanner reached the end of text."""
return self.pos >= self.data_length
eos = property(eos, eos.__doc__)
def check(self, pattern):
"""
Apply `pattern` on the current position and return
the match object. (Doesn't touch pos). Use this for
lookahead.
"""
if self.eos:
raise EndOfText()
if pattern not in self._re_cache:
self._re_cache[pattern] = re.compile(pattern, self.flags)
return self._re_cache[pattern].match(self.data, self.pos)
def test(self, pattern):
"""Apply a pattern on the current position and check
        if it matches. Doesn't touch pos."""
return self.check(pattern) is not None
def scan(self, pattern):
"""
Scan the text for the given pattern and update pos/match
        and related fields. The return value is a boolean that
indicates if the pattern matched. The matched value is
stored on the instance as ``match``, the last value is
stored as ``last``. ``start_pos`` is the position of the
pointer before the pattern was matched, ``pos`` is the
end position.
"""
if self.eos:
raise EndOfText()
if pattern not in self._re_cache:
self._re_cache[pattern] = re.compile(pattern, self.flags)
self.last = self.match
m = self._re_cache[pattern].match(self.data, self.pos)
if m is None:
return False
self.start_pos = m.start()
self.pos = m.end()
self.match = m.group()
return True
def get_char(self):
"""Scan exactly one char."""
self.scan('.')
def __repr__(self):
return '<%s %d/%d>' % (
self.__class__.__name__,
self.pos,
self.data_length
)
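# A minimal usage sketch (not part of the original module; the patterns are
# illustrative plain regex strings):
#
#     s = Scanner('begin end')
#     s.test(r'begin')      # lookahead only; returns True, pos unchanged
#     s.scan(r'begin')      # consumes 'begin'; s.match == 'begin'
#     s.scan(r'\s+')        # consumes the whitespace
#     s.scan(r'end')
#     assert s.eos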
|
Philippe12/external_chromium_org
|
refs/heads/kitkat
|
third_party/protobuf/__init__.py
|
45382
| |
Hikari-no-Tenshi/android_external_skia
|
refs/heads/10.0
|
infra/bots/assets/opencl_ocl_icd_linux/download.py
|
264
|
#!/usr/bin/env python
#
# Copyright 2017 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Download the current version of the asset."""
import common
if __name__ == '__main__':
common.run('download')
|
jacquev6/LowVoltage
|
refs/heads/master
|
LowVoltage/compounds/tests/__init__.py
|
2
|
# coding: utf8
# Copyright 2014-2015 Vincent Jacques <[email protected]>
|
HSU-MilitaryLogisticsClub/pysatcatcher
|
refs/heads/master
|
antenna.py
|
2
|
# -*- coding: utf-8 -*-
import unittest
import serial
import threading
import time
class RAC805:
def __init__(self):
#self._ser = serial.serial('/dev/tty',9600)
pass
def connect(self,port):
self._ser = serial.Serial(port, 9600, timeout=0)
def moveazel(self,az,el):
if(el>=0.0):
command = "AZ"+str(az)+" EL"+str(el)+"\r"
self._ser.write(command)
return True
def stop(self):
        # a bare carriage return stops the rotator
        self._ser.write("\r")
return True
    def receive(self):
        # read until the ">>" prompt appears or the port returns an empty
        # read (timeout); the first readline happens before the check so
        # the loop can actually run
        result = self._ser.readline()
        while not ((">>" in result) or (result == "")):
            time.sleep(0.0001)
            result = self._ser.readline()
            print(result)
        return True
def close(self):
self._ser.close()
return True
class Antenna(object):
def __init__(self,rotatormodel):
if rotatormodel == "RAC805":
self._radio = RAC805()
def connect(self,port):
self._radio.connect(port)
def moveazel(self,az,el):
return self._radio.moveazel(az,el)
def stop(self):
return self._radio.stop()
    def receive(self):
        self._radio.receive()
        #t=threading.Thread(target=self._radio.receive)
        #t.setDaemon(True)
        #t.start()
        #print "threadstart"
def close(self):
return self._radio.close()
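# A minimal usage sketch (the serial port name is illustrative):
#
#     ant = Antenna("RAC805")
#     ant.connect("/dev/ttyUSB0")
#     ant.moveazel(180.0, 45.0)   # sends "AZ180.0 EL45.0\r"
#     ant.stop()
#     ant.close()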
|
johnbren85/GrowChinook
|
refs/heads/master
|
fisheries/TestSens.py
|
2
|
#!/usr/bin/python
import os
import glob
import cgi
import PrintPages as pt
address = cgi.escape(os.environ["REMOTE_ADDR"])
script = "Sensitivity Form"
pt.write_log_entry(script, address)
pt.print_header('GrowChinook', 'Sens')
pt.print_full_form(None, None, 'Sens_in', 'RunModelSens.py')
extension = 'csv'
os.chdir('uploads')
result = glob.glob('*.' + extension)
print('''
{}
</div>
</body>
'''.format(result))
print ('</html>')
|
danylaksono/inasafe
|
refs/heads/master
|
safe/impact_functions/test_real_impact_functions.py
|
5
|
"""Works with real library impact functions rather than test examples
"""
import unittest
from safe.impact_functions.core import get_admissible_plugins
from safe.impact_functions.core import requirements_collect
class Test_real_plugins(unittest.TestCase):
"""Tests of Risiko calculations
"""
def test_filtering_of_impact_functions(self):
"""Impact functions are filtered correctly
"""
# Check empty call returns all
P = get_admissible_plugins([])
# List the known impact function names
# based on their class names - not their titles
msg = 'Available impact functions are: %s' % str(P.keys())
#print msg
assert 'Flood Evacuation Function Vector Hazard' in P, msg
assert 'I T B Earthquake Building Damage Function' in P, msg
assert 'Earthquake Building Impact Function' in P, msg
assert 'P A G Fatality Function' in P, msg
assert 'Flood Evacuation Function' in P, msg
assert 'Flood Building Impact Function' in P, msg
assert 'I T B Fatality Function' in P, msg
assert 'Volcano Building Impact' in P, msg
assert 'Volcano Polygon Hazard Population' in P, msg
# This one should get 2 earthquake building impact functions
D1 = {'category': 'hazard', 'subcategory': 'earthquake', 'unit': 'MMI'}
D2 = {'category': 'exposure', 'datatype': 'itb',
'subcategory': 'structure'}
# Add layertype
D1['layertype'] = 'raster'
D2['layertype'] = 'vector'
P = get_admissible_plugins([D1, D2])
        msg = 'Expected: len(P) >= 1, Got: len(P) is %i' % len(P)
assert len(P) >= 1, msg # Depending on other tests there could be more
assert 'Earthquake Building Impact Function' in P
# This one should get 3 flood population impact functions
D1 = {'category': 'hazard', 'subcategory': 'flood', 'unit': 'm'}
D2 = {'category': 'exposure', 'subcategory': 'population'}
# Add layertype
D1['layertype'] = 'raster'
D2['layertype'] = 'raster'
P = get_admissible_plugins([D1, D2])
assert len(P) >= 1 # Depending on other tests there could be more
#assert 'W B Flood Evacuation Function' in P
# Try form where only one dictionary is passed
# This one gets all the flood related impact functions
# Try to get general inundation building impact function
f_name = 'Flood Building Impact Function'
P = get_admissible_plugins(D1)
assert len(P) >= 2
#assert 'W B Flood Evacuation Function' in P
assert f_name in P
#assert 'Flood Road Impact Function' in P
D1 = {'category': 'hazard', 'subcategory': 'tsunami'}
D2 = {'category': 'exposure', 'subcategory': 'structure'}
# Add layertype
#D1['layertype'] = 'raster' # Not required for flood building impact
D2['layertype'] = 'vector'
P = get_admissible_plugins([D1, D2])
msg = 'Expected name "%s" in P: %s' % (f_name, P)
assert f_name in P, msg
# Get requirements from expected function
P_all = get_admissible_plugins()
assert P[f_name] == P_all[f_name]
requirelines = requirements_collect(P[f_name])
for i, D in enumerate([D1, D2]):
for key in D:
msg = 'Key %s was not found in %s' % (key, requirelines[i])
assert key in requirelines[i], msg
msg = 'Val %s was not found in %s' % (D[key], requirelines[i])
assert D[key] in requirelines[i], msg
if __name__ == '__main__':
suite = unittest.makeSuite(Test_real_plugins, 'test')
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite)
|
oscar9/statistics_viewer
|
refs/heads/master
|
processmanager/processdirectory/stat14BoxAndWhisker.py
|
1
|
# encoding: utf-8
import sys
import gvsig
from gvsig import geom
import addons.statistics_viewer.statisticprocess
reload(addons.statistics_viewer.statisticprocess)
import addons.statistics_viewer.sv
reload(addons.statistics_viewer.sv)
from addons.statistics_viewer.sv.svScatterPlot import createPanelMouseListener, createChart, createPanel
from addons.statistics_viewer.statisticprocess.abstractprocess import AbstractStatisticProcess
import os
from addons.statistics_viewer.sv import svgraph
from org.jfree.data.statistics import DefaultBoxAndWhiskerCategoryDataset
from org.jfree.data.general import DatasetUtilities
from org.jfree.chart import ChartFactory
from org.jfree.chart.axis import NumberAxis
import random
class StatProcess(AbstractStatisticProcess):
name = u"Box and Whisker"
description = "Box and Whisker Description"
idprocess = "box-and-whisker-1"
allowZoomProcess = False
def processParameters(self): #o: dynclass
params = self.createInputParameters("BoxAndWhiskerParameters", "BoxAndWhiskerParametersProperties", "Description")
params.addDynFieldString("Layer").setMandatory(True)
#params.addDynFieldString("Field X").setMandatory(True)
#params.addDynFieldString("Field Y").setMandatory(True)
def process(self, params):
# Get initial parameters
param_layer = params.get("Layer")
param_x = "pob0_14" #params.get("Field X")
param_y = "pob15_65"
param_z = "pob66_mas"
#param_y = params.get("Field Y")
layer = gvsig.currentView().getLayer(param_layer)
# dataset
SERIES_COUNT = 1
CATEGORY_COUNT = 1
VALUE_COUNT = 4000
result = DefaultBoxAndWhiskerCategoryDataset()
# Numeric fields
sch = layer.getSchema()
listFields = []
listValues = {}
# get potential numeric fields
for field in sch:
dt = field.getDataTypeName()
if dt=="Integer" or dt=="Long" or dt=="Double":
listFields.append(field.getName())
listValues[field.getName()] = list()
for f in layer.features():
for field in listFields:
prev = listValues[field]
value = f.get(field)
prev.append(value)
listValues[field] = prev
for k in listValues.keys():
result.add(listValues[k], k, "")
# Create chart
#chart = createChart(result)
chart = ChartFactory.createBoxAndWhiskerChart(
"", "", "", result,
True)
plot = chart.getPlot()
plot.setDomainGridlinesVisible(True)
plot.setRangePannable(True)
rangeAxis = plot.getRangeAxis()
rangeAxis.setStandardTickUnits(NumberAxis.createIntegerTickUnits())
# Create panel from chart
panel = createPanel(chart)
#panel = createPanel(chart)
self.setOutputPanel(panel)
self.console = u"** Box And Whisker **"
def main(*args):
print "* stat14.py: Box And Whisker"
proc = StatProcess()
dynobject = proc.createParameters()
dynobject.setDynValue("Layer", "pob")
#dynobject.setDynValue("Field X", "LONGITUDE")
#dynobject.setDynValue("Field Y", "LATITUDE")
proc.process(dynobject.getValues())
print proc.getOutputConsole()
panel = proc.getOutputPanel()
print panel
|
Karel-van-de-Plassche/bokeh
|
refs/heads/master
|
bokeh/protocol/__init__.py
|
8
|
''' Implement and provide message protocols for communication between Bokeh
Servers and clients.
'''
from __future__ import absolute_import
import logging
log = logging.getLogger(__name__)
from tornado.escape import json_decode
from . import messages
from . import versions
from .exceptions import ProtocolError
class Protocol(object):
''' Provide a message factory for a given version of the Bokeh Server
message protocol.
Args:
version (str) : a string identifying a protocol version, e.g. "1.0"
'''
def __init__(self, version):
if version not in versions.spec:
raise ProtocolError("Unknown protocol version %r" % version)
self._version = version
self._messages = dict()
for msgtype, revision in versions.spec[version]:
self._messages[msgtype] = messages.index[(msgtype, revision)]
def __repr__(self):
return "Protocol(%r)" % self.version
def create(self, msgtype, *args, **kwargs):
''' Create a new Message instance for the given type.
Args:
msgtype (str) :
'''
if msgtype not in self._messages:
raise ProtocolError("Unknown message type %r for protocol version %s" % (msgtype, self._version))
return self._messages[msgtype].create(*args, **kwargs)
def assemble(self, header_json, metadata_json, content_json):
''' Create a Message instance assembled from json fragments.
Args:
header_json (``JSON``) :
metadata_json (``JSON``) :
content_json (``JSON``) :
Returns:
message
'''
header = json_decode(header_json)
if 'msgtype' not in header:
log.error("Bad header with no msgtype was: %r", header)
raise ProtocolError("No 'msgtype' in header")
return self._messages[header['msgtype']].assemble(
header_json, metadata_json, content_json
)
@property
def version(self):
return self._version
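# A minimal usage sketch (assuming version "1.0" is registered in
# versions.spec; the message type shown is illustrative):
#
#     proto = Protocol("1.0")
#     msg = proto.create('SERVER-INFO-REQ')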
|
zchking/odoo
|
refs/heads/8.0
|
addons/account/wizard/__init__.py
|
362
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_report_common
import account_report_common_partner
import account_report_common_journal
import account_report_common_account
import account_automatic_reconcile
import account_move_line_reconcile_select
import account_move_line_unreconcile_select
import account_reconcile_partner_process
import account_reconcile
import account_unreconcile
import account_invoice_refund
import account_journal_select
import account_move_bank_reconcile
import account_subscription_generate
import account_report_aged_partner_balance
import account_report_partner_ledger
import account_report_partner_balance
import account_period_close
import account_fiscalyear_close
import account_fiscalyear_close_state
import account_vat
import account_open_closed_fiscalyear
import account_invoice_state
import account_chart
import account_tax_chart
import account_financial_report
#TODO: remove this file, it is no longer used
# also remove the related view file
import account_validate_account_move
import account_use_model
import account_state_open
import account_report_print_journal
import account_report_central_journal
import account_report_general_journal
import account_report_general_ledger
import account_report_account_balance
import account_change_currency
import pos_box
import account_statement_from_invoice
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
lumig242/Hue-Integration-with-CDAP
|
refs/heads/pull3
|
desktop/core/ext-py/Pygments-1.3.1/pygments/lexer.py
|
58
|
# -*- coding: utf-8 -*-
"""
pygments.lexer
~~~~~~~~~~~~~~
Base lexer classes.
:copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.filter import apply_filters, Filter
from pygments.filters import get_filter_by_name
from pygments.token import Error, Text, Other, _TokenType
from pygments.util import get_bool_opt, get_int_opt, get_list_opt, \
make_analysator
__all__ = ['Lexer', 'RegexLexer', 'ExtendedRegexLexer', 'DelegatingLexer',
'LexerContext', 'include', 'bygroups', 'using', 'this']
_default_analyse = staticmethod(lambda x: 0.0)
class LexerMeta(type):
"""
This metaclass automagically converts ``analyse_text`` methods into
static methods which always return float values.
"""
def __new__(cls, name, bases, d):
if 'analyse_text' in d:
d['analyse_text'] = make_analysator(d['analyse_text'])
return type.__new__(cls, name, bases, d)
class Lexer(object):
"""
Lexer for a specific language.
Basic options recognized:
``stripnl``
Strip leading and trailing newlines from the input (default: True).
``stripall``
Strip all leading and trailing whitespace from the input
(default: False).
``ensurenl``
Make sure that the input ends with a newline (default: True). This
is required for some lexers that consume input linewise.
*New in Pygments 1.3.*
``tabsize``
If given and greater than 0, expand tabs in the input (default: 0).
``encoding``
If given, must be an encoding name. This encoding will be used to
convert the input string to Unicode, if it is not already a Unicode
string (default: ``'latin1'``).
Can also be ``'guess'`` to use a simple UTF-8 / Latin1 detection, or
``'chardet'`` to use the chardet library, if it is installed.
"""
#: Name of the lexer
name = None
#: Shortcuts for the lexer
aliases = []
#: fn match rules
filenames = []
#: fn alias filenames
alias_filenames = []
#: mime types
mimetypes = []
__metaclass__ = LexerMeta
def __init__(self, **options):
self.options = options
self.stripnl = get_bool_opt(options, 'stripnl', True)
self.stripall = get_bool_opt(options, 'stripall', False)
self.ensurenl = get_bool_opt(options, 'ensurenl', True)
self.tabsize = get_int_opt(options, 'tabsize', 0)
self.encoding = options.get('encoding', 'latin1')
# self.encoding = options.get('inencoding', None) or self.encoding
self.filters = []
for filter_ in get_list_opt(options, 'filters', ()):
self.add_filter(filter_)
def __repr__(self):
if self.options:
return '<pygments.lexers.%s with %r>' % (self.__class__.__name__,
self.options)
else:
return '<pygments.lexers.%s>' % self.__class__.__name__
def add_filter(self, filter_, **options):
"""
Add a new stream filter to this lexer.
"""
if not isinstance(filter_, Filter):
filter_ = get_filter_by_name(filter_, **options)
self.filters.append(filter_)
def analyse_text(text):
"""
Has to return a float between ``0`` and ``1`` that indicates
if a lexer wants to highlight this text. Used by ``guess_lexer``.
If this method returns ``0`` it won't highlight it in any case, if
it returns ``1`` highlighting with this lexer is guaranteed.
The `LexerMeta` metaclass automatically wraps this function so
that it works like a static method (no ``self`` or ``cls``
parameter) and the return value is automatically converted to
`float`. If the return value is an object that is boolean `False`
it's the same as if the return values was ``0.0``.
"""
def get_tokens(self, text, unfiltered=False):
"""
Return an iterable of (tokentype, value) pairs generated from
`text`. If `unfiltered` is set to `True`, the filtering mechanism
is bypassed even if filters are defined.
Also preprocess the text, i.e. expand tabs and strip it if
wanted and applies registered filters.
"""
if not isinstance(text, unicode):
if self.encoding == 'guess':
try:
text = text.decode('utf-8')
if text.startswith(u'\ufeff'):
text = text[len(u'\ufeff'):]
except UnicodeDecodeError:
text = text.decode('latin1')
elif self.encoding == 'chardet':
try:
import chardet
except ImportError:
raise ImportError('To enable chardet encoding guessing, '
'please install the chardet library '
'from http://chardet.feedparser.org/')
enc = chardet.detect(text)
text = text.decode(enc['encoding'])
else:
text = text.decode(self.encoding)
# text now *is* a unicode string
text = text.replace('\r\n', '\n')
text = text.replace('\r', '\n')
if self.stripall:
text = text.strip()
elif self.stripnl:
text = text.strip('\n')
if self.tabsize > 0:
text = text.expandtabs(self.tabsize)
if self.ensurenl and not text.endswith('\n'):
text += '\n'
def streamer():
for i, t, v in self.get_tokens_unprocessed(text):
yield t, v
stream = streamer()
if not unfiltered:
stream = apply_filters(stream, self.filters, self)
return stream
def get_tokens_unprocessed(self, text):
"""
Return an iterable of (tokentype, value) pairs.
In subclasses, implement this method as a generator to
maximize effectiveness.
"""
raise NotImplementedError
class DelegatingLexer(Lexer):
"""
This lexer takes two lexer as arguments. A root lexer and
a language lexer. First everything is scanned using the language
lexer, afterwards all ``Other`` tokens are lexed using the root
lexer.
The lexers from the ``template`` lexer package use this base lexer.
"""
def __init__(self, _root_lexer, _language_lexer, _needle=Other, **options):
self.root_lexer = _root_lexer(**options)
self.language_lexer = _language_lexer(**options)
self.needle = _needle
Lexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
buffered = ''
insertions = []
lng_buffer = []
for i, t, v in self.language_lexer.get_tokens_unprocessed(text):
if t is self.needle:
if lng_buffer:
insertions.append((len(buffered), lng_buffer))
lng_buffer = []
buffered += v
else:
lng_buffer.append((i, t, v))
if lng_buffer:
insertions.append((len(buffered), lng_buffer))
return do_insertions(insertions,
self.root_lexer.get_tokens_unprocessed(buffered))
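# A minimal sketch of a delegating lexer (the referenced lexer classes exist
# in Pygments, but this definition is illustrative):
#
#     from pygments.lexers.web import HtmlLexer, PhpLexer
#
#     class HtmlPhpLexer(DelegatingLexer):
#         def __init__(self, **options):
#             super(HtmlPhpLexer, self).__init__(HtmlLexer, PhpLexer, **options)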
#-------------------------------------------------------------------------------
# RegexLexer and ExtendedRegexLexer
#
class include(str):
"""
Indicates that a state should include rules from another state.
"""
pass
class combined(tuple):
"""
Indicates a state combined from multiple states.
"""
def __new__(cls, *args):
return tuple.__new__(cls, args)
def __init__(self, *args):
# tuple.__init__ doesn't do anything
pass
class _PseudoMatch(object):
"""
A pseudo match object constructed from a string.
"""
def __init__(self, start, text):
self._text = text
self._start = start
def start(self, arg=None):
return self._start
def end(self, arg=None):
return self._start + len(self._text)
def group(self, arg=None):
if arg:
raise IndexError('No such group')
return self._text
def groups(self):
return (self._text,)
def groupdict(self):
return {}
def bygroups(*args):
"""
Callback that yields multiple actions for each group in the match.
"""
def callback(lexer, match, ctx=None):
for i, action in enumerate(args):
if action is None:
continue
elif type(action) is _TokenType:
data = match.group(i + 1)
if data:
yield match.start(i + 1), action, data
else:
if ctx:
ctx.pos = match.start(i + 1)
for item in action(lexer, _PseudoMatch(match.start(i + 1),
match.group(i + 1)), ctx):
if item:
yield item
if ctx:
ctx.pos = match.end()
return callback
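# A minimal sketch of `bygroups` in a rule tuple (token types come from
# pygments.token; the rule itself is illustrative):
#
#     (r'(def)(\s+)(\w+)', bygroups(Keyword, Text, Name.Function))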
class _This(object):
"""
Special singleton used for indicating the caller class.
Used by ``using``.
"""
this = _This()
def using(_other, **kwargs):
"""
Callback that processes the match with a different lexer.
The keyword arguments are forwarded to the lexer, except `state` which
is handled separately.
`state` specifies the state that the new lexer will start in, and can
be an enumerable such as ('root', 'inline', 'string') or a simple
string which is assumed to be on top of the root state.
Note: For that to work, `_other` must not be an `ExtendedRegexLexer`.
"""
gt_kwargs = {}
if 'state' in kwargs:
s = kwargs.pop('state')
if isinstance(s, (list, tuple)):
gt_kwargs['stack'] = s
else:
gt_kwargs['stack'] = ('root', s)
if _other is this:
def callback(lexer, match, ctx=None):
# if keyword arguments are given the callback
# function has to create a new lexer instance
if kwargs:
# XXX: cache that somehow
kwargs.update(lexer.options)
lx = lexer.__class__(**kwargs)
else:
lx = lexer
s = match.start()
for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):
yield i + s, t, v
if ctx:
ctx.pos = match.end()
else:
def callback(lexer, match, ctx=None):
# XXX: cache that somehow
kwargs.update(lexer.options)
lx = _other(**kwargs)
s = match.start()
for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):
yield i + s, t, v
if ctx:
ctx.pos = match.end()
return callback
class RegexLexerMeta(LexerMeta):
"""
Metaclass for RegexLexer, creates the self._tokens attribute from
self.tokens on the first instantiation.
"""
def _process_state(cls, unprocessed, processed, state):
assert type(state) is str, "wrong state name %r" % state
assert state[0] != '#', "invalid state name %r" % state
if state in processed:
return processed[state]
tokens = processed[state] = []
rflags = cls.flags
for tdef in unprocessed[state]:
if isinstance(tdef, include):
# it's a state reference
assert tdef != state, "circular state reference %r" % state
tokens.extend(cls._process_state(unprocessed, processed, str(tdef)))
continue
assert type(tdef) is tuple, "wrong rule def %r" % tdef
try:
rex = re.compile(tdef[0], rflags).match
except Exception, err:
raise ValueError("uncompilable regex %r in state %r of %r: %s" %
(tdef[0], state, cls, err))
assert type(tdef[1]) is _TokenType or callable(tdef[1]), \
'token type must be simple type or callable, not %r' % (tdef[1],)
if len(tdef) == 2:
new_state = None
else:
tdef2 = tdef[2]
if isinstance(tdef2, str):
# an existing state
if tdef2 == '#pop':
new_state = -1
elif tdef2 in unprocessed:
new_state = (tdef2,)
elif tdef2 == '#push':
new_state = tdef2
elif tdef2[:5] == '#pop:':
new_state = -int(tdef2[5:])
else:
assert False, 'unknown new state %r' % tdef2
elif isinstance(tdef2, combined):
# combine a new state from existing ones
new_state = '_tmp_%d' % cls._tmpname
cls._tmpname += 1
itokens = []
for istate in tdef2:
assert istate != state, 'circular state ref %r' % istate
itokens.extend(cls._process_state(unprocessed,
processed, istate))
processed[new_state] = itokens
new_state = (new_state,)
elif isinstance(tdef2, tuple):
# push more than one state
                    # use a distinct loop variable so the enclosing
                    # `state` is not shadowed
                    for tstate in tdef2:
                        assert (tstate in unprocessed or
                                tstate in ('#pop', '#push')), \
                               'unknown new state ' + tstate
new_state = tdef2
else:
assert False, 'unknown new state def %r' % tdef2
tokens.append((rex, tdef[1], new_state))
return tokens
def process_tokendef(cls, name, tokendefs=None):
processed = cls._all_tokens[name] = {}
tokendefs = tokendefs or cls.tokens[name]
for state in tokendefs.keys():
cls._process_state(tokendefs, processed, state)
return processed
def __call__(cls, *args, **kwds):
if not hasattr(cls, '_tokens'):
cls._all_tokens = {}
cls._tmpname = 0
if hasattr(cls, 'token_variants') and cls.token_variants:
# don't process yet
pass
else:
cls._tokens = cls.process_tokendef('', cls.tokens)
return type.__call__(cls, *args, **kwds)
class RegexLexer(Lexer):
"""
Base for simple stateful regular expression-based lexers.
Simplifies the lexing process so that you need only
provide a list of states and regular expressions.
"""
__metaclass__ = RegexLexerMeta
#: Flags for compiling the regular expressions.
#: Defaults to MULTILINE.
flags = re.MULTILINE
#: Dict of ``{'state': [(regex, tokentype, new_state), ...], ...}``
#:
#: The initial state is 'root'.
#: ``new_state`` can be omitted to signify no state transition.
#: If it is a string, the state is pushed on the stack and changed.
#: If it is a tuple of strings, all states are pushed on the stack and
#: the current state will be the topmost.
#: It can also be ``combined('state1', 'state2', ...)``
#: to signify a new, anonymous state combined from the rules of two
#: or more existing ones.
#: Furthermore, it can be '#pop' to signify going back one step in
#: the state stack, or '#push' to push the current state on the stack
#: again.
#:
#: The tuple can also be replaced with ``include('state')``, in which
#: case the rules from the state named by the string are included in the
#: current one.
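    # A minimal sketch of a tokens definition (illustrative, not a real
    # Pygments lexer):
    #
    #     tokens = {
    #         'root': [
    #             (r'#.*?$', Comment),         # no new state: stay in 'root'
    #             (r'"', String, 'string'),    # push the 'string' state
    #         ],
    #         'string': [
    #             (r'[^"]+', String),
    #             (r'"', String, '#pop'),      # pop back to 'root'
    #         ],
    #     }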
tokens = {}
def get_tokens_unprocessed(self, text, stack=('root',)):
"""
Split ``text`` into (tokentype, text) pairs.
        ``stack`` is the initial stack (default: ``['root']``)
"""
pos = 0
tokendefs = self._tokens
statestack = list(stack)
statetokens = tokendefs[statestack[-1]]
while 1:
for rexmatch, action, new_state in statetokens:
m = rexmatch(text, pos)
if m:
if type(action) is _TokenType:
yield pos, action, m.group()
else:
for item in action(self, m):
yield item
pos = m.end()
if new_state is not None:
# state transition
if isinstance(new_state, tuple):
for state in new_state:
if state == '#pop':
statestack.pop()
elif state == '#push':
statestack.append(statestack[-1])
else:
statestack.append(state)
elif isinstance(new_state, int):
# pop
del statestack[new_state:]
elif new_state == '#push':
statestack.append(statestack[-1])
else:
assert False, "wrong state def: %r" % new_state
statetokens = tokendefs[statestack[-1]]
break
else:
try:
if text[pos] == '\n':
# at EOL, reset state to "root"
pos += 1
statestack = ['root']
statetokens = tokendefs['root']
yield pos, Text, u'\n'
continue
yield pos, Error, text[pos]
pos += 1
except IndexError:
break
class LexerContext(object):
"""
A helper object that holds lexer position data.
"""
def __init__(self, text, pos, stack=None, end=None):
self.text = text
self.pos = pos
self.end = end or len(text) # end=0 not supported ;-)
self.stack = stack or ['root']
def __repr__(self):
return 'LexerContext(%r, %r, %r)' % (
self.text, self.pos, self.stack)
class ExtendedRegexLexer(RegexLexer):
"""
A RegexLexer that uses a context object to store its state.
"""
def get_tokens_unprocessed(self, text=None, context=None):
"""
Split ``text`` into (tokentype, text) pairs.
If ``context`` is given, use this lexer context instead.
"""
tokendefs = self._tokens
if not context:
ctx = LexerContext(text, 0)
statetokens = tokendefs['root']
else:
ctx = context
statetokens = tokendefs[ctx.stack[-1]]
text = ctx.text
while 1:
for rexmatch, action, new_state in statetokens:
m = rexmatch(text, ctx.pos, ctx.end)
if m:
if type(action) is _TokenType:
yield ctx.pos, action, m.group()
ctx.pos = m.end()
else:
for item in action(self, m, ctx):
yield item
if not new_state:
# altered the state stack?
statetokens = tokendefs[ctx.stack[-1]]
# CAUTION: callback must set ctx.pos!
if new_state is not None:
# state transition
if isinstance(new_state, tuple):
ctx.stack.extend(new_state)
elif isinstance(new_state, int):
# pop
del ctx.stack[new_state:]
elif new_state == '#push':
ctx.stack.append(ctx.stack[-1])
else:
assert False, "wrong state def: %r" % new_state
statetokens = tokendefs[ctx.stack[-1]]
break
else:
try:
if ctx.pos >= ctx.end:
break
if text[ctx.pos] == '\n':
# at EOL, reset state to "root"
ctx.pos += 1
ctx.stack = ['root']
statetokens = tokendefs['root']
yield ctx.pos, Text, u'\n'
continue
yield ctx.pos, Error, text[ctx.pos]
ctx.pos += 1
except IndexError:
break
def do_insertions(insertions, tokens):
"""
Helper for lexers which must combine the results of several
sublexers.
``insertions`` is a list of ``(index, itokens)`` pairs.
Each ``itokens`` iterable should be inserted at position
``index`` into the token stream given by the ``tokens``
argument.
The result is a combined token stream.
TODO: clean up the code here.
"""
insertions = iter(insertions)
try:
index, itokens = insertions.next()
except StopIteration:
# no insertions
for item in tokens:
yield item
return
realpos = None
insleft = True
# iterate over the token stream where we want to insert
# the tokens from the insertion list.
for i, t, v in tokens:
        # first iteration. store the position of the first item
if realpos is None:
realpos = i
oldi = 0
while insleft and i + len(v) >= index:
tmpval = v[oldi:index - i]
yield realpos, t, tmpval
realpos += len(tmpval)
for it_index, it_token, it_value in itokens:
yield realpos, it_token, it_value
realpos += len(it_value)
oldi = index - i
try:
index, itokens = insertions.next()
except StopIteration:
insleft = False
break # not strictly necessary
yield realpos, t, v[oldi:]
realpos += len(v) - oldi
# leftover tokens
while insleft:
# no normal tokens, set realpos to zero
realpos = realpos or 0
for p, t, v in itokens:
yield realpos, t, v
realpos += len(v)
try:
index, itokens = insertions.next()
except StopIteration:
insleft = False
break # not strictly necessary
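# A minimal sketch of what `do_insertions` produces (token types are
# illustrative):
#
#     base = [(0, Text, u'hello world')]
#     ins = [(5, [(0, Comment, u'<!>')])]
#     list(do_insertions(ins, iter(base)))
#     # -> [(0, Text, u'hello'), (5, Comment, u'<!>'), (8, Text, u' world')]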
|
MarkTseng/django-farmersale
|
refs/heads/master
|
farmersale-env/lib/python2.7/site-packages/django/conf/locale/fy/formats.py
|
852
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# DATE_FORMAT =
# TIME_FORMAT =
# DATETIME_FORMAT =
# YEAR_MONTH_FORMAT =
# MONTH_DAY_FORMAT =
# SHORT_DATE_FORMAT =
# SHORT_DATETIME_FORMAT =
# FIRST_DAY_OF_WEEK =
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =
# DECIMAL_SEPARATOR =
# THOUSAND_SEPARATOR =
# NUMBER_GROUPING =
|
forging2012/tornado-demo
|
refs/heads/master
|
test009.py
|
1
|
import tornado.escape
import tornado.ioloop
import tornado.web
class BaseHandler(tornado.web.RequestHandler):
def get_current_user(self):
return self.get_secure_cookie("user")
class MainHandler(BaseHandler):
@tornado.web.authenticated
def get(self):
name = tornado.escape.xhtml_escape(self.current_user)
self.write("Hello, " + name)
class LoginHandler(BaseHandler):
def get(self):
self.write('<html><body><form action="/" method="post">'
'Name: <input type="text" name="name">'
'<input type="submit" value="Sign in">'
'</form></body></html>')
def post(self):
self.set_secure_cookie("user", self.get_argument("name"))
self.redirect("/")
settings = {
"cookie_secret": "61oETzKXQAGaYdkL5gEmGeJJFuYh7EQnp2XdTP1o/Vo=",
"login_url": "/login",
#"xsrf_cookies": True,
}
application = tornado.web.Application([
(r"/", MainHandler),
(r"/login", LoginHandler),
], **settings)
if __name__ == "__main__":
application.listen(8080)
tornado.ioloop.IOLoop.instance().start()
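# To try it (a sketch): run this file and open http://localhost:8080/ in a
# browser; unauthenticated requests are redirected to /login per `login_url`.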
|
naturali/tensorflow
|
refs/heads/r0.11
|
tensorflow/python/training/learning_rate_decay.py
|
6
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Various learning rate decay functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import control_flow_ops
def exponential_decay(learning_rate, global_step, decay_steps, decay_rate,
staircase=False, name=None):
"""Applies exponential decay to the learning rate.
When training a model, it is often recommended to lower the learning rate as
the training progresses. This function applies an exponential decay function
to a provided initial learning rate. It requires a `global_step` value to
compute the decayed learning rate. You can just pass a TensorFlow variable
that you increment at each training step.
The function returns the decayed learning rate. It is computed as:
```python
decayed_learning_rate = learning_rate *
decay_rate ^ (global_step / decay_steps)
```
If the argument `staircase` is `True`, then `global_step / decay_steps` is an
integer division and the decayed learning rate follows a staircase function.
Example: decay every 100000 steps with a base of 0.96:
```python
...
global_step = tf.Variable(0, trainable=False)
starter_learning_rate = 0.1
learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step,
100000, 0.96, staircase=True)
# Passing global_step to minimize() will increment it at each step.
learning_step = (
tf.train.GradientDescentOptimizer(learning_rate)
.minimize(...my loss..., global_step=global_step)
)
```
Args:
learning_rate: A scalar `float32` or `float64` `Tensor` or a
Python number. The initial learning rate.
global_step: A scalar `int32` or `int64` `Tensor` or a Python number.
Global step to use for the decay computation. Must not be negative.
decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
Must be positive. See the decay computation above.
decay_rate: A scalar `float32` or `float64` `Tensor` or a
Python number. The decay rate.
    staircase: Boolean. If `True`, decay the learning rate at discrete intervals.
name: String. Optional name of the operation. Defaults to
'ExponentialDecay'
Returns:
A scalar `Tensor` of the same type as `learning_rate`. The decayed
learning rate.
"""
with ops.name_scope(name, "ExponentialDecay",
[learning_rate, global_step,
decay_steps, decay_rate]) as name:
learning_rate = ops.convert_to_tensor(learning_rate, name="learning_rate")
dtype = learning_rate.dtype
global_step = math_ops.cast(global_step, dtype)
decay_steps = math_ops.cast(decay_steps, dtype)
decay_rate = math_ops.cast(decay_rate, dtype)
p = global_step / decay_steps
if staircase:
p = math_ops.floor(p)
return math_ops.mul(learning_rate, math_ops.pow(decay_rate, p), name=name)
def piecewise_constant(x, boundaries, values, name=None):
""" Piecewise constant from boundaries and interval values.
Example: use a learning rate that's 1.0 for the first 100000 steps, 0.5
for steps 100001 to 110000, and 0.1 for any additional steps.
```python
global_step = tf.Variable(0, trainable=False)
boundaries = [100000, 110000]
values = [1.0, 0.5, 0.1]
learning_rate = tf.train.piecewise_constant(global_step, boundaries, values)
# Later, whenever we perform an optimization step, we increment global_step.
```
Args:
x: A 0-D scalar `Tensor`. Must be one of the following types: `float32`,
`float64`, `uint8`, `int8`, `int16`, `int32`, `int64`.
boundaries: A list of `Tensor`s or `int`s or `float`s with strictly
increasing entries, and with all elements having the same type as `x`.
    values: A list of `Tensor`s or `float`s or `int`s that specifies the values
for the intervals defined by `boundaries`. It should have one more element
than `boundaries`, and all elements should have the same type.
name: A string. Optional name of the operation. Defaults to
'PiecewiseConstant'.
Returns:
A 0-D Tensor. Its value is `values[0]` when `x <= boundaries[0]`,
`values[1]` when `x > boundaries[0]` and `x <= boundaries[1]`, ...,
    and `values[-1]` when `x > boundaries[-1]`.
"""
with ops.name_scope(name, 'PiecewiseConstant',
[x, boundaries, values, name]) as name:
x = ops.convert_to_tensor(x)
# Avoid explicit conversion to x's dtype. This could result in faulty
# comparisons, for example if floats are converted to integers.
boundaries = ops.convert_n_to_tensor(boundaries)
if not all(b.dtype == x.dtype for b in boundaries):
raise ValueError('boundaries must have the same dtype as x.')
# TODO(rdipietro): Ensure that boundaries' elements are strictly increasing.
values = ops.convert_n_to_tensor(values)
if not all(v.dtype == values[0].dtype for v in values):
raise ValueError('values must have elements all with the same dtype.')
pred_fn_pairs = {}
pred_fn_pairs[x <= boundaries[0]] = lambda: values[0]
pred_fn_pairs[x > boundaries[-1]] = lambda: values[-1]
for low, high, v in zip(boundaries[:-1], boundaries[1:], values[1:-1]):
# Need to bind v here; can do this with lambda v=v: ...
pred = (x > low) & (x <= high)
pred_fn_pairs[pred] = lambda v=v: v
# The default isn't needed here because our conditions are mutually
# exclusive and exhaustive, but tf.case requires it.
default = lambda: values[0]
return control_flow_ops.case(pred_fn_pairs, default, exclusive=True)
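
# Illustrative sketch (not part of the original module): for plain Python
# numbers the piecewise lookup above is a simple interval search.
# `_piecewise_constant_py` is a hypothetical helper that mirrors the boundary
# semantics documented in the docstring.
import bisect  # stdlib; used only by the sketch below

def _piecewise_constant_py(x, boundaries, values):
  # values[0] for x <= boundaries[0], ..., values[-1] for x > boundaries[-1]
  return values[bisect.bisect_left(boundaries, x)]
# e.g. _piecewise_constant_py(100001, [100000, 110000], [1.0, 0.5, 0.1]) -> 0.5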
def polynomial_decay(learning_rate, global_step, decay_steps,
end_learning_rate=0.0001, power=1.0,
cycle=False, name=None):
"""Applies a polynomial decay to the learning rate.
It is commonly observed that a monotonically decreasing learning rate, whose
degree of change is carefully chosen, results in a better performing model.
This function applies a polynomial decay function to a provided initial
`learning_rate` to reach an `end_learning_rate` in the given `decay_steps`.
It requires a `global_step` value to compute the decayed learning rate. You
can just pass a TensorFlow variable that you increment at each training step.
The function returns the decayed learning rate. It is computed as:
```python
global_step = min(global_step, decay_steps)
decayed_learning_rate = (learning_rate - end_learning_rate) *
(1 - global_step / decay_steps) ^ (power) +
end_learning_rate
```
  If `cycle` is True then a multiple of `decay_steps` is used, the first one
  that is bigger than `global_step`.
```python
decay_steps = decay_steps * ceil(global_step / decay_steps)
decayed_learning_rate = (learning_rate - end_learning_rate) *
(1 - global_step / decay_steps) ^ (power) +
end_learning_rate
```
Example: decay from 0.1 to 0.01 in 10000 steps using sqrt (i.e. power=0.5):
```python
...
global_step = tf.Variable(0, trainable=False)
starter_learning_rate = 0.1
end_learning_rate = 0.01
decay_steps = 10000
learning_rate = tf.train.polynomial_decay(starter_learning_rate, global_step,
decay_steps, end_learning_rate,
power=0.5)
# Passing global_step to minimize() will increment it at each step.
learning_step = (
tf.train.GradientDescentOptimizer(learning_rate)
.minimize(...my loss..., global_step=global_step)
)
```
Args:
learning_rate: A scalar `float32` or `float64` `Tensor` or a
Python number. The initial learning rate.
global_step: A scalar `int32` or `int64` `Tensor` or a Python number.
Global step to use for the decay computation. Must not be negative.
decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
Must be positive. See the decay computation above.
end_learning_rate: A scalar `float32` or `float64` `Tensor` or a
Python number. The minimal end learning rate.
    power: A scalar `float32` or `float64` `Tensor` or a
      Python number. The power of the polynomial. Defaults to linear, 1.0.
    cycle: A boolean, whether or not it should cycle beyond decay_steps.
    name: String. Optional name of the operation. Defaults to
      'PolynomialDecay'.
Returns:
A scalar `Tensor` of the same type as `learning_rate`. The decayed
learning rate.
"""
with ops.name_scope(name, "PolynomialDecay",
[learning_rate, global_step,
decay_steps, end_learning_rate, power]) as name:
learning_rate = ops.convert_to_tensor(learning_rate, name="learning_rate")
dtype = learning_rate.dtype
global_step = math_ops.cast(global_step, dtype)
decay_steps = math_ops.cast(decay_steps, dtype)
end_learning_rate = math_ops.cast(end_learning_rate, dtype)
power = math_ops.cast(power, dtype)
if cycle:
# Find the first multiple of decay_steps that is bigger than global_step.
decay_steps = math_ops.mul(decay_steps,
math_ops.ceil(global_step / decay_steps))
else:
# Make sure that the global_step used is not bigger than decay_steps.
global_step = math_ops.minimum(global_step, decay_steps)
p = math_ops.div(global_step, decay_steps)
return math_ops.add(math_ops.mul(learning_rate - end_learning_rate,
math_ops.pow(1 - p, power)),
end_learning_rate, name=name)
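
# Illustrative sketch (not part of the original module): the `cycle` branch
# above stretches decay_steps to the next multiple past global_step, so the
# schedule restarts instead of flat-lining at end_learning_rate.
# `_polynomial_decay_py` is a hypothetical helper; the max(1.0, ...) guard for
# step 0 is an assumption added here to avoid dividing by zero.
import math  # stdlib; used only by the sketch below

def _polynomial_decay_py(lr, step, decay_steps, end_lr=0.0001, power=1.0,
                         cycle=False):
  if cycle:
    decay_steps = decay_steps * max(1.0, math.ceil(step / float(decay_steps)))
  else:
    step = min(step, decay_steps)
  p = step / float(decay_steps)
  return (lr - end_lr) * (1 - p) ** power + end_lr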
def natural_exp_decay(learning_rate, global_step, decay_steps, decay_rate,
staircase=False, name=None):
"""Applies natural exponential decay to the initial learning rate.
When training a model, it is often recommended to lower the learning rate as
the training progresses. This function applies an exponential decay function
  to a provided initial learning rate. It requires a `global_step` value to
compute the decayed learning rate. You can just pass a TensorFlow variable
that you increment at each training step.
The function returns the decayed learning rate. It is computed as:
```python
  decayed_learning_rate = learning_rate * exp(-decay_rate * global_step / decay_steps)
```
  Example: decay exponentially with a rate of 0.5:
  ```python
  ...
  global_step = tf.Variable(0, trainable=False)
  learning_rate = 0.1
  decay_steps = 10000
  k = 0.5
  learning_rate = tf.train.natural_exp_decay(learning_rate, global_step,
                                             decay_steps, k)
# Passing global_step to minimize() will increment it at each step.
learning_step = (
tf.train.GradientDescentOptimizer(learning_rate)
.minimize(...my loss..., global_step=global_step)
)
```
Args:
    learning_rate: A scalar `float32` or `float64` `Tensor` or a
      Python number. The initial learning rate.
    global_step: A scalar `int32` or `int64` `Tensor` or a Python number.
      Global step to use for the decay computation. Must not be negative.
    decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
      Must be positive. See the decay computation above.
    decay_rate: A Python number. The decay rate.
    staircase: Boolean. If `True`, decay the learning rate at discrete
      intervals.
    name: String. Optional name of the operation. Defaults to
      'NaturalExpDecay'.
Returns:
A scalar `Tensor` of the same type as `learning_rate`. The decayed
learning rate.
"""
with ops.name_scope(name, "NaturalExpDecay",
[learning_rate, global_step, decay_rate]) as name:
learning_rate = ops.convert_to_tensor(learning_rate, name="learning_rate")
dtype = learning_rate.dtype
global_step = math_ops.cast(global_step, dtype)
decay_steps = math_ops.cast(decay_steps, dtype)
decay_rate = math_ops.cast(decay_rate, dtype)
p = global_step / decay_steps
if staircase:
p = math_ops.floor(p)
exponent = math_ops.exp(math_ops.mul(math_ops.neg(decay_rate), p))
return math_ops.mul(learning_rate, exponent, name=name)
def inverse_time_decay(learning_rate, global_step, decay_steps, decay_rate,
staircase=False, name=None):
"""Applies inverse time decay to the initial learning rate.
When training a model, it is often recommended to lower the learning rate as
the training progresses. This function applies an inverse decay function
  to a provided initial learning rate. It requires a `global_step` value to
compute the decayed learning rate. You can just pass a TensorFlow variable
that you increment at each training step.
The function returns the decayed learning rate. It is computed as:
```python
  decayed_learning_rate = learning_rate / (1 + decay_rate * global_step / decay_steps)
```
  Example: decay 1/t with a rate of 0.5:
  ```python
  ...
  global_step = tf.Variable(0, trainable=False)
  learning_rate = 0.1
  decay_steps = 1.0
  k = 0.5
  learning_rate = tf.train.inverse_time_decay(learning_rate, global_step,
                                              decay_steps, k)
# Passing global_step to minimize() will increment it at each step.
learning_step = (
tf.train.GradientDescentOptimizer(learning_rate)
.minimize(...my loss..., global_step=global_step)
)
```
Args:
    learning_rate: A scalar `float32` or `float64` `Tensor` or a
      Python number. The initial learning rate.
    global_step: A scalar `int32` or `int64` `Tensor` or a Python number.
      Global step to use for the decay computation. Must not be negative.
    decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
      Must be positive. See the decay computation above.
    decay_rate: A Python number. The decay rate.
    staircase: Boolean. If `True`, decay the learning rate at discrete
      intervals.
    name: String. Optional name of the operation. Defaults to
      'InverseTimeDecay'.
Returns:
A scalar `Tensor` of the same type as `learning_rate`. The decayed
learning rate.
"""
with ops.name_scope(name, "InverseTimeDecay",
[learning_rate, global_step, decay_rate]) as name:
learning_rate = ops.convert_to_tensor(learning_rate, name="learning_rate")
dtype = learning_rate.dtype
global_step = math_ops.cast(global_step, dtype)
decay_steps = math_ops.cast(decay_steps, dtype)
decay_rate = math_ops.cast(decay_rate, dtype)
p = global_step / decay_steps
if staircase:
p = math_ops.floor(p)
const = math_ops.cast(constant_op.constant(1), learning_rate.dtype)
denom = math_ops.add(const, math_ops.mul(decay_rate, p))
return math_ops.div(learning_rate, denom, name=name)
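
# Illustrative sketch (not part of the original module): the two schedules
# above differ only in the map applied to p = global_step / decay_steps
# (optionally floored). `_natural_exp_py` and `_inverse_time_py` are
# hypothetical helpers for a side-by-side comparison.
import math  # stdlib; used only by the sketches below

def _natural_exp_py(lr, decay_rate, p):
  return lr * math.exp(-decay_rate * p)   # e.g. (0.1, 0.5, 1.0) -> ~0.0607

def _inverse_time_py(lr, decay_rate, p):
  return lr / (1.0 + decay_rate * p)      # e.g. (0.1, 0.5, 1.0) -> ~0.0667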
|
RCOSDP/waterbutler
|
refs/heads/nii-mergework-201901
|
tests/providers/bitbucket/test_metadata.py
|
1
|
import pytest
from waterbutler.providers.bitbucket.path import BitbucketPath
from waterbutler.providers.bitbucket.metadata import BitbucketFileMetadata
from waterbutler.providers.bitbucket.metadata import BitbucketFolderMetadata
from waterbutler.providers.bitbucket.metadata import BitbucketRevisionMetadata
from .fixtures import owner, repo, file_metadata, folder_metadata, revision_metadata
COMMIT_SHA = '123abc456def'
class TestBitbucketMetadata:
def test_build_file_metadata(self, file_metadata, owner, repo):
name = 'aaa-01-2.txt'
subdir = 'plaster'
full_path = '/{}/{}'.format(subdir, name)
branch = 'master'
path = BitbucketPath(full_path, _ids=[
(COMMIT_SHA, branch), (COMMIT_SHA, branch), (COMMIT_SHA, branch)
])
try:
metadata = BitbucketFileMetadata(file_metadata, path, owner=owner, repo=repo)
except Exception as exc:
pytest.fail(str(exc))
assert metadata.name == name
assert metadata.path == full_path
assert metadata.kind == 'file'
assert metadata.modified == '2016-10-14T00:37:55Z'
assert metadata.modified_utc == '2016-10-14T00:37:55+00:00'
assert metadata.created_utc is None
assert metadata.content_type is None
assert metadata.size == 13
assert metadata.size_as_int == 13
assert metadata.etag == '{}::{}'.format(full_path,COMMIT_SHA)
assert metadata.provider == 'bitbucket'
assert metadata.last_commit_sha == '90c8f7eef948'
assert metadata.commit_sha == COMMIT_SHA
assert metadata.branch_name == branch
web_view = ('https://bitbucket.org/{}/{}/src/{}{}?'
'fileviewer=file-view-default'.format(owner, repo, COMMIT_SHA, full_path))
assert metadata.web_view == web_view
assert metadata.extra == {
'commitSha': COMMIT_SHA,
'branch': 'master',
'webView': web_view,
'lastCommitSha': '90c8f7eef948',
}
resource = 'mst3k'
assert metadata._json_api_links(resource) == {
'delete': None,
'upload': None,
'move': 'http://localhost:7777/v1/resources/{}/providers/bitbucket{}?commitSha={}'.format(resource, full_path, COMMIT_SHA),
'download': 'http://localhost:7777/v1/resources/{}/providers/bitbucket{}?commitSha={}'.format(resource, full_path, COMMIT_SHA),
}
def test_build_folder_metadata(self, folder_metadata, owner, repo):
branch = 'master'
name = 'plaster'
path = BitbucketPath('/{}/'.format(name), _ids=[(None, branch), (None, branch)])
try:
metadata = BitbucketFolderMetadata(folder_metadata, path, owner=owner, repo=repo)
except Exception as exc:
pytest.fail(str(exc))
assert metadata.name == name
assert metadata.path == '/{}/'.format(name)
assert metadata.kind == 'folder'
assert metadata.children is None
assert metadata.extra == {
'commitSha': None,
'branch': branch,
}
assert metadata.provider == 'bitbucket'
assert metadata.commit_sha is None
assert metadata.branch_name == branch
assert metadata._json_api_links('mst3k') == {
'delete': None,
'upload': None,
'move': 'http://localhost:7777/v1/resources/mst3k/providers/bitbucket/{}/?branch={}'.format(name, branch),
'new_folder': None,
}
def test_build_revision_metadata(self, revision_metadata):
try:
metadata = BitbucketRevisionMetadata(revision_metadata)
except Exception as exc:
pytest.fail(str(exc))
assert metadata.modified == '2016-09-08 21:20:59'
assert metadata.modified_utc == '2016-09-08T19:20:59+00:00'
assert metadata.version_identifier == 'commitSha'
assert metadata.version == '522a6be9f98ddf7938d7e9568a6375cd0f88e40e'
assert metadata.extra == {
'user': {
'name': 'Fitz Elliott',
},
'branch': 'smallbranch-a',
}
|
rschiang/shedskin
|
refs/heads/master
|
scripts/checker.py
|
6
|
from heapq import *
class A(object):
def __init__(self, a, hash):
self.a = a
self._hash = hash
def __lt__(self, o):
print "%s.__lt__(%s)" % (self.a, o.a)
return NotImplemented
def __le__(self, o):
print "%s.__le__(%s)" % (self.a, o.a)
return NotImplemented
def __gt__(self, o):
print "%s.__gt__(%s)" % (self.a, o.a)
return NotImplemented
def __ge__(self, o):
print "%s.__ge__(%s)" % (self.a, o.a)
return NotImplemented
# def __cmp__(self, o):
# print "%s.__cmp__(%s)" % (self.a, o.a)
# #return cmp(self._hash, o._hash)
# return NotImplemented
def __eq__(self, o):
print "%s.__eq__(%s)" % (self.a, o.a)
return NotImplemented
def __ne__(self, o):
print "%s.__ne__(%s)" % (self.a, o.a)
return NotImplemented
def __hash__(self):
print "%s.__hash__()" % (self.a)
return 1
# return self._hash
a = A("a", 1)
b = A("b", 2)
c = A("c", 3)
d = A("d", 1)
print 'eq'
a == b
print 'ne'
a != b
print 'lt'
a < b
print 'gt'
a > b
print 'le'
a <= b
print 'ge'
a >= b
#heapify([a,b,c,d])
#a != b
#cmp(a,b)
#l = [a,b,c,d]
#sorted(l)
|
feigames/Odoo
|
refs/heads/master
|
addons/web_graph/__init__.py
|
1350
|
import controllers
|
yank555-lu/N3-Sourcedrops
|
refs/heads/n9005
|
tools/perf/scripts/python/sctop.py
|
11180
|
# system call top
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
sys.exit(usage)
if len(sys.argv) > 2:
for_comm = sys.argv[1]
interval = int(sys.argv[2])
elif len(sys.argv) > 1:
try:
interval = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
interval = default_interval
syscalls = autodict()
def trace_begin():
thread.start_new_thread(print_syscall_totals, (interval,))
pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
try:
print "%-40s %10d\n" % (syscall_name(id), val),
except TypeError:
pass
syscalls.clear()
time.sleep(interval)
|
ljwolf/pysal
|
refs/heads/master
|
pysal/spreg/diagnostics_tsls.py
|
10
|
"""
Diagnostics for two stage least squares regression estimations.
"""
__author__ = "Luc Anselin [email protected], Nicholas Malizia [email protected] "
from pysal.common import *
from scipy.stats import pearsonr
__all__ = ["t_stat", "pr2_aspatial", "pr2_spatial"]
def t_stat(reg, z_stat=False):
"""
Calculates the t-statistics (or z-statistics) and associated p-values.
[Greene2003]_
Parameters
----------
reg : regression object
output instance from a regression model
z_stat : boolean
If True run z-stat instead of t-stat
Returns
-------
ts_result : list of tuples
each tuple includes value of t statistic (or z
statistic) and associated p-value
Examples
--------
We first need to import the needed modules. Numpy is needed to convert the
data we read into arrays that ``spreg`` understands and ``pysal`` to
perform all the analysis. The ``diagnostics`` module is used for the tests
we will show here and the OLS and TSLS are required to run the models on
which we will perform the tests.
>>> import numpy as np
>>> import pysal
>>> import pysal.spreg.diagnostics as diagnostics
>>> from pysal.spreg.ols import OLS
>>> from twosls import TSLS
Open data on Columbus neighborhood crime (49 areas) using pysal.open().
This is the DBF associated with the Columbus shapefile. Note that
pysal.open() also reads data in CSV format; since the actual class
requires data to be passed in as numpy arrays, the user can read their
data in using any method.
>>> db = pysal.open(pysal.examples.get_path("columbus.dbf"),'r')
Before being able to apply the diagnostics, we have to run a model and,
for that, we need the input variables. Extract the CRIME column (crime
rates) from the DBF file and make it the dependent variable for the
regression. Note that PySAL requires this to be an numpy array of shape
(n, 1) as opposed to the also common shape of (n, ) that other packages
accept.
>>> y = np.array(db.by_col("CRIME"))
>>> y = np.reshape(y, (49,1))
Extract INC (income) and HOVAL (home value) vector from the DBF to be used as
independent variables in the regression. Note that PySAL requires this to
be an nxj numpy array, where j is the number of independent variables (not
including a constant). By default this model adds a vector of ones to the
independent variables passed in, but this can be overridden by passing
constant=False.
>>> X = []
>>> X.append(db.by_col("INC"))
>>> X.append(db.by_col("HOVAL"))
>>> X = np.array(X).T
Run an OLS regression. Since it is a non-spatial model, all we need is the
dependent and the independent variable.
>>> reg = OLS(y,X)
Now we can perform a t-statistic on the model:
>>> testresult = diagnostics.t_stat(reg)
>>> print("%12.12f"%testresult[0][0], "%12.12f"%testresult[0][1], "%12.12f"%testresult[1][0], "%12.12f"%testresult[1][1], "%12.12f"%testresult[2][0], "%12.12f"%testresult[2][1])
('14.490373143689', '0.000000000000', '-4.780496191297', '0.000018289595', '-2.654408642718', '0.010874504910')
We can also use the z-stat. For that, we re-build the model so we consider
HOVAL as endogenous, instrument for it using DISCBD and carry out two
stage least squares (TSLS) estimation.
>>> X = []
>>> X.append(db.by_col("INC"))
>>> X = np.array(X).T
>>> yd = []
>>> yd.append(db.by_col("HOVAL"))
>>> yd = np.array(yd).T
>>> q = []
>>> q.append(db.by_col("DISCBD"))
>>> q = np.array(q).T
Once the variables are read as different objects, we are good to run the
model.
>>> reg = TSLS(y, X, yd, q)
With the output of the TSLS regression, we can perform a z-statistic:
>>> testresult = diagnostics.t_stat(reg, z_stat=True)
>>> print("%12.10f"%testresult[0][0], "%12.10f"%testresult[0][1], "%12.10f"%testresult[1][0], "%12.10f"%testresult[1][1], "%12.10f"%testresult[2][0], "%12.10f"%testresult[2][1])
('5.8452644705', '0.0000000051', '0.3676015668', '0.7131703463', '-1.9946891308', '0.0460767956')
"""
    k = reg.k  # (scalar) number of ind. vars (includes constant)
n = reg.n # (scalar) number of observations
vm = reg.vm # (array) coefficients of variance matrix (k x k)
betas = reg.betas # (array) coefficients of the regressors (1 x k)
variance = vm.diagonal()
tStat = betas.reshape(len(betas),) / np.sqrt(variance)
ts_result = []
for t in tStat:
if z_stat:
ts_result.append((t, stats.norm.sf(abs(t)) * 2))
else:
ts_result.append((t, stats.t.sf(abs(t), n - k) * 2))
return ts_result
def pr2_aspatial(tslsreg):
"""
Calculates the pseudo r^2 for the two stage least squares regression.
Parameters
----------
tslsreg : two stage least squares regression object
output instance from a two stage least squares
regression model
Returns
-------
pr2_result : float
value of the squared pearson correlation between
the y and tsls-predicted y vectors
Examples
--------
We first need to import the needed modules. Numpy is needed to convert the
data we read into arrays that ``spreg`` understands and ``pysal`` to
perform all the analysis. The TSLS is required to run the model on
which we will perform the tests.
>>> import numpy as np
>>> import pysal
>>> from twosls import TSLS
Open data on Columbus neighborhood crime (49 areas) using pysal.open().
This is the DBF associated with the Columbus shapefile. Note that
pysal.open() also reads data in CSV format; since the actual class
requires data to be passed in as numpy arrays, the user can read their
data in using any method.
>>> db = pysal.open(pysal.examples.get_path("columbus.dbf"),'r')
Before being able to apply the diagnostics, we have to run a model and,
for that, we need the input variables. Extract the CRIME column (crime
rates) from the DBF file and make it the dependent variable for the
regression. Note that PySAL requires this to be an numpy array of shape
(n, 1) as opposed to the also common shape of (n, ) that other packages
accept.
>>> y = np.array(db.by_col("CRIME"))
>>> y = np.reshape(y, (49,1))
Extract INC (income) vector from the DBF to be used as
independent variables in the regression. Note that PySAL requires this to
be an nxj numpy array, where j is the number of independent variables (not
including a constant). By default this model adds a vector of ones to the
independent variables passed in, but this can be overridden by passing
constant=False.
>>> X = []
>>> X.append(db.by_col("INC"))
>>> X = np.array(X).T
In this case, we consider HOVAL (home value) as an endogenous regressor,
so we acknowledge that by reading it in a different category.
>>> yd = []
>>> yd.append(db.by_col("HOVAL"))
>>> yd = np.array(yd).T
In order to properly account for the endogeneity, we have to pass in the
instruments. Let us consider DISCBD (distance to the CBD) is a good one:
>>> q = []
>>> q.append(db.by_col("DISCBD"))
>>> q = np.array(q).T
Now we are good to run the model. It is an easy one line task.
>>> reg = TSLS(y, X, yd, q=q)
In order to perform the pseudo R^2, we pass the regression object to the
function and we are done!
>>> result = pr2_aspatial(reg)
>>> print("%1.6f"%result)
0.279361
"""
y = tslsreg.y
predy = tslsreg.predy
pr = pearsonr(y, predy)[0]
pr2_result = float(pr ** 2)
return pr2_result
def pr2_spatial(tslsreg):
"""
Calculates the pseudo r^2 for the spatial two stage least squares
regression.
Parameters
----------
    tslsreg : spatial two stage least squares regression object
output instance from a spatial two stage least
squares regression model
Returns
-------
pr2_result : float
value of the squared pearson correlation between
the y and stsls-predicted y vectors
Examples
--------
We first need to import the needed modules. Numpy is needed to convert the
data we read into arrays that ``spreg`` understands and ``pysal`` to
perform all the analysis. The GM_Lag is required to run the model on
which we will perform the tests and the ``pysal.spreg.diagnostics`` module
contains the function with the test.
>>> import numpy as np
>>> import pysal
>>> import pysal.spreg.diagnostics as D
>>> from twosls_sp import GM_Lag
Open data on Columbus neighborhood crime (49 areas) using pysal.open().
This is the DBF associated with the Columbus shapefile. Note that
pysal.open() also reads data in CSV format; since the actual class
requires data to be passed in as numpy arrays, the user can read their
data in using any method.
>>> db = pysal.open(pysal.examples.get_path("columbus.dbf"),'r')
Extract the HOVAL column (home value) from the DBF file and make it the
dependent variable for the regression. Note that PySAL requires this to be
an numpy array of shape (n, 1) as opposed to the also common shape of (n, )
that other packages accept.
>>> y = np.array(db.by_col("HOVAL"))
>>> y = np.reshape(y, (49,1))
Extract INC (income) vectors from the DBF to be used as
independent variables in the regression. Note that PySAL requires this to
be an nxj numpy array, where j is the number of independent variables (not
including a constant). By default this model adds a vector of ones to the
independent variables passed in, but this can be overridden by passing
constant=False.
>>> X = np.array(db.by_col("INC"))
>>> X = np.reshape(X, (49,1))
In this case, we consider CRIME (crime rates) as an endogenous regressor,
so we acknowledge that by reading it in a different category.
>>> yd = np.array(db.by_col("CRIME"))
>>> yd = np.reshape(yd, (49,1))
In order to properly account for the endogeneity, we have to pass in the
instruments. Let us consider DISCBD (distance to the CBD) is a good one:
>>> q = np.array(db.by_col("DISCBD"))
>>> q = np.reshape(q, (49,1))
Since this test has a spatial component, we need to specify the spatial
weights matrix that includes the spatial configuration of the observations
into the error component of the model. To do that, we can open an already
existing gal file or create a new one. In this case, we will create one
from ``columbus.shp``.
>>> w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
Unless there is a good reason not to do it, the weights have to be
row-standardized so every row of the matrix sums to one. Among other
things, this allows to interpret the spatial lag of a variable as the
average value of the neighboring observations. In PySAL, this can be
easily performed in the following way:
>>> w.transform = 'r'
Now we are good to run the spatial lag model. Make sure you pass all the
parameters correctly and, if desired, pass the names of the variables as
well so when you print the summary (reg.summary) they are included:
>>> reg = GM_Lag(y, X, w=w, yend=yd, q=q, w_lags=2, name_x=['inc'], name_y='hoval', name_yend=['crime'], name_q=['discbd'], name_ds='columbus')
Once we have a regression object, we can perform the spatial version of
    the pseudo R^2. It is as simple as one line!
>>> result = pr2_spatial(reg)
>>> print("%1.6f"%result)
0.299649
"""
y = tslsreg.y
predy_e = tslsreg.predy_e
pr = pearsonr(y, predy_e)[0]
pr2_result = float(pr ** 2)
return pr2_result
def _test():
import doctest
doctest.testmod()
if __name__ == '__main__':
_test()
|
thinkopensolutions/geraldo
|
refs/heads/master
|
site/newsite/django_1_0/django/contrib/auth/management/__init__.py
|
12
|
"""
Creates permissions for all installed apps that need permissions.
"""
from django.dispatch import dispatcher
from django.db.models import get_models, signals
from django.contrib.auth import models as auth_app
def _get_permission_codename(action, opts):
return u'%s_%s' % (action, opts.object_name.lower())
def _get_all_permissions(opts):
"Returns (codename, name) for all permissions in the given opts."
perms = []
for action in ('add', 'change', 'delete'):
perms.append((_get_permission_codename(action, opts), u'Can %s %s' % (action, opts.verbose_name_raw)))
return perms + list(opts.permissions)
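
# Illustrative note (not part of the original module): for a model named
# "Article", _get_all_permissions() yields
#   [('add_article', u'Can add article'),
#    ('change_article', u'Can change article'),
#    ('delete_article', u'Can delete article')]
# plus whatever is declared in the model's Meta.permissions.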
def create_permissions(app, created_models, verbosity):
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.models import Permission
app_models = get_models(app)
if not app_models:
return
for klass in app_models:
ctype = ContentType.objects.get_for_model(klass)
for codename, name in _get_all_permissions(klass._meta):
p, created = Permission.objects.get_or_create(codename=codename, content_type__pk=ctype.id,
defaults={'name': name, 'content_type': ctype})
if created and verbosity >= 2:
print "Adding permission '%s'" % p
def create_superuser(app, created_models, verbosity, **kwargs):
from django.contrib.auth.models import User
from django.core.management import call_command
if User in created_models and kwargs.get('interactive', True):
msg = "\nYou just installed Django's auth system, which means you don't have " \
"any superusers defined.\nWould you like to create one now? (yes/no): "
confirm = raw_input(msg)
while 1:
if confirm not in ('yes', 'no'):
confirm = raw_input('Please enter either "yes" or "no": ')
continue
if confirm == 'yes':
call_command("createsuperuser", interactive=True)
break
if 'create_permissions' not in [i.__name__ for i in dispatcher.getAllReceivers(signal=signals.post_syncdb)]:
dispatcher.connect(create_permissions, signal=signals.post_syncdb)
if 'create_superuser' not in [i.__name__ for i in dispatcher.getAllReceivers(signal=signals.post_syncdb, sender=auth_app)]:
dispatcher.connect(create_superuser, sender=auth_app, signal=signals.post_syncdb)
|
arnaud-morvan/QGIS
|
refs/heads/master
|
python/plugins/processing/algs/qgis/PolygonsToLines.py
|
2
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
PolygonsToLines.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from qgis.PyQt.QtGui import QIcon
from qgis.core import (QgsApplication,
QgsGeometry,
QgsGeometryCollection,
QgsMultiLineString,
QgsMultiCurve,
QgsWkbTypes,
QgsProcessing)
from processing.algs.qgis.QgisAlgorithm import QgisFeatureBasedAlgorithm
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class PolygonsToLines(QgisFeatureBasedAlgorithm):
def icon(self):
return QgsApplication.getThemeIcon("/algorithms/mAlgorithmPolygonToLine.svg")
def svgIconPath(self):
return QgsApplication.iconPath("/algorithms/mAlgorithmPolygonToLine.svg")
def tags(self):
return self.tr('line,polygon,convert').split(',')
def group(self):
return self.tr('Vector geometry')
def groupId(self):
return 'vectorgeometry'
def __init__(self):
super().__init__()
def name(self):
return 'polygonstolines'
def displayName(self):
return self.tr('Polygons to lines')
def outputName(self):
return self.tr('Lines')
def outputType(self):
return QgsProcessing.TypeVectorLine
def inputLayerTypes(self):
return [QgsProcessing.TypeVectorPolygon]
def outputWkbType(self, input_wkb_type):
return self.convertWkbToLines(input_wkb_type)
def processFeature(self, feature, context, feedback):
if feature.hasGeometry():
feature.setGeometry(QgsGeometry(self.convertToLines(feature.geometry())))
return [feature]
def supportInPlaceEdit(self, layer):
return False
def convertWkbToLines(self, wkb):
multi_wkb = QgsWkbTypes.NoGeometry
if QgsWkbTypes.singleType(QgsWkbTypes.flatType(wkb)) == QgsWkbTypes.Polygon:
multi_wkb = QgsWkbTypes.MultiLineString
elif QgsWkbTypes.singleType(QgsWkbTypes.flatType(wkb)) == QgsWkbTypes.CurvePolygon:
multi_wkb = QgsWkbTypes.MultiCurve
if QgsWkbTypes.hasM(wkb):
multi_wkb = QgsWkbTypes.addM(multi_wkb)
if QgsWkbTypes.hasZ(wkb):
multi_wkb = QgsWkbTypes.addZ(multi_wkb)
return multi_wkb
def convertToLines(self, geometry):
rings = self.getRings(geometry.constGet())
output_wkb = self.convertWkbToLines(geometry.wkbType())
out_geom = None
if QgsWkbTypes.flatType(output_wkb) == QgsWkbTypes.MultiLineString:
out_geom = QgsMultiLineString()
else:
out_geom = QgsMultiCurve()
for ring in rings:
out_geom.addGeometry(ring)
return out_geom
def getRings(self, geometry):
rings = []
if isinstance(geometry, QgsGeometryCollection):
# collection
for i in range(geometry.numGeometries()):
rings.extend(self.getRings(geometry.geometryN(i)))
else:
# not collection
rings.append(geometry.exteriorRing().clone())
for i in range(geometry.numInteriorRings()):
rings.append(geometry.interiorRing(i).clone())
return rings
|
suneeth51/neutron
|
refs/heads/master
|
neutron/tests/unit/db/quota/test_api.py
|
4
|
# Copyright (c) 2015 OpenStack Foundation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron import context
from neutron.db.quota import api as quota_api
from neutron.tests.unit import testlib_api
class TestQuotaDbApi(testlib_api.SqlTestCaseLight):
def _set_context(self):
self.tenant_id = 'Higuain'
self.context = context.Context('Gonzalo', self.tenant_id,
is_admin=False, is_advsvc=False)
def _create_quota_usage(self, resource, used, reserved, tenant_id=None):
tenant_id = tenant_id or self.tenant_id
return quota_api.set_quota_usage(
self.context, resource, tenant_id,
in_use=used, reserved=reserved)
def _verify_quota_usage(self, usage_info,
expected_resource=None,
expected_used=None,
expected_reserved=None,
expected_dirty=None):
self.assertEqual(self.tenant_id, usage_info.tenant_id)
if expected_resource:
self.assertEqual(expected_resource, usage_info.resource)
if expected_dirty is not None:
self.assertEqual(expected_dirty, usage_info.dirty)
if expected_used is not None:
self.assertEqual(expected_used, usage_info.used)
if expected_reserved is not None:
self.assertEqual(expected_reserved, usage_info.reserved)
if expected_used is not None and expected_reserved is not None:
self.assertEqual(expected_used + expected_reserved,
usage_info.total)
def setUp(self):
super(TestQuotaDbApi, self).setUp()
self._set_context()
def test_create_quota_usage(self):
usage_info = self._create_quota_usage('goals', 26, 10)
self._verify_quota_usage(usage_info,
expected_resource='goals',
expected_used=26,
expected_reserved=10)
def test_update_quota_usage(self):
self._create_quota_usage('goals', 26, 10)
# Higuain scores a double
usage_info_1 = quota_api.set_quota_usage(
self.context, 'goals', self.tenant_id,
in_use=28)
self._verify_quota_usage(usage_info_1,
expected_used=28,
expected_reserved=10)
usage_info_2 = quota_api.set_quota_usage(
self.context, 'goals', self.tenant_id,
reserved=8)
self._verify_quota_usage(usage_info_2,
expected_used=28,
expected_reserved=8)
def test_update_quota_usage_with_deltas(self):
self._create_quota_usage('goals', 26, 10)
# Higuain scores a double
usage_info_1 = quota_api.set_quota_usage(
self.context, 'goals', self.tenant_id,
in_use=2, delta=True)
self._verify_quota_usage(usage_info_1,
expected_used=28,
expected_reserved=10)
usage_info_2 = quota_api.set_quota_usage(
self.context, 'goals', self.tenant_id,
reserved=-2, delta=True)
self._verify_quota_usage(usage_info_2,
expected_used=28,
expected_reserved=8)
def test_set_quota_usage_dirty(self):
self._create_quota_usage('goals', 26, 10)
# Higuain needs a shower after the match
self.assertEqual(1, quota_api.set_quota_usage_dirty(
self.context, 'goals', self.tenant_id))
usage_info = quota_api.get_quota_usage_by_resource_and_tenant(
self.context, 'goals', self.tenant_id)
self._verify_quota_usage(usage_info,
expected_dirty=True)
# Higuain is clean now
self.assertEqual(1, quota_api.set_quota_usage_dirty(
self.context, 'goals', self.tenant_id, dirty=False))
usage_info = quota_api.get_quota_usage_by_resource_and_tenant(
self.context, 'goals', self.tenant_id)
self._verify_quota_usage(usage_info,
expected_dirty=False)
def test_set_dirty_non_existing_quota_usage(self):
self.assertEqual(0, quota_api.set_quota_usage_dirty(
self.context, 'meh', self.tenant_id))
def test_set_resources_quota_usage_dirty(self):
self._create_quota_usage('goals', 26, 10)
self._create_quota_usage('assists', 11, 5)
self._create_quota_usage('bookings', 3, 1)
self.assertEqual(2, quota_api.set_resources_quota_usage_dirty(
self.context, ['goals', 'bookings'], self.tenant_id))
usage_info_goals = quota_api.get_quota_usage_by_resource_and_tenant(
self.context, 'goals', self.tenant_id)
usage_info_assists = quota_api.get_quota_usage_by_resource_and_tenant(
self.context, 'assists', self.tenant_id)
usage_info_bookings = quota_api.get_quota_usage_by_resource_and_tenant(
self.context, 'bookings', self.tenant_id)
self._verify_quota_usage(usage_info_goals, expected_dirty=True)
self._verify_quota_usage(usage_info_assists, expected_dirty=False)
self._verify_quota_usage(usage_info_bookings, expected_dirty=True)
def test_set_resources_quota_usage_dirty_with_empty_list(self):
self._create_quota_usage('goals', 26, 10)
self._create_quota_usage('assists', 11, 5)
self._create_quota_usage('bookings', 3, 1)
# Expect all the resources for the tenant to be set dirty
self.assertEqual(3, quota_api.set_resources_quota_usage_dirty(
self.context, [], self.tenant_id))
usage_info_goals = quota_api.get_quota_usage_by_resource_and_tenant(
self.context, 'goals', self.tenant_id)
usage_info_assists = quota_api.get_quota_usage_by_resource_and_tenant(
self.context, 'assists', self.tenant_id)
usage_info_bookings = quota_api.get_quota_usage_by_resource_and_tenant(
self.context, 'bookings', self.tenant_id)
self._verify_quota_usage(usage_info_goals, expected_dirty=True)
self._verify_quota_usage(usage_info_assists, expected_dirty=True)
self._verify_quota_usage(usage_info_bookings, expected_dirty=True)
# Higuain is clean now
self.assertEqual(1, quota_api.set_quota_usage_dirty(
self.context, 'goals', self.tenant_id, dirty=False))
usage_info = quota_api.get_quota_usage_by_resource_and_tenant(
self.context, 'goals', self.tenant_id)
self._verify_quota_usage(usage_info,
expected_dirty=False)
def _test_set_all_quota_usage_dirty(self, expected):
self._create_quota_usage('goals', 26, 10)
self._create_quota_usage('goals', 12, 6, tenant_id='Callejon')
self.assertEqual(expected, quota_api.set_all_quota_usage_dirty(
self.context, 'goals'))
def test_set_all_quota_usage_dirty(self):
# All goal scorers need a shower after the match, but since this is not
# admin context we can clean only one
self._test_set_all_quota_usage_dirty(expected=1)
def test_get_quota_usage_by_tenant(self):
self._create_quota_usage('goals', 26, 10)
self._create_quota_usage('assists', 11, 5)
# Create a resource for a different tenant
self._create_quota_usage('mehs', 99, 99, tenant_id='buffon')
usage_infos = quota_api.get_quota_usage_by_tenant_id(
self.context, self.tenant_id)
self.assertEqual(2, len(usage_infos))
resources = [info.resource for info in usage_infos]
self.assertIn('goals', resources)
self.assertIn('assists', resources)
def test_get_quota_usage_by_resource(self):
self._create_quota_usage('goals', 26, 10)
self._create_quota_usage('assists', 11, 5)
self._create_quota_usage('goals', 12, 6, tenant_id='Callejon')
usage_infos = quota_api.get_quota_usage_by_resource(
self.context, 'goals')
# Only 1 result expected in tenant context
self.assertEqual(1, len(usage_infos))
self._verify_quota_usage(usage_infos[0],
expected_resource='goals',
expected_used=26,
expected_reserved=10)
def test_get_quota_usage_by_tenant_and_resource(self):
self._create_quota_usage('goals', 26, 10)
usage_info = quota_api.get_quota_usage_by_resource_and_tenant(
self.context, 'goals', self.tenant_id)
self._verify_quota_usage(usage_info,
expected_resource='goals',
expected_used=26,
expected_reserved=10)
def test_get_non_existing_quota_usage_returns_none(self):
self.assertIsNone(quota_api.get_quota_usage_by_resource_and_tenant(
self.context, 'goals', self.tenant_id))
class TestQuotaDbApiAdminContext(TestQuotaDbApi):
def _set_context(self):
self.tenant_id = 'Higuain'
self.context = context.Context('Gonzalo', self.tenant_id,
is_admin=True, is_advsvc=True,
load_admin_roles=False)
def test_get_quota_usage_by_resource(self):
self._create_quota_usage('goals', 26, 10)
self._create_quota_usage('assists', 11, 5)
self._create_quota_usage('goals', 12, 6, tenant_id='Callejon')
usage_infos = quota_api.get_quota_usage_by_resource(
self.context, 'goals')
# 2 results expected in admin context
self.assertEqual(2, len(usage_infos))
for usage_info in usage_infos:
self.assertEqual('goals', usage_info.resource)
def test_set_all_quota_usage_dirty(self):
# All goal scorers need a shower after the match, and with admin
# context we should be able to clean all of them
self._test_set_all_quota_usage_dirty(expected=2)
|
cocagne/zpax
|
refs/heads/master
|
zpax/network/zmq_node.py
|
2
|
'''
This module provides a NetworkNode implementation on top of ZeroMQ sockets.
'''
from twisted.internet import defer, task, reactor
from zpax.network import zed
from zpax.network.channel import Channel
class SimpleEncoder(object):
'''
An in-process "encoder" that is primarily useful for unit testing.
'''
def encode(self, node_uid, message_type, parts):
return ['{0}\0{1}'.format(node_uid, message_type)] + list(parts)
def decode(self, parts):
from_uid, message_type = parts[0].split('\0')
return from_uid, message_type, parts[1:]
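
# Round-trip illustration (not part of the original module): encode() prepends
# a single "uid\0type" frame and decode() splits it back off, e.g.
#   enc = SimpleEncoder()
#   enc.decode(enc.encode('n1', 'ping', ['payload']))
#   -> ('n1', 'ping', ['payload'])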
class NetworkNode (object):
'''
Messages are handled by adding instances to the message_handlers list. The
first instance that contains a method named 'receive_<message_type>'
will have that method called. The first argument is always the message
sender's node_uid. The remaining positional arguments are filled with the
parts of the ZeroMQ message.
'''
def __init__(self, node_uid, encoder=SimpleEncoder()):
self.node_uid = node_uid
self.zpax_nodes = None # Dictionary of node_uid -> (rtr_addr, pub_addr)
self.pax_rtr = None
self.pax_pub = None
self.pax_sub = None
self.encoder = encoder
self.message_handlers = dict() # Dictionary of channel_name => list( message_handlers )
def add_message_handler(self, channel_name, handler):
        if channel_name not in self.message_handlers:
self.message_handlers[ channel_name ] = list()
self.message_handlers[channel_name].append( handler )
def connect(self, zpax_nodes):
'''
zpax_nodes - Dictionary of node_uid => (zmq_rtr_addr, zmq_pub_addr)
'''
if not self.node_uid in zpax_nodes:
raise Exception('Missing local node configuration')
self.zpax_nodes = zpax_nodes
if self.pax_rtr:
self.pax_rtr.close()
self.pax_pub.close()
self.pax_sub.close()
self.pax_rtr = zed.ZmqRouterSocket()
self.pax_pub = zed.ZmqPubSocket()
self.pax_sub = zed.ZmqSubSocket()
self.pax_rtr.identity = self.node_uid
self.pax_rtr.linger = 0
self.pax_pub.linger = 0
self.pax_sub.linger = 0
self.pax_rtr.bind(zpax_nodes[self.node_uid][0])
self.pax_pub.bind(zpax_nodes[self.node_uid][1])
self.pax_rtr.messageReceived = self._on_rtr_received
self.pax_sub.messageReceived = self._on_sub_received
self.pax_sub.subscribe = 'zpax'
for node_uid, tpl in zpax_nodes.iteritems():
self.pax_sub.connect(tpl[1])
if self.node_uid < node_uid:
# We only need 1 connection between any two router nodes so
# we'll make it the responsibility of the lower UID node to
# initiate the connection
self.pax_rtr.connect(tpl[0])
def shutdown(self):
self.pax_rtr.close()
self.pax_pub.close()
self.pax_sub.close()
self.pax_rtr = None
self.pax_pub = None
self.pax_sub = None
def broadcast_message(self, channel_name, message_type, *parts):
if len(parts) == 1 and isinstance(parts[0], (list, tuple)):
parts = parts[0]
l = ['zpax', channel_name]
l.extend( self.encoder.encode(self.node_uid, message_type, parts) )
self.pax_pub.send( l )
def unicast_message(self, to_uid, channel_name, message_type, *parts):
if to_uid == self.node_uid:
            self._dispatch_message( self.node_uid, channel_name, message_type, parts )
return
if len(parts) == 1 and isinstance(parts[0], (list, tuple)):
parts = parts[0]
l = [str(to_uid), channel_name]
l.extend( self.encoder.encode(self.node_uid, message_type, parts) )
self.pax_rtr.send( l )
def _dispatch_message(self, from_uid, channel_name, message_type, parts):
handlers = self.message_handlers.get(channel_name, None)
if handlers:
for h in handlers:
f = getattr(h, 'receive_' + message_type, None)
if f:
f(from_uid, *parts)
break
def _on_rtr_received(self, raw_parts):
# discard source address. We'll use the one embedded in the message
# for consistency
channel_name = raw_parts[1]
from_uid, message_type, parts = self.encoder.decode( raw_parts[2:] )
self._dispatch_message( from_uid, channel_name, message_type, parts )
def _on_sub_received(self, raw_parts):
# discard the message header. Can address targeted subscriptions
# later
channel_name = raw_parts[1]
from_uid, message_type, parts = self.encoder.decode( raw_parts[2:] )
self._dispatch_message( from_uid, channel_name, message_type, parts )
|
kenshay/ImageScript
|
refs/heads/master
|
ProgramData/SystemFiles/Python/Lib/site-packages/pylint/test/functional/membership_protocol_py3.py
|
12
|
# pylint: disable=missing-docstring,too-few-public-methods,no-init,no-self-use,unused-argument,pointless-statement,expression-not-assigned
# metaclasses that support membership test protocol
class MetaIterable(type):
def __iter__(cls):
return iter((1, 2, 3))
class MetaOldIterable(type):
def __getitem__(cls, key):
if key < 10:
return key ** 2
else:
raise IndexError("bad index")
class MetaContainer(type):
def __contains__(cls, key):
return False
class IterableClass(metaclass=MetaOldIterable):
pass
class OldIterableClass(metaclass=MetaOldIterable):
pass
class ContainerClass(metaclass=MetaContainer):
pass
def test():
1 in IterableClass
1 in OldIterableClass
1 in ContainerClass
1 in IterableClass() # [unsupported-membership-test]
1 in OldIterableClass() # [unsupported-membership-test]
1 in ContainerClass() # [unsupported-membership-test]
|
brandonium21/snowflake
|
refs/heads/master
|
snowflakeEnv/lib/python2.7/site-packages/gunicorn/workers/__init__.py
|
15
|
# -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
import sys
# supported gunicorn workers.
SUPPORTED_WORKERS={
"sync": "gunicorn.workers.sync.SyncWorker",
"eventlet": "gunicorn.workers.geventlet.EventletWorker",
"gevent": "gunicorn.workers.ggevent.GeventWorker",
"gevent_wsgi": "gunicorn.workers.ggevent.GeventPyWSGIWorker",
"gevent_pywsgi": "gunicorn.workers.ggevent.GeventPyWSGIWorker",
"tornado": "gunicorn.workers.gtornado.TornadoWorker"}
if sys.version_info >= (3, 3):
# gaiohttp worker can be used with Python 3.3+ only.
SUPPORTED_WORKERS["gaiohttp"] = "gunicorn.workers.gaiohttp.AiohttpWorker"
|
quake0day/oj
|
refs/heads/master
|
bitSwapRequired.py
|
1
|
class Solution:
"""
    @param a, b: Two integers
    @return: An integer
"""
# def bitSwapRequired(self, a, b):
# addition = 0
# if ((a < 0 and b > 0) or (a > 0 and b < 0)):
# return 31
# # write your code here
# bin_a = bin(a).split("b")[1][::-1]
# bin_b = bin(b).split("b")[1][::-1]
# len_a = len(bin_a)
# len_b = len(bin_b)
# diff_len = abs(len_a - len_b)
# min_len = min(len_a, len_b)
# result = 0
# for i in xrange(min_len):
# if bin_a[i] != bin_b[i]:
# result += 1
# for j in xrange(min_len,diff_len+min_len):
# try:
# if bin_a[j] == '1':
# result += 1
# except:
# pass
# try:
# if bin_b[j] == '1':
# result += 1
# except:
# pass
# return result
# while xor != 0:
# if xor & 1 == 1:
# result += 1
# xor >>= 1
# return result
    def bitSwapRequired(self, a, b):
        # count the bit positions (0..31) where a and b differ
        result = 0
        index = 0
        while index < 32:
            if ((1 << index) & a) != ((1 << index) & b):
                result += 1
            index += 1
        return result
a = Solution()
print a.bitSwapRequired(14, 31)
print a.bitSwapRequired(67, 31)
print a.bitSwapRequired(1, -1)
print a.bitSwapRequired(-2147483648, 2147483647)
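
# Illustrative alternative (not part of the original solution): masking the XOR
# to 32 bits and using Kernighan's bit-clearing trick gives the same counts as
# the loop above, including for negative inputs.
def bit_swap_required_kernighan(a, b):
    xor = (a ^ b) & 0xFFFFFFFF  # mask so Python's unbounded negatives behave
    count = 0
    while xor:
        xor &= xor - 1  # clear the lowest set bit
        count += 1
    return count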
|
Khan/pyobjc-framework-Cocoa
|
refs/heads/master
|
Examples/AppKit/CocoaBindings/ToDos/Category.py
|
3
|
#
# Category.py
# ToDos
#
# Converted by u.fiedler on 09.02.05.
#
# The original version was written in Objective-C by Malcolm Crawford
# at http://homepage.mac.com/mmalc/CocoaExamples/controllers.html
from Foundation import *
import objc
class Category(NSObject):
title = objc.ivar('title')
priority = objc.ivar('priority', 'i')
@classmethod
def allCategories(cls):
"""Predefined global list of categories"""
return categories
@classmethod
def categoryForPriority_(cls, thePriority):
for category in categories:
if thePriority >= category.priority:
return category
return None
@classmethod
def categoryWithTitle_andPriority_(cls, aTitle, aValue):
"""Convenience constructor"""
newCategory = Category.alloc().init()
newCategory.title = aTitle
newCategory.priority = aValue
return newCategory
# NSCoding methods
# To encode, simply save 'priority'; on decode, replace self with
# the existing instance from 'allCategories' with the same priority
def encodeWithCoder_(self, encoder):
if encoder.allowsKeyedCoding():
encoder.encodeInt_forKey_(self.priority, u"priority")
else:
encoder.encodeObject_(self.priority)
def initWithCoder_(self, decoder):
if decoder.allowsKeyedCoding():
thePriority = decoder.decodeIntForKey_(u"priority")
else:
thePriority = decoder.decodeObject()
return Category.categoryForPriority_(thePriority)
categories = [
Category.categoryWithTitle_andPriority_(u"Vital", 11),
Category.categoryWithTitle_andPriority_(u"Very Important", 4),
Category.categoryWithTitle_andPriority_(u"Important", 3),
Category.categoryWithTitle_andPriority_(u"Not Important", 2),
Category.categoryWithTitle_andPriority_(u"Whenever", 0)
]
|
xia0pin9/capstone
|
refs/heads/next
|
bindings/python/test_detail.py
|
2
|
#!/usr/bin/env python
# Capstone Python bindings, by Nguyen Anh Quynh <[email protected]>
from __future__ import print_function
from capstone import *
X86_CODE16 = b"\x8d\x4c\x32\x08\x01\xd8\x81\xc6\x34\x12\x00\x00"
X86_CODE32 = b"\x8d\x4c\x32\x08\x01\xd8\x81\xc6\x34\x12\x00\x00"
X86_CODE64 = b"\x55\x48\x8b\x05\xb8\x13\x00\x00"
ARM_CODE = b"\xED\xFF\xFF\xEB\x04\xe0\x2d\xe5\x00\x00\x00\x00\xe0\x83\x22\xe5\xf1\x02\x03\x0e\x00\x00\xa0\xe3\x02\x30\xc1\xe7\x00\x00\x53\xe3"
ARM_CODE2 = b"\x10\xf1\x10\xe7\x11\xf2\x31\xe7\xdc\xa1\x2e\xf3\xe8\x4e\x62\xf3"
THUMB_CODE = b"\x70\x47\xeb\x46\x83\xb0\xc9\x68"
THUMB_CODE2 = b"\x4f\xf0\x00\x01\xbd\xe8\x00\x88"
THUMB_MCLASS = b"\xef\xf3\x02\x80"
ARMV8 = b"\xe0\x3b\xb2\xee\x42\x00\x01\xe1\x51\xf0\x7f\xf5"
MIPS_CODE = b"\x0C\x10\x00\x97\x00\x00\x00\x00\x24\x02\x00\x0c\x8f\xa2\x00\x00\x34\x21\x34\x56"
MIPS_CODE2 = b"\x56\x34\x21\x34\xc2\x17\x01\x00"
MIPS_32R6M = b"\x00\x07\x00\x07\x00\x11\x93\x7c\x01\x8c\x8b\x7c\x00\xc7\x48\xd0"
MIPS_32R6 = b"\xec\x80\x00\x19\x7c\x43\x22\xa0"
ARM64_CODE = b"\x09\x00\x38\xd5\xbf\x40\x00\xd5\x0c\x05\x13\xd5\x20\x50\x02\x0e\x20\xe4\x3d\x0f\x00\x18\xa0\x5f\xa2\x00\xae\x9e\x9f\x37\x03\xd5\xbf\x33\x03\xd5\xdf\x3f\x03\xd5\x21\x7c\x02\x9b\x21\x7c\x00\x53\x00\x40\x21\x4b\xe1\x0b\x40\xb9\x20\x04\x81\xda\x20\x08\x02\x8b\x10\x5b\xe8\x3c"
PPC_CODE = b"\x80\x20\x00\x00\x80\x3f\x00\x00\x10\x43\x23\x0e\xd0\x44\x00\x80\x4c\x43\x22\x02\x2d\x03\x00\x80\x7c\x43\x20\x14\x7c\x43\x20\x93\x4f\x20\x00\x21\x4c\xc8\x00\x21"
PPC_CODE2 = b"\x10\x60\x2a\x10\x10\x64\x28\x88\x7c\x4a\x5d\x0f"
SPARC_CODE = b"\x80\xa0\x40\x02\x85\xc2\x60\x08\x85\xe8\x20\x01\x81\xe8\x00\x00\x90\x10\x20\x01\xd5\xf6\x10\x16\x21\x00\x00\x0a\x86\x00\x40\x02\x01\x00\x00\x00\x12\xbf\xff\xff\x10\xbf\xff\xff\xa0\x02\x00\x09\x0d\xbf\xff\xff\xd4\x20\x60\x00\xd4\x4e\x00\x16\x2a\xc2\x80\x03"
SPARCV9_CODE = b"\x81\xa8\x0a\x24\x89\xa0\x10\x20\x89\xa0\x1a\x60\x89\xa0\x00\xe0"
SYSZ_CODE = b"\xed\x00\x00\x00\x00\x1a\x5a\x0f\x1f\xff\xc2\x09\x80\x00\x00\x00\x07\xf7\xeb\x2a\xff\xff\x7f\x57\xe3\x01\xff\xff\x7f\x57\xeb\x00\xf0\x00\x00\x24\xb2\x4f\x00\x78"
XCORE_CODE = b"\xfe\x0f\xfe\x17\x13\x17\xc6\xfe\xec\x17\x97\xf8\xec\x4f\x1f\xfd\xec\x37\x07\xf2\x45\x5b\xf9\xfa\x02\x06\x1b\x10"
M68K_CODE = b"\xd4\x40\x87\x5a\x4e\x71\x02\xb4\xc0\xde\xc0\xde\x5c\x00\x1d\x80\x71\x12\x01\x23\xf2\x3c\x44\x22\x40\x49\x0e\x56\x54\xc5\xf2\x3c\x44\x00\x44\x7a\x00\x00\xf2\x00\x0a\x28\x4E\xB9\x00\x00\x00\x12\x4E\x75"
all_tests = (
(CS_ARCH_X86, CS_MODE_16, X86_CODE16, "X86 16bit (Intel syntax)", None),
(CS_ARCH_X86, CS_MODE_32, X86_CODE32, "X86 32bit (ATT syntax)", CS_OPT_SYNTAX_ATT),
(CS_ARCH_X86, CS_MODE_32, X86_CODE32, "X86 32 (Intel syntax)", None),
(CS_ARCH_X86, CS_MODE_64, X86_CODE64, "X86 64 (Intel syntax)", None),
(CS_ARCH_ARM, CS_MODE_ARM, ARM_CODE, "ARM", None),
(CS_ARCH_ARM, CS_MODE_ARM, ARM_CODE2, "ARM: Cortex-A15 + NEON", None),
(CS_ARCH_ARM, CS_MODE_THUMB, THUMB_CODE, "THUMB", None),
(CS_ARCH_ARM, CS_MODE_THUMB, THUMB_CODE2, "THUMB-2", None),
(CS_ARCH_ARM, CS_MODE_THUMB + CS_MODE_MCLASS, THUMB_MCLASS, "Thumb-MClass", None),
(CS_ARCH_ARM, CS_MODE_ARM + CS_MODE_V8, ARMV8, "Arm-V8", None),
(CS_ARCH_ARM64, CS_MODE_ARM, ARM64_CODE, "ARM-64", None),
(CS_ARCH_MIPS, CS_MODE_MIPS32 + CS_MODE_BIG_ENDIAN, MIPS_CODE, "MIPS-32 (Big-endian)", None),
(CS_ARCH_MIPS, CS_MODE_MIPS64 + CS_MODE_LITTLE_ENDIAN, MIPS_CODE2, "MIPS-64-EL (Little-endian)", None),
(CS_ARCH_MIPS, CS_MODE_MIPS32R6 + CS_MODE_MICRO + CS_MODE_BIG_ENDIAN, MIPS_32R6M, "MIPS-32R6 | Micro (Big-endian)", None),
(CS_ARCH_MIPS, CS_MODE_MIPS32R6 + CS_MODE_BIG_ENDIAN, MIPS_32R6, "MIPS-32R6 (Big-endian)", None),
(CS_ARCH_PPC, CS_MODE_BIG_ENDIAN, PPC_CODE, "PPC-64", None),
(CS_ARCH_PPC, CS_MODE_BIG_ENDIAN + CS_MODE_QPX, PPC_CODE2, "PPC-64 + QPX", None),
(CS_ARCH_SPARC, CS_MODE_BIG_ENDIAN, SPARC_CODE, "Sparc", None),
(CS_ARCH_SPARC, CS_MODE_BIG_ENDIAN + CS_MODE_V9, SPARCV9_CODE, "SparcV9", None),
(CS_ARCH_SYSZ, 0, SYSZ_CODE, "SystemZ", None),
(CS_ARCH_XCORE, 0, XCORE_CODE, "XCore", None),
(CS_ARCH_M68K, CS_MODE_BIG_ENDIAN | CS_MODE_M68K_040, M68K_CODE, "M68K (68040)", None),
)
def print_detail(insn):
print("0x%x:\t%s\t%s // insn-ID: %u, insn-mnem: %s" \
% (insn.address, insn.mnemonic, insn.op_str, insn.id, \
insn.insn_name()))
# "data" instruction generated by SKIPDATA option has no detail
if insn.id == 0:
return
if len(insn.regs_read) > 0:
print("\tImplicit registers read: ", end=''),
for m in insn.regs_read:
print("%s " % insn.reg_name(m), end=''),
print()
if len(insn.regs_write) > 0:
print("\tImplicit registers modified: ", end=''),
for m in insn.regs_write:
print("%s " % insn.reg_name(m), end=''),
print()
if len(insn.groups) > 0:
print("\tThis instruction belongs to groups: ", end=''),
for m in insn.groups:
print("%s " % insn.group_name(m), end=''),
print()
# ## Test class Cs
def test_class():
for (arch, mode, code, comment, syntax) in all_tests:
print('*' * 40)
print("Platform: %s" % comment)
print("Disasm:")
try:
md = Cs(arch, mode)
md.detail = True
if syntax is not None:
md.syntax = syntax
for insn in md.disasm(code, 0x1000):
print_detail(insn)
print()
except CsError as e:
print("ERROR: %s" % e)
if __name__ == '__main__':
test_class()
|
kanarelo/dairy
|
refs/heads/master
|
dairy/core/views.py
|
1
|
import json
import random
from tumasms import Tumasms
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.db.models import Q
from django.http import JsonResponse, HttpResponse
from django.template.response import TemplateResponse
from django.views.decorators.csrf import csrf_exempt
from django.contrib.gis import measure
from .models import *
@login_required
def index(request):
return TemplateResponse(request, "index.html", {
})
def starter(request):
return TemplateResponse(request, "starter.html", {
})
def mobile_phone(request):
return TemplateResponse(request, "mobile_phone.html", {
})
def get_session_id(limit=10):
chosen_chars = ""
possible_chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"
    # Loop while the character count is less than the limit
    while len(chosen_chars) < limit:
        # get a random char from possible_chars and concatenate it to chosen_chars
        chosen_chars = (chosen_chars + random.choice(possible_chars))
    if chosen_chars[0] == '0':
        # Shouldn't start with a zero; if it does, recursively call
        # get_session_id again to start all over
        return get_session_id(limit=limit)
return chosen_chars
def get_ussd_response(
input_value=None,
session_id=None,
phone_number=None
):
response = "Sorry, your request could not be processed. Try again."
if session_id is None:
session_id = get_session_id()
#Validate to see that the main variables have values
if phone_number is None:
return (response, None)
    # Get the USSDRequest object, or create it if it does not exist;
    # ussd_object_created is True only when a new record was created
(ussd_request_obj, ussd_object_created) = USSDRequest.objects.get_or_create(
session_id=session_id
)
# If the ussd request has been closed before, return nothing
# This is to avoid updating a closed request
if (ussd_request_obj.request_closed):
return (response, None)
    # get menu_step and last_menu_step from the ussd_request
menu_step = last_menu_step = ussd_request_obj.last_step
    # if the ussd_request_obj was not newly created, advance to the next menu step
if not ussd_object_created:
menu_step = (last_menu_step + 1)
elif ussd_object_created:
        # for the service, category and product menus, 99 steps back and
        # 100 jumps to the user-type menu
if last_menu_step in (
USSDRequest.MENU_SERVICE,
USSDRequest.MENU_PRODUCT_SERVICE_CATEGORY,
USSDRequest.MENU_PRODUCT,
):
if int(input_value) == 99:
menu_step -= 1
elif int(input_value) == 100:
menu_step = USSDRequest.MENU_USER_TYPE
try:
#Using the phone number provided by the Mobile Operator / Simulator, get the user
request_user = User.objects.get(mobile_phone_number=phone_number)
ussd_request_obj.user = request_user
except User.DoesNotExist:
#If no user exists, set as None, we will check to decide whether to register
request_user = None
if (request_user is None) and (menu_step > USSDRequest.MENU_FIRST):
return (response, ussd_request_obj)
# STEP 0:
# We provide a welcome message for the user
elif menu_step == USSDRequest.MENU_FIRST:
if request_user is not None: # We know the user, so, we prompt for authentication
response = (("Welcome %s, Please enter your PIN number to get started") % request_user.name)
else:
            #We don't know the user, so let's ask them to call us
response = (
"You do not have an account with us. Contact 0724158671 to register"
)
ussd_request_obj.request_closed = True
    #STEP 1.
    # The user has provided us with the PIN number
elif menu_step == USSDRequest.MENU_USER_TYPE:
pin_number = input_value
        #Let's check if the PIN number matches the user's PIN
if request_user.pin_number == pin_number:
response = (
"You are a? \n"
"1. Farmer\n"
"2. Supplier\n"
)
else:
            #The PINs don't match; possibly not the owner
response = "The PIN you have provided is not valid. Try again."
return (response, ussd_request_obj)
    #STEP 2:
    # We already know the user type, let's check the services
elif menu_step == USSDRequest.MENU_SERVICE:
user_type = int(input_value)
if user_type in (USSDRequest.FARMER_USER, USSDRequest.SUPPLIER_USER): #farmer
response = "You are looking for? \n" if (user_type == 1) else "What do you provide? \n"
response += (
"1. Product \n"
"2. Service \n"
"0. Back \n"
"00. Menu \n"
)
#Set the user type
ussd_request_obj.user_type = int(user_type)
elif menu_step == USSDRequest.MENU_PRODUCT_SERVICE_CATEGORY:
product_or_service = int(input_value)
if product_or_service in (USSDRequest.PRODUCT_ORDER, USSDRequest.SERVICE_ORDER):
if product_or_service == USSDRequest.PRODUCT_ORDER:
product_types = []
for p_type in ProductType.objects.all():
product_types.append("%s. %s" % (p_type.id, p_type.name))
response = "%s%s%s%s" % (
"Product Category \n",
("\n".join(product_types)),
"\n0. Back \n",
"00. Menu \n"
)
elif product_or_service == USSDRequest.SERVICE_ORDER:
services = []
for service in Service.objects.all():
services.append("%s. %s" % (service.id, service.name))
response = "%s%s%s%s" % (
"Service \n",
"\n".join(services),
"\n0. Back \n",
"00. Menu \n"
)
ussd_request_obj.order_type = int(input_value)
elif menu_step == USSDRequest.MENU_PRODUCT:
if ussd_request_obj.order_type == USSDRequest.PRODUCT_ORDER:
product_type_id = int(input_value)
products = []
for product in Product.objects.filter(
product_type__id=product_type_id
):
products.append("%s. %s" % (product.id, product.name))
response = "%s%s%s%s" % (
"Select Product \n",
"\n".join(products),
"\n0. Back \n",
"00. Menu \n"
)
ussd_request_obj.product_type_id = product_type_id
elif ussd_request_obj.order_type == USSDRequest.SERVICE_ORDER:
service_id = int(input_value)
response = ("Your order has been received. Wait for an SMS response")
ussd_request_obj.service_id = service_id
ussd_request_obj.request_closed = True
send_sms_to_nearby_suppliers(ussd_request_obj)
elif menu_step == USSDRequest.MENU_QUANTITY:
product_id = int(input_value)
try:
product = Product.objects.get(id=product_id)
ussd_request_obj.product = product
response = ("Enter quantity in %s" % product.get_unit_display())
except Product.DoesNotExist:
product = None
elif menu_step == USSDRequest.MENU_PRICE:
quantity = float(input_value)
ussd_request_obj.quantity = quantity
response = "Enter price per unit"
elif menu_step == USSDRequest.MENU_LAST:
price = float(input_value)
ussd_request_obj.price = price
ussd_request_obj.request_closed = True
response = ("Your order has been received. Wait for an SMS response")
# ussd_request_obj.save()
send_sms_to_nearby_suppliers(ussd_request_obj)
ussd_request_obj.last_step = menu_step
ussd_request_obj.save()
return (response, ussd_request_obj)
def ussd_request(request):
    '''
    This is a USSD request view. It receives an HTTP request object from the
    Django web server, then checks for values passed via the GET dictionary.
    We assume that in every step we will be provided with a session id, an
    input value and a phone number.
    This view assists the user in creating a USSDRequest object that will be
    used by the system to create an order.
    '''
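    # An illustrative request (the URL prefix depends on this project's
    # urlconf, which is not shown here):
    #   GET /ussd/?session_id=AB12CD34EF&phone_number=0712345678&input_value=1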
response = ""
if request.method == "GET":
#Get the important values from the HTTP Request Object
input_value = request.GET.get('input_value')
session_id = request.GET.get('session_id')
phone_number = request.GET.get('phone_number')
        (response, _ussd_request_obj) = get_ussd_response(
            input_value=input_value,
            session_id=session_id,
            phone_number=phone_number
        )
return HttpResponse(response)
def http_simulator(request):
session_id = None
phone_number = None
request_closed = False
if request.method == "GET":
message = "Please enter your phone number to start"
elif request.method == "POST":
session_id = request.POST.get('session_id')
phone_number = request.POST.get('phone_number')
input_value = request.POST.get('input_value')
if not USSDRequest.objects.filter(
session_id=session_id,
user__mobile_phone_number=phone_number
).exists():
phone_number = input_value
(message, ussd_request_obj) = get_ussd_response(
input_value=input_value,
session_id=session_id,
phone_number=phone_number
)
session_id = ussd_request_obj.session_id
phone_number = (
ussd_request_obj.user.mobile_phone_number
if ussd_request_obj.user else phone_number
)
request_closed = ussd_request_obj.request_closed
return TemplateResponse(request, "ussd.html", {
"session_id": session_id,
"phone_number": phone_number,
"request_closed": request_closed,
"message": message
})
@login_required
def suppliers_geojson(request):
context = {
"type": "FeatureCollection",
"crs": {
"type": "name",
"properties": {
"name": "urn:ogc:def:crs:OGC:1.3:CRS84"
}
},
"features": []
}
def to_json(supplier, id=None):
product_or_service_rendered = None
item_type = None
if service_name:
item_type = "Service"
supplier_services = supplier.supplier_services.all().first()
product_or_service_rendered = supplier_services.service.name
elif product_name or product_category:
item_type = "Product"
supplier_products = supplier.supplier_products.all().first()
product_or_service_rendered = supplier_products.product.name
else:
product_or_service_rendered = supplier.supplier_services.all().first()
if product_or_service_rendered:
item_type = "Service"
product_or_service_rendered = product_or_service_rendered.service.name
else:
item_type = "Product"
product_or_service_rendered = supplier.supplier_products.all().first()
product_or_service_rendered = product_or_service_rendered.product.name
return {
"type": "Feature",
"properties": {
"id": id,
"Name": supplier.user.name,
"ItemName": product_or_service_rendered,
"ItemType": item_type,
"Contact": supplier.user.mobile_phone_number
},
"geometry": json.loads(supplier.geom.geojson)
}
suppliers = Supplier.objects.all()
product_name = None
product_category = None
service_name = None
if request.GET.get('item_type') == "product":
product_name = request.GET.get('product_name')
product_category = request.GET.get('product_category')
        quantity = int(request.GET.get('quantity', '0').strip() or 0)
        price = int(request.GET.get('price', '0').strip() or 0)
suppliers = suppliers.filter(
Q(supplier_products__product__name__icontains=product_name)|
Q(supplier_products__product__product_type__name__icontains=product_category)|
Q(supplier_products__price_per_unit__lte=price)|
Q(supplier_products__quantity__gte=quantity)
)
elif request.GET.get('item_type') == "service":
service_name = request.GET.get('service_name')
suppliers = suppliers.filter(
Q(supplier_services__service__name__icontains=service_name)
)
farmer_location = request.user.farmer_profile.geom
suppliers = get_suppliers_nearby(farmer_location, supplier_queryset=suppliers, radius=100)
context['features'] = [
to_json(supplier, id=index)
for index, supplier in enumerate(suppliers)
if (
supplier.supplier_services.exists() or
supplier.supplier_products.exists()
) and supplier.geom is not None
]
return JsonResponse(context)
def send_sms(message, phone_numbers=[]):
# Setup API credentials
api_key = settings.TUMA_SMS_API_KEY
api_signature = settings.TUMA_SMS_API_SIGNATURE
# Make API request
tumasms = Tumasms(api_key, api_signature) # Instantiate API library
for phone_number in phone_numbers:
tumasms.queue_sms(phone_number, message, "Sender_ID") # Replace example with valid recipient, message and sender id
tumasms.send_sms() # Initiate API call to send messages
    # Get API response
    print tumasms.status # View the status, either SUCCESS or FAIL
    print tumasms.message # Returns the available SMS credit balance
    print tumasms.description # Returns a status message
    print tumasms.response_xml # Returns the full XML response
    print tumasms.response_json # Returns the full JSON response
def send_sms_to_nearby_suppliers(ussd_request_obj):
suppliers = Supplier.objects.all()
message = ""
if ussd_request_obj.order_type == USSDRequest.PRODUCT_ORDER:
message = "%s Delivered By:" % ussd_request_obj.product.name
suppliers = suppliers.filter(
Q(supplier_products__product=ussd_request_obj.product),
Q(supplier_products__price_per_unit__lte=ussd_request_obj.price)|
Q(supplier_products__quantity__gte=ussd_request_obj.quantity)
)
suppliers = get_suppliers_nearby(ussd_request_obj.user.farmer_profile.geom, supplier_queryset=suppliers)
if suppliers:
for index, supplier in enumerate(suppliers):
message += "\n%s.%s %s 1%s@%s" % (
index + 1,
supplier.user.name.split()[0],
supplier.user.mobile_phone_number,
ussd_request_obj.product.get_unit_display(),
ussd_request_obj.product.product_suppliers.filter(
supplier=supplier,
).first() or 0
)
else:
message = "We could not locate any suppliers within your location."
elif ussd_request_obj.order_type == USSDRequest.SERVICE_ORDER:
message = "%s Services By:" % ussd_request_obj.service.name
suppliers = suppliers.filter(
Q(supplier_services__service=ussd_request_obj.service)
)
suppliers = get_suppliers_nearby(ussd_request_obj.user.farmer_profile.geom, supplier_queryset=suppliers)
if suppliers:
for index, supplier in enumerate(suppliers):
message += "\n%s.%s %s" % (
                    (index + 1), supplier.user.name.split()[0],
supplier.user.mobile_phone_number,
)
else:
message = "We could not locate any suppliers within your location."
send_sms(message, phone_numbers=[ussd_request_obj.user.mobile_phone_number])
def get_suppliers_nearby(farmer_location, supplier_queryset=None, radius=2.5):
if supplier_queryset is None:
supplier_queryset = Supplier.objects.all()
if farmer_location is not None:
distance_from_point = {
'km': radius
}
supplier_queryset = supplier_queryset.filter(
geom__distance_lte=(
farmer_location, measure.D(**distance_from_point)
)
)
return supplier_queryset
else:
return []
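# Illustrative usage sketch (names here are made up; assumes `farmer` is a
# profile object carrying a GeoDjango point in `farmer.geom`):
#   nearby = get_suppliers_nearby(farmer.geom, radius=5) # suppliers within 5 km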
|
Bitl/RBXLegacy-src
|
refs/heads/stable
|
Cut/RBXLegacyDiscordBot/lib/youtube_dl/extractor/odnoklassniki.py
|
24
|
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import (
compat_etree_fromstring,
compat_parse_qs,
compat_urllib_parse_unquote,
compat_urllib_parse_urlparse,
)
from ..utils import (
ExtractorError,
unified_strdate,
int_or_none,
qualities,
unescapeHTML,
)
class OdnoklassnikiIE(InfoExtractor):
_VALID_URL = r'https?://(?:(?:www|m|mobile)\.)?(?:odnoklassniki|ok)\.ru/(?:video(?:embed)?|web-api/video/moviePlayer)/(?P<id>[\d-]+)'
_TESTS = [{
# metadata in JSON
'url': 'http://ok.ru/video/20079905452',
'md5': '6ba728d85d60aa2e6dd37c9e70fdc6bc',
'info_dict': {
'id': '20079905452',
'ext': 'mp4',
'title': 'Культура меняет нас (прекрасный ролик!))',
'duration': 100,
'upload_date': '20141207',
'uploader_id': '330537914540',
'uploader': 'Виталий Добровольский',
'like_count': int,
'age_limit': 0,
},
'skip': 'Video has been blocked',
}, {
# metadataUrl
'url': 'http://ok.ru/video/63567059965189-0?fromTime=5',
'md5': '6ff470ea2dd51d5d18c295a355b0b6bc',
'info_dict': {
'id': '63567059965189-0',
'ext': 'mp4',
'title': 'Девушка без комплексов ...',
'duration': 191,
'upload_date': '20150518',
'uploader_id': '534380003155',
'uploader': '☭ Андрей Мещанинов ☭',
'like_count': int,
'age_limit': 0,
'start_time': 5,
},
}, {
# YouTube embed (metadataUrl, provider == USER_YOUTUBE)
'url': 'http://ok.ru/video/64211978996595-1',
'md5': '2f206894ffb5dbfcce2c5a14b909eea5',
'info_dict': {
'id': '64211978996595-1',
'ext': 'mp4',
'title': 'Космическая среда от 26 августа 2015',
'description': 'md5:848eb8b85e5e3471a3a803dae1343ed0',
'duration': 440,
'upload_date': '20150826',
'uploader_id': 'tvroscosmos',
'uploader': 'Телестудия Роскосмоса',
'age_limit': 0,
},
}, {
# YouTube embed (metadata, provider == USER_YOUTUBE, no metadata.movie.title field)
'url': 'http://ok.ru/video/62036049272859-0',
'info_dict': {
'id': '62036049272859-0',
'ext': 'mp4',
'title': 'МУЗЫКА ДОЖДЯ .',
'description': 'md5:6f1867132bd96e33bf53eda1091e8ed0',
'upload_date': '20120106',
'uploader_id': '473534735899',
'uploader': 'МARINA D',
'age_limit': 0,
},
'params': {
'skip_download': True,
},
'skip': 'Video has not been found',
}, {
'url': 'http://ok.ru/web-api/video/moviePlayer/20079905452',
'only_matching': True,
}, {
'url': 'http://www.ok.ru/video/20648036891',
'only_matching': True,
}, {
'url': 'http://www.ok.ru/videoembed/20648036891',
'only_matching': True,
}, {
'url': 'http://m.ok.ru/video/20079905452',
'only_matching': True,
}, {
'url': 'http://mobile.ok.ru/video/20079905452',
'only_matching': True,
}]
def _real_extract(self, url):
start_time = int_or_none(compat_parse_qs(
compat_urllib_parse_urlparse(url).query).get('fromTime', [None])[0])
video_id = self._match_id(url)
webpage = self._download_webpage(
'http://ok.ru/video/%s' % video_id, video_id)
error = self._search_regex(
r'[^>]+class="vp_video_stub_txt"[^>]*>([^<]+)<',
webpage, 'error', default=None)
if error:
raise ExtractorError(error, expected=True)
player = self._parse_json(
unescapeHTML(self._search_regex(
r'data-options=(?P<quote>["\'])(?P<player>{.+?%s.+?})(?P=quote)' % video_id,
webpage, 'player', group='player')),
video_id)
flashvars = player['flashvars']
metadata = flashvars.get('metadata')
if metadata:
metadata = self._parse_json(metadata, video_id)
else:
metadata = self._download_json(
compat_urllib_parse_unquote(flashvars['metadataUrl']),
video_id, 'Downloading metadata JSON')
movie = metadata['movie']
# Some embedded videos may not contain title in movie dict (e.g.
# http://ok.ru/video/62036049272859-0) thus we allow missing title
# here and it's going to be extracted later by an extractor that
# will process the actual embed.
provider = metadata.get('provider')
title = movie['title'] if provider == 'UPLOADED_ODKL' else movie.get('title')
thumbnail = movie.get('poster')
duration = int_or_none(movie.get('duration'))
author = metadata.get('author', {})
uploader_id = author.get('id')
uploader = author.get('name')
upload_date = unified_strdate(self._html_search_meta(
'ya:ovs:upload_date', webpage, 'upload date', default=None))
age_limit = None
adult = self._html_search_meta(
'ya:ovs:adult', webpage, 'age limit', default=None)
if adult:
age_limit = 18 if adult == 'true' else 0
like_count = int_or_none(metadata.get('likeCount'))
info = {
'id': video_id,
'title': title,
'thumbnail': thumbnail,
'duration': duration,
'upload_date': upload_date,
'uploader': uploader,
'uploader_id': uploader_id,
'like_count': like_count,
'age_limit': age_limit,
'start_time': start_time,
}
if provider == 'USER_YOUTUBE':
info.update({
'_type': 'url_transparent',
'url': movie['contentId'],
})
return info
quality = qualities(('4', '0', '1', '2', '3', '5'))
formats = [{
'url': f['url'],
'ext': 'mp4',
'format_id': f['name'],
} for f in metadata['videos']]
m3u8_url = metadata.get('hlsManifestUrl')
if m3u8_url:
formats.extend(self._extract_m3u8_formats(
m3u8_url, video_id, 'mp4', 'm3u8_native',
m3u8_id='hls', fatal=False))
dash_manifest = metadata.get('metadataEmbedded')
if dash_manifest:
formats.extend(self._parse_mpd_formats(
compat_etree_fromstring(dash_manifest), 'mpd'))
for fmt in formats:
fmt_type = self._search_regex(
r'\btype[/=](\d)', fmt['url'],
'format type', default=None)
if fmt_type:
fmt['quality'] = quality(fmt_type)
self._sort_formats(formats)
info['formats'] = formats
return info
|
sk2/autonetkit
|
refs/heads/master
|
autonetkit/load/model.py
|
1
|
from typing import List, Optional, Dict
from pydantic import BaseModel
from autonetkit.network_model.types import DeviceType, PortType, LinkId, PortId, NodeId
class StructuredPort(BaseModel):
id: Optional[PortId]
slot: Optional[int]
type: PortType
label: Optional[str]
data: Optional[Dict] = {}
loopback_zero: Optional[bool]
class StructuredNode(BaseModel):
id: Optional[NodeId]
type: Optional[DeviceType]
label: str
x: Optional[float]
y: Optional[float]
asn: Optional[int]
target: Optional[int]
loopback_zero_id: Optional[StructuredPort]
data: Optional[Dict] = {}
ports: List[StructuredPort] = []
class StructuredLink(BaseModel):
id: Optional[LinkId]
n1: str
n2: str
p1: int
p2: int
data: Optional[Dict] = {}
class StructuredTopology(BaseModel):
nodes: List[StructuredNode] = []
links: List[StructuredLink] = []
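# A minimal usage sketch (not part of the original module). Enum-typed fields
# such as StructuredNode.type are left unset because their concrete members
# live in autonetkit.network_model.types.
if __name__ == "__main__":
    topology = StructuredTopology(
        nodes=[StructuredNode(label="r1"), StructuredNode(label="r2")],
        links=[StructuredLink(n1="r1", n2="r2", p1=0, p2=0)],
    )
    print(topology.json()) # pydantic v1-style JSON serialisation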
|
ClearCorp/odoo-clearcorp
|
refs/heads/9.0
|
TODO-9.0/account_analytic_extended/__openerp__.py
|
3
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Addons modules by CLEARCORP S.A.
# Copyright (C) 2009-TODAY CLEARCORP S.A. (<http://clearcorp.co.cr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Account analytic extended',
'version': '1.0',
'category': 'Accounting & Finance',
'description': """This module adds a short name
to the analytical account and reference concatenated
with the short name of the analytical account.
""",
'author': 'ClearCorp',
'website': 'http://clearcorp.co.cr',
'complexity': 'normal',
'images' : [],
'depends': ['analytic'],
'data': [
'account_analytic_extended_view.xml',
],
'test' : [],
'demo': [],
'installable': True,
'auto_install': False,
'application': False,
'license': 'AGPL-3',
}
|
cosmo-ethz/CosmoHammer
|
refs/heads/master
|
cosmoHammer/util/SampleFileUtil.py
|
1
|
import pickle
import numpy as np
import cosmoHammer.Constants as c
class SampleFileUtil(object):
"""
Util for handling sample files
:param filePrefix: the prefix to use
:param master: True if the sampler instance is the master
:param reuseBurnin: True if the burn in data from a previous run should be used
"""
def __init__(self, filePrefix, master=True, reuseBurnin=False):
self.filePrefix = filePrefix
if(master):
if(reuseBurnin):
mode = "r"
else:
mode = "w"
self.samplesFileBurnin = open(self.filePrefix+c.BURNIN_SUFFIX, mode)
self.probFileBurnin = open(self.filePrefix+c.BURNIN_PROB_SUFFIX, mode)
self.samplesFile = open(self.filePrefix+c.FILE_SUFFIX, "w")
self.probFile = open(self.filePrefix+c.PROB_SUFFIX, "w")
def importFromFile(self, filePath):
values = np.loadtxt(filePath, dtype=float)
return values
def storeRandomState(self, filePath, randomState):
with open(filePath,'wb') as f:
pickle.dump(randomState, f)
def importRandomState(self, filePath):
with open(filePath,'rb') as f:
state = pickle.load(f)
return state
def persistBurninValues(self, pos, prob, data):
self.persistValues(self.samplesFileBurnin, self.probFileBurnin, pos, prob, data)
def persistSamplingValues(self, pos, prob, data):
self.persistValues(self.samplesFile, self.probFile, pos, prob, data)
def persistValues(self, posFile, probFile, pos, prob, data):
"""
Writes the walker positions and the likelihood to the disk
"""
posFile.write("\n".join(["\t".join([str(q) for q in p]) for p in pos]))
posFile.write("\n")
posFile.flush()
probFile.write("\n".join([str(p) for p in prob]))
probFile.write("\n")
        probFile.flush()
def close(self):
self.samplesFileBurnin.close()
self.probFileBurnin.close()
self.samplesFile.close()
self.probFile.close()
def __str__(self, *args, **kwargs):
return "SampleFileUtil"
|
hj3938/panda3d
|
refs/heads/master
|
direct/src/motiontrail/MotionTrail.py
|
8
|
from panda3d.core import *
from panda3d.direct import *
from direct.task import Task
from direct.showbase.DirectObject import DirectObject
def remove_task ( ):
if (MotionTrail.task_added):
total_motion_trails = len (MotionTrail.motion_trail_list)
if (total_motion_trails > 0):
print "warning:", total_motion_trails, "motion trails still exist when motion trail task is removed"
MotionTrail.motion_trail_list = [ ]
taskMgr.remove (MotionTrail.motion_trail_task_name)
print "MotionTrail task removed"
MotionTrail.task_added = False
return
class MotionTrailVertex:
def __init__(self, vertex_id, vertex_function, context):
self.vertex_id = vertex_id
self.vertex_function = vertex_function
self.context = context
self.vertex = Vec4 (0.0, 0.0, 0.0, 1.0)
# default
self.start_color = Vec4 (1.0, 1.0, 1.0, 1.0)
self.end_color = Vec4 (0.0, 0.0, 0.0, 1.0)
self.v = 0.0
class MotionTrailFrame:
def __init__ (self, current_time, transform):
self.time = current_time
self.transform = transform
class MotionTrail(NodePath, DirectObject):
notify = directNotify.newCategory ("MotionTrail")
task_added = False
motion_trail_list = [ ]
motion_trail_task_name = "motion_trail_task"
global_enable = True
@classmethod
def setGlobalEnable (self, enable):
MotionTrail.global_enable = enable
def __init__ (self,name,parent_node_path):
DirectObject.__init__(self)
NodePath.__init__ (self,name)
# required initialization
self.active = True
self.enable = True
self.pause = False
self.pause_time = 0.0
self.fade = False
self.fade_end = False
self.fade_start_time = 0.0
self.fade_color_scale = 1.0
self.total_vertices = 0
self.last_update_time = 0.0
self.texture = None
self.vertex_list = [ ]
self.frame_list = [ ]
self.parent_node_path = parent_node_path
self.previous_matrix = None
self.calculate_relative_matrix = False
        self.playing = False
# default options
self.continuous_motion_trail = True
self.color_scale = 1.0
self.time_window = 1.0
self.sampling_time = 0.0
self.square_t = True
# self.task_transform = False
self.root_node_path = None
# node path states
self.reparentTo (parent_node_path)
self.geom_node = GeomNode ("motion_trail")
self.geom_node_path = self.attachNewNode(self.geom_node)
node_path = self.geom_node_path
### set render states
node_path.setTwoSided (True)
# set additive blend effects
node_path.setTransparency (True)
node_path.setDepthWrite (False)
node_path.node ( ).setAttrib (ColorBlendAttrib.make (ColorBlendAttrib.MAdd))
# do not light
node_path.setLightOff ( )
# disable writes to destination alpha, write out rgb colors only
        node_path.setAttrib (ColorWriteAttrib.make (ColorWriteAttrib.CRed | ColorWriteAttrib.CGreen | ColorWriteAttrib.CBlue))
if (MotionTrail.task_added == False):
# taskMgr.add (self.motion_trail_task, "motion_trail_task", priority = 50)
taskMgr.add (self.motion_trail_task, MotionTrail.motion_trail_task_name)
self.acceptOnce ("clientLogout", remove_task)
MotionTrail.task_added = True
self.relative_to_render = False
self.use_nurbs = False
self.resolution_distance = 0.5
self.cmotion_trail = CMotionTrail ( )
self.cmotion_trail.setGeomNode (self.geom_node)
self.modified_vertices = True
if base.config.GetBool('want-python-motion-trails', 0):
self.use_python_version = True
else:
self.use_python_version = False
return
def delete(self):
self.reset_motion_trail()
self.reset_motion_trail_geometry()
self.cmotion_trail.resetVertexList ( )
self.removeNode()
return
def print_matrix (self, matrix):
separator = ' '
print matrix.getCell (0, 0), separator, matrix.getCell (0, 1), separator, matrix.getCell (0, 2), separator, matrix.getCell (0, 3)
print matrix.getCell (1, 0), separator, matrix.getCell (1, 1), separator, matrix.getCell (1, 2), separator, matrix.getCell (1, 3)
print matrix.getCell (2, 0), separator, matrix.getCell (2, 1), separator, matrix.getCell (2, 2), separator, matrix.getCell (2, 3)
print matrix.getCell (3, 0), separator, matrix.getCell (3, 1), separator, matrix.getCell (3, 2), separator, matrix.getCell (3, 3)
def motion_trail_task (self, task):
current_time = task.time
total_motion_trails = len (MotionTrail.motion_trail_list)
index = 0
while (index < total_motion_trails):
motion_trail = MotionTrail.motion_trail_list [index]
if (MotionTrail.global_enable):
if (motion_trail.use_python_version):
# Python version
if (motion_trail.active and motion_trail.check_for_update (current_time)):
transform = None
if (motion_trail.root_node_path != None) and (motion_trail.root_node_path != render):
motion_trail.root_node_path.update ( )
if (motion_trail.root_node_path and (motion_trail.relative_to_render == False)):
transform = motion_trail.getMat(motion_trail.root_node_path)
else:
transform = Mat4 (motion_trail.getNetTransform ( ).getMat ( ))
if (transform != None):
motion_trail.update_motion_trail (current_time, transform)
else:
# C++ version
if (motion_trail.active and motion_trail.cmotion_trail.checkForUpdate (current_time)):
transform = None
if (motion_trail.root_node_path != None) and (motion_trail.root_node_path != render):
motion_trail.root_node_path.update ( )
if (motion_trail.root_node_path and (motion_trail.relative_to_render == False)):
transform = motion_trail.getMat(motion_trail.root_node_path)
else:
transform = Mat4 (motion_trail.getNetTransform ( ).getMat ( ))
if (transform != None):
motion_trail.transferVertices ( )
motion_trail.cmotion_trail.updateMotionTrail (current_time, transform)
else:
motion_trail.reset_motion_trail()
motion_trail.reset_motion_trail_geometry()
index += 1
return Task.cont
def add_vertex (self, vertex_id, vertex_function, context):
motion_trail_vertex = MotionTrailVertex (vertex_id, vertex_function, context)
total_vertices = len (self.vertex_list)
self.vertex_list [total_vertices : total_vertices] = [motion_trail_vertex]
self.total_vertices = len (self.vertex_list)
self.modified_vertices = True
return motion_trail_vertex
def set_vertex_color (self, vertex_id, start_color, end_color):
if (vertex_id >= 0 and vertex_id < self.total_vertices):
motion_trail_vertex = self.vertex_list [vertex_id]
motion_trail_vertex.start_color = start_color
motion_trail_vertex.end_color = end_color
self.modified_vertices = True
return
def set_texture (self, texture):
self.texture = texture
if (texture):
self.geom_node_path.setTexture (texture)
# texture.setWrapU(Texture.WMClamp)
# texture.setWrapV(Texture.WMClamp)
else:
self.geom_node_path.clearTexture ( )
self.modified_vertices = True
return
def update_vertices (self):
total_vertices = len (self.vertex_list)
self.total_vertices = total_vertices
if (total_vertices >= 2):
vertex_index = 0
while (vertex_index < total_vertices):
motion_trail_vertex = self.vertex_list [vertex_index]
motion_trail_vertex.vertex = motion_trail_vertex.vertex_function (motion_trail_vertex, motion_trail_vertex.vertex_id, motion_trail_vertex.context)
vertex_index += 1
# calculate v coordinate
# this is based on the number of vertices only and not on the relative positions of the vertices
vertex_index = 0
float_vertex_index = 0.0
float_total_vertices = 0.0
float_total_vertices = total_vertices - 1.0
while (vertex_index < total_vertices):
motion_trail_vertex = self.vertex_list [vertex_index]
motion_trail_vertex.v = float_vertex_index / float_total_vertices
vertex_index += 1
float_vertex_index += 1.0
# print "motion_trail_vertex.v", motion_trail_vertex.v
self.modified_vertices = True
return
def transferVertices (self):
# transfer only on modification
if (self.modified_vertices):
self.cmotion_trail.setParameters (self.sampling_time, self.time_window, self.texture != None, self.calculate_relative_matrix, self.use_nurbs, self.resolution_distance)
self.cmotion_trail.resetVertexList ( )
vertex_index = 0
total_vertices = len (self.vertex_list)
while (vertex_index < total_vertices):
motion_trail_vertex = self.vertex_list [vertex_index]
self.cmotion_trail.addVertex (motion_trail_vertex.vertex, motion_trail_vertex.start_color, motion_trail_vertex.end_color, motion_trail_vertex.v)
vertex_index += 1
self.modified_vertices = False
return
def register_motion_trail (self):
MotionTrail.motion_trail_list = MotionTrail.motion_trail_list + [self]
return
def unregister_motion_trail (self):
if (self in MotionTrail.motion_trail_list):
MotionTrail.motion_trail_list.remove (self)
return
def begin_geometry (self):
        self.vertex_index = 0
if (self.texture != None):
self.format = GeomVertexFormat.getV3c4t2 ( )
else:
self.format = GeomVertexFormat.getV3c4 ( )
self.vertex_data = GeomVertexData ("vertices", self.format, Geom.UHStatic)
self.vertex_writer = GeomVertexWriter (self.vertex_data, "vertex")
self.color_writer = GeomVertexWriter (self.vertex_data, "color")
if (self.texture != None):
self.texture_writer = GeomVertexWriter (self.vertex_data, "texcoord")
self.triangles = GeomTriangles (Geom.UHStatic)
def add_geometry_quad (self, v0, v1, v2, v3, c0, c1, c2, c3, t0, t1, t2, t3):
self.vertex_writer.addData3f (v0 [0], v0 [1], v0 [2])
self.vertex_writer.addData3f (v1 [0], v1 [1], v1 [2])
self.vertex_writer.addData3f (v2 [0], v2 [1], v2 [2])
self.vertex_writer.addData3f (v3 [0], v3 [1], v3 [2])
self.color_writer.addData4f (c0)
self.color_writer.addData4f (c1)
self.color_writer.addData4f (c2)
self.color_writer.addData4f (c3)
if (self.texture != None):
self.texture_writer.addData2f (t0)
self.texture_writer.addData2f (t1)
self.texture_writer.addData2f (t2)
self.texture_writer.addData2f (t3)
        vertex_index = self.vertex_index
self.triangles.addVertex (vertex_index + 0)
self.triangles.addVertex (vertex_index + 1)
self.triangles.addVertex (vertex_index + 2)
self.triangles.closePrimitive ( )
self.triangles.addVertex (vertex_index + 1)
self.triangles.addVertex (vertex_index + 3)
self.triangles.addVertex (vertex_index + 2)
self.triangles.closePrimitive ( )
self.vertex_index += 4
def end_geometry (self):
self.geometry = Geom (self.vertex_data)
self.geometry.addPrimitive (self.triangles)
self.geom_node.removeAllGeoms ( )
self.geom_node.addGeom (self.geometry)
def check_for_update (self, current_time):
state = False
if ((current_time - self.last_update_time) >= self.sampling_time):
state = True
if (self.pause):
state = False
        state = state and self.enable
        return state
def update_motion_trail (self, current_time, transform):
if (len (self.frame_list) >= 1):
if (transform == self.frame_list [0].transform):
# ignore duplicate transform updates
return
if (self.check_for_update (current_time)):
            color_scale = self.color_scale
if (self.fade):
elapsed_time = current_time - self.fade_start_time
if (elapsed_time < 0.0):
elapsed_time = 0.0
print "elapsed_time < 0", elapsed_time
if (elapsed_time < self.fade_time):
color_scale = (1.0 - (elapsed_time / self.fade_time)) * color_scale
else:
color_scale = 0.0
self.fade_end = True
self.last_update_time = current_time
# remove expired frames
minimum_time = current_time - self.time_window
index = 0
last_frame_index = len (self.frame_list) - 1
while (index <= last_frame_index):
motion_trail_frame = self.frame_list [last_frame_index - index]
if (motion_trail_frame.time >= minimum_time):
break
index += 1
if (index > 0):
self.frame_list [last_frame_index - index: last_frame_index + 1] = [ ]
# add new frame to beginning of list
motion_trail_frame = MotionTrailFrame (current_time, transform)
self.frame_list = [motion_trail_frame] + self.frame_list
# convert frames and vertices to geometry
total_frames = len (self.frame_list)
"""
print "total_frames", total_frames
index = 0;
while (index < total_frames):
motion_trail_frame = self.frame_list [index]
print "frame time", index, motion_trail_frame.time
index += 1
"""
if ((total_frames >= 2) and (self.total_vertices >= 2)):
self.begin_geometry ( )
total_segments = total_frames - 1
last_motion_trail_frame = self.frame_list [total_segments]
minimum_time = last_motion_trail_frame.time
delta_time = current_time - minimum_time
if (self.calculate_relative_matrix):
inverse_matrix = Mat4 (transform)
inverse_matrix.invertInPlace ( )
if (self.use_nurbs and (total_frames >= 5)):
total_distance = 0.0
vector = Vec3 ( )
nurbs_curve_evaluator_list = [ ]
total_vertex_segments = self.total_vertices - 1
# create a NurbsCurveEvaluator for each vertex (the starting point for the trail)
index = 0
while (index < self.total_vertices):
nurbs_curve_evaluator = NurbsCurveEvaluator ( )
nurbs_curve_evaluator.reset (total_segments)
nurbs_curve_evaluator_list = nurbs_curve_evaluator_list + [nurbs_curve_evaluator]
index += 1
# add vertices to each NurbsCurveEvaluator
segment_index = 0
while (segment_index < total_segments):
motion_trail_frame_start = self.frame_list [segment_index]
motion_trail_frame_end = self.frame_list [segment_index + 1]
vertex_segement_index = 0
if (self.calculate_relative_matrix):
start_transform = Mat4 ( )
end_transform = Mat4 ( )
start_transform.multiply (motion_trail_frame_start.transform, inverse_matrix)
end_transform.multiply (motion_trail_frame_end.transform, inverse_matrix)
else:
start_transform = motion_trail_frame_start.transform
end_transform = motion_trail_frame_end.transform
motion_trail_vertex_start = self.vertex_list [0]
v0 = start_transform.xform (motion_trail_vertex_start.vertex)
v2 = end_transform.xform (motion_trail_vertex_start.vertex)
nurbs_curve_evaluator = nurbs_curve_evaluator_list [vertex_segement_index]
nurbs_curve_evaluator.setVertex (segment_index, v0)
while (vertex_segement_index < total_vertex_segments):
motion_trail_vertex_start = self.vertex_list [vertex_segement_index]
motion_trail_vertex_end = self.vertex_list [vertex_segement_index + 1]
v1 = start_transform.xform (motion_trail_vertex_end.vertex)
v3 = end_transform.xform (motion_trail_vertex_end.vertex)
nurbs_curve_evaluator = nurbs_curve_evaluator_list [vertex_segement_index + 1]
nurbs_curve_evaluator.setVertex (segment_index, v1)
if (vertex_segement_index == (total_vertex_segments - 1)):
v = v1 - v3
vector.set (v[0], v[1], v[2])
distance = vector.length()
total_distance += distance
vertex_segement_index += 1
segment_index += 1
# evaluate NurbsCurveEvaluator for each vertex
index = 0
nurbs_curve_result_list = [ ]
while (index < self.total_vertices):
nurbs_curve_evaluator = nurbs_curve_evaluator_list [index]
nurbs_curve_result = nurbs_curve_evaluator.evaluate ( )
nurbs_curve_result_list = nurbs_curve_result_list + [nurbs_curve_result]
nurbs_start_t = nurbs_curve_result.getStartT()
nurbs_end_t = nurbs_curve_result.getEndT()
index += 1
# create quads from NurbsCurveResult
total_curve_segments = total_distance / self.resolution_distance
if (total_curve_segments < total_segments):
total_curve_segments = total_segments;
v0 = Vec3 ( )
v1 = Vec3 ( )
v2 = Vec3 ( )
v3 = Vec3 ( )
def one_minus_x (x):
x = 1.0 - x
if (x < 0.0):
x = 0.0
return x
curve_segment_index = 0.0
while (curve_segment_index < total_curve_segments):
vertex_segement_index = 0
if (True):
st = curve_segment_index / total_curve_segments
et = (curve_segment_index + 1.0) / total_curve_segments
else:
st = curve_segment_index / total_segments
et = (curve_segment_index + 1.0) / total_segments
start_t = st
end_t = et
if (self.square_t):
start_t *= start_t
end_t *= end_t
motion_trail_vertex_start = self.vertex_list [0]
vertex_start_color = motion_trail_vertex_start.end_color + (motion_trail_vertex_start.start_color - motion_trail_vertex_start.end_color)
color_start_t = color_scale * start_t
color_end_t = color_scale * end_t
c0 = vertex_start_color * one_minus_x (color_start_t)
c2 = vertex_start_color * one_minus_x (color_end_t)
t0 = Vec2 (one_minus_x (st), motion_trail_vertex_start.v)
t2 = Vec2 (one_minus_x (et), motion_trail_vertex_start.v)
while (vertex_segement_index < total_vertex_segments):
motion_trail_vertex_start = self.vertex_list [vertex_segement_index]
motion_trail_vertex_end = self.vertex_list [vertex_segement_index + 1]
start_nurbs_curve_result = nurbs_curve_result_list [vertex_segement_index]
end_nurbs_curve_result = nurbs_curve_result_list [vertex_segement_index + 1]
start_nurbs_start_t = start_nurbs_curve_result.getStartT()
start_nurbs_end_t = start_nurbs_curve_result.getEndT()
end_nurbs_start_t = end_nurbs_curve_result.getStartT()
end_nurbs_end_t = end_nurbs_curve_result.getEndT()
start_delta_t = (start_nurbs_end_t - start_nurbs_start_t)
end_delta_t = (end_nurbs_end_t - end_nurbs_start_t)
start_nurbs_curve_result.evalPoint (start_nurbs_start_t + (start_delta_t * st), v0);
end_nurbs_curve_result.evalPoint (end_nurbs_start_t + (end_delta_t * st), v1);
start_nurbs_curve_result.evalPoint (start_nurbs_start_t + (start_delta_t * et), v2);
end_nurbs_curve_result.evalPoint (end_nurbs_start_t + (end_delta_t * et), v3);
# color
vertex_end_color = motion_trail_vertex_end.end_color + (motion_trail_vertex_end.start_color - motion_trail_vertex_end.end_color)
c1 = vertex_end_color * one_minus_x (color_start_t)
c3 = vertex_end_color * one_minus_x (color_end_t)
# uv
t1 = Vec2 (one_minus_x (st), motion_trail_vertex_end.v)
t3 = Vec2 (one_minus_x (et), motion_trail_vertex_end.v)
self.add_geometry_quad (v0, v1, v2, v3, c0, c1, c2, c3, t0, t1, t2, t3)
# reuse calculations
c0 = c1
c2 = c3
t0 = t1
t2 = t3
vertex_segement_index += 1
curve_segment_index += 1.0
else:
segment_index = 0
while (segment_index < total_segments):
motion_trail_frame_start = self.frame_list [segment_index]
motion_trail_frame_end = self.frame_list [segment_index + 1]
start_t = (motion_trail_frame_start.time - minimum_time) / delta_time
end_t = (motion_trail_frame_end.time - minimum_time) / delta_time
st = start_t
et = end_t
if (self.square_t):
start_t *= start_t
end_t *= end_t
vertex_segement_index = 0
total_vertex_segments = self.total_vertices - 1
if (self.calculate_relative_matrix):
start_transform = Mat4 ( )
end_transform = Mat4 ( )
start_transform.multiply (motion_trail_frame_start.transform, inverse_matrix)
end_transform.multiply (motion_trail_frame_end.transform, inverse_matrix)
else:
start_transform = motion_trail_frame_start.transform
end_transform = motion_trail_frame_end.transform
motion_trail_vertex_start = self.vertex_list [0]
v0 = start_transform.xform (motion_trail_vertex_start.vertex)
v2 = end_transform.xform (motion_trail_vertex_start.vertex)
vertex_start_color = motion_trail_vertex_start.end_color + (motion_trail_vertex_start.start_color - motion_trail_vertex_start.end_color)
color_start_t = color_scale * start_t
color_end_t = color_scale * end_t
c0 = vertex_start_color * color_start_t
c2 = vertex_start_color * color_end_t
t0 = Vec2 (st, motion_trail_vertex_start.v)
t2 = Vec2 (et, motion_trail_vertex_start.v)
while (vertex_segement_index < total_vertex_segments):
motion_trail_vertex_start = self.vertex_list [vertex_segement_index]
motion_trail_vertex_end = self.vertex_list [vertex_segement_index + 1]
v1 = start_transform.xform (motion_trail_vertex_end.vertex)
v3 = end_transform.xform (motion_trail_vertex_end.vertex)
# color
vertex_end_color = motion_trail_vertex_end.end_color + (motion_trail_vertex_end.start_color - motion_trail_vertex_end.end_color)
c1 = vertex_end_color * color_start_t
c3 = vertex_end_color * color_end_t
# uv
t1 = Vec2 (st, motion_trail_vertex_end.v)
t3 = Vec2 (et, motion_trail_vertex_end.v)
self.add_geometry_quad (v0, v1, v2, v3, c0, c1, c2, c3, t0, t1, t2, t3)
# reuse calculations
v0 = v1
v2 = v3
c0 = c1
c2 = c3
t0 = t1
t2 = t3
vertex_segement_index += 1
segment_index += 1
self.end_geometry ( )
return
def enable_motion_trail(self, enable):
self.enable = enable
return
def reset_motion_trail(self):
self.frame_list = [ ]
        self.cmotion_trail.reset ( )
return
def reset_motion_trail_geometry(self):
if (self.geom_node != None):
self.geom_node.removeAllGeoms ( )
return
def attach_motion_trail (self):
self.reset_motion_trail ( )
return
def begin_motion_trail (self):
if (self.continuous_motion_trail == False):
self.reset_motion_trail ( )
        self.active = True
        self.playing = True
return
def end_motion_trail (self):
if (self.continuous_motion_trail == False):
self.active = False
self.reset_motion_trail ( )
self.reset_motion_trail_geometry ( )
        self.playing = False
return
# the following functions are not currently supported in the C++ version
def set_fade (self, time, current_time):
if (self.pause == False):
self.fade_color_scale = 1.0
if (time == 0.0):
self.fade = False
else:
self.fade_start_time = current_time
self.fade_time = time
self.fade = True
return
def pause_motion_trail(self, current_time):
if (self.pause == False):
self.pause_time = current_time
self.pause = True
return
def resume_motion_trail(self, current_time):
if (self.pause):
delta_time = current_time - self.pause_time
frame_index = 0
total_frames = len (self.frame_list)
while (frame_index < total_frames):
motion_trail_frame = self.frame_list [frame_index]
motion_trail_frame.time += delta_time
frame_index += 1
if (self.fade):
self.fade_start_time += delta_time
self.pause = False
return
def toggle_pause_motion_trail (self, current_time):
if (self.pause):
self.resume_motion_trail (current_time)
else:
self.pause_motion_trail (current_time)
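# A minimal usage sketch (not runnable standalone: it assumes a running
# ShowBase so that `render`, `taskMgr` and `base` exist, and the vertex
# function body below is made up):
#
#   def trail_vertex(motion_trail_vertex, vertex_id, context):
#       return Vec4(0.0, 0.0, float(vertex_id), 1.0)
#
#   trail = MotionTrail("trail", some_node_path)
#   trail.add_vertex(0, trail_vertex, None)
#   trail.add_vertex(1, trail_vertex, None)
#   trail.update_vertices()
#   trail.register_motion_trail()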
|
fnouama/intellij-community
|
refs/heads/master
|
python/testData/highlighting/docStrings.py
|
83
|
# bg is always black.
# effect is white
# doc comment: blue bold
def <info descr="null" type="INFORMATION">foo</info>():
<info descr="null" type="INFORMATION" foreground="0x0000ff" background="0x000000" effectcolor="0xffffff" effecttype="BOXED" fonttype="1">"Func doc string"</info>
pass
class <info descr="null" type="INFORMATION">Boo</info>:
<info descr="null" type="INFORMATION" foreground="0x0000ff" background="0x000000" effectcolor="0xffffff" effecttype="BOXED" fonttype="1">"Class doc string"</info>
pass
class <info descr="null" type="INFORMATION">Moo</info>:
def <info descr="null" type="INFORMATION">meth</info>(self):
<info descr="null" type="INFORMATION" foreground="0x0000ff" background="0x000000" effectcolor="0xffffff" effecttype="BOXED" fonttype="1">"Meth doc string"</info>
pass
|
fhaoquan/kbengine
|
refs/heads/master
|
kbe/res/scripts/common/Lib/ctypes/macholib/dyld.py
|
152
|
"""
dyld emulation
"""
import os
from ctypes.macholib.framework import framework_info
from ctypes.macholib.dylib import dylib_info
from itertools import *
__all__ = [
'dyld_find', 'framework_find',
'framework_info', 'dylib_info',
]
# These are the defaults as per man dyld(1)
#
DEFAULT_FRAMEWORK_FALLBACK = [
os.path.expanduser("~/Library/Frameworks"),
"/Library/Frameworks",
"/Network/Library/Frameworks",
"/System/Library/Frameworks",
]
DEFAULT_LIBRARY_FALLBACK = [
os.path.expanduser("~/lib"),
"/usr/local/lib",
"/lib",
"/usr/lib",
]
def dyld_env(env, var):
if env is None:
env = os.environ
rval = env.get(var)
if rval is None:
return []
return rval.split(':')
def dyld_image_suffix(env=None):
if env is None:
env = os.environ
return env.get('DYLD_IMAGE_SUFFIX')
def dyld_framework_path(env=None):
return dyld_env(env, 'DYLD_FRAMEWORK_PATH')
def dyld_library_path(env=None):
return dyld_env(env, 'DYLD_LIBRARY_PATH')
def dyld_fallback_framework_path(env=None):
return dyld_env(env, 'DYLD_FALLBACK_FRAMEWORK_PATH')
def dyld_fallback_library_path(env=None):
return dyld_env(env, 'DYLD_FALLBACK_LIBRARY_PATH')
def dyld_image_suffix_search(iterator, env=None):
"""For a potential path iterator, add DYLD_IMAGE_SUFFIX semantics"""
suffix = dyld_image_suffix(env)
if suffix is None:
return iterator
def _inject(iterator=iterator, suffix=suffix):
for path in iterator:
if path.endswith('.dylib'):
yield path[:-len('.dylib')] + suffix + '.dylib'
else:
yield path + suffix
yield path
return _inject()
def dyld_override_search(name, env=None):
# If DYLD_FRAMEWORK_PATH is set and this dylib_name is a
# framework name, use the first file that exists in the framework
# path if any. If there is none go on to search the DYLD_LIBRARY_PATH
# if any.
framework = framework_info(name)
if framework is not None:
for path in dyld_framework_path(env):
yield os.path.join(path, framework['name'])
# If DYLD_LIBRARY_PATH is set then use the first file that exists
# in the path. If none use the original name.
for path in dyld_library_path(env):
yield os.path.join(path, os.path.basename(name))
def dyld_executable_path_search(name, executable_path=None):
# If we haven't done any searching and found a library and the
# dylib_name starts with "@executable_path/" then construct the
# library name.
if name.startswith('@executable_path/') and executable_path is not None:
yield os.path.join(executable_path, name[len('@executable_path/'):])
def dyld_default_search(name, env=None):
yield name
framework = framework_info(name)
if framework is not None:
fallback_framework_path = dyld_fallback_framework_path(env)
for path in fallback_framework_path:
yield os.path.join(path, framework['name'])
fallback_library_path = dyld_fallback_library_path(env)
for path in fallback_library_path:
yield os.path.join(path, os.path.basename(name))
if framework is not None and not fallback_framework_path:
for path in DEFAULT_FRAMEWORK_FALLBACK:
yield os.path.join(path, framework['name'])
if not fallback_library_path:
for path in DEFAULT_LIBRARY_FALLBACK:
yield os.path.join(path, os.path.basename(name))
def dyld_find(name, executable_path=None, env=None):
"""
Find a library or framework using dyld semantics
"""
for path in dyld_image_suffix_search(chain(
dyld_override_search(name, env),
dyld_executable_path_search(name, executable_path),
dyld_default_search(name, env),
), env):
if os.path.isfile(path):
return path
raise ValueError("dylib %s could not be found" % (name,))
def framework_find(fn, executable_path=None, env=None):
"""
Find a framework using dyld semantics in a very loose manner.
Will take input such as:
Python
Python.framework
Python.framework/Versions/Current
"""
try:
return dyld_find(fn, executable_path=executable_path, env=env)
except ValueError as e:
pass
fmwk_index = fn.rfind('.framework')
if fmwk_index == -1:
fmwk_index = len(fn)
fn += '.framework'
fn = os.path.join(fn, os.path.basename(fn[:fmwk_index]))
try:
return dyld_find(fn, executable_path=executable_path, env=env)
except ValueError:
raise e
def test_dyld_find():
env = {}
assert dyld_find('libSystem.dylib') == '/usr/lib/libSystem.dylib'
assert dyld_find('System.framework/System') == '/System/Library/Frameworks/System.framework/System'
if __name__ == '__main__':
test_dyld_find()
|
rolandovillca/python_introduction_basic
|
refs/heads/master
|
web/client_get_with_urllib2.py
|
4
|
'''
urllib2 - Library for opening URLs
A library for opening URLs that can be extended by defining custom protocol
handlers.
The urllib2 module defines functions and classes which help in opening URLs
(mostly HTTP) in a complex world - basic and digest authentication,
redirections, cookies and more.
The urllib2 module provides an updated API for using internet resources
identified by URLs. It is designed to be extended by individual applications to
support new protocols or add variations to existing protocols (such as handling
HTTP basic authentication).
https://pymotw.com/2/urllib2/
'''
import urllib2
# EXAMPLE 1: HTTP GET:
# ==============================================================================
# As with urllib, an HTTP GET operation is the simplest use of urllib2.
# Pass the URL to urlopen() to get a "file-like" handle to the remote data.
url = 'http://www.google.com'
resp = urllib2.urlopen(url)
print 'Response: ', resp
print 'Url: ', resp.geturl()
print 'Code: ', resp.code
data = resp.read() # read the body once; subsequent read() calls return ''
print 'Html: ', data
print
headers = resp.info()
print 'Date: ', headers['date']
print 'Server: ', headers['server']
print 'Headers:'
print headers
print
print 'Length: ', len(data)
print 'Data:'
print data
print
# EXAMPLE 2: The file-like object returned by urlopen() is iterable:
# ==============================================================================
resp = urllib2.urlopen(url) # re-open the URL; the first handle was fully consumed
for line in resp:
print line.rstrip()
# EXAMPLE 3: Encoding Arguments:
# ==============================================================================
# Arguments can be passed to the server by encoding them with urllib.urlencode()
# and appending them to the URL.
import urllib
url = 'http://www.google.com?'
query_args = { 'q':'query string', 'foo':'bar' }
encoded_args = urllib.urlencode(query_args)
print 'Encoded: ', encoded_args
url = url + encoded_args
print urllib2.urlopen(url).read()
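# For reference, the encoded arguments above come out as
# 'q=query+string&foo=bar' (spaces become '+'; the key order follows dict
# iteration order and may vary across Python versions).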
|
mahak/keystone
|
refs/heads/master
|
keystone/common/sql/expand_repo/versions/023_expand_add_second_password_column_for_expanded_hash_sizes.py
|
2
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sql
def upgrade(migrate_engine):
meta = sql.MetaData()
meta.bind = migrate_engine
    # NOTE(notmorgan): To support the full range of scrypt and pbkdf2 password
    # hash lengths, this should be closer to varchar(1500) instead of
    # varchar(255).
password_hash = sql.Column('password_hash', sql.String(255), nullable=True)
password_table = sql.Table('password', meta, autoload=True)
password_table.create_column(password_hash)
|
hlzz/dotfiles
|
refs/heads/master
|
graphics/VTK-7.0.0/ThirdParty/Twisted/twisted/names/error.py
|
2
|
# -*- test-case-name: twisted.names.test -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Exception class definitions for Twisted Names.
"""
from __future__ import division, absolute_import
from twisted.internet.defer import TimeoutError
class DomainError(ValueError):
"""
Indicates a lookup failed because there were no records matching the given
C{name, class, type} triple.
"""
class AuthoritativeDomainError(ValueError):
"""
Indicates a lookup failed for a name for which this server is authoritative
because there were no records matching the given C{name, class, type}
triple.
"""
class DNSQueryTimeoutError(TimeoutError):
"""
Indicates a lookup failed due to a timeout.
@ivar id: The id of the message which timed out.
"""
def __init__(self, id):
TimeoutError.__init__(self)
self.id = id
class DNSFormatError(DomainError):
"""
Indicates a query failed with a result of L{twisted.names.dns.EFORMAT}.
"""
class DNSServerError(DomainError):
"""
Indicates a query failed with a result of L{twisted.names.dns.ESERVER}.
"""
class DNSNameError(DomainError):
"""
Indicates a query failed with a result of L{twisted.names.dns.ENAME}.
"""
class DNSNotImplementedError(DomainError):
"""
Indicates a query failed with a result of L{twisted.names.dns.ENOTIMP}.
"""
class DNSQueryRefusedError(DomainError):
"""
Indicates a query failed with a result of L{twisted.names.dns.EREFUSED}.
"""
class DNSUnknownError(DomainError):
"""
Indicates a query failed with an unknown result.
"""
class ResolverError(Exception):
"""
Indicates a query failed because of a decision made by the local
resolver object.
"""
__all__ = [
'DomainError', 'AuthoritativeDomainError', 'DNSQueryTimeoutError',
'DNSFormatError', 'DNSServerError', 'DNSNameError',
'DNSNotImplementedError', 'DNSQueryRefusedError',
'DNSUnknownError', 'ResolverError']
|
mne-tools/mne-tools.github.io
|
refs/heads/main
|
dev/_downloads/166d565c496703ca2cd5bf0481983599/20_cluster_1samp_spatiotemporal.py
|
10
|
"""
=================================================================
Permutation t-test on source data with spatio-temporal clustering
=================================================================
This example tests if the evoked response is significantly different between
two conditions across subjects. Here just for demonstration purposes
we simulate data from multiple subjects using one subject's data.
The multiple comparisons problem is addressed with a cluster-level
permutation test across space and time.
"""
# Authors: Alexandre Gramfort <[email protected]>
# Eric Larson <[email protected]>
# License: BSD (3-clause)
import os.path as op
import numpy as np
from numpy.random import randn
from scipy import stats as stats
import mne
from mne.epochs import equalize_epoch_counts
from mne.stats import (spatio_temporal_cluster_1samp_test,
summarize_clusters_stc)
from mne.minimum_norm import apply_inverse, read_inverse_operator
from mne.datasets import sample
print(__doc__)
###############################################################################
# Set parameters
# --------------
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
subjects_dir = data_path + '/subjects'
src_fname = subjects_dir + '/fsaverage/bem/fsaverage-ico-5-src.fif'
tmin = -0.2
tmax = 0.3 # Use a lower tmax to reduce multiple comparisons
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)
###############################################################################
# Read epochs for all channels, removing a bad one
# ------------------------------------------------
raw.info['bads'] += ['MEG 2443']
picks = mne.pick_types(raw.info, meg=True, eog=True, exclude='bads')
event_id = 1 # L auditory
reject = dict(grad=1000e-13, mag=4000e-15, eog=150e-6)
epochs1 = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject, preload=True)
event_id = 3 # L visual
epochs2 = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject, preload=True)
# Equalize trial counts to eliminate bias (which would otherwise be
# introduced by the abs() performed below)
equalize_epoch_counts([epochs1, epochs2])
###############################################################################
# Transform to source space
# -------------------------
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
snr = 3.0
lambda2 = 1.0 / snr ** 2
method = "dSPM" # use dSPM method (could also be MNE, sLORETA, or eLORETA)
inverse_operator = read_inverse_operator(fname_inv)
sample_vertices = [s['vertno'] for s in inverse_operator['src']]
# Let's average and compute inverse, resampling to speed things up
evoked1 = epochs1.average()
evoked1.resample(50, npad='auto')
condition1 = apply_inverse(evoked1, inverse_operator, lambda2, method)
evoked2 = epochs2.average()
evoked2.resample(50, npad='auto')
condition2 = apply_inverse(evoked2, inverse_operator, lambda2, method)
# Let's only deal with t > 0, cropping to reduce multiple comparisons
condition1.crop(0, None)
condition2.crop(0, None)
tmin = condition1.tmin
tstep = condition1.tstep * 1000 # convert to milliseconds
###############################################################################
# Transform to common cortical space
# ----------------------------------
#
# Normally you would read in estimates across several subjects and morph
# them to the same cortical space (e.g. fsaverage). For example purposes,
# we will simulate this by just having each "subject" have the same
# response (just noisy in source space) here.
#
# .. note::
# Note that for 7 subjects with a two-sided statistical test, the minimum
#     significance under a permutation test is only p = 1/(2 ** 6) = 0.015625,
# which is large.
n_vertices_sample, n_times = condition1.data.shape
n_subjects = 7
print('Simulating data for %d subjects.' % n_subjects)
# Let's make sure our results replicate, so set the seed.
np.random.seed(0)
X = randn(n_vertices_sample, n_times, n_subjects, 2) * 10
X[:, :, :, 0] += condition1.data[:, :, np.newaxis]
X[:, :, :, 1] += condition2.data[:, :, np.newaxis]
###############################################################################
# It's a good idea to spatially smooth the data, and for visualization
# purposes, let's morph these to fsaverage, which is a grade 5 source space
# with vertices 0:10242 for each hemisphere. Usually you'd have to morph
# each subject's data separately (and you might want to use morph_data
# instead), but here since all estimates are on 'sample' we can use one
# morph matrix for all the heavy lifting.
# Read the source space we are morphing to
src = mne.read_source_spaces(src_fname)
fsave_vertices = [s['vertno'] for s in src]
morph_mat = mne.compute_source_morph(
src=inverse_operator['src'], subject_to='fsaverage',
spacing=fsave_vertices, subjects_dir=subjects_dir).morph_mat
n_vertices_fsave = morph_mat.shape[0]
# We have to change the shape for the dot() to work properly
X = X.reshape(n_vertices_sample, n_times * n_subjects * 2)
print('Morphing data.')
X = morph_mat.dot(X) # morph_mat is a sparse matrix
X = X.reshape(n_vertices_fsave, n_times, n_subjects, 2)
###############################################################################
# Finally, we want to compare the overall activity levels in each condition;
# the difference is taken along the last axis (condition). Subtracting
# condition2 from condition1 makes condition1 > condition2 show up as
# "red blobs" (instead of blue).
X = np.abs(X) # only magnitude
X = X[:, :, :, 0] - X[:, :, :, 1] # make paired contrast
###############################################################################
# Compute statistic
# -----------------
#
# To use an algorithm optimized for spatio-temporal clustering, we
# just pass the spatial adjacency matrix (instead of spatio-temporal)
print('Computing adjacency.')
adjacency = mne.spatial_src_adjacency(src)
# Note that X needs to be a multi-dimensional array of shape
# samples (subjects) x time x space, so we permute dimensions
X = np.transpose(X, [2, 1, 0])
# Now let's actually do the clustering. This can take a long time...
# Here we set the threshold quite high to reduce computation.
p_threshold = 0.001
t_threshold = -stats.distributions.t.ppf(p_threshold / 2., n_subjects - 1)
print('Clustering.')
T_obs, clusters, cluster_p_values, H0 = clu = \
spatio_temporal_cluster_1samp_test(X, adjacency=adjacency, n_jobs=1,
threshold=t_threshold, buffer_size=None,
verbose=True)
# Now select the clusters that are sig. at p < 0.05 (note that this value
# is multiple-comparisons corrected).
good_cluster_inds = np.where(cluster_p_values < 0.05)[0]
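# A small sketch (ours, not part of the original tutorial) printing a
# one-line summary of what survived the cluster-level correction:
print('Found %d cluster(s) with p < 0.05 (smallest p = %g)'
      % (len(good_cluster_inds), cluster_p_values.min()))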
###############################################################################
# Visualize the clusters
# ----------------------
print('Visualizing clusters.')
# Now let's build a convenient representation of each cluster, where each
# cluster becomes a "time point" in the SourceEstimate
stc_all_cluster_vis = summarize_clusters_stc(clu, tstep=tstep,
vertices=fsave_vertices,
subject='fsaverage')
# Let's actually plot the first "time point" in the SourceEstimate, which
# shows all the clusters, weighted by duration.
subjects_dir = op.join(data_path, 'subjects')
# blue blobs are for condition A < condition B, red for A > B
brain = stc_all_cluster_vis.plot(
hemi='both', views='lateral', subjects_dir=subjects_dir,
time_label='temporal extent (ms)', size=(800, 800),
smoothing_steps=5, clim=dict(kind='value', pos_lims=[0, 1, 40]))
# brain.save_image('clusters.png')
|
openpeer/webrtc-gyp
|
refs/heads/master
|
test/ninja/use-custom-environment-files/gyptest-use-custom-environment-files.py
|
269
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure environment files can be suppressed.
"""
import TestGyp
import os
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['ninja'])
test.run_gyp('use-custom-environment-files.gyp',
'-G', 'ninja_use_custom_environment_files')
# Make sure environment files do not exist.
if os.path.exists(test.built_file_path('environment.x86')):
test.fail_test()
if os.path.exists(test.built_file_path('environment.x64')):
test.fail_test()
test.pass_test()
|
pankajp/pyface
|
refs/heads/master
|
pyface/ui/qt4/widget.py
|
3
|
#------------------------------------------------------------------------------
# Copyright (c) 2007, Riverbank Computing Limited
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD license.
# However, when used with the GPL version of PyQt the additional terms described in the PyQt GPL exception also apply
#
# Author: Riverbank Computing Limited
# Description: <Enthought pyface package component>
#------------------------------------------------------------------------------
# Enthought library imports.
from traits.api import Any, HasTraits, provides
# Local imports.
from pyface.i_widget import IWidget, MWidget
@provides(IWidget)
class Widget(MWidget, HasTraits):
""" The toolkit specific implementation of a Widget. See the IWidget
interface for the API documentation.
"""
#### 'IWidget' interface ##################################################
control = Any
parent = Any
###########################################################################
# 'IWidget' interface.
###########################################################################
def destroy(self):
if self.control is not None:
self.control.hide()
self.control.deleteLater()
self.control = None
#### EOF ######################################################################
|
etovrodeya/hotel_project2
|
refs/heads/master
|
booking/migrations/0001_initial.py
|
1
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-28 11:12
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Booking',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('s_date', models.DateField(verbose_name='День заезда')),
('e_date', models.DateField(verbose_name='День выезда')),
('room', models.SmallIntegerField(null=True, verbose_name='Комната')),
('style', models.CharField(choices=[('budget', 'Бюджетный'), ('business', 'Бизнесс-класс'), ('lux', 'Люкс')], max_length=15, verbose_name='Класс аппартаментов')),
('status', models.SmallIntegerField(choices=[('budget', 'Бюджетный'), ('business', 'Бизнесс-класс'), ('lux', 'Люкс')], null=True, verbose_name='Статус')),
('comment', models.CharField(max_length=500, verbose_name='Коментарий')),
('child', models.SmallIntegerField(verbose_name='Количество детей')),
('number_peoples', models.SmallIntegerField(verbose_name='Количество людей')),
('date', models.DateTimeField(auto_now_add=True, verbose_name='Время бронирования')),
('price', models.IntegerField(null=True, verbose_name='Цена')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'Бронь',
'verbose_name_plural': 'Брони',
},
),
migrations.CreateModel(
name='Room',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('housing', models.SmallIntegerField(verbose_name='Корпус')),
('floor', models.SmallIntegerField(verbose_name='Этаж')),
('number', models.SmallIntegerField(verbose_name='Номер')),
('per_night', models.SmallIntegerField(verbose_name='Стоимость за ночь')),
('number_beds', models.SmallIntegerField(verbose_name='Количество спальных мест')),
('style', models.CharField(choices=[('budget', 'Бюджетный'), ('business', 'Бизнесс-класс'), ('lux', 'Люкс')], max_length=15, verbose_name='Класс аппартаментов')),
],
),
]
|
limavicente/py-scripts
|
refs/heads/master
|
pesquisa.py
|
1
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
'''
Usage:
    python pesquisa.py -i /pastacomarquivos/ -p word1:word2
Recursively searches the source folder for files that contain the given
words.
Parameters:
    -i folder containing the files to be searched.
    -p one or more words to search for. Separate words with ":"
    -m number of words that must be found. If not given, the
    default value is 1 (one).
Examples:
    python pesquisa.py -i /meustextos/ -p shell:dicas -m 2
    python pesquisa.py -i /meustextos/ -p carro:automovel:veiculo -m 1
'''
__author__ = 'Vicente Lima'
__version__ = '0.1'
import sys
import os
def paramPalavras(num):
    '''
    list of words separated by colons
    '''
    listaPalavras = sys.argv[num + 1]
    return listaPalavras.split(':')
def paramString(num):
    '''
    path of the folder containing the files to search
    '''
    return sys.argv[num + 1]
def paramTipoMatch(num):
    '''
    minimum number of words that must be found (defaults to 1)
    '''
    retorno = 1
    try:
        retorno = int(sys.argv[num + 1])
    except (ValueError, IndexError):
        print('error -> the (-m) parameter must be followed by an integer')
        exit(1)
    return retorno
parametros = {'-i': paramString,
'-p': paramPalavras,
'-m': paramTipoMatch
}
def pesquisarNoArquivo(path, file, listapalavras, matchminimo):
    arq = open(path + '/' + file, 'r')
    conteudo = arq.read().upper()
    arq.close()
    achou = []
    for palavra in listapalavras:
        if conteudo.find(palavra.upper()) >= 0:  # >= 0: a match at offset 0 counts too
            achou.append(palavra)
    if len(achou) >= matchminimo:
        print('file:', achou, arq.name)
def verificarValidarParametros():
    # "-i" and "-p" are mandatory; "-m" defaults to 1 when not given
    # (the dict still holds the handler function for unused options)
    if callable(parametros['-i']) or callable(parametros['-p']):
        print('error -> parameters -i and -p are mandatory')
        exit(1)
    if callable(parametros['-m']):
        parametros['-m'] = 1
    # ensure "-m" (minimum match count) does not exceed the number of words
    if parametros['-m'] > len(parametros['-p']):
        parametros['-m'] = len(parametros['-p'])
    # check that the "-i" input path exists
    if not os.path.isdir(parametros['-i']):
        print('error -> the given path does not exist: ', parametros['-i'])
        exit(1)
if __name__ == '__main__':
    if '-h' in sys.argv:
        print(__doc__)
        exit(0)
    for num, arg in enumerate(sys.argv):
        if arg in parametros:
            retorno = parametros[arg](num)
            parametros[arg] = retorno
    verificarValidarParametros()
    # show the parameters that will be used
    for param in parametros.keys():
        print(param, ':', parametros[param])
    palavras = parametros['-p']
    origem = parametros['-i']
    matchminimo = parametros['-m']
    print('-------------')
for raiz, subpastas, arquivos in os.walk(origem):
#print 'raiz:', raiz
#print 'subpastas: ', subpastas
for arquivo in arquivos:
#print 'arquivo: ', raiz, arquivo
pesquisarNoArquivo(raiz, arquivo, palavras, matchminimo)
|
bguillot/OpenUpgrade
|
refs/heads/master
|
setup/win32/OpenERPServerService.py
|
105
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import win32serviceutil
import win32service
import win32api
import win32process
import servicemanager
import sys
import subprocess
import os
try:
import meta
except ImportError:
if hasattr(sys, 'frozen'):
raise
from setup import generate_files
generate_files()
import meta # noqa
class OpenERPServerService(win32serviceutil.ServiceFramework):
# required info
_svc_name_ = meta.nt_service_name
_svc_display_name_ = "%s %s" % (meta.description, meta.serie)
def __init__(self, args):
win32serviceutil.ServiceFramework.__init__(self, args)
# a reference to the server's process
self.terpprocess = None
def SvcStop(self):
# Before we do anything, tell the SCM we are starting the stop process.
self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
# stop the running OpenERP Server: say it's a normal exit
win32api.TerminateProcess(int(self.terpprocess._handle), 0)
servicemanager.LogInfoMsg("OpenERP Server stopped correctly")
def StartTERP(self):
# The server finds now its configuration automatically on Windows
# We start the ERP Server as an independent process, but we keep its handle
        # The server's binary must be one directory above the service's binary (when py2exe'd the python libraries shouldn't mix)
service_dir = os.path.dirname(sys.argv[0])
server_dir = os.path.split(service_dir)[0]
server_path = os.path.join(server_dir, 'server', 'openerp-server.exe')
self.terpprocess = subprocess.Popen([server_path], cwd=server_dir, creationflags=win32process.CREATE_NO_WINDOW)
def SvcDoRun(self):
self.StartTERP()
servicemanager.LogInfoMsg("OpenERP Server up and running")
# exit with same exit code as OpenERP process
sys.exit(self.terpprocess.wait())
def option_handler(opts):
# configure the service to auto restart on failures...
subprocess.call(['sc', 'failure', meta.nt_service_name, 'reset=', '0', 'actions=', 'restart/0/restart/0/restart/0'])
if __name__ == '__main__':
# Do with the service whatever option is passed in the command line
win32serviceutil.HandleCommandLine(OpenERPServerService, customOptionHandler=option_handler)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
radlws/AWS-ElasticBeanstalk-CLI
|
refs/heads/master
|
eb/linux/python2.7/scli/prompt.py
|
8
|
#!/usr/bin/env python
# ==============================================================================
# Copyright 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Amazon Software License (the "License"). You may not use
# this file except in compliance with the License. A copy of the License is
# located at
#
# http://aws.amazon.com/asl/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or
# implied. See the License for the specific language governing permissions
# and limitations under the License.
#==============================================================================
import sys as _sys
from lib.utility import misc
from scli.constants import OutputLevel
_EMPTY_PROMPT = ''
_STAR_PROMPT = '* '
_DASH_PROMPT = '--'
_EXCLA_PROMPT = '! '
_ARROW_PROMPT = '> '
class _OutputStream(object):
def __init__(self, stream=_sys.stdout):
self._out_stream = stream
def write(self, msg):
if self._out_stream is not None:
self._out_stream.write('{0}\n'.format(msg))
self._out_stream.flush()
def set_stream(self, stream=_sys.stdout):
self._out_stream = stream
_std_out = _OutputStream(_sys.stdout)
_err_out = _OutputStream(_sys.stderr)
_null_out = _OutputStream(None)
_info = _std_out
_result = _std_out
_err = _err_out
_current_level = OutputLevel.Info
def _output(stream, prompt_string, message):
stream.write(prompt_string + message)
def get_level():
return _current_level
def set_level(level):
global _current_level
global _std_out, _err_out, _null_out
global _info, _result, _err
_current_level = level
if level == OutputLevel.Info:
_info = _result = _std_out
_err = _err_out
elif level == OutputLevel.ResultOnly:
_info = _null_out
_result = _std_out
_err = _err_out
elif level == OutputLevel.Quiet:
_info = _result = _null_out
_err = _err_out
elif level == OutputLevel.Silence:
_info = _result = _err = _null_out
def plain(message):
global _result
_output(_result, _EMPTY_PROMPT, message)
def action(message):
global _info
_output(_info, _EMPTY_PROMPT, message)
def info(message):
global _info
_output(_info, _EMPTY_PROMPT, message)
def result(message):
global _result
_output(_result, _EMPTY_PROMPT, message)
def error(message):
global _err
_output(_err, _EMPTY_PROMPT, message)
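# A minimal usage sketch (illustrative only, not executed at import time):
#
#     set_level(OutputLevel.ResultOnly)
#     info('progress message')    # routed to the null stream, suppressed
#     result('final output')      # still written to stdout
#     error('something failed')   # still written to stderr
#     set_level(OutputLevel.Silence)
#     error('something failed')   # now suppressed as well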
|
dyyi/moneybook
|
refs/heads/master
|
venv/Lib/site-packages/pip/_vendor/packaging/version.py
|
1151
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import collections
import itertools
import re
from ._structures import Infinity
__all__ = [
"parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"
]
_Version = collections.namedtuple(
"_Version",
["epoch", "release", "dev", "pre", "post", "local"],
)
def parse(version):
"""
Parse the given version string and return either a :class:`Version` object
or a :class:`LegacyVersion` object depending on if the given version is
a valid PEP 440 version or a legacy version.
"""
try:
return Version(version)
except InvalidVersion:
return LegacyVersion(version)
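# A minimal usage sketch (doctest-style, illustrative only):
#
#     >>> parse("1.0.post1").is_postrelease
#     True
#     >>> parse("1.0a1").is_prerelease
#     True
#     >>> isinstance(parse("not-a-version"), LegacyVersion)
#     True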
class InvalidVersion(ValueError):
"""
An invalid version was found, users should refer to PEP 440.
"""
class _BaseVersion(object):
def __hash__(self):
return hash(self._key)
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)
def __le__(self, other):
return self._compare(other, lambda s, o: s <= o)
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ge__(self, other):
return self._compare(other, lambda s, o: s >= o)
def __gt__(self, other):
return self._compare(other, lambda s, o: s > o)
def __ne__(self, other):
return self._compare(other, lambda s, o: s != o)
def _compare(self, other, method):
if not isinstance(other, _BaseVersion):
return NotImplemented
return method(self._key, other._key)
class LegacyVersion(_BaseVersion):
def __init__(self, version):
self._version = str(version)
self._key = _legacy_cmpkey(self._version)
def __str__(self):
return self._version
def __repr__(self):
return "<LegacyVersion({0})>".format(repr(str(self)))
@property
def public(self):
return self._version
@property
def base_version(self):
return self._version
@property
def local(self):
return None
@property
def is_prerelease(self):
return False
@property
def is_postrelease(self):
return False
_legacy_version_component_re = re.compile(
r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE,
)
_legacy_version_replacement_map = {
"pre": "c", "preview": "c", "-": "final-", "rc": "c", "dev": "@",
}
def _parse_version_parts(s):
for part in _legacy_version_component_re.split(s):
part = _legacy_version_replacement_map.get(part, part)
if not part or part == ".":
continue
if part[:1] in "0123456789":
# pad for numeric comparison
yield part.zfill(8)
else:
yield "*" + part
# ensure that alpha/beta/candidate are before final
yield "*final"
def _legacy_cmpkey(version):
    # We hardcode an epoch of -1 here. A PEP 440 version can only have an epoch
# greater than or equal to 0. This will effectively put the LegacyVersion,
# which uses the defacto standard originally implemented by setuptools,
# as before all PEP 440 versions.
epoch = -1
    # This scheme is taken from pkg_resources.parse_version of setuptools prior
    # to its adoption of the packaging library.
parts = []
for part in _parse_version_parts(version.lower()):
if part.startswith("*"):
# remove "-" before a prerelease tag
if part < "*final":
while parts and parts[-1] == "*final-":
parts.pop()
# remove trailing zeros from each series of numeric parts
while parts and parts[-1] == "00000000":
parts.pop()
parts.append(part)
parts = tuple(parts)
return epoch, parts
# Deliberately not anchored to the start and end of the string, to make it
# easier for 3rd party code to reuse
VERSION_PATTERN = r"""
v?
(?:
(?:(?P<epoch>[0-9]+)!)? # epoch
(?P<release>[0-9]+(?:\.[0-9]+)*) # release segment
(?P<pre> # pre-release
[-_\.]?
(?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
[-_\.]?
(?P<pre_n>[0-9]+)?
)?
(?P<post> # post release
(?:-(?P<post_n1>[0-9]+))
|
(?:
[-_\.]?
(?P<post_l>post|rev|r)
[-_\.]?
(?P<post_n2>[0-9]+)?
)
)?
(?P<dev> # dev release
[-_\.]?
(?P<dev_l>dev)
[-_\.]?
(?P<dev_n>[0-9]+)?
)?
)
(?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))? # local version
"""
class Version(_BaseVersion):
_regex = re.compile(
r"^\s*" + VERSION_PATTERN + r"\s*$",
re.VERBOSE | re.IGNORECASE,
)
def __init__(self, version):
# Validate the version and parse it into pieces
match = self._regex.search(version)
if not match:
raise InvalidVersion("Invalid version: '{0}'".format(version))
# Store the parsed out pieces of the version
self._version = _Version(
epoch=int(match.group("epoch")) if match.group("epoch") else 0,
release=tuple(int(i) for i in match.group("release").split(".")),
pre=_parse_letter_version(
match.group("pre_l"),
match.group("pre_n"),
),
post=_parse_letter_version(
match.group("post_l"),
match.group("post_n1") or match.group("post_n2"),
),
dev=_parse_letter_version(
match.group("dev_l"),
match.group("dev_n"),
),
local=_parse_local_version(match.group("local")),
)
# Generate a key which will be used for sorting
self._key = _cmpkey(
self._version.epoch,
self._version.release,
self._version.pre,
self._version.post,
self._version.dev,
self._version.local,
)
def __repr__(self):
return "<Version({0})>".format(repr(str(self)))
def __str__(self):
parts = []
# Epoch
if self._version.epoch != 0:
parts.append("{0}!".format(self._version.epoch))
# Release segment
parts.append(".".join(str(x) for x in self._version.release))
# Pre-release
if self._version.pre is not None:
parts.append("".join(str(x) for x in self._version.pre))
# Post-release
if self._version.post is not None:
parts.append(".post{0}".format(self._version.post[1]))
# Development release
if self._version.dev is not None:
parts.append(".dev{0}".format(self._version.dev[1]))
# Local version segment
if self._version.local is not None:
parts.append(
"+{0}".format(".".join(str(x) for x in self._version.local))
)
return "".join(parts)
@property
def public(self):
return str(self).split("+", 1)[0]
@property
def base_version(self):
parts = []
# Epoch
if self._version.epoch != 0:
parts.append("{0}!".format(self._version.epoch))
# Release segment
parts.append(".".join(str(x) for x in self._version.release))
return "".join(parts)
@property
def local(self):
version_string = str(self)
if "+" in version_string:
return version_string.split("+", 1)[1]
@property
def is_prerelease(self):
return bool(self._version.dev or self._version.pre)
@property
def is_postrelease(self):
return bool(self._version.post)
def _parse_letter_version(letter, number):
if letter:
# We consider there to be an implicit 0 in a pre-release if there is
# not a numeral associated with it.
if number is None:
number = 0
# We normalize any letters to their lower case form
letter = letter.lower()
# We consider some words to be alternate spellings of other words and
# in those cases we want to normalize the spellings to our preferred
# spelling.
if letter == "alpha":
letter = "a"
elif letter == "beta":
letter = "b"
elif letter in ["c", "pre", "preview"]:
letter = "rc"
elif letter in ["rev", "r"]:
letter = "post"
return letter, int(number)
if not letter and number:
# We assume if we are given a number, but we are not given a letter
# then this is using the implicit post release syntax (e.g. 1.0-1)
letter = "post"
return letter, int(number)
_local_version_seperators = re.compile(r"[\._-]")
def _parse_local_version(local):
"""
Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
"""
if local is not None:
return tuple(
part.lower() if not part.isdigit() else int(part)
for part in _local_version_seperators.split(local)
)
def _cmpkey(epoch, release, pre, post, dev, local):
# When we compare a release version, we want to compare it with all of the
# trailing zeros removed. So we'll use a reverse the list, drop all the now
# leading zeros until we come to something non zero, then take the rest
# re-reverse it back into the correct order and make it a tuple and use
# that for our sorting key.
release = tuple(
reversed(list(
itertools.dropwhile(
lambda x: x == 0,
reversed(release),
)
))
)
# We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
# We'll do this by abusing the pre segment, but we _only_ want to do this
# if there is not a pre or a post segment. If we have one of those then
# the normal sorting rules will handle this case correctly.
if pre is None and post is None and dev is not None:
pre = -Infinity
# Versions without a pre-release (except as noted above) should sort after
# those with one.
elif pre is None:
pre = Infinity
# Versions without a post segment should sort before those with one.
if post is None:
post = -Infinity
# Versions without a development segment should sort after those with one.
if dev is None:
dev = Infinity
if local is None:
# Versions without a local segment should sort before those with one.
local = -Infinity
else:
# Versions with a local segment need that segment parsed to implement
# the sorting rules in PEP440.
# - Alpha numeric segments sort before numeric segments
# - Alpha numeric segments sort lexicographically
# - Numeric segments sort numerically
# - Shorter versions sort before longer versions when the prefixes
# match exactly
local = tuple(
(i, "") if isinstance(i, int) else (-Infinity, i)
for i in local
)
return epoch, release, pre, post, dev, local
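# A sorting sketch (illustrative only) of the rules encoded above:
#
#     Version("1.0.dev0") < Version("1.0a0")   # True: dev sorts first
#     Version("1.0+abc") < Version("1.0+5")    # True: alphanumeric local
#                                              # segments sort before numeric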
|
bambuste/qgis-vfk-plugin
|
refs/heads/master
|
budovySearchForm.py
|
2
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
vfkPluginDialog
A QGIS plugin
 Plugin for working with cadastral (land registry) data
-------------------
begin : 2015-06-11
git sha : $Format:%H$
copyright : (C) 2015 by Stepan Bambula
email : [email protected]
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from PyQt4.QtGui import *
from PyQt4.QtCore import QAbstractItemModel
from ui_budovysearchform import *
class BudovySearchForm(QWidget):
def __init__(self, parent=None):
super(BudovySearchForm, self).__init__(parent)
# Set up the user interface from Designer.
self.ui = Ui_BudovySearchForm()
self.ui.setupUi(self)
        self.__mZpusobVyuzitiModel = None  # real model is injected via setZpusobVyuzitiModel()
def domovniCislo(self):
return unicode(self.ui.cisloDomovniLineEdit.text()).strip()
def naParcele(self):
return unicode(self.ui.naParceleLineEdit.text()).strip()
def lv(self):
return unicode(self.ui.lvBudovyLineEdit.text()).strip()
def setZpusobVyuzitiModel(self, model):
"""
:param model: QAbstractItemModel
"""
self.__mZpusobVyuzitiModel = model
self.ui.mZpVyuzitiCombo.setModel(model)
self.ui.mZpVyuzitiCombo.setModelColumn(1)
def zpusobVyuzitiKod(self):
row = self.ui.mZpVyuzitiCombo.currentIndex()
index = self.ui.mZpVyuzitiCombo.model().index(row, 1)
if self.ui.mZpVyuzitiCombo.model().data(index) == u"libovolný":
return u''
else:
return u"{}".format(self.ui.mZpVyuzitiCombo.model().data(index))
|
HurtowniaPixeli/pixelcms-server
|
refs/heads/master
|
cms/accounts/views.py
|
1
|
from django.contrib.auth import get_user_model, authenticate
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from django.core import signing
from django.shortcuts import Http404
from rest_framework.decorators import api_view, permission_classes
from rest_framework.response import Response
from rest_framework import status, permissions
from rest_framework_jwt.settings import api_settings
from social_django.utils import load_strategy, load_backend
from social_core.actions import do_auth
from social_core.exceptions import MissingBackend
from rest_social_auth.views import SocialJWTUserAuthView
from .serializers import (
LoginSerializer, RegisterSerializer, ActivateSerializer,
ResendActivationMessageSerializer, SendResetPasswordMessageSerializer,
ResetPasswordSerializer, ChangePasswordSerializer, ChangeEmailSerializer,
ChangeEmailConfirmationSerializer
)
from . import utils as accounts_utils
@api_view(['POST'])
def login(request):
serializer = LoginSerializer(data=request.data)
if serializer.is_valid(raise_exception=False):
# get username
username_or_email = serializer.data['username_or_email']
if '@' in username_or_email:
try:
username = get_user_model().objects \
.get(email=username_or_email, social_auth__isnull=True) \
.username
except get_user_model().DoesNotExist:
username = username_or_email
else:
username = username_or_email
# try credentials
user = authenticate(
username=username,
password=serializer.data['password']
)
if user is not None:
# success
payload = api_settings.JWT_PAYLOAD_HANDLER(user)
token = api_settings.JWT_ENCODE_HANDLER(payload)
response_payload = api_settings.JWT_RESPONSE_PAYLOAD_HANDLER(
token, user
)
return Response(
status=status.HTTP_200_OK,
data={
'authInfo': response_payload,
'msg': _('You have been logged in.')
}
)
return Response(
status=status.HTTP_403_FORBIDDEN,
data={'_error': _('Wrong username or password.')}
)
@api_view(['POST'])
def social_login_begin(request, backend):
strategy = load_strategy(request)
try:
redirect_uri = (
settings.FRONTEND_ADDRESS +
'/accounts/social-auth/' +
backend +
'/'
)
backend = load_backend(
strategy=strategy,
name=backend,
redirect_uri=redirect_uri
)
except MissingBackend:
return Response(status=status.HTTP_400_BAD_REQUEST)
auth = do_auth(backend)
if auth:
return Response({'url': auth.url})
else:
        return Response(status=status.HTTP_500_INTERNAL_SERVER_ERROR)
class SocialView(SocialJWTUserAuthView):
def respond_error(self, error):
return Response(
status=status.HTTP_400_BAD_REQUEST,
data={'_error': str(error)}
)
def post(self, request, *args, **kwargs):
res = super(SocialView, self).post(request, *args, **kwargs)
if res.status_code != 200:
return res
token = res.data['token']
user = get_user_model().objects.get(pk=res.data['id'])
response_payload = api_settings.JWT_RESPONSE_PAYLOAD_HANDLER(
token, user
)
return Response(
status=status.HTTP_200_OK,
data={
'authInfo': response_payload,
'msg': _('You have been logged in.')
}
)
@api_view(['POST'])
def register(request):
serializer = RegisterSerializer(data=request.data)
if serializer.is_valid(raise_exception=True):
user = get_user_model().objects.create_user(
username=serializer.data['username'],
email=serializer.data['email'],
password=serializer.data['password']
)
if settings.ACCOUNTS_ACTIVATION:
user.is_active = False
user.save()
accounts_utils.send_activation_message(user, request)
return Response(
status=status.HTTP_201_CREATED,
data={
'activation': True,
'msg': _('Your account has been created. Activation '
'message has been sent to provided email '
'address.')
}
)
else:
user = authenticate(
username=serializer.data['username'],
password=serializer.data['password']
)
return Response(
status=status.HTTP_201_CREATED,
data={
                'msg': _('Your account has been created. You can log in.')
}
)
@api_view(['POST'])
def activate(request):
serializer = ActivateSerializer(data=request.data)
if serializer.is_valid(raise_exception=True):
invalid_key_response = Response(
status=status.HTTP_400_BAD_REQUEST,
data={'_error': _('Activation key is invalid.')}
)
try:
key_data = signing.loads(serializer.data['key'], max_age=60*15)
if key_data.get('action') != 'ACTIVATE':
return invalid_key_response
try:
user = get_user_model().objects.get(
pk=key_data['user'],
is_active=False,
last_login=None,
social_auth__isnull=True
)
if not user.is_active:
user.is_active = True
user.save()
return Response(
status=status.HTTP_201_CREATED,
data={'msg': _('Your account is now active. You can log '
'in.')}
)
except get_user_model().DoesNotExist:
return Response(
status=status.HTTP_400_BAD_REQUEST,
data={'msg': _('Activation key is not associated with any '
'account that needs activation.')}
)
except signing.SignatureExpired:
return Response(
status=status.HTTP_400_BAD_REQUEST,
data={
'expired': True,
'_error': _('Activation key has expired.')
}
)
except signing.BadSignature:
return invalid_key_response
@api_view(['POST'])
def resend_activation_message(request):
serializer = ResendActivationMessageSerializer(data=request.data)
if serializer.is_valid(raise_exception=True):
try:
user = get_user_model().objects.get(
email=serializer.data['email'],
is_active=False,
last_login=None,
social_auth__isnull=True
)
accounts_utils.send_activation_message(user, request)
return Response(
status=status.HTTP_201_CREATED,
data={
'msg': _('Activation message has been sent to provided '
'email address.')
}
)
except get_user_model().DoesNotExist:
return Response(
status=status.HTTP_400_BAD_REQUEST,
data={'_error': _('Provided email is not associated with any '
'account that needs to be activated.')}
)
@api_view(['POST'])
def send_reset_password_message(request):
serializer = SendResetPasswordMessageSerializer(data=request.data)
if serializer.is_valid(raise_exception=True):
try:
user = get_user_model().objects.get(
email=serializer.data['email'],
social_auth__isnull=True
)
accounts_utils.send_reset_password_message(user, request)
return Response(
status=status.HTTP_201_CREATED,
data={
'msg': _('Message with instructions how to change your '
'password has been sent to provided email '
'address.')
}
)
except get_user_model().DoesNotExist:
return Response(
status=status.HTTP_400_BAD_REQUEST,
data={'_error': _('Provided email is not associated with any '
'account.')}
)
@api_view(['POST'])
def reset_password(request):
serializer = ResetPasswordSerializer(data=request.data)
if serializer.is_valid(raise_exception=True):
invalid_key_response = Response(
status=status.HTTP_400_BAD_REQUEST,
data={
'keyError': True,
'_error': _('Change password key is invalid.')
}
)
try:
key_data = signing.loads(serializer.data['key'], max_age=60*15)
if key_data.get('action') != 'RESET_PASSWORD':
return invalid_key_response
try:
user = get_user_model().objects.get(
pk=key_data['user'],
social_auth__isnull=True
)
user.set_password(serializer.data['password'])
user.save()
return Response(
status=status.HTTP_201_CREATED,
data={'msg': _('Your password has been changed. You can '
'log in.')}
)
except get_user_model().DoesNotExist:
return invalid_key_response
except signing.SignatureExpired:
return Response(
status=status.HTTP_400_BAD_REQUEST,
data={
'keyError': True,
'_error': _('Change password key has expired.')
}
)
except signing.BadSignature:
return invalid_key_response
@api_view(['POST'])
@permission_classes((permissions.IsAuthenticated,))
def change_password(request):
if request.user.social_auth.exists():
raise Http404
serializer = ChangePasswordSerializer(
data=request.data,
context={'request': request}
)
if serializer.is_valid(raise_exception=True):
request.user.set_password(serializer.data['new_password'])
request.user.save()
return Response(
status=status.HTTP_200_OK,
data={'msg': _('Your password has been changed.')}
)
@api_view(['POST'])
@permission_classes((permissions.IsAuthenticated,))
def change_email(request):
if request.user.social_auth.exists():
raise Http404
serializer = ChangeEmailSerializer(data=request.data)
if serializer.is_valid(raise_exception=True):
if settings.ACCOUNTS_ACTIVATION:
accounts_utils.send_change_email_confirmation_message(
new_email=serializer.data['new_email'],
request=request
)
return Response(
status=status.HTTP_200_OK,
data={
                    'msg': _('Confirmation message has been sent to provided '
                             'email address.')
}
)
else:
request.user.email = serializer.data['new_email']
request.user.save()
return Response(
status=status.HTTP_200_OK,
data={'msg': _('Your email has been changed.')}
)
@api_view(['POST'])
def change_email_confirmation(request):
serializer = ChangeEmailConfirmationSerializer(data=request.data)
if serializer.is_valid(raise_exception=True):
invalid_key_response = Response(
status=status.HTTP_400_BAD_REQUEST,
data={
'keyError': True,
'_error': _('Confirmation key is invalid.')
}
)
try:
key_data = signing.loads(serializer.data['key'], max_age=60*15)
if key_data.get('action') != 'CHANGE_EMAIL':
return invalid_key_response
try:
email_exists = get_user_model().objects \
.filter(email=key_data['new_email']).exists()
if email_exists:
return Response(
status=status.HTTP_400_BAD_REQUEST,
data={
'_error': _('Provided email address is already '
'taken.')
}
)
user = get_user_model().objects.get(
pk=key_data['user'],
social_auth__isnull=True
)
user.email = key_data['new_email']
user.save()
return Response(
status=status.HTTP_200_OK,
data={'msg': _('Your email has been changed.')}
)
except get_user_model().DoesNotExist:
return invalid_key_response
except signing.SignatureExpired:
return Response(
status=status.HTTP_400_BAD_REQUEST,
data={
'keyError': True,
'_error': _('Confirmation key has expired.')
}
)
except signing.BadSignature:
return invalid_key_response
|
romankagan/DDBWorkbench
|
refs/heads/master
|
python/lib/Lib/site-packages/django/contrib/gis/gdal/geomtype.py
|
404
|
from django.contrib.gis.gdal.error import OGRException
#### OGRGeomType ####
class OGRGeomType(object):
    "Encapsulates OGR Geometry Types."
wkb25bit = -2147483648
# Dictionary of acceptable OGRwkbGeometryType s and their string names.
_types = {0 : 'Unknown',
1 : 'Point',
2 : 'LineString',
3 : 'Polygon',
4 : 'MultiPoint',
5 : 'MultiLineString',
6 : 'MultiPolygon',
7 : 'GeometryCollection',
100 : 'None',
101 : 'LinearRing',
1 + wkb25bit: 'Point25D',
2 + wkb25bit: 'LineString25D',
3 + wkb25bit: 'Polygon25D',
4 + wkb25bit: 'MultiPoint25D',
5 + wkb25bit : 'MultiLineString25D',
6 + wkb25bit : 'MultiPolygon25D',
7 + wkb25bit : 'GeometryCollection25D',
}
# Reverse type dictionary, keyed by lower-case of the name.
_str_types = dict([(v.lower(), k) for k, v in _types.items()])
def __init__(self, type_input):
"Figures out the correct OGR Type based upon the input."
if isinstance(type_input, OGRGeomType):
num = type_input.num
elif isinstance(type_input, basestring):
type_input = type_input.lower()
if type_input == 'geometry': type_input='unknown'
num = self._str_types.get(type_input, None)
if num is None:
raise OGRException('Invalid OGR String Type "%s"' % type_input)
elif isinstance(type_input, int):
if not type_input in self._types:
raise OGRException('Invalid OGR Integer Type: %d' % type_input)
num = type_input
else:
raise TypeError('Invalid OGR input type given.')
# Setting the OGR geometry type number.
self.num = num
def __str__(self):
"Returns the value of the name property."
return self.name
def __eq__(self, other):
"""
Does an equivalence test on the OGR type with the given
other OGRGeomType, the short-hand string, or the integer.
"""
if isinstance(other, OGRGeomType):
return self.num == other.num
elif isinstance(other, basestring):
return self.name.lower() == other.lower()
elif isinstance(other, int):
return self.num == other
else:
return False
def __ne__(self, other):
return not (self == other)
@property
def name(self):
"Returns a short-hand string form of the OGR Geometry type."
return self._types[self.num]
@property
def django(self):
"Returns the Django GeometryField for this OGR Type."
s = self.name.replace('25D', '')
if s in ('LinearRing', 'None'):
return None
elif s == 'Unknown':
s = 'Geometry'
return s + 'Field'
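# A small usage sketch (illustrative only) of the loose equality and the
# Django field mapping:
#
#     OGRGeomType(3) == 'Polygon'              # True
#     OGRGeomType('MultiPoint25D').django      # 'MultiPointField'
#     OGRGeomType('LinearRing').django         # None (no field type)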
|
Inspq/ansible
|
refs/heads/inspq
|
lib/ansible/modules/storage/infinidat/infini_fs.py
|
69
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Gregory Shulov ([email protected])
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: infini_fs
version_added: 2.3
short_description: Create, Delete or Modify filesystems on Infinibox
description:
- This module creates, deletes or modifies filesystems on Infinibox.
author: Gregory Shulov (@GR360RY)
options:
name:
description:
- File system name.
required: true
state:
description:
- Creates/Modifies file system when present or removes when absent.
required: false
default: present
choices: [ "present", "absent" ]
size:
description:
- File system size in MB, GB or TB units. See examples.
required: false
pool:
description:
- Pool that will host file system.
required: true
extends_documentation_fragment:
- infinibox
'''
EXAMPLES = '''
- name: Create new file system named foo under pool named bar
infini_fs:
name: foo
size: 1TB
pool: bar
state: present
user: admin
password: secret
system: ibox001
'''
RETURN = '''
'''
HAS_INFINISDK = True
try:
from infinisdk import InfiniBox, core
except ImportError:
HAS_INFINISDK = False
from ansible.module_utils.infinibox import *
from capacity import KiB, Capacity
@api_wrapper
def get_pool(module, system):
"""Return Pool or None"""
try:
return system.pools.get(name=module.params['pool'])
    except Exception:
return None
@api_wrapper
def get_filesystem(module, system):
"""Return Filesystem or None"""
try:
return system.filesystems.get(name=module.params['name'])
    except Exception:
return None
@api_wrapper
def create_filesystem(module, system):
"""Create Filesystem"""
if not module.check_mode:
filesystem = system.filesystems.create(name=module.params['name'], pool=get_pool(module, system))
if module.params['size']:
size = Capacity(module.params['size']).roundup(64 * KiB)
filesystem.update_size(size)
module.exit_json(changed=True)
@api_wrapper
def update_filesystem(module, filesystem):
"""Update Filesystem"""
changed = False
if module.params['size']:
size = Capacity(module.params['size']).roundup(64 * KiB)
if filesystem.get_size() != size:
if not module.check_mode:
filesystem.update_size(size)
changed = True
module.exit_json(changed=changed)
@api_wrapper
def delete_filesystem(module, filesystem):
""" Delete Filesystem"""
if not module.check_mode:
filesystem.delete()
module.exit_json(changed=True)
def main():
argument_spec = infinibox_argument_spec()
argument_spec.update(
dict(
            name=dict(required=True),
            state=dict(default='present', choices=['present', 'absent']),
            pool=dict(required=True),
            size=dict()
)
)
module = AnsibleModule(argument_spec, supports_check_mode=True)
if not HAS_INFINISDK:
module.fail_json(msg='infinisdk is required for this module')
if module.params['size']:
try:
Capacity(module.params['size'])
        except Exception:
module.fail_json(msg='size (Physical Capacity) should be defined in MB, GB, TB or PB units')
state = module.params['state']
system = get_system(module)
pool = get_pool(module, system)
filesystem = get_filesystem(module, system)
if pool is None:
module.fail_json(msg='Pool {} not found'.format(module.params['pool']))
if state == 'present' and not filesystem:
create_filesystem(module, system)
elif state == 'present' and filesystem:
update_filesystem(module, filesystem)
elif state == 'absent' and filesystem:
delete_filesystem(module, filesystem)
elif state == 'absent' and not filesystem:
module.exit_json(changed=False)
# Import Ansible Utilities
from ansible.module_utils.basic import AnsibleModule
if __name__ == '__main__':
main()
|
msiebuhr/v8.go
|
refs/heads/master
|
v8/build/gyp/test/hello/gyptest-regyp.py
|
268
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that Makefiles get rebuilt when a source gyp file changes.
"""
import TestGyp
# Regenerating build files when a gyp file changes is currently only supported
# by the make generator.
test = TestGyp.TestGyp(formats=['make'])
test.run_gyp('hello.gyp')
test.build('hello.gyp', test.ALL)
test.run_built_executable('hello', stdout="Hello, world!\n")
# Sleep so that the changed gyp file will have a newer timestamp than the
# previously generated build files.
test.sleep()
test.write('hello.gyp', test.read('hello2.gyp'))
test.build('hello.gyp', test.ALL)
test.run_built_executable('hello', stdout="Hello, two!\n")
test.pass_test()
|
cloudera/hue
|
refs/heads/master
|
desktop/core/ext-py/boto-2.46.1/tests/unit/s3/test_bucketlistresultset.py
|
22
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Mitch Garnaat http://garnaat.org/
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from mock import patch, Mock
import unittest
from boto.s3.bucket import ResultSet
from boto.s3.bucketlistresultset import multipart_upload_lister
from boto.s3.bucketlistresultset import versioned_bucket_lister
class S3BucketListResultSetTest(unittest.TestCase):
def _test_patched_lister_encoding(self, inner_method, outer_method):
bucket = Mock()
call_args = []
first = ResultSet()
first.append('foo')
first.next_key_marker = 'a+b'
first.is_truncated = True
second = ResultSet()
second.append('bar')
second.is_truncated = False
pages = [first, second]
def return_pages(**kwargs):
call_args.append(kwargs)
return pages.pop(0)
setattr(bucket, inner_method, return_pages)
results = list(outer_method(bucket, encoding_type='url'))
self.assertEqual(['foo', 'bar'], results)
self.assertEqual('a b', call_args[1]['key_marker'])
def test_list_object_versions_with_url_encoding(self):
self._test_patched_lister_encoding(
'get_all_versions', versioned_bucket_lister)
def test_list_multipart_upload_with_url_encoding(self):
self._test_patched_lister_encoding(
'get_all_multipart_uploads', multipart_upload_lister)
|
PythoO/Contest
|
refs/heads/master
|
models.py
|
1
|
__author__ = 'pythoo'
from app import db
|
lennox/score_linux
|
refs/heads/master
|
tools/perf/util/setup.py
|
4998
|
#!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
def finalize_options(self):
_build_ext.finalize_options(self)
self.build_lib = build_lib
self.build_temp = build_tmp
class install_lib(_install_lib):
def finalize_options(self):
_install_lib.finalize_options(self)
self.build_dir = build_lib
cflags = ['-fno-strict-aliasing', '-Wno-write-strings']
cflags += getenv('CFLAGS', '').split()
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')
ext_sources = [f.strip() for f in file('util/python-ext-sources')
if len(f.strip()) > 0 and f[0] != '#']
perf = Extension('perf',
sources = ext_sources,
include_dirs = ['util/include'],
extra_compile_args = cflags,
)
setup(name='perf',
version='0.1',
description='Interface with the Linux profiling infrastructure',
author='Arnaldo Carvalho de Melo',
author_email='[email protected]',
license='GPLv2',
url='http://perf.wiki.kernel.org',
ext_modules=[perf],
cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
|
Nictec/nictec_website2.0
|
refs/heads/master
|
nictecsite/page/apps.py
|
5
|
from __future__ import unicode_literals
from django.apps import AppConfig
class PageConfig(AppConfig):
name = 'page'
|
CJ8664/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/tools/wptserve/wptserve/stash.py
|
125
|
import base64
import json
import os
import uuid
from multiprocessing.managers import BaseManager, DictProxy
class ServerDictManager(BaseManager):
shared_data = {}
def _get_shared():
return ServerDictManager.shared_data
ServerDictManager.register("get_dict",
callable=_get_shared,
proxytype=DictProxy)
class ClientDictManager(BaseManager):
pass
ClientDictManager.register("get_dict")
class StashServer(object):
def __init__(self, address=None, authkey=None):
self.address = address
self.authkey = authkey
self.manager = None
def __enter__(self):
self.manager, self.address, self.authkey = start_server(self.address, self.authkey)
store_env_config(self.address, self.authkey)
def __exit__(self, *args, **kwargs):
if self.manager is not None:
self.manager.shutdown()
def load_env_config():
address, authkey = json.loads(os.environ["WPT_STASH_CONFIG"])
if isinstance(address, list):
address = tuple(address)
else:
address = str(address)
authkey = base64.decodestring(authkey)
return address, authkey
def store_env_config(address, authkey):
authkey = base64.encodestring(authkey)
os.environ["WPT_STASH_CONFIG"] = json.dumps((address, authkey))
def start_server(address=None, authkey=None):
manager = ServerDictManager(address, authkey)
manager.start()
return (manager, manager._address, manager._authkey)
#TODO: Consider expiring values after some fixed time for long-running
#servers
class Stash(object):
"""Key-value store for persisting data across HTTP/S and WS/S requests.
This data store is specifically designed for persisting data across server
requests. The synchronization is achieved by using the BaseManager from
    the multiprocessing module so different processes can access the same data.
Stash can be used interchangeably between HTTP, HTTPS, WS and WSS servers.
A thing to note about WS/S servers is that they require additional steps in
the handlers for accessing the same underlying shared data in the Stash.
This can usually be achieved by using load_env_config(). When using Stash
    interchangeably between HTTP/S and WS/S requests, the path part of the key
    should be explicitly specified if accessing the same key/value subset.
The store has several unusual properties. Keys are of the form (path,
uuid), where path is, by default, the path in the HTTP request and
uuid is a unique id. In addition, the store is write-once, read-once,
i.e. the value associated with a particular key cannot be changed once
written and the read operation (called "take") is destructive. Taken together,
these properties make it difficult for data to accidentally leak
between different resources or different requests for the same
resource.
"""
_proxy = None
def __init__(self, default_path, address=None, authkey=None):
self.default_path = default_path
self.data = self._get_proxy(address, authkey)
def _get_proxy(self, address=None, authkey=None):
if address is None and authkey is None:
Stash._proxy = {}
if Stash._proxy is None:
manager = ClientDictManager(address, authkey)
manager.connect()
Stash._proxy = manager.get_dict()
return Stash._proxy
def _wrap_key(self, key, path):
if path is None:
path = self.default_path
        # This key format is required to support using the path, since the
        # data passed into the stash can be a DictProxy, which wouldn't
        # detect changes when writing to a subdict.
return (str(path), str(uuid.UUID(key)))
def put(self, key, value, path=None):
"""Place a value in the shared stash.
:param key: A UUID to use as the data's key.
:param value: The data to store. This can be any python object.
:param path: The path that has access to read the data (by default
the current request path)"""
if value is None:
raise ValueError("SharedStash value may not be set to None")
internal_key = self._wrap_key(key, path)
if internal_key in self.data:
raise StashError("Tried to overwrite existing shared stash value "
"for key %s (old value was %s, new value is %s)" %
(internal_key, self.data[str(internal_key)], value))
else:
self.data[internal_key] = value
def take(self, key, path=None):
"""Remove a value from the shared stash and return it.
:param key: A UUID to use as the data's key.
:param path: The path that has access to read the data (by default
the current request path)"""
internal_key = self._wrap_key(key, path)
value = self.data.get(internal_key, None)
if value is not None:
try:
self.data.pop(internal_key)
except KeyError:
# Silently continue when pop error occurs.
pass
return value
class StashError(Exception):
pass
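# A minimal usage sketch (the key and value are hypothetical) showing the
# write-once/read-once semantics described in the class docstring:
#
#     stash = Stash("/example/path")
#     key = str(uuid.uuid4())
#     stash.put(key, {"token": 42})     # a second put with this key raises
#     stash.take(key)                   # -> {"token": 42} (destructive read)
#     stash.take(key)                   # -> None, already consumed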
|
npo-poms/scripts
|
refs/heads/master
|
python/netinnederlandAddNTRLocations.py
|
1
|
#!/usr/bin/env python3
"""Script to add a location."""
from npoapi import MediaBackend, MediaBackendUtil as MU
import requests
import pickle
import os.path
import time
api = MediaBackend().command_line_client()
api.add_argument('mid', type=str, nargs=1, help='The mid of the object to handle')
args = api.parse_args()
filename = "/tmp/members.pkl"
if os.path.isfile(filename):
with open(filename, 'rb') as input:
members = pickle.load(input)
else:
members = []
MU.descendants(api, args.mid[0], batch=200, target=members, log_progress=True)
with open(filename, 'wb') as output:
pickle.dump(members, output, pickle.HIGHEST_PROTOCOL)
api.logger.info("Wrote %s", filename)
count_new = 0
count_done = 0
count_404 = 0
for member in MU.iterate_objects(members):
print("%s %s %s " % (member.mid, member.locations.location[0].programUrl, str(list(member.crid))), end="")
has_mp4 = False
if len(member.locations.location) >= 1:
for location in member.locations.location:
            if location.avAttributes.avFileFormat == 'MP4' and location.avAttributes.bitrate is not None:
has_mp4 = True
for location in member.locations.location:
if location.avAttributes.avFileFormat == 'HASP':
programUrl = location.programUrl
publish_start = location.publishStart
publish_stop = location.publishStop
last_part = programUrl.split('/')[-1]
new_program_url = 'http://video.omroep.nl/ntr/schooltv/beeldbank/video/' + last_part + ".mp4"
resp = requests.head(new_program_url)
new_location = MU.create_location(new_program_url, embargo={'publish_start':publish_start, 'publish_stop':publish_stop}, avFileFormat='MP4', bitrate=1000000)
print("%s %s " % (new_program_url, resp.status_code), end="")
if not has_mp4:
if resp.status_code == 302:
print(api.add_location(member.mid, new_location))
count_new += 1
time.sleep(1)
else:
print("404 not doing")
count_404 += 1
else:
print("done already")
count_done += 1
print("new locations: %s, not added because 404: %s, already had mp4: %s" % (str(count_new), str(count_404), str(count_done)))
|
facebookexperimental/eden
|
refs/heads/master
|
eden/hg-server/tests/revlog-formatv0.py
|
2
|
#!/usr/bin/env python
# Copyright 2010 Intevation GmbH
# Author(s):
# Thomas Arendsen Hein <[email protected]>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
"""Create a Mercurial repository in revlog format 0
changeset: 0:a1ef0b125355
user: user
date: Thu Jan 01 00:00:00 1970 +0000
files: empty
description:
empty file
"""
from __future__ import absolute_import
import os
import sys
# PY3-compat
if sys.version_info[0] >= 3:
fromhex = bytes.fromhex
else:
fromhex = lambda x: x.decode("hex")
files = [
(
"formatv0/.hg/00changelog.i",
"000000000000004400000000000000000000000000000000000000"
"000000000000000000000000000000000000000000000000000000"
"0000a1ef0b125355d27765928be600cfe85784284ab3",
),
(
"formatv0/.hg/00changelog.d",
"756163613935613961356635353036303562366138343738336237"
"61623536363738616436356635380a757365720a3020300a656d70"
"74790a0a656d7074792066696c65",
),
(
"formatv0/.hg/00manifest.i",
"000000000000003000000000000000000000000000000000000000"
"000000000000000000000000000000000000000000000000000000"
"0000aca95a9a5f550605b6a84783b7ab56678ad65f58",
),
(
"formatv0/.hg/00manifest.d",
"75656d707479006238306465356431333837353835343163356630"
"35323635616431343461623966613836643164620a",
),
(
"formatv0/.hg/data/empty.i",
"000000000000000000000000000000000000000000000000000000"
"000000000000000000000000000000000000000000000000000000"
"0000b80de5d138758541c5f05265ad144ab9fa86d1db",
),
("formatv0/.hg/data/empty.d", ""),
]
def makedirs(name):
"""recursive directory creation"""
parent = os.path.dirname(name)
if parent:
makedirs(parent)
os.mkdir(name)
makedirs(os.path.join(*"formatv0/.hg/data".split("/")))
for name, data in files:
f = open(name, "wb")
f.write(fromhex(data))
f.close()
sys.exit(0)
|
gonboy/sl4a
|
refs/heads/master
|
python/src/Lib/lib2to3/fixes/fix_filter.py
|
53
|
# Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer that changes filter(F, X) into list(filter(F, X)).
We avoid the transformation if the filter() call is directly contained
in iter(<>), list(<>), tuple(<>), sorted(<>), ...join(<>), or
for V in <>:.
NOTE: This is still not correct if the original code was depending on
filter(F, X) to return a string if X is a string and a tuple if X is a
tuple. That would require type inference, which we don't do. Let
Python 2.6 figure it out.
"""
# Local imports
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Name, Call, ListComp, in_special_context
class FixFilter(fixer_base.ConditionalFix):
PATTERN = """
filter_lambda=power<
'filter'
trailer<
'('
arglist<
lambdef< 'lambda'
(fp=NAME | vfpdef< '(' fp=NAME ')'> ) ':' xp=any
>
','
it=any
>
')'
>
>
|
power<
'filter'
trailer< '(' arglist< none='None' ',' seq=any > ')' >
>
|
power<
'filter'
args=trailer< '(' [any] ')' >
>
"""
skip_on = "future_builtins.filter"
def transform(self, node, results):
if self.should_skip(node):
return
if "filter_lambda" in results:
new = ListComp(results.get("fp").clone(),
results.get("fp").clone(),
results.get("it").clone(),
results.get("xp").clone())
elif "none" in results:
new = ListComp(Name("_f"),
Name("_f"),
results["seq"].clone(),
Name("_f"))
else:
if in_special_context(node):
return None
new = node.clone()
new.set_prefix("")
new = Call(Name("list"), [new])
new.set_prefix(node.get_prefix())
return new
|
gizeminci/espresso-1
|
refs/heads/master
|
samples/python/cellsystem_test.py
|
13
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013,2014 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function
import ctypes
import sys
sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
import espresso as es
print(dir(es))
cs = es.cellsystem.Cellsystem()
gh = es.global_variables.GlobalsHandle()
# domain decomposition with verlet list: three equivalent commands
cs.setDomainDecomposition()
cs.setDomainDecomposition(True)
cs.setDomainDecomposition(useVerletList=True)
|
agrif/django-cannen
|
refs/heads/master
|
cannen/tests.py
|
1
|
# This file is part of Cannen, a collaborative music player.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from django.test import TestCase
class SimpleTest(TestCase):
def test_basic_addition(self):
"""
Tests that 1 + 1 always equals 2.
"""
self.assertEqual(1 + 1, 2)
|
Ronak6892/servo
|
refs/heads/master
|
tests/wpt/css-tests/tools/html5lib/html5lib/tests/test_sanitizer.py
|
430
|
from __future__ import absolute_import, division, unicode_literals
try:
import json
except ImportError:
import simplejson as json
from html5lib import html5parser, sanitizer, constants, treebuilders
def toxmlFactory():
tree = treebuilders.getTreeBuilder("etree")
def toxml(element):
# encode/decode roundtrip required for Python 2.6 compatibility
result_bytes = tree.implementation.tostring(element, encoding="utf-8")
return result_bytes.decode("utf-8")
return toxml
def runSanitizerTest(name, expected, input, toxml=None):
if toxml is None:
toxml = toxmlFactory()
expected = ''.join([toxml(token) for token in html5parser.HTMLParser().
parseFragment(expected)])
expected = json.loads(json.dumps(expected))
assert expected == sanitize_html(input)
def sanitize_html(stream, toxml=None):
if toxml is None:
toxml = toxmlFactory()
return ''.join([toxml(token) for token in
html5parser.HTMLParser(tokenizer=sanitizer.HTMLSanitizer).
parseFragment(stream)])
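# Hedged usage sketch (illustrative; exact escaping depends on the html5lib
# version): disallowed markup is escaped rather than executed, e.g.
#   sanitize_html("<script>alert(1)</script>hi")
#   # -> '&lt;script&gt;alert(1)&lt;/script&gt;hi'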
def test_should_handle_astral_plane_characters():
assert '<html:p xmlns:html="http://www.w3.org/1999/xhtml">\U0001d4b5 \U0001d538</html:p>' == sanitize_html("<p>𝒵 𝔸</p>")
def test_sanitizer():
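# Nose-style test generator: each yielded tuple (func, name, expected,
# input, toxml) is run by the test runner as func(name, expected, input, toxml).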
toxml = toxmlFactory()
for tag_name in sanitizer.HTMLSanitizer.allowed_elements:
if tag_name in ['caption', 'col', 'colgroup', 'optgroup', 'option', 'table', 'tbody', 'td', 'tfoot', 'th', 'thead', 'tr']:
continue # TODO
if tag_name != tag_name.lower():
continue # TODO
if tag_name == 'image':
yield (runSanitizerTest, "test_should_allow_%s_tag" % tag_name,
"<img title=\"1\"/>foo <bad>bar</bad> baz",
"<%s title='1'>foo <bad>bar</bad> baz</%s>" % (tag_name, tag_name),
toxml)
elif tag_name == 'br':
yield (runSanitizerTest, "test_should_allow_%s_tag" % tag_name,
"<br title=\"1\"/>foo <bad>bar</bad> baz<br/>",
"<%s title='1'>foo <bad>bar</bad> baz</%s>" % (tag_name, tag_name),
toxml)
elif tag_name in constants.voidElements:
yield (runSanitizerTest, "test_should_allow_%s_tag" % tag_name,
"<%s title=\"1\"/>foo <bad>bar</bad> baz" % tag_name,
"<%s title='1'>foo <bad>bar</bad> baz</%s>" % (tag_name, tag_name),
toxml)
else:
yield (runSanitizerTest, "test_should_allow_%s_tag" % tag_name,
"<%s title=\"1\">foo <bad>bar</bad> baz</%s>" % (tag_name, tag_name),
"<%s title='1'>foo <bad>bar</bad> baz</%s>" % (tag_name, tag_name),
toxml)
for tag_name in sanitizer.HTMLSanitizer.allowed_elements:
tag_name = tag_name.upper()
yield (runSanitizerTest, "test_should_forbid_%s_tag" % tag_name,
"<%s title=\"1\">foo <bad>bar</bad> baz</%s>" % (tag_name, tag_name),
"<%s title='1'>foo <bad>bar</bad> baz</%s>" % (tag_name, tag_name),
toxml)
for attribute_name in sanitizer.HTMLSanitizer.allowed_attributes:
if attribute_name != attribute_name.lower():
continue # TODO
if attribute_name == 'style':
continue
yield (runSanitizerTest, "test_should_allow_%s_attribute" % attribute_name,
"<p %s=\"foo\">foo <bad>bar</bad> baz</p>" % attribute_name,
"<p %s='foo'>foo <bad>bar</bad> baz</p>" % attribute_name,
toxml)
for attribute_name in sanitizer.HTMLSanitizer.allowed_attributes:
attribute_name = attribute_name.upper()
yield (runSanitizerTest, "test_should_forbid_%s_attribute" % attribute_name,
"<p>foo <bad>bar</bad> baz</p>",
"<p %s='display: none;'>foo <bad>bar</bad> baz</p>" % attribute_name,
toxml)
for protocol in sanitizer.HTMLSanitizer.allowed_protocols:
yield (runSanitizerTest, "test_should_allow_%s_uris" % protocol,
"<a href=\"%s\">foo</a>" % protocol,
"""<a href="%s">foo</a>""" % protocol,
toxml)
for protocol in sanitizer.HTMLSanitizer.allowed_protocols:
yield (runSanitizerTest, "test_should_allow_uppercase_%s_uris" % protocol,
"<a href=\"%s\">foo</a>" % protocol,
"""<a href="%s">foo</a>""" % protocol,
toxml)
|
Code4SA/nearby
|
refs/heads/master
|
manage.py
|
1
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "nearby.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
christophlsa/odoo
|
refs/heads/8.0
|
addons/edi/__openerp__.py
|
312
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2011 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Electronic Data Interchange (EDI)',
'version': '1.0',
'category': 'Tools',
'description': """
Provides a common EDI platform that other Applications can use.
===============================================================
OpenERP specifies a generic EDI format for exchanging business documents between
different systems, and provides generic mechanisms to import and export them.
More details about OpenERP's EDI format may be found in the technical OpenERP
documentation at http://doc.openerp.com.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/api',
'depends': ['base', 'email_template'],
'data': [
'views/edi.xml',
],
'icon': '/edi/static/src/img/knowledge.png',
'test': ['test/edi_partner_test.yml'],
'qweb': ['static/src/xml/*.xml'],
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
kvar/ansible
|
refs/heads/seas_master_2.9.5
|
lib/ansible/modules/cloud/cloudstack/cs_iso.py
|
11
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_iso
short_description: Manages ISO images on Apache CloudStack based clouds.
description:
- Register and remove ISO images.
version_added: '2.0'
author: René Moser (@resmo)
options:
name:
description:
- Name of the ISO.
type: str
required: true
display_text:
description:
- Display text of the ISO.
- If not specified, I(name) will be used.
type: str
version_added: '2.4'
url:
description:
- URL where the ISO can be downloaded from. Required if I(state) is present.
type: str
os_type:
description:
- Name of the OS that best represents the OS of this ISO. If the ISO is bootable, this parameter needs to be passed. Required if I(state) is present.
type: str
is_ready:
description:
- This flag is used for searching existing ISOs. If set to C(yes), it will only list ISOs ready for deployment, e.g.
successfully downloaded and installed. It is recommended to set it to C(no).
type: bool
default: no
is_public:
description:
- Register the ISO to be publicly available to all users. Only used if I(state) is present.
type: bool
is_featured:
description:
- Register the ISO to be featured. Only used if I(state) is present.
type: bool
is_dynamically_scalable:
description:
- Register the ISO having XS/VMware tools installed in order to support dynamic scaling of VM CPU/memory. Only used if I(state) is present.
type: bool
checksum:
description:
- The MD5 checksum value of this ISO. If set, we search by checksum instead of name.
type: str
bootable:
description:
- Register the ISO to be bootable. Only used if I(state) is present.
type: bool
domain:
description:
- Domain the ISO is related to.
type: str
account:
description:
- Account the ISO is related to.
type: str
project:
description:
- Name of the project the ISO to be registered in.
type: str
zone:
description:
- Name of the zone you wish the ISO to be registered or deleted from.
- If not specified, the first zone found will be used.
type: str
cross_zones:
description:
- Whether the ISO should be synced or removed across zones.
- Mutually exclusive with I(zone).
type: bool
default: no
version_added: '2.4'
iso_filter:
description:
- Name of the filter used to search for the ISO.
type: str
default: self
choices: [ featured, self, selfexecutable, sharedexecutable, executable, community ]
state:
description:
- State of the ISO.
type: str
default: present
choices: [ present, absent ]
poll_async:
description:
- Poll async jobs until job has finished.
type: bool
default: yes
version_added: '2.3'
tags:
description:
- List of tags. Tags are a list of dictionaries having keys I(key) and I(value).
- "To delete all tags, set a empty list e.g. I(tags: [])."
type: list
aliases: [ tag ]
version_added: '2.4'
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
- name: Register an ISO if ISO name does not already exist
cs_iso:
name: Debian 7 64-bit
url: http://mirror.switch.ch/ftp/mirror/debian-cd/current/amd64/iso-cd/debian-7.7.0-amd64-netinst.iso
os_type: Debian GNU/Linux 7(64-bit)
delegate_to: localhost
- name: Register an ISO with given name if ISO md5 checksum does not already exist
cs_iso:
name: Debian 7 64-bit
url: http://mirror.switch.ch/ftp/mirror/debian-cd/current/amd64/iso-cd/debian-7.7.0-amd64-netinst.iso
os_type: Debian GNU/Linux 7(64-bit)
checksum: 0b31bccccb048d20b551f70830bb7ad0
delegate_to: localhost
- name: Remove an ISO by name
cs_iso:
name: Debian 7 64-bit
state: absent
delegate_to: localhost
- name: Remove an ISO by checksum
cs_iso:
name: Debian 7 64-bit
checksum: 0b31bccccb048d20b551f70830bb7ad0
state: absent
delegate_to: localhost
'''
RETURN = '''
---
id:
description: UUID of the ISO.
returned: success
type: str
sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
name:
description: Name of the ISO.
returned: success
type: str
sample: Debian 7 64-bit
display_text:
description: Text to be displayed of the ISO.
returned: success
type: str
sample: Debian 7.7 64-bit minimal 2015-03-19
zone:
description: Name of zone the ISO is registered in.
returned: success
type: str
sample: zuerich
status:
description: Status of the ISO.
returned: success
type: str
sample: Successfully Installed
is_ready:
description: True if the ISO is ready to be deployed from.
returned: success
type: bool
sample: true
is_public:
description: True if the ISO is public.
returned: success
type: bool
sample: true
version_added: '2.4'
bootable:
description: True if the ISO is bootable.
returned: success
type: bool
sample: true
version_added: '2.4'
is_featured:
description: True if the ISO is featured.
returned: success
type: bool
sample: true
version_added: '2.4'
format:
description: Format of the ISO.
returned: success
type: str
sample: ISO
version_added: '2.4'
os_type:
description: Type of the OS.
returned: success
type: str
sample: CentOS 6.5 (64-bit)
version_added: '2.4'
checksum:
description: MD5 checksum of the ISO.
returned: success
type: str
sample: 0b31bccccb048d20b551f70830bb7ad0
created:
description: Date of registering.
returned: success
type: str
sample: 2015-03-29T14:57:06+0200
cross_zones:
description: True if the ISO is managed across all zones, false otherwise.
returned: success
type: bool
sample: false
version_added: '2.4'
domain:
description: Domain the ISO is related to.
returned: success
type: str
sample: example domain
account:
description: Account the ISO is related to.
returned: success
type: str
sample: example account
project:
description: Project the ISO is related to.
returned: success
type: str
sample: example project
tags:
description: List of resource tags associated with the ISO.
returned: success
type: list
sample: '[ { "key": "foo", "value": "bar" } ]'
version_added: '2.4'
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import (
AnsibleCloudStack,
cs_argument_spec,
cs_required_together
)
class AnsibleCloudStackIso(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackIso, self).__init__(module)
self.returns = {
'checksum': 'checksum',
'status': 'status',
'isready': 'is_ready',
'crossZones': 'cross_zones',
'format': 'format',
'ostypename': 'os_type',
'isfeatured': 'is_featured',
'bootable': 'bootable',
'ispublic': 'is_public',
}
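# self.returns maps CloudStack API response keys (left) to this module's
# documented return values (right); see the RETURN block above.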
self.iso = None
def _get_common_args(self):
return {
'name': self.module.params.get('name'),
'displaytext': self.get_or_fallback('display_text', 'name'),
'isdynamicallyscalable': self.module.params.get('is_dynamically_scalable'),
'ostypeid': self.get_os_type('id'),
'bootable': self.module.params.get('bootable'),
}
def register_iso(self):
args = self._get_common_args()
args.update({
'domainid': self.get_domain('id'),
'account': self.get_account('name'),
'projectid': self.get_project('id'),
'checksum': self.module.params.get('checksum'),
'isfeatured': self.module.params.get('is_featured'),
'ispublic': self.module.params.get('is_public'),
})
if not self.module.params.get('cross_zones'):
args['zoneid'] = self.get_zone(key='id')
else:
args['zoneid'] = -1
if args['bootable'] and not args['ostypeid']:
self.module.fail_json(msg="OS type 'os_type' is required if 'bootable=true'.")
args['url'] = self.module.params.get('url')
if not args['url']:
self.module.fail_json(msg="URL is required.")
self.result['changed'] = True
if not self.module.check_mode:
res = self.query_api('registerIso', **args)
self.iso = res['iso'][0]
return self.iso
def present_iso(self):
iso = self.get_iso()
if not iso:
iso = self.register_iso()
else:
iso = self.update_iso(iso)
if iso:
iso = self.ensure_tags(resource=iso, resource_type='ISO')
self.iso = iso
return iso
def update_iso(self, iso):
args = self._get_common_args()
args.update({
'id': iso['id'],
})
if self.has_changed(args, iso):
self.result['changed'] = True
if not self.module.params.get('cross_zones'):
args['zoneid'] = self.get_zone(key='id')
else:
# Workaround API does not return cross_zones=true
self.result['cross_zones'] = True
args['zoneid'] = -1
if not self.module.check_mode:
res = self.query_api('updateIso', **args)
self.iso = res['iso']
return self.iso
def get_iso(self):
if not self.iso:
args = {
'isready': self.module.params.get('is_ready'),
'isofilter': self.module.params.get('iso_filter'),
'domainid': self.get_domain('id'),
'account': self.get_account('name'),
'projectid': self.get_project('id'),
}
if not self.module.params.get('cross_zones'):
args['zoneid'] = self.get_zone(key='id')
# If a checksum is given, search by checksum instead of by name.
checksum = self.module.params.get('checksum')
if not checksum:
args['name'] = self.module.params.get('name')
isos = self.query_api('listIsos', **args)
if isos:
if not checksum:
self.iso = isos['iso'][0]
else:
for i in isos['iso']:
if i['checksum'] == checksum:
self.iso = i
break
return self.iso
def absent_iso(self):
iso = self.get_iso()
if iso:
self.result['changed'] = True
args = {
'id': iso['id'],
'projectid': self.get_project('id'),
}
if not self.module.params.get('cross_zones'):
args['zoneid'] = self.get_zone(key='id')
if not self.module.check_mode:
res = self.query_api('deleteIso', **args)
poll_async = self.module.params.get('poll_async')
if poll_async:
self.poll_job(res, 'iso')
return iso
def get_result(self, iso):
super(AnsibleCloudStackIso, self).get_result(iso)
# Workaround API does not return cross_zones=true
if self.module.params.get('cross_zones'):
self.result['cross_zones'] = True
if 'zone' in self.result:
del self.result['zone']
return self.result
def main():
argument_spec = cs_argument_spec()
argument_spec.update(dict(
name=dict(required=True),
display_text=dict(),
url=dict(),
os_type=dict(),
zone=dict(),
cross_zones=dict(type='bool', default=False),
iso_filter=dict(default='self', choices=['featured', 'self', 'selfexecutable', 'sharedexecutable', 'executable', 'community']),
domain=dict(),
account=dict(),
project=dict(),
checksum=dict(),
is_ready=dict(type='bool', default=False),
bootable=dict(type='bool'),
is_featured=dict(type='bool'),
is_public=dict(type='bool'),
is_dynamically_scalable=dict(type='bool'),
state=dict(choices=['present', 'absent'], default='present'),
poll_async=dict(type='bool', default=True),
tags=dict(type='list', aliases=['tag']),
))
module = AnsibleModule(
argument_spec=argument_spec,
required_together=cs_required_together(),
mutually_exclusive=(
['zone', 'cross_zones'],
),
supports_check_mode=True
)
acs_iso = AnsibleCloudStackIso(module)
state = module.params.get('state')
if state in ['absent']:
iso = acs_iso.absent_iso()
else:
iso = acs_iso.present_iso()
result = acs_iso.get_result(iso)
module.exit_json(**result)
if __name__ == '__main__':
main()
|
meowler/sandbox
|
refs/heads/master
|
node_modules/node-gyp/gyp/pylib/gyp/generator/ninja.py
|
1284
|
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import copy
import hashlib
import json
import multiprocessing
import os.path
import re
import signal
import subprocess
import sys
import gyp
import gyp.common
from gyp.common import OrderedSet
import gyp.msvs_emulation
import gyp.MSVSUtil as MSVSUtil
import gyp.xcode_emulation
from cStringIO import StringIO
from gyp.common import GetEnvironFallback
import gyp.ninja_syntax as ninja_syntax
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '',
'STATIC_LIB_PREFIX': 'lib',
'STATIC_LIB_SUFFIX': '.a',
'SHARED_LIB_PREFIX': 'lib',
# Gyp expects the following variables to be expandable by the build
# system to the appropriate locations. Ninja prefers paths to be
# known at gyp time. To resolve this, introduce special
# variables starting with $! and $| (which begin with a $ so gyp knows it
# should be treated specially, but is otherwise an invalid
# ninja/shell variable) that are passed to gyp here but expanded
# before writing out into the target .ninja files; see
# ExpandSpecial.
# $! is used for variables that represent a path and that can only appear at
# the start of a string, while $| is used for variables that can appear
# anywhere in a string.
'INTERMEDIATE_DIR': '$!INTERMEDIATE_DIR',
'SHARED_INTERMEDIATE_DIR': '$!PRODUCT_DIR/gen',
'PRODUCT_DIR': '$!PRODUCT_DIR',
'CONFIGURATION_NAME': '$|CONFIGURATION_NAME',
# Special variables that may be used by gyp 'rule' targets.
# We generate definitions for these variables on the fly when processing a
# rule.
'RULE_INPUT_ROOT': '${root}',
'RULE_INPUT_DIRNAME': '${dirname}',
'RULE_INPUT_PATH': '${source}',
'RULE_INPUT_EXT': '${ext}',
'RULE_INPUT_NAME': '${name}',
}
# Placates pylint.
generator_additional_non_configuration_keys = []
generator_additional_path_sections = []
generator_extra_sources_for_rules = []
generator_filelist_paths = None
generator_supports_multiple_toolsets = gyp.common.CrossCompileRequested()
def StripPrefix(arg, prefix):
if arg.startswith(prefix):
return arg[len(prefix):]
return arg
def QuoteShellArgument(arg, flavor):
"""Quote a string such that it will be interpreted as a single argument
by the shell."""
# Rather than attempting to enumerate the bad shell characters, just
# whitelist common OK ones and quote anything else.
if re.match(r'^[a-zA-Z0-9_=.\\/-]+$', arg):
return arg # No quoting necessary.
if flavor == 'win':
return gyp.msvs_emulation.QuoteForRspFile(arg)
return "'" + arg.replace("'", "'" + '"\'"' + "'") + "'"
def Define(d, flavor):
"""Takes a preprocessor define and returns a -D parameter that's ninja- and
shell-escaped."""
if flavor == 'win':
# cl.exe replaces literal # characters with = in preprocessor definitions for
# some reason. Octal-encode to work around that.
d = d.replace('#', '\\%03o' % ord('#'))
return QuoteShellArgument(ninja_syntax.escape('-D' + d), flavor)
def AddArch(output, arch):
"""Adds an arch string to an output path."""
output, extension = os.path.splitext(output)
return '%s.%s%s' % (output, arch, extension)
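# Illustrative (hypothetical path): AddArch('obj/foo.o', 'x64') -> 'obj/foo.x64.o'.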
class Target(object):
"""Target represents the paths used within a single gyp target.
Conceptually, building a single target A is a series of steps:
1) actions/rules/copies generates source/resources/etc.
2) compiles generates .o files
3) link generates a binary (library/executable)
4) bundle merges the above in a mac bundle
(Any of these steps can be optional.)
From a build ordering perspective, a dependent target B could just
depend on the last output of this series of steps.
But some dependent commands sometimes need to reach inside the box.
For example, when linking B it needs to get the path to the static
library generated by A.
This object stores those paths. To keep things simple, member
variables only store concrete paths to single files, while methods
compute derived values like "the last output of the target".
"""
def __init__(self, type):
# Gyp type ("static_library", etc.) of this target.
self.type = type
# File representing whether any input dependencies necessary for
# dependent actions have completed.
self.preaction_stamp = None
# File representing whether any input dependencies necessary for
# dependent compiles have completed.
self.precompile_stamp = None
# File representing the completion of actions/rules/copies, if any.
self.actions_stamp = None
# Path to the output of the link step, if any.
self.binary = None
# Path to the file representing the completion of building the bundle,
# if any.
self.bundle = None
# On Windows, incremental linking requires linking against all the .objs
# that compose a .lib (rather than the .lib itself). That list is stored
# here. In this case, we also need to save the compile_deps for the target,
# so that the target that directly depends on the .objs can also depend
# on those.
self.component_objs = None
self.compile_deps = None
# Windows only. The import .lib is the output of a build step, but
# because dependents only link against the lib (not both the lib and the
# dll) we keep track of the import library here.
self.import_lib = None
def Linkable(self):
"""Return true if this is a target that can be linked against."""
return self.type in ('static_library', 'shared_library')
def UsesToc(self, flavor):
"""Return true if the target should produce a restat rule based on a TOC
file."""
# For bundles, the .TOC should be produced for the binary, not for
# FinalOutput(). But the naive approach would put the TOC file into the
# bundle, so don't do this for bundles for now.
if flavor == 'win' or self.bundle:
return False
return self.type in ('shared_library', 'loadable_module')
def PreActionInput(self, flavor):
"""Return the path, if any, that should be used as a dependency of
any dependent action step."""
if self.UsesToc(flavor):
return self.FinalOutput() + '.TOC'
return self.FinalOutput() or self.preaction_stamp
def PreCompileInput(self):
"""Return the path, if any, that should be used as a dependency of
any dependent compile step."""
return self.actions_stamp or self.precompile_stamp
def FinalOutput(self):
"""Return the last output of the target, which depends on all prior
steps."""
return self.bundle or self.binary or self.actions_stamp
# A small discourse on paths as used within the Ninja build:
# All files we produce (both at gyp and at build time) appear in the
# build directory (e.g. out/Debug).
#
# Paths within a given .gyp file are always relative to the directory
# containing the .gyp file. Call these "gyp paths". This includes
# sources as well as the starting directory a given gyp rule/action
# expects to be run from. We call the path from the source root to
# the gyp file the "base directory" within the per-.gyp-file
# NinjaWriter code.
#
# All paths as written into the .ninja files are relative to the build
# directory. Call these paths "ninja paths".
#
# We translate between these two notions of paths with two helper
# functions:
#
# - GypPathToNinja translates a gyp path (i.e. relative to the .gyp file)
# into the equivalent ninja path.
#
# - GypPathToUniqueOutput translates a gyp path into a ninja path to write
# an output file; the result can be namespaced such that it is unique
# to the input file name as well as the output target name.
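# Illustrative example (hypothetical layout): with base_dir 'foo' and
# build_dir 'out/Debug', GypPathToNinja('baz/quux.cc') yields the ninja path
# '../../foo/baz/quux.cc', and GypPathToUniqueOutput('quux.o') for a target
# named 'targ' yields 'obj/foo/targ.quux.o'.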
class NinjaWriter(object):
def __init__(self, hash_for_rules, target_outputs, base_dir, build_dir,
output_file, toplevel_build, output_file_name, flavor,
toplevel_dir=None):
"""
base_dir: path from source root to directory containing this gyp file;
by gyp semantics, all input paths are relative to this
build_dir: path from source root to build output
toplevel_dir: path to the toplevel directory
"""
self.hash_for_rules = hash_for_rules
self.target_outputs = target_outputs
self.base_dir = base_dir
self.build_dir = build_dir
self.ninja = ninja_syntax.Writer(output_file)
self.toplevel_build = toplevel_build
self.output_file_name = output_file_name
self.flavor = flavor
self.abs_build_dir = None
if toplevel_dir is not None:
self.abs_build_dir = os.path.abspath(os.path.join(toplevel_dir,
build_dir))
self.obj_ext = '.obj' if flavor == 'win' else '.o'
if flavor == 'win':
# See docstring of msvs_emulation.GenerateEnvironmentFiles().
self.win_env = {}
for arch in ('x86', 'x64'):
self.win_env[arch] = 'environment.' + arch
# Relative path from build output dir to base dir.
build_to_top = gyp.common.InvertRelativePath(build_dir, toplevel_dir)
self.build_to_base = os.path.join(build_to_top, base_dir)
# Relative path from base dir to build dir.
base_to_top = gyp.common.InvertRelativePath(base_dir, toplevel_dir)
self.base_to_build = os.path.join(base_to_top, build_dir)
def ExpandSpecial(self, path, product_dir=None):
"""Expand specials like $!PRODUCT_DIR in |path|.
If |product_dir| is None, assumes the cwd is already the product
dir. Otherwise, |product_dir| is the relative path to the product
dir.
"""
PRODUCT_DIR = '$!PRODUCT_DIR'
if PRODUCT_DIR in path:
if product_dir:
path = path.replace(PRODUCT_DIR, product_dir)
else:
path = path.replace(PRODUCT_DIR + '/', '')
path = path.replace(PRODUCT_DIR + '\\', '')
path = path.replace(PRODUCT_DIR, '.')
INTERMEDIATE_DIR = '$!INTERMEDIATE_DIR'
if INTERMEDIATE_DIR in path:
int_dir = self.GypPathToUniqueOutput('gen')
# GypPathToUniqueOutput generates a path relative to the product dir,
# so insert product_dir in front if it is provided.
path = path.replace(INTERMEDIATE_DIR,
os.path.join(product_dir or '', int_dir))
CONFIGURATION_NAME = '$|CONFIGURATION_NAME'
path = path.replace(CONFIGURATION_NAME, self.config_name)
return path
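# Illustrative (hypothetical values): ExpandSpecial('$!PRODUCT_DIR/gen/x',
# product_dir='../../out/Debug') -> '../../out/Debug/gen/x'; with no
# product_dir the '$!PRODUCT_DIR/' prefix is simply stripped.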
def ExpandRuleVariables(self, path, root, dirname, source, ext, name):
if self.flavor == 'win':
path = self.msvs_settings.ConvertVSMacros(
path, config=self.config_name)
path = path.replace(generator_default_variables['RULE_INPUT_ROOT'], root)
path = path.replace(generator_default_variables['RULE_INPUT_DIRNAME'],
dirname)
path = path.replace(generator_default_variables['RULE_INPUT_PATH'], source)
path = path.replace(generator_default_variables['RULE_INPUT_EXT'], ext)
path = path.replace(generator_default_variables['RULE_INPUT_NAME'], name)
return path
def GypPathToNinja(self, path, env=None):
"""Translate a gyp path to a ninja path, optionally expanding environment
variable references in |path| with |env|.
See the above discourse on path conversions."""
if env:
if self.flavor == 'mac':
path = gyp.xcode_emulation.ExpandEnvVars(path, env)
elif self.flavor == 'win':
path = gyp.msvs_emulation.ExpandMacros(path, env)
if path.startswith('$!'):
expanded = self.ExpandSpecial(path)
if self.flavor == 'win':
expanded = os.path.normpath(expanded)
return expanded
if '$|' in path:
path = self.ExpandSpecial(path)
assert '$' not in path, path
return os.path.normpath(os.path.join(self.build_to_base, path))
def GypPathToUniqueOutput(self, path, qualified=True):
"""Translate a gyp path to a ninja path for writing output.
If qualified is True, qualify the resulting filename with the name
of the target. This is necessary when e.g. compiling the same
path twice for two separate output targets.
See the above discourse on path conversions."""
path = self.ExpandSpecial(path)
assert not path.startswith('$'), path
# Translate the path following this scheme:
# Input: foo/bar.gyp, target targ, references baz/out.o
# Output: obj/foo/baz/targ.out.o (if qualified)
# obj/foo/baz/out.o (otherwise)
# (and obj.host instead of obj for cross-compiles)
#
# Why this scheme and not some other one?
# 1) for a given input, you can compute all derived outputs by matching
# its path, even if the input is brought via a gyp file with '..'.
# 2) simple files like libraries and stamps have a simple filename.
obj = 'obj'
if self.toolset != 'target':
obj += '.' + self.toolset
path_dir, path_basename = os.path.split(path)
assert not os.path.isabs(path_dir), (
"'%s' can not be absolute path (see crbug.com/462153)." % path_dir)
if qualified:
path_basename = self.name + '.' + path_basename
return os.path.normpath(os.path.join(obj, self.base_dir, path_dir,
path_basename))
def WriteCollapsedDependencies(self, name, targets, order_only=None):
"""Given a list of targets, return a path for a single file
representing the result of building all the targets or None.
Uses a stamp file if necessary."""
assert targets == filter(None, targets), targets
if len(targets) == 0:
assert not order_only
return None
if len(targets) > 1 or order_only:
stamp = self.GypPathToUniqueOutput(name + '.stamp')
targets = self.ninja.build(stamp, 'stamp', targets, order_only=order_only)
self.ninja.newline()
return targets[0]
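# Illustrative: given two targets, WriteCollapsedDependencies('foo', [a, b])
# emits a single 'foo.stamp' edge depending on both and returns its path; a
# single target (with no order_only) is returned as-is.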
def _SubninjaNameForArch(self, arch):
output_file_base = os.path.splitext(self.output_file_name)[0]
return '%s.%s.ninja' % (output_file_base, arch)
def WriteSpec(self, spec, config_name, generator_flags):
"""The main entry point for NinjaWriter: write the build rules for a spec.
Returns a Target object, which represents the output paths for this spec.
Returns None if there are no outputs (e.g. a settings-only 'none' type
target)."""
self.config_name = config_name
self.name = spec['target_name']
self.toolset = spec['toolset']
config = spec['configurations'][config_name]
self.target = Target(spec['type'])
self.is_standalone_static_library = bool(
spec.get('standalone_static_library', 0))
# Track if this target contains any C++ files, to decide if gcc or g++
# should be used for linking.
self.uses_cpp = False
self.is_mac_bundle = gyp.xcode_emulation.IsMacBundle(self.flavor, spec)
self.xcode_settings = self.msvs_settings = None
if self.flavor == 'mac':
self.xcode_settings = gyp.xcode_emulation.XcodeSettings(spec)
if self.flavor == 'win':
self.msvs_settings = gyp.msvs_emulation.MsvsSettings(spec,
generator_flags)
arch = self.msvs_settings.GetArch(config_name)
self.ninja.variable('arch', self.win_env[arch])
self.ninja.variable('cc', '$cl_' + arch)
self.ninja.variable('cxx', '$cl_' + arch)
self.ninja.variable('cc_host', '$cl_' + arch)
self.ninja.variable('cxx_host', '$cl_' + arch)
self.ninja.variable('asm', '$ml_' + arch)
if self.flavor == 'mac':
self.archs = self.xcode_settings.GetActiveArchs(config_name)
if len(self.archs) > 1:
self.arch_subninjas = dict(
(arch, ninja_syntax.Writer(
OpenOutput(os.path.join(self.toplevel_build,
self._SubninjaNameForArch(arch)),
'w')))
for arch in self.archs)
# Compute predepends for all rules.
# actions_depends is the dependencies this target depends on before running
# any of its action/rule/copy steps.
# compile_depends is the dependencies this target depends on before running
# any of its compile steps.
actions_depends = []
compile_depends = []
# TODO(evan): it is rather confusing which things are lists and which
# are strings. Fix these.
if 'dependencies' in spec:
for dep in spec['dependencies']:
if dep in self.target_outputs:
target = self.target_outputs[dep]
actions_depends.append(target.PreActionInput(self.flavor))
compile_depends.append(target.PreCompileInput())
actions_depends = filter(None, actions_depends)
compile_depends = filter(None, compile_depends)
actions_depends = self.WriteCollapsedDependencies('actions_depends',
actions_depends)
compile_depends = self.WriteCollapsedDependencies('compile_depends',
compile_depends)
self.target.preaction_stamp = actions_depends
self.target.precompile_stamp = compile_depends
# Write out actions, rules, and copies. These must happen before we
# compile any sources, so compute a list of predependencies for sources
# while we do it.
extra_sources = []
mac_bundle_depends = []
self.target.actions_stamp = self.WriteActionsRulesCopies(
spec, extra_sources, actions_depends, mac_bundle_depends)
# If we have actions/rules/copies, we depend directly on those, but
# otherwise we depend on dependent target's actions/rules/copies etc.
# We never need to explicitly depend on previous target's link steps,
# because no compile ever depends on them.
compile_depends_stamp = (self.target.actions_stamp or compile_depends)
# Write out the compilation steps, if any.
link_deps = []
sources = extra_sources + spec.get('sources', [])
if sources:
if self.flavor == 'mac' and len(self.archs) > 1:
# Write subninja file containing compile and link commands scoped to
# a single arch if a fat binary is being built.
for arch in self.archs:
self.ninja.subninja(self._SubninjaNameForArch(arch))
pch = None
if self.flavor == 'win':
gyp.msvs_emulation.VerifyMissingSources(
sources, self.abs_build_dir, generator_flags, self.GypPathToNinja)
pch = gyp.msvs_emulation.PrecompiledHeader(
self.msvs_settings, config_name, self.GypPathToNinja,
self.GypPathToUniqueOutput, self.obj_ext)
else:
pch = gyp.xcode_emulation.MacPrefixHeader(
self.xcode_settings, self.GypPathToNinja,
lambda path, lang: self.GypPathToUniqueOutput(path + '-' + lang))
link_deps = self.WriteSources(
self.ninja, config_name, config, sources, compile_depends_stamp, pch,
spec)
# Some actions/rules output 'sources' that are already object files.
obj_outputs = [f for f in sources if f.endswith(self.obj_ext)]
if obj_outputs:
if self.flavor != 'mac' or len(self.archs) == 1:
link_deps += [self.GypPathToNinja(o) for o in obj_outputs]
else:
print "Warning: Actions/rules writing object files don't work with " \
"multiarch targets, dropping. (target %s)" % spec['target_name']
elif self.flavor == 'mac' and len(self.archs) > 1:
link_deps = collections.defaultdict(list)
compile_deps = self.target.actions_stamp or actions_depends
if self.flavor == 'win' and self.target.type == 'static_library':
self.target.component_objs = link_deps
self.target.compile_deps = compile_deps
# Write out a link step, if needed.
output = None
is_empty_bundle = not link_deps and not mac_bundle_depends
if link_deps or self.target.actions_stamp or actions_depends:
output = self.WriteTarget(spec, config_name, config, link_deps,
compile_deps)
if self.is_mac_bundle:
mac_bundle_depends.append(output)
# Bundle all of the above together, if needed.
if self.is_mac_bundle:
output = self.WriteMacBundle(spec, mac_bundle_depends, is_empty_bundle)
if not output:
return None
assert self.target.FinalOutput(), output
return self.target
def _WinIdlRule(self, source, prebuild, outputs):
"""Handle the implicit VS .idl rule for one source file. Fills |outputs|
with files that are generated."""
outdir, output, vars, flags = self.msvs_settings.GetIdlBuildData(
source, self.config_name)
outdir = self.GypPathToNinja(outdir)
def fix_path(path, rel=None):
path = os.path.join(outdir, path)
dirname, basename = os.path.split(source)
root, ext = os.path.splitext(basename)
path = self.ExpandRuleVariables(
path, root, dirname, source, ext, basename)
if rel:
path = os.path.relpath(path, rel)
return path
vars = [(name, fix_path(value, outdir)) for name, value in vars]
output = [fix_path(p) for p in output]
vars.append(('outdir', outdir))
vars.append(('idlflags', flags))
input = self.GypPathToNinja(source)
self.ninja.build(output, 'idl', input,
variables=vars, order_only=prebuild)
outputs.extend(output)
def WriteWinIdlFiles(self, spec, prebuild):
"""Writes rules to match MSVS's implicit idl handling."""
assert self.flavor == 'win'
if self.msvs_settings.HasExplicitIdlRulesOrActions(spec):
return []
outputs = []
for source in filter(lambda x: x.endswith('.idl'), spec['sources']):
self._WinIdlRule(source, prebuild, outputs)
return outputs
def WriteActionsRulesCopies(self, spec, extra_sources, prebuild,
mac_bundle_depends):
"""Write out the Actions, Rules, and Copies steps. Return a path
representing the outputs of these steps."""
outputs = []
if self.is_mac_bundle:
mac_bundle_resources = spec.get('mac_bundle_resources', [])[:]
else:
mac_bundle_resources = []
extra_mac_bundle_resources = []
if 'actions' in spec:
outputs += self.WriteActions(spec['actions'], extra_sources, prebuild,
extra_mac_bundle_resources)
if 'rules' in spec:
outputs += self.WriteRules(spec['rules'], extra_sources, prebuild,
mac_bundle_resources,
extra_mac_bundle_resources)
if 'copies' in spec:
outputs += self.WriteCopies(spec['copies'], prebuild, mac_bundle_depends)
if 'sources' in spec and self.flavor == 'win':
outputs += self.WriteWinIdlFiles(spec, prebuild)
stamp = self.WriteCollapsedDependencies('actions_rules_copies', outputs)
if self.is_mac_bundle:
xcassets = self.WriteMacBundleResources(
extra_mac_bundle_resources + mac_bundle_resources, mac_bundle_depends)
partial_info_plist = self.WriteMacXCassets(xcassets, mac_bundle_depends)
self.WriteMacInfoPlist(partial_info_plist, mac_bundle_depends)
return stamp
def GenerateDescription(self, verb, message, fallback):
"""Generate and return a description of a build step.
|verb| is the short summary, e.g. ACTION or RULE.
|message| is a hand-written description, or None if not available.
|fallback| is the gyp-level name of the step, usable as a fallback.
"""
if self.toolset != 'target':
verb += '(%s)' % self.toolset
if message:
return '%s %s' % (verb, self.ExpandSpecial(message))
else:
return '%s %s: %s' % (verb, self.name, fallback)
def WriteActions(self, actions, extra_sources, prebuild,
extra_mac_bundle_resources):
# Actions cd into the base directory.
env = self.GetToolchainEnv()
all_outputs = []
for action in actions:
# First write out a rule for the action.
name = '%s_%s' % (action['action_name'], self.hash_for_rules)
description = self.GenerateDescription('ACTION',
action.get('message', None),
name)
is_cygwin = (self.msvs_settings.IsRuleRunUnderCygwin(action)
if self.flavor == 'win' else False)
args = action['action']
depfile = action.get('depfile', None)
if depfile:
depfile = self.ExpandSpecial(depfile, self.base_to_build)
pool = 'console' if int(action.get('ninja_use_console', 0)) else None
rule_name, _ = self.WriteNewNinjaRule(name, args, description,
is_cygwin, env, pool,
depfile=depfile)
inputs = [self.GypPathToNinja(i, env) for i in action['inputs']]
if int(action.get('process_outputs_as_sources', False)):
extra_sources += action['outputs']
if int(action.get('process_outputs_as_mac_bundle_resources', False)):
extra_mac_bundle_resources += action['outputs']
outputs = [self.GypPathToNinja(o, env) for o in action['outputs']]
# Then write out an edge using the rule.
self.ninja.build(outputs, rule_name, inputs,
order_only=prebuild)
all_outputs += outputs
self.ninja.newline()
return all_outputs
def WriteRules(self, rules, extra_sources, prebuild,
mac_bundle_resources, extra_mac_bundle_resources):
env = self.GetToolchainEnv()
all_outputs = []
for rule in rules:
# Skip a rule with no action and no inputs.
if 'action' not in rule and not rule.get('rule_sources', []):
continue
# First write out a rule for the rule action.
name = '%s_%s' % (rule['rule_name'], self.hash_for_rules)
args = rule['action']
description = self.GenerateDescription(
'RULE',
rule.get('message', None),
('%s ' + generator_default_variables['RULE_INPUT_PATH']) % name)
is_cygwin = (self.msvs_settings.IsRuleRunUnderCygwin(rule)
if self.flavor == 'win' else False)
pool = 'console' if int(rule.get('ninja_use_console', 0)) else None
rule_name, args = self.WriteNewNinjaRule(
name, args, description, is_cygwin, env, pool)
# TODO: if the command references the outputs directly, we should
# simplify it to just use $out.
# Rules can potentially make use of some special variables which
# must vary per source file.
# Compute the list of variables we'll need to provide.
special_locals = ('source', 'root', 'dirname', 'ext', 'name')
needed_variables = set(['source'])
for argument in args:
for var in special_locals:
if '${%s}' % var in argument:
needed_variables.add(var)
def cygwin_munge(path):
# pylint: disable=cell-var-from-loop
if is_cygwin:
return path.replace('\\', '/')
return path
inputs = [self.GypPathToNinja(i, env) for i in rule.get('inputs', [])]
# If there are n source files matching the rule, and m additional rule
# inputs, then adding 'inputs' to each build edge written below will
# write m * n inputs. Collapsing reduces this to m + n.
sources = rule.get('rule_sources', [])
num_inputs = len(inputs)
if prebuild:
num_inputs += 1
if num_inputs > 2 and len(sources) > 2:
inputs = [self.WriteCollapsedDependencies(
rule['rule_name'], inputs, order_only=prebuild)]
prebuild = []
# For each source file, write an edge that generates all the outputs.
for source in sources:
source = os.path.normpath(source)
dirname, basename = os.path.split(source)
root, ext = os.path.splitext(basename)
# Gather the list of inputs and outputs, expanding $vars if possible.
outputs = [self.ExpandRuleVariables(o, root, dirname,
source, ext, basename)
for o in rule['outputs']]
if int(rule.get('process_outputs_as_sources', False)):
extra_sources += outputs
was_mac_bundle_resource = source in mac_bundle_resources
if was_mac_bundle_resource or \
int(rule.get('process_outputs_as_mac_bundle_resources', False)):
extra_mac_bundle_resources += outputs
# Note: This is n_resources * n_outputs_in_rule. Put to-be-removed
# items in a set and remove them all in a single pass if this becomes
# a performance issue.
if was_mac_bundle_resource:
mac_bundle_resources.remove(source)
extra_bindings = []
for var in needed_variables:
if var == 'root':
extra_bindings.append(('root', cygwin_munge(root)))
elif var == 'dirname':
# '$dirname' is a parameter to the rule action, which means
# it shouldn't be converted to a Ninja path. But we don't
# want $!PRODUCT_DIR in there either.
dirname_expanded = self.ExpandSpecial(dirname, self.base_to_build)
extra_bindings.append(('dirname', cygwin_munge(dirname_expanded)))
elif var == 'source':
# '$source' is a parameter to the rule action, which means
# it shouldn't be converted to a Ninja path. But we don't
# want $!PRODUCT_DIR in there either.
source_expanded = self.ExpandSpecial(source, self.base_to_build)
extra_bindings.append(('source', cygwin_munge(source_expanded)))
elif var == 'ext':
extra_bindings.append(('ext', ext))
elif var == 'name':
extra_bindings.append(('name', cygwin_munge(basename)))
else:
assert var is None, repr(var)
outputs = [self.GypPathToNinja(o, env) for o in outputs]
if self.flavor == 'win':
# WriteNewNinjaRule uses unique_name for creating an rsp file on win.
extra_bindings.append(('unique_name',
hashlib.md5(outputs[0]).hexdigest()))
self.ninja.build(outputs, rule_name, self.GypPathToNinja(source),
implicit=inputs,
order_only=prebuild,
variables=extra_bindings)
all_outputs.extend(outputs)
return all_outputs
def WriteCopies(self, copies, prebuild, mac_bundle_depends):
outputs = []
env = self.GetToolchainEnv()
for copy in copies:
for path in copy['files']:
# Normalize the path so trailing slashes don't confuse us.
path = os.path.normpath(path)
basename = os.path.split(path)[1]
src = self.GypPathToNinja(path, env)
dst = self.GypPathToNinja(os.path.join(copy['destination'], basename),
env)
outputs += self.ninja.build(dst, 'copy', src, order_only=prebuild)
if self.is_mac_bundle:
# gyp has mac_bundle_resources to copy things into a bundle's
# Resources folder, but there's no built-in way to copy files to other
# places in the bundle. Hence, some targets use copies for this. Check
# if this file is copied into the current bundle, and if so add it to
# the bundle depends so that dependent targets get rebuilt if the copy
# input changes.
if dst.startswith(self.xcode_settings.GetBundleContentsFolderPath()):
mac_bundle_depends.append(dst)
return outputs
def WriteMacBundleResources(self, resources, bundle_depends):
"""Writes ninja edges for 'mac_bundle_resources'."""
xcassets = []
for output, res in gyp.xcode_emulation.GetMacBundleResources(
generator_default_variables['PRODUCT_DIR'],
self.xcode_settings, map(self.GypPathToNinja, resources)):
output = self.ExpandSpecial(output)
if os.path.splitext(output)[-1] != '.xcassets':
isBinary = self.xcode_settings.IsBinaryOutputFormat(self.config_name)
self.ninja.build(output, 'mac_tool', res,
variables=[('mactool_cmd', 'copy-bundle-resource'), \
('binary', isBinary)])
bundle_depends.append(output)
else:
xcassets.append(res)
return xcassets
def WriteMacXCassets(self, xcassets, bundle_depends):
"""Writes ninja edges for 'mac_bundle_resources' .xcassets files.
This add an invocation of 'actool' via the 'mac_tool.py' helper script.
It assumes that the assets catalogs define at least one imageset and
thus an Assets.car file will be generated in the application resources
directory. If this is not the case, then the build will probably be done
at each invocation of ninja."""
if not xcassets:
return
extra_arguments = {}
settings_to_arg = {
'XCASSETS_APP_ICON': 'app-icon',
'XCASSETS_LAUNCH_IMAGE': 'launch-image',
}
settings = self.xcode_settings.xcode_settings[self.config_name]
for settings_key, arg_name in settings_to_arg.iteritems():
value = settings.get(settings_key)
if value:
extra_arguments[arg_name] = value
partial_info_plist = None
if extra_arguments:
partial_info_plist = self.GypPathToUniqueOutput(
'assetcatalog_generated_info.plist')
extra_arguments['output-partial-info-plist'] = partial_info_plist
outputs = []
outputs.append(
os.path.join(
self.xcode_settings.GetBundleResourceFolder(),
'Assets.car'))
if partial_info_plist:
outputs.append(partial_info_plist)
keys = QuoteShellArgument(json.dumps(extra_arguments), self.flavor)
extra_env = self.xcode_settings.GetPerTargetSettings()
env = self.GetSortedXcodeEnv(additional_settings=extra_env)
env = self.ComputeExportEnvString(env)
bundle_depends.extend(self.ninja.build(
outputs, 'compile_xcassets', xcassets,
variables=[('env', env), ('keys', keys)]))
return partial_info_plist
def WriteMacInfoPlist(self, partial_info_plist, bundle_depends):
"""Write build rules for bundle Info.plist files."""
info_plist, out, defines, extra_env = gyp.xcode_emulation.GetMacInfoPlist(
generator_default_variables['PRODUCT_DIR'],
self.xcode_settings, self.GypPathToNinja)
if not info_plist:
return
out = self.ExpandSpecial(out)
if defines:
# Create an intermediate file to store preprocessed results.
intermediate_plist = self.GypPathToUniqueOutput(
os.path.basename(info_plist))
defines = ' '.join([Define(d, self.flavor) for d in defines])
info_plist = self.ninja.build(
intermediate_plist, 'preprocess_infoplist', info_plist,
variables=[('defines', defines)])
env = self.GetSortedXcodeEnv(additional_settings=extra_env)
env = self.ComputeExportEnvString(env)
if partial_info_plist:
intermediate_plist = self.GypPathToUniqueOutput('merged_info.plist')
info_plist = self.ninja.build(
intermediate_plist, 'merge_infoplist',
[partial_info_plist, info_plist])
keys = self.xcode_settings.GetExtraPlistItems(self.config_name)
keys = QuoteShellArgument(json.dumps(keys), self.flavor)
isBinary = self.xcode_settings.IsBinaryOutputFormat(self.config_name)
self.ninja.build(out, 'copy_infoplist', info_plist,
variables=[('env', env), ('keys', keys),
('binary', isBinary)])
bundle_depends.append(out)
def WriteSources(self, ninja_file, config_name, config, sources, predepends,
precompiled_header, spec):
"""Write build rules to compile all of |sources|."""
if self.toolset == 'host':
self.ninja.variable('ar', '$ar_host')
self.ninja.variable('cc', '$cc_host')
self.ninja.variable('cxx', '$cxx_host')
self.ninja.variable('ld', '$ld_host')
self.ninja.variable('ldxx', '$ldxx_host')
self.ninja.variable('nm', '$nm_host')
self.ninja.variable('readelf', '$readelf_host')
if self.flavor != 'mac' or len(self.archs) == 1:
return self.WriteSourcesForArch(
self.ninja, config_name, config, sources, predepends,
precompiled_header, spec)
else:
return dict((arch, self.WriteSourcesForArch(
self.arch_subninjas[arch], config_name, config, sources, predepends,
precompiled_header, spec, arch=arch))
for arch in self.archs)
def WriteSourcesForArch(self, ninja_file, config_name, config, sources,
predepends, precompiled_header, spec, arch=None):
"""Write build rules to compile all of |sources|."""
extra_defines = []
if self.flavor == 'mac':
cflags = self.xcode_settings.GetCflags(config_name, arch=arch)
cflags_c = self.xcode_settings.GetCflagsC(config_name)
cflags_cc = self.xcode_settings.GetCflagsCC(config_name)
cflags_objc = ['$cflags_c'] + \
self.xcode_settings.GetCflagsObjC(config_name)
cflags_objcc = ['$cflags_cc'] + \
self.xcode_settings.GetCflagsObjCC(config_name)
elif self.flavor == 'win':
asmflags = self.msvs_settings.GetAsmflags(config_name)
cflags = self.msvs_settings.GetCflags(config_name)
cflags_c = self.msvs_settings.GetCflagsC(config_name)
cflags_cc = self.msvs_settings.GetCflagsCC(config_name)
extra_defines = self.msvs_settings.GetComputedDefines(config_name)
# See comment at cc_command for why there are two .pdb files.
pdbpath_c = pdbpath_cc = self.msvs_settings.GetCompilerPdbName(
config_name, self.ExpandSpecial)
if not pdbpath_c:
obj = 'obj'
if self.toolset != 'target':
obj += '.' + self.toolset
pdbpath = os.path.normpath(os.path.join(obj, self.base_dir, self.name))
pdbpath_c = pdbpath + '.c.pdb'
pdbpath_cc = pdbpath + '.cc.pdb'
self.WriteVariableList(ninja_file, 'pdbname_c', [pdbpath_c])
self.WriteVariableList(ninja_file, 'pdbname_cc', [pdbpath_cc])
self.WriteVariableList(ninja_file, 'pchprefix', [self.name])
else:
cflags = config.get('cflags', [])
cflags_c = config.get('cflags_c', [])
cflags_cc = config.get('cflags_cc', [])
# Respect environment variables related to build, but target-specific
# flags can still override them.
if self.toolset == 'target':
cflags_c = (os.environ.get('CPPFLAGS', '').split() +
os.environ.get('CFLAGS', '').split() + cflags_c)
cflags_cc = (os.environ.get('CPPFLAGS', '').split() +
os.environ.get('CXXFLAGS', '').split() + cflags_cc)
elif self.toolset == 'host':
cflags_c = (os.environ.get('CPPFLAGS_host', '').split() +
os.environ.get('CFLAGS_host', '').split() + cflags_c)
cflags_cc = (os.environ.get('CPPFLAGS_host', '').split() +
os.environ.get('CXXFLAGS_host', '').split() + cflags_cc)
defines = config.get('defines', []) + extra_defines
self.WriteVariableList(ninja_file, 'defines',
[Define(d, self.flavor) for d in defines])
if self.flavor == 'win':
self.WriteVariableList(ninja_file, 'asmflags',
map(self.ExpandSpecial, asmflags))
self.WriteVariableList(ninja_file, 'rcflags',
[QuoteShellArgument(self.ExpandSpecial(f), self.flavor)
for f in self.msvs_settings.GetRcflags(config_name,
self.GypPathToNinja)])
include_dirs = config.get('include_dirs', [])
env = self.GetToolchainEnv()
if self.flavor == 'win':
include_dirs = self.msvs_settings.AdjustIncludeDirs(include_dirs,
config_name)
self.WriteVariableList(ninja_file, 'includes',
[QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor)
for i in include_dirs])
if self.flavor == 'win':
midl_include_dirs = config.get('midl_include_dirs', [])
midl_include_dirs = self.msvs_settings.AdjustMidlIncludeDirs(
midl_include_dirs, config_name)
self.WriteVariableList(ninja_file, 'midl_includes',
[QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor)
for i in midl_include_dirs])
pch_commands = precompiled_header.GetPchBuildCommands(arch)
if self.flavor == 'mac':
# Most targets use no precompiled headers, so only write these if needed.
for ext, var in [('c', 'cflags_pch_c'), ('cc', 'cflags_pch_cc'),
('m', 'cflags_pch_objc'), ('mm', 'cflags_pch_objcc')]:
include = precompiled_header.GetInclude(ext, arch)
if include: ninja_file.variable(var, include)
arflags = config.get('arflags', [])
self.WriteVariableList(ninja_file, 'cflags',
map(self.ExpandSpecial, cflags))
self.WriteVariableList(ninja_file, 'cflags_c',
map(self.ExpandSpecial, cflags_c))
self.WriteVariableList(ninja_file, 'cflags_cc',
map(self.ExpandSpecial, cflags_cc))
if self.flavor == 'mac':
self.WriteVariableList(ninja_file, 'cflags_objc',
map(self.ExpandSpecial, cflags_objc))
self.WriteVariableList(ninja_file, 'cflags_objcc',
map(self.ExpandSpecial, cflags_objcc))
self.WriteVariableList(ninja_file, 'arflags',
map(self.ExpandSpecial, arflags))
ninja_file.newline()
outputs = []
has_rc_source = False
for source in sources:
filename, ext = os.path.splitext(source)
ext = ext[1:]
obj_ext = self.obj_ext
if ext in ('cc', 'cpp', 'cxx'):
command = 'cxx'
self.uses_cpp = True
elif ext == 'c' or (ext == 'S' and self.flavor != 'win'):
command = 'cc'
elif ext == 's' and self.flavor != 'win': # Doesn't generate .o.d files.
command = 'cc_s'
elif (self.flavor == 'win' and ext == 'asm' and
not self.msvs_settings.HasExplicitAsmRules(spec)):
command = 'asm'
# Add the _asm suffix as msvs is capable of handling .cc and
# .asm files of the same name without collision.
obj_ext = '_asm.obj'
elif self.flavor == 'mac' and ext == 'm':
command = 'objc'
elif self.flavor == 'mac' and ext == 'mm':
command = 'objcxx'
self.uses_cpp = True
elif self.flavor == 'win' and ext == 'rc':
command = 'rc'
obj_ext = '.res'
has_rc_source = True
else:
# Ignore unhandled extensions.
continue
input = self.GypPathToNinja(source)
output = self.GypPathToUniqueOutput(filename + obj_ext)
if arch is not None:
output = AddArch(output, arch)
implicit = precompiled_header.GetObjDependencies([input], [output], arch)
variables = []
if self.flavor == 'win':
variables, output, implicit = precompiled_header.GetFlagsModifications(
input, output, implicit, command, cflags_c, cflags_cc,
self.ExpandSpecial)
ninja_file.build(output, command, input,
implicit=[gch for _, _, gch in implicit],
order_only=predepends, variables=variables)
outputs.append(output)
if has_rc_source:
resource_include_dirs = config.get('resource_include_dirs', include_dirs)
self.WriteVariableList(ninja_file, 'resource_includes',
[QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor)
for i in resource_include_dirs])
self.WritePchTargets(ninja_file, pch_commands)
ninja_file.newline()
return outputs
def WritePchTargets(self, ninja_file, pch_commands):
"""Writes ninja rules to compile prefix headers."""
if not pch_commands:
return
for gch, lang_flag, lang, input in pch_commands:
var_name = {
'c': 'cflags_pch_c',
'cc': 'cflags_pch_cc',
'm': 'cflags_pch_objc',
'mm': 'cflags_pch_objcc',
}[lang]
cmd_map = { 'c': 'cc', 'cc': 'cxx', 'm': 'objc', 'mm': 'objcxx', }
cmd = cmd_map.get(lang)
ninja_file.build(gch, cmd, input, variables=[(var_name, lang_flag)])
def WriteLink(self, spec, config_name, config, link_deps):
"""Write out a link step. Fills out target.binary. """
if self.flavor != 'mac' or len(self.archs) == 1:
return self.WriteLinkForArch(
self.ninja, spec, config_name, config, link_deps)
else:
output = self.ComputeOutput(spec)
inputs = [self.WriteLinkForArch(self.arch_subninjas[arch], spec,
config_name, config, link_deps[arch],
arch=arch)
for arch in self.archs]
extra_bindings = []
build_output = output
if not self.is_mac_bundle:
self.AppendPostbuildVariable(extra_bindings, spec, output, output)
# TODO(yyanagisawa): more work needed to fix:
# https://code.google.com/p/gyp/issues/detail?id=411
if (spec['type'] in ('shared_library', 'loadable_module') and
not self.is_mac_bundle):
extra_bindings.append(('lib', output))
self.ninja.build([output, output + '.TOC'], 'solipo', inputs,
variables=extra_bindings)
else:
self.ninja.build(build_output, 'lipo', inputs, variables=extra_bindings)
return output
def WriteLinkForArch(self, ninja_file, spec, config_name, config,
link_deps, arch=None):
"""Write out a link step. Fills out target.binary. """
command = {
'executable': 'link',
'loadable_module': 'solink_module',
'shared_library': 'solink',
}[spec['type']]
command_suffix = ''
implicit_deps = set()
solibs = set()
order_deps = set()
if 'dependencies' in spec:
# Two kinds of dependencies:
# - Linkable dependencies (like a .a or a .so): add them to the link line.
# - Non-linkable dependencies (like a rule that generates a file
# and writes a stamp file): add them to implicit_deps
extra_link_deps = set()
for dep in spec['dependencies']:
target = self.target_outputs.get(dep)
if not target:
continue
linkable = target.Linkable()
if linkable:
new_deps = []
if (self.flavor == 'win' and
target.component_objs and
self.msvs_settings.IsUseLibraryDependencyInputs(config_name)):
new_deps = target.component_objs
if target.compile_deps:
order_deps.add(target.compile_deps)
elif self.flavor == 'win' and target.import_lib:
new_deps = [target.import_lib]
elif target.UsesToc(self.flavor):
solibs.add(target.binary)
implicit_deps.add(target.binary + '.TOC')
else:
new_deps = [target.binary]
for new_dep in new_deps:
if new_dep not in extra_link_deps:
extra_link_deps.add(new_dep)
link_deps.append(new_dep)
final_output = target.FinalOutput()
if not linkable or final_output != target.binary:
implicit_deps.add(final_output)
extra_bindings = []
if self.uses_cpp and self.flavor != 'win':
extra_bindings.append(('ld', '$ldxx'))
output = self.ComputeOutput(spec, arch)
if arch is None and not self.is_mac_bundle:
self.AppendPostbuildVariable(extra_bindings, spec, output, output)
is_executable = spec['type'] == 'executable'
# The ldflags config key is not used on mac or win. On those platforms
# linker flags are set via xcode_settings and msvs_settings, respectively.
env_ldflags = os.environ.get('LDFLAGS', '').split()
if self.flavor == 'mac':
ldflags = self.xcode_settings.GetLdflags(config_name,
self.ExpandSpecial(generator_default_variables['PRODUCT_DIR']),
self.GypPathToNinja, arch)
ldflags = env_ldflags + ldflags
elif self.flavor == 'win':
manifest_base_name = self.GypPathToUniqueOutput(
self.ComputeOutputFileName(spec))
ldflags, intermediate_manifest, manifest_files = \
self.msvs_settings.GetLdflags(config_name, self.GypPathToNinja,
self.ExpandSpecial, manifest_base_name,
output, is_executable,
self.toplevel_build)
ldflags = env_ldflags + ldflags
self.WriteVariableList(ninja_file, 'manifests', manifest_files)
implicit_deps = implicit_deps.union(manifest_files)
if intermediate_manifest:
self.WriteVariableList(
ninja_file, 'intermediatemanifest', [intermediate_manifest])
command_suffix = _GetWinLinkRuleNameSuffix(
self.msvs_settings.IsEmbedManifest(config_name))
def_file = self.msvs_settings.GetDefFile(self.GypPathToNinja)
if def_file:
implicit_deps.add(def_file)
else:
# Respect environment variables related to build, but target-specific
# flags can still override them.
ldflags = env_ldflags + config.get('ldflags', [])
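# Executables that link against locally built shared libraries get an
# $ORIGIN-relative rpath embedded so they can find those libraries at
# runtime without LD_LIBRARY_PATH.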
if is_executable and len(solibs):
rpath = 'lib/'
if self.toolset != 'target':
rpath += self.toolset
ldflags.append(r'-Wl,-rpath=\$$ORIGIN/%s' % rpath)
ldflags.append('-Wl,-rpath-link=%s' % rpath)
self.WriteVariableList(ninja_file, 'ldflags',
map(self.ExpandSpecial, ldflags))
library_dirs = config.get('library_dirs', [])
if self.flavor == 'win':
library_dirs = [self.msvs_settings.ConvertVSMacros(l, config_name)
for l in library_dirs]
library_dirs = ['/LIBPATH:' + QuoteShellArgument(self.GypPathToNinja(l),
self.flavor)
for l in library_dirs]
else:
library_dirs = [QuoteShellArgument('-L' + self.GypPathToNinja(l),
self.flavor)
for l in library_dirs]
libraries = gyp.common.uniquer(map(self.ExpandSpecial,
spec.get('libraries', [])))
if self.flavor == 'mac':
libraries = self.xcode_settings.AdjustLibraries(libraries, config_name)
elif self.flavor == 'win':
libraries = self.msvs_settings.AdjustLibraries(libraries)
self.WriteVariableList(ninja_file, 'libs', library_dirs + libraries)
linked_binary = output
if command in ('solink', 'solink_module'):
extra_bindings.append(('soname', os.path.split(output)[1]))
extra_bindings.append(('lib',
gyp.common.EncodePOSIXShellArgument(output)))
if self.flavor != 'win':
link_file_list = output
if self.is_mac_bundle:
# 'Dependency Framework.framework/Versions/A/Dependency Framework' ->
# 'Dependency Framework.framework.rsp'
link_file_list = self.xcode_settings.GetWrapperName()
if arch:
link_file_list += '.' + arch
link_file_list += '.rsp'
# If an rspfile contains spaces, ninja surrounds the filename with quotes
# and then passes it to open(), creating a file with quotes in its name
# (and when looking for the rsp file, the name makes it through bash,
# which strips the quotes) :-/
link_file_list = link_file_list.replace(' ', '_')
extra_bindings.append(
('link_file_list',
gyp.common.EncodePOSIXShellArgument(link_file_list)))
if self.flavor == 'win':
extra_bindings.append(('binary', output))
if ('/NOENTRY' not in ldflags and
not self.msvs_settings.GetNoImportLibrary(config_name)):
self.target.import_lib = output + '.lib'
extra_bindings.append(('implibflag',
'/IMPLIB:%s' % self.target.import_lib))
pdbname = self.msvs_settings.GetPDBName(
config_name, self.ExpandSpecial, output + '.pdb')
output = [output, self.target.import_lib]
if pdbname:
output.append(pdbname)
elif not self.is_mac_bundle:
output = [output, output + '.TOC']
else:
command = command + '_notoc'
elif self.flavor == 'win':
extra_bindings.append(('binary', output))
pdbname = self.msvs_settings.GetPDBName(
config_name, self.ExpandSpecial, output + '.pdb')
if pdbname:
output = [output, pdbname]
if len(solibs):
extra_bindings.append(('solibs', gyp.common.EncodePOSIXShellList(solibs)))
ninja_file.build(output, command + command_suffix, link_deps,
implicit=list(implicit_deps),
order_only=list(order_deps),
variables=extra_bindings)
return linked_binary
def WriteTarget(self, spec, config_name, config, link_deps, compile_deps):
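# Even if this target compiled no objects of its own, it still needs a real
# link step when any of its dependencies is linkable; otherwise it is faked
# out with a stamp file below.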
extra_link_deps = any(self.target_outputs.get(dep).Linkable()
for dep in spec.get('dependencies', [])
if dep in self.target_outputs)
if spec['type'] == 'none' or (not link_deps and not extra_link_deps):
# TODO(evan): don't call this function for 'none' target types, as
# it doesn't do anything, and we fake out a 'binary' with a stamp file.
self.target.binary = compile_deps
self.target.type = 'none'
elif spec['type'] == 'static_library':
self.target.binary = self.ComputeOutput(spec)
if (self.flavor not in ('mac', 'openbsd', 'netbsd', 'win') and not
self.is_standalone_static_library):
self.ninja.build(self.target.binary, 'alink_thin', link_deps,
order_only=compile_deps)
else:
variables = []
if self.xcode_settings:
libtool_flags = self.xcode_settings.GetLibtoolflags(config_name)
if libtool_flags:
variables.append(('libtool_flags', libtool_flags))
if self.msvs_settings:
libflags = self.msvs_settings.GetLibFlags(config_name,
self.GypPathToNinja)
variables.append(('libflags', libflags))
if self.flavor != 'mac' or len(self.archs) == 1:
self.AppendPostbuildVariable(variables, spec,
self.target.binary, self.target.binary)
self.ninja.build(self.target.binary, 'alink', link_deps,
order_only=compile_deps, variables=variables)
else:
inputs = []
for arch in self.archs:
output = self.ComputeOutput(spec, arch)
self.arch_subninjas[arch].build(output, 'alink', link_deps[arch],
order_only=compile_deps,
variables=variables)
inputs.append(output)
# TODO: It's not clear if libtool_flags should be passed to the alink
# call that combines single-arch .a files into a fat .a file.
self.AppendPostbuildVariable(variables, spec,
self.target.binary, self.target.binary)
self.ninja.build(self.target.binary, 'alink', inputs,
# FIXME: test proving order_only=compile_deps isn't
# needed.
variables=variables)
else:
self.target.binary = self.WriteLink(spec, config_name, config, link_deps)
return self.target.binary
def WriteMacBundle(self, spec, mac_bundle_depends, is_empty):
assert self.is_mac_bundle
package_framework = spec['type'] in ('shared_library', 'loadable_module')
output = self.ComputeMacBundleOutput()
if is_empty:
output += '.stamp'
variables = []
self.AppendPostbuildVariable(variables, spec, output, self.target.binary,
is_command_start=not package_framework)
if package_framework and not is_empty:
variables.append(('version', self.xcode_settings.GetFrameworkVersion()))
self.ninja.build(output, 'package_framework', mac_bundle_depends,
variables=variables)
else:
self.ninja.build(output, 'stamp', mac_bundle_depends,
variables=variables)
self.target.bundle = output
return output
def GetToolchainEnv(self, additional_settings=None):
"""Returns the variables toolchain would set for build steps."""
env = self.GetSortedXcodeEnv(additional_settings=additional_settings)
if self.flavor == 'win':
env = self.GetMsvsToolchainEnv(
additional_settings=additional_settings)
return env
def GetMsvsToolchainEnv(self, additional_settings=None):
"""Returns the variables Visual Studio would set for build steps."""
return self.msvs_settings.GetVSMacroEnv('$!PRODUCT_DIR',
config=self.config_name)
def GetSortedXcodeEnv(self, additional_settings=None):
"""Returns the variables Xcode would set for build steps."""
assert self.abs_build_dir
abs_build_dir = self.abs_build_dir
return gyp.xcode_emulation.GetSortedXcodeEnv(
self.xcode_settings, abs_build_dir,
os.path.join(abs_build_dir, self.build_to_base), self.config_name,
additional_settings)
def GetSortedXcodePostbuildEnv(self):
"""Returns the variables Xcode would set for postbuild steps."""
postbuild_settings = {}
# CHROMIUM_STRIP_SAVE_FILE is a chromium-specific hack.
# TODO(thakis): It would be nice to have some general mechanism instead.
strip_save_file = self.xcode_settings.GetPerTargetSetting(
'CHROMIUM_STRIP_SAVE_FILE')
if strip_save_file:
postbuild_settings['CHROMIUM_STRIP_SAVE_FILE'] = strip_save_file
return self.GetSortedXcodeEnv(additional_settings=postbuild_settings)
def AppendPostbuildVariable(self, variables, spec, output, binary,
is_command_start=False):
"""Adds a 'postbuild' variable if there is a postbuild for |output|."""
postbuild = self.GetPostbuildCommand(spec, output, binary, is_command_start)
if postbuild:
variables.append(('postbuilds', postbuild))
def GetPostbuildCommand(self, spec, output, output_binary, is_command_start):
"""Returns a shell command that runs all the postbuilds, and removes
|output| if any of them fails. If |is_command_start| is False, then the
returned string will start with ' && '."""
if not self.xcode_settings or spec['type'] == 'none' or not output:
return ''
output = QuoteShellArgument(output, self.flavor)
postbuilds = gyp.xcode_emulation.GetSpecPostbuildCommands(spec, quiet=True)
if output_binary is not None:
postbuilds = self.xcode_settings.AddImplicitPostbuilds(
self.config_name,
os.path.normpath(os.path.join(self.base_to_build, output)),
QuoteShellArgument(
os.path.normpath(os.path.join(self.base_to_build, output_binary)),
self.flavor),
postbuilds, quiet=True)
if not postbuilds:
return ''
# Postbuilds expect to be run in the gyp file's directory, so insert an
# implicit postbuild to cd into that directory first.
postbuilds.insert(0, gyp.common.EncodePOSIXShellList(
['cd', self.build_to_base]))
env = self.ComputeExportEnvString(self.GetSortedXcodePostbuildEnv())
# G will be non-null if any postbuild fails. Run all postbuilds in a
# subshell.
commands = env + ' (' + \
' && '.join([ninja_syntax.escape(command) for command in postbuilds])
command_string = (commands + '); G=$$?; '
# Remove the final output if any postbuild failed.
'((exit $$G) || rm -rf %s) ' % output + '&& exit $$G)')
if is_command_start:
return '(' + command_string + ' && '
else:
return '$ && (' + command_string
def ComputeExportEnvString(self, env):
"""Given an environment, returns a string looking like
'export FOO=foo; export BAR="${FOO} bar";'
that exports |env| to the shell."""
export_str = []
for k, v in env:
export_str.append('export %s=%s;' %
(k, ninja_syntax.escape(gyp.common.EncodePOSIXShellArgument(v))))
return ' '.join(export_str)
def ComputeMacBundleOutput(self):
"""Return the 'output' (full output path) to a bundle output directory."""
assert self.is_mac_bundle
path = generator_default_variables['PRODUCT_DIR']
return self.ExpandSpecial(
os.path.join(path, self.xcode_settings.GetWrapperName()))
def ComputeOutputFileName(self, spec, type=None):
"""Compute the filename of the final output for the current target."""
if not type:
type = spec['type']
default_variables = copy.copy(generator_default_variables)
CalculateVariables(default_variables, {'flavor': self.flavor})
# Compute filename prefix: the product prefix, or a default for
# the product type.
DEFAULT_PREFIX = {
'loadable_module': default_variables['SHARED_LIB_PREFIX'],
'shared_library': default_variables['SHARED_LIB_PREFIX'],
'static_library': default_variables['STATIC_LIB_PREFIX'],
'executable': default_variables['EXECUTABLE_PREFIX'],
}
prefix = spec.get('product_prefix', DEFAULT_PREFIX.get(type, ''))
# Compute filename extension: the product extension, or a default
# for the product type.
DEFAULT_EXTENSION = {
'loadable_module': default_variables['SHARED_LIB_SUFFIX'],
'shared_library': default_variables['SHARED_LIB_SUFFIX'],
'static_library': default_variables['STATIC_LIB_SUFFIX'],
'executable': default_variables['EXECUTABLE_SUFFIX'],
}
extension = spec.get('product_extension')
if extension:
extension = '.' + extension
else:
extension = DEFAULT_EXTENSION.get(type, '')
if 'product_name' in spec:
# If we were given an explicit name, use that.
target = spec['product_name']
else:
# Otherwise, derive a name from the target name.
target = spec['target_name']
if prefix == 'lib':
# Snip out an extra 'lib' from libs if appropriate.
target = StripPrefix(target, 'lib')
if type in ('static_library', 'loadable_module', 'shared_library',
'executable'):
return '%s%s%s' % (prefix, target, extension)
elif type == 'none':
return '%s.stamp' % target
else:
raise Exception('Unhandled output type %s' % type)
def ComputeOutput(self, spec, arch=None):
"""Compute the path for the final output of the spec."""
type = spec['type']
if self.flavor == 'win':
override = self.msvs_settings.GetOutputName(self.config_name,
self.ExpandSpecial)
if override:
return override
if arch is None and self.flavor == 'mac' and type in (
'static_library', 'executable', 'shared_library', 'loadable_module'):
filename = self.xcode_settings.GetExecutablePath()
else:
filename = self.ComputeOutputFileName(spec, type)
if arch is None and 'product_dir' in spec:
path = os.path.join(spec['product_dir'], filename)
return self.ExpandSpecial(path)
# Some products go into the output root, libraries go into shared library
# dir, and everything else goes into the normal place.
type_in_output_root = ['executable', 'loadable_module']
if self.flavor == 'mac' and self.toolset == 'target':
type_in_output_root += ['shared_library', 'static_library']
elif self.flavor == 'win' and self.toolset == 'target':
type_in_output_root += ['shared_library']
if arch is not None:
# Make sure partial executables don't end up in a bundle or the regular
# output directory.
archdir = 'arch'
if self.toolset != 'target':
archdir = os.path.join('arch', '%s' % self.toolset)
return os.path.join(archdir, AddArch(filename, arch))
elif type in type_in_output_root or self.is_standalone_static_library:
return filename
elif type == 'shared_library':
libdir = 'lib'
if self.toolset != 'target':
libdir = os.path.join('lib', '%s' % self.toolset)
return os.path.join(libdir, filename)
else:
return self.GypPathToUniqueOutput(filename, qualified=False)
def WriteVariableList(self, ninja_file, var, values):
assert not isinstance(values, str)
if values is None:
values = []
ninja_file.variable(var, ' '.join(values))
def WriteNewNinjaRule(self, name, args, description, is_cygwin, env, pool,
depfile=None):
"""Write out a new ninja "rule" statement for a given command.
Returns the name of the new rule, and a copy of |args| with variables
expanded."""
if self.flavor == 'win':
args = [self.msvs_settings.ConvertVSMacros(
arg, self.base_to_build, config=self.config_name)
for arg in args]
description = self.msvs_settings.ConvertVSMacros(
description, config=self.config_name)
elif self.flavor == 'mac':
# |env| is an empty list on non-mac.
args = [gyp.xcode_emulation.ExpandEnvVars(arg, env) for arg in args]
description = gyp.xcode_emulation.ExpandEnvVars(description, env)
# TODO: we shouldn't need to qualify names; we do it because
# currently the ninja rule namespace is global, but it really
# should be scoped to the subninja.
rule_name = self.name
if self.toolset == 'target':
rule_name += '.' + self.toolset
rule_name += '.' + name
rule_name = re.sub('[^a-zA-Z0-9_]', '_', rule_name)
# Remove variable references, but not if they refer to the magic rule
# variables. This is not quite right, as it also protects these for
# actions, not just for rules where they are valid. Good enough.
protect = [ '${root}', '${dirname}', '${source}', '${ext}', '${name}' ]
protect = '(?!' + '|'.join(map(re.escape, protect)) + ')'
description = re.sub(protect + r'\$', '_', description)
# gyp dictates that commands are run from the base directory.
# cd into the directory before running, and adjust paths in
# the arguments to point to the proper locations.
rspfile = None
rspfile_content = None
args = [self.ExpandSpecial(arg, self.base_to_build) for arg in args]
if self.flavor == 'win':
rspfile = rule_name + '.$unique_name.rsp'
# The cygwin case handles this inside the bash sub-shell.
run_in = '' if is_cygwin else ' ' + self.build_to_base
if is_cygwin:
rspfile_content = self.msvs_settings.BuildCygwinBashCommandLine(
args, self.build_to_base)
else:
rspfile_content = gyp.msvs_emulation.EncodeRspFileList(args)
command = ('%s gyp-win-tool action-wrapper $arch ' % sys.executable +
rspfile + run_in)
else:
env = self.ComputeExportEnvString(env)
command = gyp.common.EncodePOSIXShellList(args)
command = 'cd %s; ' % self.build_to_base + env + command
# GYP rules/actions express being no-ops by not touching their outputs.
# Avoid executing downstream dependencies in this case by specifying
# restat=1 to ninja.
self.ninja.rule(rule_name, command, description, depfile=depfile,
restat=True, pool=pool,
rspfile=rspfile, rspfile_content=rspfile_content)
self.ninja.newline()
return rule_name, args
def CalculateVariables(default_variables, params):
"""Calculate additional variables for use in the build (called by gyp)."""
global generator_additional_non_configuration_keys
global generator_additional_path_sections
flavor = gyp.common.GetFlavor(params)
if flavor == 'mac':
default_variables.setdefault('OS', 'mac')
default_variables.setdefault('SHARED_LIB_SUFFIX', '.dylib')
default_variables.setdefault('SHARED_LIB_DIR',
generator_default_variables['PRODUCT_DIR'])
default_variables.setdefault('LIB_DIR',
generator_default_variables['PRODUCT_DIR'])
# Copy additional generator configuration data from Xcode, which is shared
# by the Mac Ninja generator.
import gyp.generator.xcode as xcode_generator
generator_additional_non_configuration_keys = getattr(xcode_generator,
'generator_additional_non_configuration_keys', [])
generator_additional_path_sections = getattr(xcode_generator,
'generator_additional_path_sections', [])
global generator_extra_sources_for_rules
generator_extra_sources_for_rules = getattr(xcode_generator,
'generator_extra_sources_for_rules', [])
elif flavor == 'win':
exts = gyp.MSVSUtil.TARGET_TYPE_EXT
default_variables.setdefault('OS', 'win')
default_variables['EXECUTABLE_SUFFIX'] = '.' + exts['executable']
default_variables['STATIC_LIB_PREFIX'] = ''
default_variables['STATIC_LIB_SUFFIX'] = '.' + exts['static_library']
default_variables['SHARED_LIB_PREFIX'] = ''
default_variables['SHARED_LIB_SUFFIX'] = '.' + exts['shared_library']
# Copy additional generator configuration data from VS, which is shared
# by the Windows Ninja generator.
import gyp.generator.msvs as msvs_generator
generator_additional_non_configuration_keys = getattr(msvs_generator,
'generator_additional_non_configuration_keys', [])
generator_additional_path_sections = getattr(msvs_generator,
'generator_additional_path_sections', [])
gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
else:
operating_system = flavor
if flavor == 'android':
operating_system = 'linux' # Keep this legacy behavior for now.
default_variables.setdefault('OS', operating_system)
default_variables.setdefault('SHARED_LIB_SUFFIX', '.so')
default_variables.setdefault('SHARED_LIB_DIR',
os.path.join('$!PRODUCT_DIR', 'lib'))
default_variables.setdefault('LIB_DIR',
os.path.join('$!PRODUCT_DIR', 'obj'))
def ComputeOutputDir(params):
"""Returns the path from the toplevel_dir to the build output directory."""
# generator_dir: relative path from pwd to where make puts build files.
# Makes migrating from make to ninja easier; ninja doesn't put anything here.
generator_dir = os.path.relpath(params['options'].generator_output or '.')
# output_dir: relative path from generator_dir to the build directory.
output_dir = params.get('generator_flags', {}).get('output_dir', 'out')
# Relative path from source root to our output files. e.g. "out"
return os.path.normpath(os.path.join(generator_dir, output_dir))
def CalculateGeneratorInputInfo(params):
"""Called by __init__ to initialize generator values based on params."""
# E.g. "out/gypfiles"
toplevel = params['options'].toplevel_dir
qualified_out_dir = os.path.normpath(os.path.join(
toplevel, ComputeOutputDir(params), 'gypfiles'))
global generator_filelist_paths
generator_filelist_paths = {
'toplevel': toplevel,
'qualified_out_dir': qualified_out_dir,
}
def OpenOutput(path, mode='w'):
"""Open |path| for writing, creating directories if necessary."""
gyp.common.EnsureDirExists(path)
return open(path, mode)
def CommandWithWrapper(cmd, wrappers, prog):
wrapper = wrappers.get(cmd, '')
if wrapper:
return wrapper + ' ' + prog
return prog
def GetDefaultConcurrentLinks():
"""Returns a best-guess for a number of concurrent links."""
pool_size = int(os.environ.get('GYP_LINK_CONCURRENCY', 0))
if pool_size:
return pool_size
if sys.platform in ('win32', 'cygwin'):
import ctypes
class MEMORYSTATUSEX(ctypes.Structure):
_fields_ = [
("dwLength", ctypes.c_ulong),
("dwMemoryLoad", ctypes.c_ulong),
("ullTotalPhys", ctypes.c_ulonglong),
("ullAvailPhys", ctypes.c_ulonglong),
("ullTotalPageFile", ctypes.c_ulonglong),
("ullAvailPageFile", ctypes.c_ulonglong),
("ullTotalVirtual", ctypes.c_ulonglong),
("ullAvailVirtual", ctypes.c_ulonglong),
("sullAvailExtendedVirtual", ctypes.c_ulonglong),
]
stat = MEMORYSTATUSEX()
stat.dwLength = ctypes.sizeof(stat)
ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(stat))
# VS 2015 uses 20% more working set than VS 2013 and can consume all RAM
# on a 64 GB machine.
mem_limit = max(1, stat.ullTotalPhys / (5 * (2 ** 30))) # total / 5GB
hard_cap = max(1, int(os.environ.get('GYP_LINK_CONCURRENCY_MAX', 2**32)))
return min(mem_limit, hard_cap)
elif sys.platform.startswith('linux'):
if os.path.exists("/proc/meminfo"):
with open("/proc/meminfo") as meminfo:
memtotal_re = re.compile(r'^MemTotal:\s*(\d*)\s*kB')
for line in meminfo:
match = memtotal_re.match(line)
if not match:
continue
# Allow 8Gb per link on Linux because Gold is quite memory hungry
return max(1, int(match.group(1)) / (8 * (2 ** 20)))
return 1
elif sys.platform == 'darwin':
try:
avail_bytes = int(subprocess.check_output(['sysctl', '-n', 'hw.memsize']))
# A static library debug build of Chromium's unit_tests takes ~2.7GB, so
# 4GB per ld process allows for some more bloat.
return max(1, avail_bytes / (4 * (2 ** 30))) # total / 4GB
except Exception:  # e.g. sysctl missing or unparsable output
return 1
else:
# TODO(scottmg): Implement this for other platforms.
return 1
def _GetWinLinkRuleNameSuffix(embed_manifest):
"""Returns the suffix used to select an appropriate linking rule depending on
whether the manifest embedding is enabled."""
return '_embed' if embed_manifest else ''
def _AddWinLinkRules(master_ninja, embed_manifest):
"""Adds link rules for Windows platform to |master_ninja|."""
def FullLinkCommand(ldcmd, out, binary_type):
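# Manifest resources use the standard Windows IDs: 1 for executables
# (CREATEPROCESS_MANIFEST_RESOURCE_ID) and 2 for DLLs
# (ISOLATIONAWARE_MANIFEST_RESOURCE_ID).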
resource_name = {
'exe': '1',
'dll': '2',
}[binary_type]
return '%(python)s gyp-win-tool link-with-manifests $arch %(embed)s ' \
'%(out)s "%(ldcmd)s" %(resname)s $mt $rc "$intermediatemanifest" ' \
'$manifests' % {
'python': sys.executable,
'out': out,
'ldcmd': ldcmd,
'resname': resource_name,
'embed': embed_manifest }
rule_name_suffix = _GetWinLinkRuleNameSuffix(embed_manifest)
use_separate_mspdbsrv = (
int(os.environ.get('GYP_USE_SEPARATE_MSPDBSRV', '0')) != 0)
dlldesc = 'LINK%s(DLL) $binary' % rule_name_suffix.upper()
dllcmd = ('%s gyp-win-tool link-wrapper $arch %s '
'$ld /nologo $implibflag /DLL /OUT:$binary '
'@$binary.rsp' % (sys.executable, use_separate_mspdbsrv))
dllcmd = FullLinkCommand(dllcmd, '$binary', 'dll')
master_ninja.rule('solink' + rule_name_suffix,
description=dlldesc, command=dllcmd,
rspfile='$binary.rsp',
rspfile_content='$libs $in_newline $ldflags',
restat=True,
pool='link_pool')
master_ninja.rule('solink_module' + rule_name_suffix,
description=dlldesc, command=dllcmd,
rspfile='$binary.rsp',
rspfile_content='$libs $in_newline $ldflags',
restat=True,
pool='link_pool')
# Note that ldflags goes at the end so that it has the option of
# overriding default settings earlier in the command line.
exe_cmd = ('%s gyp-win-tool link-wrapper $arch %s '
'$ld /nologo /OUT:$binary @$binary.rsp' %
(sys.executable, use_separate_mspdbsrv))
exe_cmd = FullLinkCommand(exe_cmd, '$binary', 'exe')
master_ninja.rule('link' + rule_name_suffix,
description='LINK%s $binary' % rule_name_suffix.upper(),
command=exe_cmd,
rspfile='$binary.rsp',
rspfile_content='$in_newline $libs $ldflags',
pool='link_pool')
def GenerateOutputForConfig(target_list, target_dicts, data, params,
config_name):
options = params['options']
flavor = gyp.common.GetFlavor(params)
generator_flags = params.get('generator_flags', {})
# build_dir: relative path from source root to our output files.
# e.g. "out/Debug"
build_dir = os.path.normpath(
os.path.join(ComputeOutputDir(params), config_name))
toplevel_build = os.path.join(options.toplevel_dir, build_dir)
master_ninja_file = OpenOutput(os.path.join(toplevel_build, 'build.ninja'))
master_ninja = ninja_syntax.Writer(master_ninja_file, width=120)
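# build.ninja is the master file; each target's rules are written to a
# per-target .ninja file that is pulled in via a subninja statement below.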
# Put build-time support tools in out/{config_name}.
gyp.common.CopyTool(flavor, toplevel_build)
# Grab make settings for CC/CXX.
# The rules are
# - The priority from low to high is gcc/g++, the 'make_global_settings' in
# gyp, the environment variable.
# - If there is no 'make_global_settings' for CC.host/CXX.host or
# 'CC_host'/'CXX_host' environment variable, cc_host/cxx_host should be set
# to cc/cxx.
if flavor == 'win':
ar = 'lib.exe'
# cc and cxx must be set to the correct architecture by overriding with one
# of cl_x86 or cl_x64 below.
cc = 'UNSET'
cxx = 'UNSET'
ld = 'link.exe'
ld_host = '$ld'
else:
ar = 'ar'
cc = 'cc'
cxx = 'c++'
ld = '$cc'
ldxx = '$cxx'
ld_host = '$cc_host'
ldxx_host = '$cxx_host'
ar_host = 'ar'
cc_host = None
cxx_host = None
cc_host_global_setting = None
cxx_host_global_setting = None
clang_cl = None
nm = 'nm'
nm_host = 'nm'
readelf = 'readelf'
readelf_host = 'readelf'
build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0])
make_global_settings = data[build_file].get('make_global_settings', [])
build_to_root = gyp.common.InvertRelativePath(build_dir,
options.toplevel_dir)
wrappers = {}
for key, value in make_global_settings:
if key == 'AR':
ar = os.path.join(build_to_root, value)
if key == 'AR.host':
ar_host = os.path.join(build_to_root, value)
if key == 'CC':
cc = os.path.join(build_to_root, value)
if cc.endswith('clang-cl'):
clang_cl = cc
if key == 'CXX':
cxx = os.path.join(build_to_root, value)
if key == 'CC.host':
cc_host = os.path.join(build_to_root, value)
cc_host_global_setting = value
if key == 'CXX.host':
cxx_host = os.path.join(build_to_root, value)
cxx_host_global_setting = value
if key == 'LD':
ld = os.path.join(build_to_root, value)
if key == 'LD.host':
ld_host = os.path.join(build_to_root, value)
if key == 'NM':
nm = os.path.join(build_to_root, value)
if key == 'NM.host':
nm_host = os.path.join(build_to_root, value)
if key == 'READELF':
readelf = os.path.join(build_to_root, value)
if key == 'READELF.host':
readelf_host = os.path.join(build_to_root, value)
if key.endswith('_wrapper'):
wrappers[key[:-len('_wrapper')]] = os.path.join(build_to_root, value)
# Support wrappers from environment variables too.
for key, value in os.environ.iteritems():
if key.lower().endswith('_wrapper'):
key_prefix = key[:-len('_wrapper')]
key_prefix = re.sub(r'\.HOST$', '.host', key_prefix)
wrappers[key_prefix] = os.path.join(build_to_root, value)
if flavor == 'win':
configs = [target_dicts[qualified_target]['configurations'][config_name]
for qualified_target in target_list]
shared_system_includes = None
if not generator_flags.get('ninja_use_custom_environment_files', 0):
shared_system_includes = \
gyp.msvs_emulation.ExtractSharedMSVSSystemIncludes(
configs, generator_flags)
cl_paths = gyp.msvs_emulation.GenerateEnvironmentFiles(
toplevel_build, generator_flags, shared_system_includes, OpenOutput)
for arch, path in cl_paths.iteritems():
if clang_cl:
# If we have selected clang-cl, use that instead.
path = clang_cl
command = CommandWithWrapper('CC', wrappers,
QuoteShellArgument(path, 'win'))
if clang_cl:
# Use clang-cl to cross-compile for x86 or x86_64.
command += (' -m32' if arch == 'x86' else ' -m64')
master_ninja.variable('cl_' + arch, command)
cc = GetEnvironFallback(['CC_target', 'CC'], cc)
master_ninja.variable('cc', CommandWithWrapper('CC', wrappers, cc))
cxx = GetEnvironFallback(['CXX_target', 'CXX'], cxx)
master_ninja.variable('cxx', CommandWithWrapper('CXX', wrappers, cxx))
if flavor == 'win':
master_ninja.variable('ld', ld)
master_ninja.variable('idl', 'midl.exe')
master_ninja.variable('ar', ar)
master_ninja.variable('rc', 'rc.exe')
master_ninja.variable('ml_x86', 'ml.exe')
master_ninja.variable('ml_x64', 'ml64.exe')
master_ninja.variable('mt', 'mt.exe')
else:
master_ninja.variable('ld', CommandWithWrapper('LINK', wrappers, ld))
master_ninja.variable('ldxx', CommandWithWrapper('LINK', wrappers, ldxx))
master_ninja.variable('ar', GetEnvironFallback(['AR_target', 'AR'], ar))
if flavor != 'mac':
# Mac does not use readelf/nm for .TOC generation, so avoid polluting
# the master ninja with extra unused variables.
master_ninja.variable(
'nm', GetEnvironFallback(['NM_target', 'NM'], nm))
master_ninja.variable(
'readelf', GetEnvironFallback(['READELF_target', 'READELF'], readelf))
if generator_supports_multiple_toolsets:
if not cc_host:
cc_host = cc
if not cxx_host:
cxx_host = cxx
master_ninja.variable('ar_host', GetEnvironFallback(['AR_host'], ar_host))
master_ninja.variable('nm_host', GetEnvironFallback(['NM_host'], nm_host))
master_ninja.variable('readelf_host',
GetEnvironFallback(['READELF_host'], readelf_host))
cc_host = GetEnvironFallback(['CC_host'], cc_host)
cxx_host = GetEnvironFallback(['CXX_host'], cxx_host)
# The environment variable could be used in 'make_global_settings', like
# ['CC.host', '$(CC)'] or ['CXX.host', '$(CXX)']; transform them here.
if '$(CC)' in cc_host and cc_host_global_setting:
cc_host = cc_host_global_setting.replace('$(CC)', cc)
if '$(CXX)' in cxx_host and cxx_host_global_setting:
cxx_host = cxx_host_global_setting.replace('$(CXX)', cxx)
master_ninja.variable('cc_host',
CommandWithWrapper('CC.host', wrappers, cc_host))
master_ninja.variable('cxx_host',
CommandWithWrapper('CXX.host', wrappers, cxx_host))
if flavor == 'win':
master_ninja.variable('ld_host', ld_host)
else:
master_ninja.variable('ld_host', CommandWithWrapper(
'LINK', wrappers, ld_host))
master_ninja.variable('ldxx_host', CommandWithWrapper(
'LINK', wrappers, ldxx_host))
master_ninja.newline()
master_ninja.pool('link_pool', depth=GetDefaultConcurrentLinks())
master_ninja.newline()
deps = 'msvc' if flavor == 'win' else 'gcc'
if flavor != 'win':
master_ninja.rule(
'cc',
description='CC $out',
command=('$cc -MMD -MF $out.d $defines $includes $cflags $cflags_c '
'$cflags_pch_c -c $in -o $out'),
depfile='$out.d',
deps=deps)
master_ninja.rule(
'cc_s',
description='CC $out',
command=('$cc $defines $includes $cflags $cflags_c '
'$cflags_pch_c -c $in -o $out'))
master_ninja.rule(
'cxx',
description='CXX $out',
command=('$cxx -MMD -MF $out.d $defines $includes $cflags $cflags_cc '
'$cflags_pch_cc -c $in -o $out'),
depfile='$out.d',
deps=deps)
else:
# TODO(scottmg) Using separate pdb names is a test to see if it works around
# http://crbug.com/142362. It seems there's a race between the creation of
# the .pdb by the precompiled header step for .cc and the compilation of
# .c files. This should be handled by mspdbsrv, but it occasionally errors
# out with
# c1xx : fatal error C1033: cannot open program database
# By making the rules target separate pdb files this might be avoided.
cc_command = ('ninja -t msvc -e $arch ' +
'-- '
'$cc /nologo /showIncludes /FC '
'@$out.rsp /c $in /Fo$out /Fd$pdbname_c ')
cxx_command = ('ninja -t msvc -e $arch ' +
'-- '
'$cxx /nologo /showIncludes /FC '
'@$out.rsp /c $in /Fo$out /Fd$pdbname_cc ')
master_ninja.rule(
'cc',
description='CC $out',
command=cc_command,
rspfile='$out.rsp',
rspfile_content='$defines $includes $cflags $cflags_c',
deps=deps)
master_ninja.rule(
'cxx',
description='CXX $out',
command=cxx_command,
rspfile='$out.rsp',
rspfile_content='$defines $includes $cflags $cflags_cc',
deps=deps)
master_ninja.rule(
'idl',
description='IDL $in',
command=('%s gyp-win-tool midl-wrapper $arch $outdir '
'$tlb $h $dlldata $iid $proxy $in '
'$midl_includes $idlflags' % sys.executable))
master_ninja.rule(
'rc',
description='RC $in',
# Note: $in must be last otherwise rc.exe complains.
command=('%s gyp-win-tool rc-wrapper '
'$arch $rc $defines $resource_includes $rcflags /fo$out $in' %
sys.executable))
master_ninja.rule(
'asm',
description='ASM $out',
command=('%s gyp-win-tool asm-wrapper '
'$arch $asm $defines $includes $asmflags /c /Fo $out $in' %
sys.executable))
if flavor != 'mac' and flavor != 'win':
master_ninja.rule(
'alink',
description='AR $out',
command='rm -f $out && $ar rcs $arflags $out $in')
master_ninja.rule(
'alink_thin',
description='AR $out',
command='rm -f $out && $ar rcsT $arflags $out $in')
# This allows targets that only need to depend on $lib's API to declare an
# order-only dependency on $lib.TOC and avoid relinking such downstream
# dependencies when $lib changes only in non-public ways.
# The resulting string leaves an uninterpolated %(suffix)s which
# is used in the final substitution below.
mtime_preserving_solink_base = (
'if [ ! -e $lib -o ! -e $lib.TOC ]; then '
'%(solink)s && %(extract_toc)s > $lib.TOC; else '
'%(solink)s && %(extract_toc)s > $lib.tmp && '
'if ! cmp -s $lib.tmp $lib.TOC; then mv $lib.tmp $lib.TOC ; '
'fi; fi'
% { 'solink':
'$ld -shared $ldflags -o $lib -Wl,-soname=$soname %(suffix)s',
'extract_toc':
('{ $readelf -d $lib | grep SONAME ; '
'$nm -gD -f p $lib | cut -f1-2 -d\' \'; }')})
master_ninja.rule(
'solink',
description='SOLINK $lib',
restat=True,
command=mtime_preserving_solink_base % {'suffix': '@$link_file_list'},
rspfile='$link_file_list',
rspfile_content=
'-Wl,--whole-archive $in $solibs -Wl,--no-whole-archive $libs',
pool='link_pool')
master_ninja.rule(
'solink_module',
description='SOLINK(module) $lib',
restat=True,
command=mtime_preserving_solink_base % {'suffix': '@$link_file_list'},
rspfile='$link_file_list',
rspfile_content='-Wl,--start-group $in -Wl,--end-group $solibs $libs',
pool='link_pool')
master_ninja.rule(
'link',
description='LINK $out',
command=('$ld $ldflags -o $out '
'-Wl,--start-group $in -Wl,--end-group $solibs $libs'),
pool='link_pool')
elif flavor == 'win':
master_ninja.rule(
'alink',
description='LIB $out',
command=('%s gyp-win-tool link-wrapper $arch False '
'$ar /nologo /ignore:4221 /OUT:$out @$out.rsp' %
sys.executable),
rspfile='$out.rsp',
rspfile_content='$in_newline $libflags')
_AddWinLinkRules(master_ninja, embed_manifest=True)
_AddWinLinkRules(master_ninja, embed_manifest=False)
else:
master_ninja.rule(
'objc',
description='OBJC $out',
command=('$cc -MMD -MF $out.d $defines $includes $cflags $cflags_objc '
'$cflags_pch_objc -c $in -o $out'),
depfile='$out.d',
deps=deps)
master_ninja.rule(
'objcxx',
description='OBJCXX $out',
command=('$cxx -MMD -MF $out.d $defines $includes $cflags $cflags_objcc '
'$cflags_pch_objcc -c $in -o $out'),
depfile='$out.d',
deps=deps)
master_ninja.rule(
'alink',
description='LIBTOOL-STATIC $out, POSTBUILDS',
command='rm -f $out && '
'./gyp-mac-tool filter-libtool libtool $libtool_flags '
'-static -o $out $in'
'$postbuilds')
master_ninja.rule(
'lipo',
description='LIPO $out, POSTBUILDS',
command='rm -f $out && lipo -create $in -output $out$postbuilds')
master_ninja.rule(
'solipo',
description='SOLIPO $out, POSTBUILDS',
command=(
'rm -f $lib $lib.TOC && lipo -create $in -output $lib$postbuilds &&'
'%(extract_toc)s > $lib.TOC'
% { 'extract_toc':
'{ otool -l $lib | grep LC_ID_DYLIB -A 5; '
'nm -gP $lib | cut -f1-2 -d\' \' | grep -v U$$; true; }'}))
# Record the public interface of $lib in $lib.TOC. See the corresponding
# comment in the posix section above for details.
solink_base = '$ld %(type)s $ldflags -o $lib %(suffix)s'
mtime_preserving_solink_base = (
'if [ ! -e $lib -o ! -e $lib.TOC ] || '
# Always force dependent targets to relink if this library
# reexports something. Handling this correctly would require
# recursive TOC dumping but this is rare in practice, so punt.
'otool -l $lib | grep -q LC_REEXPORT_DYLIB ; then '
'%(solink)s && %(extract_toc)s > $lib.TOC; '
'else '
'%(solink)s && %(extract_toc)s > $lib.tmp && '
'if ! cmp -s $lib.tmp $lib.TOC; then '
'mv $lib.tmp $lib.TOC ; '
'fi; '
'fi'
% { 'solink': solink_base,
'extract_toc':
'{ otool -l $lib | grep LC_ID_DYLIB -A 5; '
'nm -gP $lib | cut -f1-2 -d\' \' | grep -v U$$; true; }'})
solink_suffix = '@$link_file_list$postbuilds'
master_ninja.rule(
'solink',
description='SOLINK $lib, POSTBUILDS',
restat=True,
command=mtime_preserving_solink_base % {'suffix': solink_suffix,
'type': '-shared'},
rspfile='$link_file_list',
rspfile_content='$in $solibs $libs',
pool='link_pool')
master_ninja.rule(
'solink_notoc',
description='SOLINK $lib, POSTBUILDS',
restat=True,
command=solink_base % {'suffix': solink_suffix, 'type': '-shared'},
rspfile='$link_file_list',
rspfile_content='$in $solibs $libs',
pool='link_pool')
master_ninja.rule(
'solink_module',
description='SOLINK(module) $lib, POSTBUILDS',
restat=True,
command=mtime_preserving_solink_base % {'suffix': solink_suffix,
'type': '-bundle'},
rspfile='$link_file_list',
rspfile_content='$in $solibs $libs',
pool='link_pool')
master_ninja.rule(
'solink_module_notoc',
description='SOLINK(module) $lib, POSTBUILDS',
restat=True,
command=solink_base % {'suffix': solink_suffix, 'type': '-bundle'},
rspfile='$link_file_list',
rspfile_content='$in $solibs $libs',
pool='link_pool')
master_ninja.rule(
'link',
description='LINK $out, POSTBUILDS',
command=('$ld $ldflags -o $out '
'$in $solibs $libs$postbuilds'),
pool='link_pool')
master_ninja.rule(
'preprocess_infoplist',
description='PREPROCESS INFOPLIST $out',
command=('$cc -E -P -Wno-trigraphs -x c $defines $in -o $out && '
'plutil -convert xml1 $out $out'))
master_ninja.rule(
'copy_infoplist',
description='COPY INFOPLIST $in',
command='$env ./gyp-mac-tool copy-info-plist $in $out $binary $keys')
master_ninja.rule(
'merge_infoplist',
description='MERGE INFOPLISTS $in',
command='$env ./gyp-mac-tool merge-info-plist $out $in')
master_ninja.rule(
'compile_xcassets',
description='COMPILE XCASSETS $in',
command='$env ./gyp-mac-tool compile-xcassets $keys $in')
master_ninja.rule(
'mac_tool',
description='MACTOOL $mactool_cmd $in',
command='$env ./gyp-mac-tool $mactool_cmd $in $out $binary')
master_ninja.rule(
'package_framework',
description='PACKAGE FRAMEWORK $out, POSTBUILDS',
command='./gyp-mac-tool package-framework $out $version$postbuilds '
'&& touch $out')
if flavor == 'win':
master_ninja.rule(
'stamp',
description='STAMP $out',
command='%s gyp-win-tool stamp $out' % sys.executable)
master_ninja.rule(
'copy',
description='COPY $in $out',
command='%s gyp-win-tool recursive-mirror $in $out' % sys.executable)
else:
master_ninja.rule(
'stamp',
description='STAMP $out',
command='${postbuilds}touch $out')
master_ninja.rule(
'copy',
description='COPY $in $out',
command='rm -rf $out && cp -af $in $out')
master_ninja.newline()
all_targets = set()
for build_file in params['build_files']:
for target in gyp.common.AllTargets(target_list,
target_dicts,
os.path.normpath(build_file)):
all_targets.add(target)
all_outputs = set()
# target_outputs is a map from qualified target name to a Target object.
target_outputs = {}
# target_short_names is a map from target short name to a list of Target
# objects.
target_short_names = {}
# Short names of targets that were skipped because they didn't contain
# anything interesting.
# NOTE: there may be overlap between this and non_empty_target_names.
empty_target_names = set()
# Set of non-empty short target names.
# NOTE: there may be overlap between this and empty_target_names.
non_empty_target_names = set()
for qualified_target in target_list:
# qualified_target is like: third_party/icu/icu.gyp:icui18n#target
build_file, name, toolset = \
gyp.common.ParseQualifiedTarget(qualified_target)
this_make_global_settings = data[build_file].get('make_global_settings', [])
assert make_global_settings == this_make_global_settings, (
"make_global_settings needs to be the same for all targets. %s vs. %s" %
(this_make_global_settings, make_global_settings))
spec = target_dicts[qualified_target]
if flavor == 'mac':
gyp.xcode_emulation.MergeGlobalXcodeSettingsToSpec(data[build_file], spec)
# If build_file is a symlink, we must not follow it because there's a chance
# it could point to a path above toplevel_dir, and we cannot correctly deal
# with that case at the moment.
build_file = gyp.common.RelativePath(build_file, options.toplevel_dir,
False)
qualified_target_for_hash = gyp.common.QualifiedTarget(build_file, name,
toolset)
hash_for_rules = hashlib.md5(qualified_target_for_hash).hexdigest()
base_path = os.path.dirname(build_file)
obj = 'obj'
if toolset != 'target':
obj += '.' + toolset
output_file = os.path.join(obj, base_path, name + '.ninja')
ninja_output = StringIO()
writer = NinjaWriter(hash_for_rules, target_outputs, base_path, build_dir,
ninja_output,
toplevel_build, output_file,
flavor, toplevel_dir=options.toplevel_dir)
target = writer.WriteSpec(spec, config_name, generator_flags)
if ninja_output.tell() > 0:
# Only create files for ninja files that actually have contents.
with OpenOutput(os.path.join(toplevel_build, output_file)) as ninja_file:
ninja_file.write(ninja_output.getvalue())
ninja_output.close()
master_ninja.subninja(output_file)
if target:
if name != target.FinalOutput() and spec['toolset'] == 'target':
target_short_names.setdefault(name, []).append(target)
target_outputs[qualified_target] = target
if qualified_target in all_targets:
all_outputs.add(target.FinalOutput())
non_empty_target_names.add(name)
else:
empty_target_names.add(name)
if target_short_names:
# Write a short name to build this target. This benefits both the
# "build chrome" case and the gyp tests, which expect to be able to run
# actions and build libraries by their short name.
master_ninja.newline()
master_ninja.comment('Short names for targets.')
for short_name in target_short_names:
master_ninja.build(short_name, 'phony', [x.FinalOutput() for x in
target_short_names[short_name]])
# Write phony targets for any empty targets that weren't written yet. As
# short names are not necessarily unique, only do this for short names that
# haven't already been output for another target.
empty_target_names = empty_target_names - non_empty_target_names
if empty_target_names:
master_ninja.newline()
master_ninja.comment('Empty targets (output for completeness).')
for name in sorted(empty_target_names):
master_ninja.build(name, 'phony')
if all_outputs:
master_ninja.newline()
master_ninja.build('all', 'phony', list(all_outputs))
master_ninja.default(generator_flags.get('default_target', 'all'))
master_ninja_file.close()
def PerformBuild(data, configurations, params):
options = params['options']
for config in configurations:
builddir = os.path.join(options.toplevel_dir, 'out', config)
arguments = ['ninja', '-C', builddir]
print 'Building [%s]: %s' % (config, arguments)
subprocess.check_call(arguments)
def CallGenerateOutputForConfig(arglist):
# Ignore the interrupt signal so that the parent process catches it and
# kills all multiprocessing children.
signal.signal(signal.SIGINT, signal.SIG_IGN)
(target_list, target_dicts, data, params, config_name) = arglist
GenerateOutputForConfig(target_list, target_dicts, data, params, config_name)
def GenerateOutput(target_list, target_dicts, data, params):
# Update target_dicts for iOS device builds.
target_dicts = gyp.xcode_emulation.CloneConfigurationForDeviceAndEmulator(
target_dicts)
user_config = params.get('generator_flags', {}).get('config', None)
if gyp.common.GetFlavor(params) == 'win':
target_list, target_dicts = MSVSUtil.ShardTargets(target_list, target_dicts)
target_list, target_dicts = MSVSUtil.InsertLargePdbShims(
target_list, target_dicts, generator_default_variables)
if user_config:
GenerateOutputForConfig(target_list, target_dicts, data, params,
user_config)
else:
config_names = target_dicts[target_list[0]]['configurations'].keys()
if params['parallel']:
try:
pool = multiprocessing.Pool(len(config_names))
arglists = []
for config_name in config_names:
arglists.append(
(target_list, target_dicts, data, params, config_name))
pool.map(CallGenerateOutputForConfig, arglists)
except KeyboardInterrupt, e:
pool.terminate()
raise e
else:
for config_name in config_names:
GenerateOutputForConfig(target_list, target_dicts, data, params,
config_name)
|
rcchan/mongo-web-shell
|
refs/heads/master
|
standalone_sample/app.py
|
7
|
# Copyright 2013 10gen Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from flask import Flask, render_template
app = Flask(__name__, static_url_path='/static', static_folder='../frontend')
HOST = '0.0.0.0'
PORT = 8080
DEBUG = True
app.config['MWS_HOST'] = os.environ.get('MWS_HOST', 'http://localhost:5000')
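# MWS_HOST points the frontend at the mongo-web-shell server (by default a
# local instance on port 5000); set the MWS_HOST environment variable to
# override it.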
@app.route('/')
def render_tutorial():
return render_template('tutorial.html')
if __name__ == '__main__':
app.run(host=HOST, port=PORT, debug=DEBUG)
|
yize/grunt-tps
|
refs/heads/master
|
tasks/lib/python/Lib/python2.7/cProfile.py
|
169
|
#! /usr/bin/env python
"""Python interface for the 'lsprof' profiler.
Compatible with the 'profile' module.
"""
__all__ = ["run", "runctx", "help", "Profile"]
import _lsprof
# ____________________________________________________________
# Simple interface
def run(statement, filename=None, sort=-1):
"""Run statement under profiler optionally saving results in filename
This function takes a single argument that can be passed to the
"exec" statement, and an optional file name. In all cases this
routine attempts to "exec" its first argument and gather profiling
statistics from the execution. If no file name is present, then this
function automatically prints a simple profiling report, sorted by the
standard name string (file/line/function-name) that is presented in
each line.
"""
prof = Profile()
result = None
try:
try:
prof = prof.run(statement)
except SystemExit:
pass
finally:
if filename is not None:
prof.dump_stats(filename)
else:
result = prof.print_stats(sort)
return result
def runctx(statement, globals, locals, filename=None, sort=-1):
"""Run statement under profiler, supplying your own globals and locals,
optionally saving results in filename.
statement and filename have the same semantics as profile.run
"""
prof = Profile()
result = None
try:
try:
prof = prof.runctx(statement, globals, locals)
except SystemExit:
pass
finally:
if filename is not None:
prof.dump_stats(filename)
else:
result = prof.print_stats(sort)
return result
# Backwards compatibility.
def help():
print "Documentation for the profile/cProfile modules can be found "
print "in the Python Library Reference, section 'The Python Profiler'."
# ____________________________________________________________
class Profile(_lsprof.Profiler):
"""Profile(custom_timer=None, time_unit=None, subcalls=True, builtins=True)
Builds a profiler object using the specified timer function.
The default timer is a fast built-in one based on real time.
For custom timer functions returning integers, time_unit can
be a float specifying a scale (i.e. how long each integer unit
is, in seconds).
"""
# Most of the functionality is in the base class.
# This subclass only adds convenient and backward-compatible methods.
def print_stats(self, sort=-1):
import pstats
pstats.Stats(self).strip_dirs().sort_stats(sort).print_stats()
def dump_stats(self, file):
import marshal
f = open(file, 'wb')
self.create_stats()
marshal.dump(self.stats, f)
f.close()
def create_stats(self):
self.disable()
self.snapshot_stats()
def snapshot_stats(self):
entries = self.getstats()
self.stats = {}
callersdicts = {}
# call information
for entry in entries:
func = label(entry.code)
nc = entry.callcount # ncalls column of pstats (before '/')
cc = nc - entry.reccallcount # ncalls column of pstats (after '/')
tt = entry.inlinetime # tottime column of pstats
ct = entry.totaltime # cumtime column of pstats
callers = {}
callersdicts[id(entry.code)] = callers
self.stats[func] = cc, nc, tt, ct, callers
# subcall information
for entry in entries:
if entry.calls:
func = label(entry.code)
for subentry in entry.calls:
try:
callers = callersdicts[id(subentry.code)]
except KeyError:
continue
nc = subentry.callcount
cc = nc - subentry.reccallcount
tt = subentry.inlinetime
ct = subentry.totaltime
if func in callers:
prev = callers[func]
nc += prev[0]
cc += prev[1]
tt += prev[2]
ct += prev[3]
callers[func] = nc, cc, tt, ct
# The following two methods can be called by clients to use
# a profiler to profile a statement, given as a string.
def run(self, cmd):
import __main__
dict = __main__.__dict__
return self.runctx(cmd, dict, dict)
def runctx(self, cmd, globals, locals):
self.enable()
try:
exec cmd in globals, locals
finally:
self.disable()
return self
# This method is more useful to profile a single function call.
def runcall(self, func, *args, **kw):
self.enable()
try:
return func(*args, **kw)
finally:
self.disable()
# ____________________________________________________________
def label(code):
if isinstance(code, str):
return ('~', 0, code) # built-in functions ('~' sorts at the end)
else:
return (code.co_filename, code.co_firstlineno, code.co_name)
# ____________________________________________________________
def main():
import os, sys
from optparse import OptionParser
usage = "cProfile.py [-o output_file_path] [-s sort] scriptfile [arg] ..."
parser = OptionParser(usage=usage)
parser.allow_interspersed_args = False
parser.add_option('-o', '--outfile', dest="outfile",
help="Save stats to <outfile>", default=None)
parser.add_option('-s', '--sort', dest="sort",
help="Sort order when printing to stdout, based on pstats.Stats class",
default=-1)
if not sys.argv[1:]:
parser.print_usage()
sys.exit(2)
(options, args) = parser.parse_args()
sys.argv[:] = args
if len(args) > 0:
progname = args[0]
sys.path.insert(0, os.path.dirname(progname))
with open(progname, 'rb') as fp:
code = compile(fp.read(), progname, 'exec')
globs = {
'__file__': progname,
'__name__': '__main__',
'__package__': None,
}
runctx(code, globs, None, options.outfile, options.sort)
else:
parser.print_usage()
return parser
# When invoked as main program, invoke the profiler on a script
if __name__ == '__main__':
main()
|
nlaurance/ninepatch
|
refs/heads/master
|
ninepatch/__init__.py
|
2
|
#!/usr/bin/env python
from PIL import Image
from collections import namedtuple
import os
import re
__all__ = ['Ninepatch', 'ScaleError']
content_area = namedtuple('content_area', ['left', 'top', 'right', 'bottom'])
class ScaleError(Exception):
pass
class NinepatchError(Exception):
pass
def is_even(value):
return value % 2 == 0
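# Typical usage (sketch; 'button.9.png' is a hypothetical asset path):
#   patch = Ninepatch('button.9.png')
#   scaled = patch.render(300, 80) # scale to 300x80 pixels
#   wrapped = patch.render_wrap(content_image) # or wrap another PIL image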
class Ninepatch(object):
slice_cache = {}
render_cache = {}
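# Both caches are class attributes, so they are shared by every Ninepatch
# instance in the process: slice_cache is keyed by filename, render_cache
# by rendered size plus filename.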
@classmethod
def get_cache_size(cls):
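# NOTE: this currently returns the render cache itself, not its size.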
# return sys.getsizeof(cls.slice_cache), sys.getsizeof(cls.render_cache)
return cls.render_cache
def __init__(self, filename, cache=False):
self.filename = filename
if filename in self.slice_cache:
self.image_size = self.slice_cache[filename]['image_size']
self.slice_data = self.slice_cache[filename]['slice_data']
self.marks = self.slice_cache[filename]['marks']
else:
self.image = Image.open(filename)
self.image_size = self.image.size
self.marks = self.find_marks(self.image)
self.slice_data = self.slice()
if cache:
self.slice_cache[filename] = {
'image_size': self.image_size,
'slice_data': self.slice_data,
'marks': self.marks,
}
@property
def min_scale_size(self):
return self.slice_data['min_scale_size']
@property
def content_area(self):
if self.marks['fill']['x'] == [] or self.marks['fill']['y'] == []:
return None
return content_area(
self.marks['fill']['x'][0],
self.marks['fill']['y'][0],
self.image.size[0] - self.marks['fill']['x'][1],
self.image.size[1] - self.marks['fill']['y'][1],
)
@staticmethod
def _chain(marks):
for mark in marks:
yield mark[0]
yield mark[1] + 1 # shift end of black region to next tile
@staticmethod
def find_marks(image):
""" find the cut marks
:param image: a PIL Image
:return: the scale and fill marks found on each axis
:rtype: dict
"""
pixels = image.load()
scale_marks = {'x': [], 'y': []}
fill_marks = {'x': [], 'y': []}
axes = {'x': 0, 'y': 1}
marker_color = (0, 0, 0, 255)
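# 9-patch convention: the 1px border carries the markers. Scale marks are
# read from the top row and left column, fill (content area) marks from
# the bottom row and right column.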
for axis in axes.keys():
start_scale_mark = end_scale_mark = None
start_fill_mark = end_fill_mark = None
scale_coord = [0, 0] # our handle to rotate the axes
fill_coord = [0, 0]
# last pixel on that axis
fill_coord[axes[axis] - 1] = image.size[not axes[axis]] - 1
# iterate over the first pixels on that axis
for i in range(image.size[axes[axis]]):
scale_coord[axes[axis]] = i # select axis to search
fill_coord[axes[axis]] = i
scale_pixel = pixels[tuple(scale_coord)]
fill_pixel = pixels[tuple(fill_coord)]
# scale marks
if scale_pixel == marker_color:
if not start_scale_mark:
start_scale_mark = i
end_scale_mark = i
else:
if start_scale_mark:
scale_marks[axis].append(
(start_scale_mark, end_scale_mark))
start_scale_mark = end_scale_mark = None
# fill marks
if fill_pixel == marker_color:
if not start_fill_mark:
start_fill_mark = i
end_fill_mark = i
else:
if start_fill_mark:
fill_marks[axis] = (start_fill_mark, end_fill_mark - 1)
return {
'scale': scale_marks,
'fill': fill_marks,
}
def slice(self):
""" slice a 9 patch image
"""
slice_data = {}
slice_marks = {
'x': [],
'y': []
}
image_size = {
'x': self.image.size[0],
'y': self.image.size[1]
}
for axis in ('x', 'y'):
slice_marks[axis] = [1] + list(
self._chain(self.marks['scale'][axis])) + [image_size[axis] - 1]
counts = {
'x': len(slice_marks['x']) - 1,
'y': len(slice_marks['y']) - 1,
}
tiles = [[0 for y in range(counts['y'])] for x in range(counts['x'])]
for x in range(counts['x']):
for y in range(counts['y']):
# cut our tile region
tiles[x][y] = self.image.crop((
slice_marks['x'][x],
slice_marks['y'][y],
slice_marks['x'][x + 1],
slice_marks['y'][y + 1],
))
slice_data['tiles'] = tiles
slice_data['tile_count'] = {
'x': len(tiles) - 1,
'y': len(tiles[0]) - 1,
}
slice_data['scaleable_tile_count'] = {
'x': float(slice_data['tile_count']['x']) / 2,
'y': float(slice_data['tile_count']['y']) / 2,
}
slice_data['fixed_tile_size'] = {
'x': 0,
'y': 0,
}
# calculate fixed_tile_size
for x, column in enumerate(tiles):
for y, tile in enumerate(column):
if y == 0 and is_even(x): # only on first row
slice_data['fixed_tile_size']['x'] += tile.size[0]
if x == 0 and is_even(y): # only on first column
slice_data['fixed_tile_size']['y'] += tile.size[1]
# add 1 pixel for every scalable region
slice_data['min_scale_size'] = {
'x': slice_data['fixed_tile_size']['x'] + slice_data['scaleable_tile_count']['x'],
'y': slice_data['fixed_tile_size']['y'] + slice_data['scaleable_tile_count']['y'],
}
return slice_data
@staticmethod
def _distributor(start):
""" decrement start and yield 1 until it is exhausted, then yield 0
"""
n = start
while True:
yield 1 if n > 0 else 0
n -= 1
@staticmethod
def _tile_scale(total_scale, scalable_tile_count):
if scalable_tile_count > 0:
return int(total_scale / scalable_tile_count)
else:
return 0
def render_fit(self, width, height):
""" expands so that a content area of width/height can fit
:return: PIL Image
"""
ca = self.content_area
min_width = int(self.slice_data['min_scale_size']['x'])
min_height = int(self.slice_data['min_scale_size']['y'])
# creates a new PIL image
return self.render(max(width + ca.left + ca.right, min_width),
max(height + ca.top + ca.bottom, min_height))
def render_wrap(self, image):
""" paste image in content area
:param image: a PIL image to insert in the content area
:return: PIL Image
"""
scaled_image = self.render_fit(*image.size)
ca = self.content_area
scaled_image.paste(image, (ca.left, ca.top), image)
return scaled_image
def render(self, width, height, img_filter=Image.ANTIALIAS, cache=False):
""" render the sliced tiles to a new scaled image
"""
cache_hash = '{} {} {}'.format(width, height, self.filename)
if cache and cache_hash in self.render_cache:
scaled_image = self.render_cache[cache_hash]
else:
scaled_image = Image.new('RGBA', (width, height), None)
# all the even tiles are the ones that can be scaled
# raise error when undersized
if width < self.slice_data['min_scale_size']['x']:
raise ScaleError('width cannot be smaller than %i'
% self.slice_data['min_scale_size']['x'])
if height < self.slice_data['min_scale_size']['y']:
raise ScaleError('height cannot be smaller than %i'
% self.slice_data['min_scale_size']['y'])
total_scale = {
'x': width - self.slice_data['fixed_tile_size']['x'],
'y': height - self.slice_data['fixed_tile_size']['y'],
}
tile_scale = {
'x': self._tile_scale(total_scale['x'], self.slice_data['scaleable_tile_count']['x']),
'y': self._tile_scale(total_scale['y'], self.slice_data['scaleable_tile_count']['y']),
}
# rounding differences
extra = {
'x': total_scale['x'] - (tile_scale['x'] * self.slice_data['scaleable_tile_count']['x']),
'y': total_scale['y'] - (tile_scale['y'] * self.slice_data['scaleable_tile_count']['y']),
}
# distributes the pixels from the rounding differences until exhausted
extra_x_distributor = self._distributor(extra['x'])
x_coord = y_coord = 0
for x, column in enumerate(self.slice_data['tiles']):
extra_x = 0 if is_even(x) else next(extra_x_distributor)
extra_y_distributor = self._distributor(extra['y'])
for y, tile in enumerate(column):
extra_y = 0 if is_even(y) else next(extra_y_distributor)
if y == 0:
y_coord = 0 # reset y_coord
if is_even(x) and is_even(y):
pass # use tile as is
elif is_even(x): # scale y
tile = tile.resize((tile.size[0], tile_scale['y'] + extra_y), img_filter)
elif is_even(y): # scale x
tile = tile.resize((tile_scale['x'] + extra_x, tile.size[1]), img_filter)
else: # scale both
tile = tile.resize((
tile_scale['x'] + extra_x,
tile_scale['y'] + extra_y
), img_filter)
scaled_image.paste(tile, (x_coord, y_coord))
y_coord += tile.size[1]
x_coord += tile.size[0]
if cache:
self.render_cache[cache_hash] = scaled_image
return scaled_image
@staticmethod
def _column(image, pixels, x):
return [pixels[(x, y)] for y in range(image.size[1])]
@staticmethod
def _row(image, pixels, y):
return [pixels[(x, y)] for x in range(image.size[0])]
def compress_tile(self, tile):
""" look if pixels are repeated on one or two axes and compress the tile
"""
pixels = tile.load()
x_compress = True
y_compress = True
first_column = self._column(tile, pixels, 0)
        first_row = self._row(tile, pixels, 0)
for x in range(tile.size[0]):
pixel_column = self._column(tile, pixels, x)
if pixel_column != first_column:
x_compress = False
for y in range(tile.size[1]):
pixel_row = self._row(tile, pixels, y)
if pixel_row != first_row:
y_compress = False
if x_compress or y_compress:
width = 1 if x_compress else tile.size[0]
height = 1 if y_compress else tile.size[1]
compressed_tile = Image.new('RGBA', (width, height), None)
compressed_tile.paste(tile.crop((0, 0, width, height)))
return compressed_tile
return tile
def export_slices(self, path):
""" export slices as PNG images into a directory
"""
file_prefix = os.path.basename(self.filename)
        match = re.match(r'(.*)\.9\.png', file_prefix)
        if match is None:
            raise NinepatchError('filename does not follow the *.9.png naming convention')
        file_prefix = match.group(1)
for x, column in enumerate(self.slice_data['tiles']):
for y, tile in enumerate(column):
tile = self.compress_tile(tile)
slice_image = Image.new('RGBA', (tile.size[0], tile.size[1]), None)
slice_image.paste(tile)
slice_image.save('{}/{}_{}_{}.png'.format(path, file_prefix, x, y))
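# Usage sketch (illustrative; 'button.9.png' and 'icon.png' are hypothetical
# assets following the *.9.png convention):
#   ninepatch = Ninepatch('button.9.png')
#   scaled = ninepatch.render(300, 80)    # scale the frame to exactly 300x80
#   icon = Image.open('icon.png')
#   framed = ninepatch.render_wrap(icon)  # wrap an image in the content area
#   framed.save('framed_icon.png')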
|
cc13ny/Allin
|
refs/heads/master
|
lintcode/000-Trapping-Rain-Water-II/TrappingRainWaterII_001.py
|
5
|
import heapq
class Solution:
# @param heights: a matrix of integers
# @return: an integer
def trapRainWater(self, heights):
# write your code here
n = len(heights)
if n < 3:
return 0
m = len(heights[0])
if m < 3:
return 0
        hp = self.heapInitialize(heights)
        visited = self.visitedInitialize(heights)
dx = [0, 0, 1, -1]
dy = [1, -1, 0, 0]
res = 0
while len(hp) != 0:
minval, minx, miny = heapq.heappop(hp)
for i in range(4):
x = minx + dx[i]
y = miny + dy[i]
if -1 < x < n and -1 < y < m and not visited[x][y]:
visited[x][y] = True
if heights[x][y] < minval:
res += minval - heights[x][y]
heights[x][y] = minval
heapq.heappush(hp, (heights[x][y], x, y))
return res
    def heapInitialize(self, heights):
n, m = len(heights), len(heights[0])
h = []
for j in range(1, m - 1):
heapq.heappush(h, (heights[0][j], 0, j))
heapq.heappush(h, (heights[n - 1][j], n - 1, j))
for i in range(1, n - 1):
heapq.heappush(h, (heights[i][0], i, 0))
heapq.heappush(h, (heights[i][m - 1], i, m - 1))
return h
    def visitedInitialize(self, heights):
n, m = len(heights), len(heights[0])
visited = [[False for _ in range(m)] for _ in range(n)]
for j in range(0, m):
visited[0][j] = True
visited[n - 1][j] = True
for i in range(1, n - 1):
visited[i][0] = True
visited[i][m - 1] = True
return visited
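# Usage sketch (illustrative; the matrix is the classic Trapping Rain Water II
# example, which traps 4 units of water):
#   heights = [[1, 4, 3, 1, 3, 2],
#              [3, 2, 1, 3, 2, 4],
#              [2, 3, 3, 2, 3, 1]]
#   print(Solution().trapRainWater(heights))  # 4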
|
kylelwm/ponus
|
refs/heads/master
|
ponus/wsgi.py
|
2
|
"""
WSGI config for ponus project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/dev/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ponus.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
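# Usage sketch (illustrative): any WSGI server can serve this callable, e.g.
#   gunicorn ponus.wsgi:application
# or:
#   uwsgi --http :8000 --module ponus.wsgi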
|
eneldoserrata/marcos_openerp
|
refs/heads/master
|
addons/account_report_company/account_report_company.py
|
8
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2013 S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
class res_partner(osv.Model):
_inherit = 'res.partner'
_order = 'display_name'
def _display_name_compute(self, cr, uid, ids, name, args, context=None):
context = dict(context or {})
context.pop('show_address', None)
return dict(self.name_get(cr, uid, ids, context=context))
_display_name_store_triggers = {
'res.partner': (lambda self,cr,uid,ids,context=None: self.search(cr, uid, [('id','child_of',ids)]),
['parent_id', 'is_company', 'name'], 10)
}
# indirection to avoid passing a copy of the overridable method when declaring the function field
_display_name = lambda self, *args, **kwargs: self._display_name_compute(*args, **kwargs)
_columns = {
# extra field to allow ORDER BY to match visible names
'display_name': fields.function(_display_name, type='char', string='Name', store=_display_name_store_triggers, select=1),
}
class account_invoice(osv.Model):
_inherit = 'account.invoice'
_columns = {
'commercial_partner_id': fields.related('partner_id', 'commercial_partner_id', string='Commercial Entity', type='many2one',
relation='res.partner', store=True, readonly=True,
help="The commercial entity that will be used on Journal Entries for this invoice")
}
|
elelsee/pycfn-elasticsearch
|
refs/heads/master
|
pycfn_elasticsearch/vendored/requests/packages/urllib3/poolmanager.py
|
68
|
import logging
try: # Python 3
from urllib.parse import urljoin
except ImportError:
from urlparse import urljoin
from ._collections import RecentlyUsedContainer
from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool
from .connectionpool import port_by_scheme
from .exceptions import LocationValueError, MaxRetryError, ProxySchemeUnknown
from .request import RequestMethods
from .util.url import parse_url
from .util.retry import Retry
__all__ = ['PoolManager', 'ProxyManager', 'proxy_from_url']
pool_classes_by_scheme = {
'http': HTTPConnectionPool,
'https': HTTPSConnectionPool,
}
log = logging.getLogger(__name__)
SSL_KEYWORDS = ('key_file', 'cert_file', 'cert_reqs', 'ca_certs',
'ssl_version')
class PoolManager(RequestMethods):
"""
Allows for arbitrary requests while transparently keeping track of
necessary connection pools for you.
:param num_pools:
Number of connection pools to cache before discarding the least
recently used pool.
:param headers:
Headers to include with all requests, unless other headers are given
explicitly.
:param \**connection_pool_kw:
Additional parameters are used to create fresh
:class:`urllib3.connectionpool.ConnectionPool` instances.
Example::
>>> manager = PoolManager(num_pools=2)
>>> r = manager.request('GET', 'http://google.com/')
>>> r = manager.request('GET', 'http://google.com/mail')
>>> r = manager.request('GET', 'http://yahoo.com/')
>>> len(manager.pools)
2
"""
proxy = None
def __init__(self, num_pools=10, headers=None, **connection_pool_kw):
RequestMethods.__init__(self, headers)
self.connection_pool_kw = connection_pool_kw
self.pools = RecentlyUsedContainer(num_pools,
dispose_func=lambda p: p.close())
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.clear()
# Return False to re-raise any potential exceptions
return False
def _new_pool(self, scheme, host, port):
"""
Create a new :class:`ConnectionPool` based on host, port and scheme.
This method is used to actually create the connection pools handed out
by :meth:`connection_from_url` and companion methods. It is intended
to be overridden for customization.
"""
pool_cls = pool_classes_by_scheme[scheme]
kwargs = self.connection_pool_kw
if scheme == 'http':
kwargs = self.connection_pool_kw.copy()
for kw in SSL_KEYWORDS:
kwargs.pop(kw, None)
return pool_cls(host, port, **kwargs)
def clear(self):
"""
Empty our store of pools and direct them all to close.
This will not affect in-flight connections, but they will not be
re-used after completion.
"""
self.pools.clear()
def connection_from_host(self, host, port=None, scheme='http'):
"""
Get a :class:`ConnectionPool` based on the host, port, and scheme.
If ``port`` isn't given, it will be derived from the ``scheme`` using
``urllib3.connectionpool.port_by_scheme``.
"""
if not host:
raise LocationValueError("No host specified.")
scheme = scheme or 'http'
port = port or port_by_scheme.get(scheme, 80)
pool_key = (scheme, host, port)
with self.pools.lock:
# If the scheme, host, or port doesn't match existing open
# connections, open a new ConnectionPool.
pool = self.pools.get(pool_key)
if pool:
return pool
# Make a fresh ConnectionPool of the desired type
pool = self._new_pool(scheme, host, port)
self.pools[pool_key] = pool
return pool
def connection_from_url(self, url):
"""
Similar to :func:`urllib3.connectionpool.connection_from_url` but
doesn't pass any additional parameters to the
:class:`urllib3.connectionpool.ConnectionPool` constructor.
Additional parameters are taken from the :class:`.PoolManager`
constructor.
"""
u = parse_url(url)
return self.connection_from_host(u.host, port=u.port, scheme=u.scheme)
def urlopen(self, method, url, redirect=True, **kw):
"""
Same as :meth:`urllib3.connectionpool.HTTPConnectionPool.urlopen`
with custom cross-host redirect logic and only sends the request-uri
portion of the ``url``.
The given ``url`` parameter must be absolute, such that an appropriate
:class:`urllib3.connectionpool.ConnectionPool` can be chosen for it.
"""
u = parse_url(url)
conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme)
kw['assert_same_host'] = False
kw['redirect'] = False
if 'headers' not in kw:
kw['headers'] = self.headers
if self.proxy is not None and u.scheme == "http":
response = conn.urlopen(method, url, **kw)
else:
response = conn.urlopen(method, u.request_uri, **kw)
redirect_location = redirect and response.get_redirect_location()
if not redirect_location:
return response
# Support relative URLs for redirecting.
redirect_location = urljoin(url, redirect_location)
# RFC 7231, Section 6.4.4
if response.status == 303:
method = 'GET'
retries = kw.get('retries')
if not isinstance(retries, Retry):
retries = Retry.from_int(retries, redirect=redirect)
try:
retries = retries.increment(method, url, response=response, _pool=conn)
except MaxRetryError:
if retries.raise_on_redirect:
raise
return response
kw['retries'] = retries
kw['redirect'] = redirect
log.info("Redirecting %s -> %s" % (url, redirect_location))
return self.urlopen(method, redirect_location, **kw)
class ProxyManager(PoolManager):
"""
Behaves just like :class:`PoolManager`, but sends all requests through
the defined proxy, using the CONNECT method for HTTPS URLs.
:param proxy_url:
The URL of the proxy to be used.
:param proxy_headers:
        A dictionary containing headers that will be sent to the proxy. In case
of HTTP they are being sent with each request, while in the
HTTPS/CONNECT case they are sent only once. Could be used for proxy
authentication.
Example:
>>> proxy = urllib3.ProxyManager('http://localhost:3128/')
>>> r1 = proxy.request('GET', 'http://google.com/')
>>> r2 = proxy.request('GET', 'http://httpbin.org/')
>>> len(proxy.pools)
1
>>> r3 = proxy.request('GET', 'https://httpbin.org/')
>>> r4 = proxy.request('GET', 'https://twitter.com/')
>>> len(proxy.pools)
3
"""
def __init__(self, proxy_url, num_pools=10, headers=None,
proxy_headers=None, **connection_pool_kw):
if isinstance(proxy_url, HTTPConnectionPool):
proxy_url = '%s://%s:%i' % (proxy_url.scheme, proxy_url.host,
proxy_url.port)
proxy = parse_url(proxy_url)
if not proxy.port:
port = port_by_scheme.get(proxy.scheme, 80)
proxy = proxy._replace(port=port)
if proxy.scheme not in ("http", "https"):
raise ProxySchemeUnknown(proxy.scheme)
self.proxy = proxy
self.proxy_headers = proxy_headers or {}
connection_pool_kw['_proxy'] = self.proxy
connection_pool_kw['_proxy_headers'] = self.proxy_headers
super(ProxyManager, self).__init__(
num_pools, headers, **connection_pool_kw)
def connection_from_host(self, host, port=None, scheme='http'):
if scheme == "https":
return super(ProxyManager, self).connection_from_host(
host, port, scheme)
return super(ProxyManager, self).connection_from_host(
self.proxy.host, self.proxy.port, self.proxy.scheme)
def _set_proxy_headers(self, url, headers=None):
"""
Sets headers needed by proxies: specifically, the Accept and Host
headers. Only sets headers not provided by the user.
"""
headers_ = {'Accept': '*/*'}
netloc = parse_url(url).netloc
if netloc:
headers_['Host'] = netloc
if headers:
headers_.update(headers)
return headers_
def urlopen(self, method, url, redirect=True, **kw):
"Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute."
u = parse_url(url)
if u.scheme == "http":
# For proxied HTTPS requests, httplib sets the necessary headers
# on the CONNECT to the proxy. For HTTP, we'll definitely
# need to set 'Host' at the very least.
headers = kw.get('headers', self.headers)
kw['headers'] = self._set_proxy_headers(url, headers)
return super(ProxyManager, self).urlopen(method, url, redirect=redirect, **kw)
def proxy_from_url(url, **kw):
return ProxyManager(proxy_url=url, **kw)
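# Usage sketch (illustrative):
#   manager = PoolManager(num_pools=4)
#   response = manager.request('GET', 'http://example.com/')
#   proxied = proxy_from_url('http://localhost:3128/')
#   response = proxied.request('GET', 'http://example.com/')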
|
benhylau/cjdns
|
refs/heads/master
|
node_build/dependencies/libuv/build/gyp/test/mac/gyptest-xctest.py
|
221
|
#!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that xctest targets are correctly configured.
"""
import TestGyp
import sys
if sys.platform == 'darwin':
test = TestGyp.TestGyp(formats=['xcode'])
# Ignore this test if Xcode 5 is not installed
import subprocess
job = subprocess.Popen(['xcodebuild', '-version'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
out, err = job.communicate()
if job.returncode != 0:
raise Exception('Error %d running xcodebuild' % job.returncode)
xcode_version, build_number = out.splitlines()
# Convert the version string from 'Xcode 5.0' to ['5','0'].
xcode_version = xcode_version.split()[-1].split('.')
if xcode_version < ['5']:
test.pass_test()
CHDIR = 'xctest'
test.run_gyp('test.gyp', chdir=CHDIR)
test.build('test.gyp', chdir=CHDIR, arguments=['-scheme', 'classes', 'test'])
test.built_file_must_match('tests.xctest/Contents/Resources/resource.txt',
'foo\n', chdir=CHDIR)
test.pass_test()
|
imZack/sanji
|
refs/heads/develop
|
sanji/model_initiator.py
|
3
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import logging
import simplejson as json
import os
import shutil
import subprocess
import time
from threading import Thread
from threading import Event
from threading import RLock
_logger = logging.getLogger("sanji.sdk.model_initiator")
class ModelInitiator(object):
"""
" Deal with some model initialization works like DB
" and Condifuration files creating.
" [backup_inteval]: backup db every hours(s), minus means no backup.
"""
def __init__(
self, model_name, model_path, db_type="json", backup_interval=720):
self.model_name = model_name
self.model_path = model_path
self.db = None
self.data_folder_path = self.model_path + "/data"
self.factory_json_db_path = self.model_path + "/data/" + \
self.model_name + ".json.factory"
self.backup_json_db_path = self.model_path + "/data/" + \
self.model_name + ".json.backup"
self.json_db_path = self.model_path + "/data/" + \
self.model_name + ".json"
self.db_type = db_type
self.db_status = None
        self.backup_interval = backup_interval * 3600  # hours -> seconds
self.db_mutex = RLock()
self.db_manager()
self._backup_thread = Thread(target=self.thread_backup_db)
self._backup_thread.daemon = True
self._backup_thread_event = Event()
if self.backup_interval > 0:
self.start_backup()
def db_manager(self):
"""
" Do series of DB operations.
"""
rc_create = self.create_db() # for first create
try:
self.load_db() # load existing/factory
except Exception as e:
_logger.debug("*** %s" % str(e))
try:
self.recover_db(self.backup_json_db_path)
except Exception:
pass
else:
if rc_create is True:
self.db_status = "factory"
else:
self.db_status = "existing"
return True
try:
self.load_db() # load backup
except Exception as b:
_logger.debug("*** %s" % str(b))
self.recover_db(self.factory_json_db_path)
self.load_db() # load factory
self.db_status = "factory"
else:
self.db_status = "backup"
finally:
return True
def create_db(self):
"""
" Create a db file for model if there is no db.
" User need to prepare thier own xxx.json.factory.
"""
if self.db_type != "json":
raise RuntimeError("db_type only supports json now")
if os.path.exists(self.json_db_path):
return False
if os.path.exists(self.factory_json_db_path):
with self.db_mutex:
shutil.copy2(
self.factory_json_db_path, self.json_db_path)
return True
_logger.debug(
"*** NO such file: %s" % self.factory_json_db_path)
raise RuntimeError("No *.json.factory file")
def recover_db(self, src_file):
"""
" Recover DB from xxxxx.backup.json or xxxxx.json.factory to xxxxx.json
" [src_file]: copy from src_file to xxxxx.json
"""
with self.db_mutex:
try:
shutil.copy2(src_file, self.json_db_path)
except IOError as e:
_logger.debug("*** NO: %s file." % src_file)
raise e
def backup_db(self):
"""
" Generate a xxxxx.backup.json.
"""
with self.db_mutex:
if os.path.exists(self.json_db_path):
try:
shutil.copy2(self.json_db_path, self.backup_json_db_path)
except (IOError, OSError):
_logger.debug("*** No file to copy.")
def load_db(self):
"""
" Load json db as a dictionary.
"""
try:
with open(self.json_db_path) as fp:
self.db = json.load(fp)
except Exception as e:
_logger.debug("*** Open JSON DB error.")
raise e
def save_db(self):
"""
" Save json db to file system.
"""
with self.db_mutex:
if not isinstance(self.db, dict) and not isinstance(self.db, list):
return False
try:
with open(self.json_db_path, "w") as fp:
json.dump(self.db, fp, indent=4)
except Exception as e:
# disk full or something.
_logger.debug("*** Write JSON DB to file error.")
raise e
else:
self.sync()
return True
def start_backup(self):
if self._backup_thread.is_alive():
raise RuntimeError("Stop previous backup thread first.")
self._backup_thread = Thread(target=self.thread_backup_db)
self._backup_thread.daemon = True
self._backup_thread.start()
return True
def stop_backup(self, timeout=None):
if self._backup_thread.is_alive():
self._backup_thread_event.set()
if timeout:
self._backup_thread.join(timeout)
else:
self._backup_thread.join()
return True
return False
def thread_backup_db(self):
single_sleep_time = 2
sleep_count = self.backup_interval
while not self._backup_thread_event.is_set():
if sleep_count >= self.backup_interval:
self.backup_db()
sleep_count = 0
else:
time.sleep(single_sleep_time)
sleep_count += single_sleep_time
def sync(self):
"""
" Call Linux 'sync' command to write data from RAM to flash.
"""
cmd = "sync"
subprocess.call(cmd, shell=True)
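# Usage sketch (illustrative; model name and path are hypothetical):
#   initiator = ModelInitiator('mymodel', '/path/to/mymodel')
#   initiator.db['enabled'] = True
#   initiator.save_db()      # persist the dict and sync to flash
#   initiator.stop_backup()  # stop the periodic backup thread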
|