repo_name | ref | path | copies | content
---|---|---|---|---
solsozluk/backend | refs/heads/master | solsozluk/solsozluk/urls.py | 1 | from django.conf.urls import patterns, include, url
from django.contrib import admin
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'solsozluk.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
)
|
nevir/plexability | refs/heads/master | extern/gyp/test/lib/TestCmd.py | 330 | """
TestCmd.py: a testing framework for commands and scripts.
The TestCmd module provides a framework for portable automated testing
of executable commands and scripts (in any language, not just Python),
especially commands and scripts that require file system interaction.
In addition to running tests and evaluating conditions, the TestCmd
module manages and cleans up one or more temporary workspace
directories, and provides methods for creating files and directories in
those workspace directories from in-line data (here-documents), allowing
tests to be completely self-contained.
A TestCmd environment object is created via the usual invocation:
import TestCmd
test = TestCmd.TestCmd()
There are a bunch of keyword arguments available at instantiation:
test = TestCmd.TestCmd(description = 'string',
program = 'program_or_script_to_test',
interpreter = 'script_interpreter',
workdir = 'prefix',
subdir = 'subdir',
verbose = Boolean,
match = default_match_function,
diff = default_diff_function,
combine = Boolean)
There are a bunch of methods that let you do different things:
test.verbose_set(1)
test.description_set('string')
test.program_set('program_or_script_to_test')
test.interpreter_set('script_interpreter')
test.interpreter_set(['script_interpreter', 'arg'])
test.workdir_set('prefix')
test.workdir_set('')
test.workpath('file')
test.workpath('subdir', 'file')
test.subdir('subdir', ...)
test.rmdir('subdir', ...)
test.write('file', "contents\n")
test.write(['subdir', 'file'], "contents\n")
test.read('file')
test.read(['subdir', 'file'])
test.read('file', mode)
test.read(['subdir', 'file'], mode)
test.writable('dir', 1)
test.writable('dir', None)
test.preserve(condition, ...)
test.cleanup(condition)
test.command_args(program = 'program_or_script_to_run',
interpreter = 'script_interpreter',
arguments = 'arguments to pass to program')
test.run(program = 'program_or_script_to_run',
interpreter = 'script_interpreter',
arguments = 'arguments to pass to program',
chdir = 'directory_to_chdir_to',
stdin = 'input to feed to the program\n',
universal_newlines = True)
p = test.start(program = 'program_or_script_to_run',
interpreter = 'script_interpreter',
arguments = 'arguments to pass to program',
universal_newlines = None)
test.finish(self, p)
test.pass_test()
test.pass_test(condition)
test.pass_test(condition, function)
test.fail_test()
test.fail_test(condition)
test.fail_test(condition, function)
test.fail_test(condition, function, skip)
test.no_result()
test.no_result(condition)
test.no_result(condition, function)
test.no_result(condition, function, skip)
test.stdout()
test.stdout(run)
test.stderr()
test.stderr(run)
test.symlink(target, link)
test.banner(string)
test.banner(string, width)
test.diff(actual, expected)
test.match(actual, expected)
test.match_exact("actual 1\nactual 2\n", "expected 1\nexpected 2\n")
test.match_exact(["actual 1\n", "actual 2\n"],
["expected 1\n", "expected 2\n"])
test.match_re("actual 1\nactual 2\n", regex_string)
test.match_re(["actual 1\n", "actual 2\n"], list_of_regexes)
test.match_re_dotall("actual 1\nactual 2\n", regex_string)
test.match_re_dotall(["actual 1\n", "actual 2\n"], list_of_regexes)
test.tempdir()
test.tempdir('temporary-directory')
test.sleep()
test.sleep(seconds)
test.where_is('foo')
test.where_is('foo', 'PATH1:PATH2')
test.where_is('foo', 'PATH1;PATH2', '.suffix3;.suffix4')
test.unlink('file')
test.unlink('subdir', 'file')
The TestCmd module provides pass_test(), fail_test(), and no_result()
unbound functions that report test results for use with the Aegis change
management system. These methods terminate the test immediately,
reporting PASSED, FAILED, or NO RESULT respectively, and exiting with
status 0 (success), 1 or 2 respectively. This allows for a distinction
between an actual failed test and a test that could not be properly
evaluated because of an external condition (such as a full file system
or incorrect permissions).
import TestCmd
TestCmd.pass_test()
TestCmd.pass_test(condition)
TestCmd.pass_test(condition, function)
TestCmd.fail_test()
TestCmd.fail_test(condition)
TestCmd.fail_test(condition, function)
TestCmd.fail_test(condition, function, skip)
TestCmd.no_result()
TestCmd.no_result(condition)
TestCmd.no_result(condition, function)
TestCmd.no_result(condition, function, skip)
The TestCmd module also provides unbound functions that handle matching
in the same way as the match_*() methods described above.
import TestCmd
test = TestCmd.TestCmd(match = TestCmd.match_exact)
test = TestCmd.TestCmd(match = TestCmd.match_re)
test = TestCmd.TestCmd(match = TestCmd.match_re_dotall)
The TestCmd module provides unbound functions that can be used for the
"diff" argument to TestCmd.TestCmd instantiation:
import TestCmd
test = TestCmd.TestCmd(match = TestCmd.match_re,
diff = TestCmd.diff_re)
test = TestCmd.TestCmd(diff = TestCmd.simple_diff)
The "diff" argument can also be used with standard difflib functions:
import difflib
test = TestCmd.TestCmd(diff = difflib.context_diff)
test = TestCmd.TestCmd(diff = difflib.unified_diff)
Lastly, the where_is() method also exists in an unbound function
version.
import TestCmd
TestCmd.where_is('foo')
TestCmd.where_is('foo', 'PATH1:PATH2')
TestCmd.where_is('foo', 'PATH1;PATH2', '.suffix3;.suffix4')
"""
# Copyright 2000-2010 Steven Knight
# This module is free software, and you may redistribute it and/or modify
# it under the same terms as Python itself, so long as this copyright message
# and disclaimer are retained in their original form.
#
# IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
# SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF
# THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
#
# THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS,
# AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
# SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
__author__ = "Steven Knight <knight at baldmt dot com>"
__revision__ = "TestCmd.py 0.37.D001 2010/01/11 16:55:50 knight"
__version__ = "0.37"
import errno
import os
import os.path
import re
import shutil
import stat
import string
import sys
import tempfile
import time
import traceback
import types
import UserList
__all__ = [
'diff_re',
'fail_test',
'no_result',
'pass_test',
'match_exact',
'match_re',
'match_re_dotall',
'python_executable',
'TestCmd'
]
try:
import difflib
except ImportError:
__all__.append('simple_diff')
def is_List(e):
return type(e) is types.ListType \
or isinstance(e, UserList.UserList)
try:
from UserString import UserString
except ImportError:
class UserString:
pass
if hasattr(types, 'UnicodeType'):
def is_String(e):
return type(e) is types.StringType \
or type(e) is types.UnicodeType \
or isinstance(e, UserString)
else:
def is_String(e):
return type(e) is types.StringType or isinstance(e, UserString)
tempfile.template = 'testcmd.'
if os.name in ('posix', 'nt'):
tempfile.template = 'testcmd.' + str(os.getpid()) + '.'
else:
tempfile.template = 'testcmd.'
re_space = re.compile('\s')
_Cleanup = []
_chain_to_exitfunc = None
def _clean():
global _Cleanup
cleanlist = filter(None, _Cleanup)
del _Cleanup[:]
cleanlist.reverse()
for test in cleanlist:
test.cleanup()
if _chain_to_exitfunc:
_chain_to_exitfunc()
try:
import atexit
except ImportError:
# TODO(1.5): atexit requires python 2.0, so chain sys.exitfunc
try:
_chain_to_exitfunc = sys.exitfunc
except AttributeError:
pass
sys.exitfunc = _clean
else:
atexit.register(_clean)
try:
zip
except NameError:
def zip(*lists):
result = []
for i in xrange(min(map(len, lists))):
result.append(tuple(map(lambda l, i=i: l[i], lists)))
return result
class Collector:
def __init__(self, top):
self.entries = [top]
def __call__(self, arg, dirname, names):
pathjoin = lambda n, d=dirname: os.path.join(d, n)
self.entries.extend(map(pathjoin, names))
def _caller(tblist, skip):
string = ""
arr = []
for file, line, name, text in tblist:
if file[-10:] == "TestCmd.py":
break
arr = [(file, line, name, text)] + arr
atfrom = "at"
for file, line, name, text in arr[skip:]:
if name in ("?", "<module>"):
name = ""
else:
name = " (" + name + ")"
string = string + ("%s line %d of %s%s\n" % (atfrom, line, file, name))
atfrom = "\tfrom"
return string
def fail_test(self = None, condition = 1, function = None, skip = 0):
"""Cause the test to fail.
By default, the fail_test() method reports that the test FAILED
and exits with a status of 1. If a condition argument is supplied,
the test fails only if the condition is true.
"""
if not condition:
return
if not function is None:
function()
of = ""
desc = ""
sep = " "
if not self is None:
if self.program:
of = " of " + self.program
sep = "\n\t"
if self.description:
desc = " [" + self.description + "]"
sep = "\n\t"
at = _caller(traceback.extract_stack(), skip)
sys.stderr.write("FAILED test" + of + desc + sep + at)
sys.exit(1)
def no_result(self = None, condition = 1, function = None, skip = 0):
"""Causes a test to exit with no valid result.
By default, the no_result() method reports NO RESULT for the test
and exits with a status of 2. If a condition argument is supplied,
the test reports NO RESULT only if the condition is true.
"""
if not condition:
return
if not function is None:
function()
of = ""
desc = ""
sep = " "
if not self is None:
if self.program:
of = " of " + self.program
sep = "\n\t"
if self.description:
desc = " [" + self.description + "]"
sep = "\n\t"
if os.environ.get('TESTCMD_DEBUG_SKIPS'):
at = _caller(traceback.extract_stack(), skip)
sys.stderr.write("NO RESULT for test" + of + desc + sep + at)
else:
sys.stderr.write("NO RESULT\n")
sys.exit(2)
def pass_test(self = None, condition = 1, function = None):
"""Causes a test to pass.
By default, the pass_test() method reports PASSED for the test
and exits with a status of 0. If a condition argument is supplied,
the test passes only if the condition is true.
"""
if not condition:
return
if not function is None:
function()
sys.stderr.write("PASSED\n")
sys.exit(0)
def match_exact(lines = None, matches = None):
"""
"""
if not is_List(lines):
lines = string.split(lines, "\n")
if not is_List(matches):
matches = string.split(matches, "\n")
if len(lines) != len(matches):
return
for i in range(len(lines)):
if lines[i] != matches[i]:
return
return 1
def match_re(lines = None, res = None):
"""
"""
if not is_List(lines):
lines = string.split(lines, "\n")
if not is_List(res):
res = string.split(res, "\n")
if len(lines) != len(res):
return
for i in range(len(lines)):
s = "^" + res[i] + "$"
try:
expr = re.compile(s)
except re.error, e:
msg = "Regular expression error in %s: %s"
raise re.error, msg % (repr(s), e[0])
if not expr.search(lines[i]):
return
return 1
def match_re_dotall(lines = None, res = None):
"""
"""
if not type(lines) is type(""):
lines = string.join(lines, "\n")
if not type(res) is type(""):
res = string.join(res, "\n")
s = "^" + res + "$"
try:
expr = re.compile(s, re.DOTALL)
except re.error, e:
msg = "Regular expression error in %s: %s"
raise re.error, msg % (repr(s), e[0])
if expr.match(lines):
return 1
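# Hedged examples of the three match functions above; the inputs are
# made up, and the return values follow directly from the code:
#
#     >>> match_exact("hello\n", "hello\n")
#     1
#     >>> match_re("hello 42\n", "hello \\d+\n")
#     1
#     >>> match_re_dotall("line 1\nline 2\n", "line.*2\n")
#     1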
try:
import difflib
except ImportError:
pass
else:
def simple_diff(a, b, fromfile='', tofile='',
fromfiledate='', tofiledate='', n=3, lineterm='\n'):
"""
A function with the same calling signature as difflib.context_diff
(diff -c) and difflib.unified_diff (diff -u) but which prints
output like the simple, unadorned 'diff' command.
"""
sm = difflib.SequenceMatcher(None, a, b)
def comma(x1, x2):
return x1+1 == x2 and str(x2) or '%s,%s' % (x1+1, x2)
result = []
for op, a1, a2, b1, b2 in sm.get_opcodes():
if op == 'delete':
result.append("%sd%d" % (comma(a1, a2), b1))
result.extend(map(lambda l: '< ' + l, a[a1:a2]))
elif op == 'insert':
result.append("%da%s" % (a1, comma(b1, b2)))
result.extend(map(lambda l: '> ' + l, b[b1:b2]))
elif op == 'replace':
result.append("%sc%s" % (comma(a1, a2), comma(b1, b2)))
result.extend(map(lambda l: '< ' + l, a[a1:a2]))
result.append('---')
result.extend(map(lambda l: '> ' + l, b[b1:b2]))
return result
def diff_re(a, b, fromfile='', tofile='',
fromfiledate='', tofiledate='', n=3, lineterm='\n'):
"""
A simple "diff" of two sets of lines when the expected lines
are regular expressions. This is a really dumb thing that
just compares each line in turn, so it doesn't look for
chunks of matching lines and the like--but at least it lets
you know exactly which line first didn't compare correctly.
"""
result = []
diff = len(a) - len(b)
if diff < 0:
a = a + ['']*(-diff)
elif diff > 0:
b = b + ['']*diff
i = 0
for aline, bline in zip(a, b):
s = "^" + aline + "$"
try:
expr = re.compile(s)
except re.error, e:
msg = "Regular expression error in %s: %s"
raise re.error, msg % (repr(s), e[0])
if not expr.search(bline):
result.append("%sc%s" % (i+1, i+1))
result.append('< ' + repr(a[i]))
result.append('---')
result.append('> ' + repr(b[i]))
i = i+1
return result
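# A hedged illustration of simple_diff() output for two made-up line lists;
# the result mirrors the unadorned 'diff' command:
#
#     >>> simple_diff(['a\n', 'b\n', 'c\n'], ['a\n', 'x\n', 'c\n'])
#     ['2c2', '< b\n', '---', '> x\n']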
if os.name == 'java':
python_executable = os.path.join(sys.prefix, 'jython')
else:
python_executable = sys.executable
if sys.platform == 'win32':
default_sleep_seconds = 2
def where_is(file, path=None, pathext=None):
if path is None:
path = os.environ['PATH']
if is_String(path):
path = string.split(path, os.pathsep)
if pathext is None:
pathext = os.environ['PATHEXT']
if is_String(pathext):
pathext = string.split(pathext, os.pathsep)
for ext in pathext:
if string.lower(ext) == string.lower(file[-len(ext):]):
pathext = ['']
break
for dir in path:
f = os.path.join(dir, file)
for ext in pathext:
fext = f + ext
if os.path.isfile(fext):
return fext
return None
else:
def where_is(file, path=None, pathext=None):
if path is None:
path = os.environ['PATH']
if is_String(path):
path = string.split(path, os.pathsep)
for dir in path:
f = os.path.join(dir, file)
if os.path.isfile(f):
try:
st = os.stat(f)
except OSError:
continue
if stat.S_IMODE(st[stat.ST_MODE]) & 0111:
return f
return None
default_sleep_seconds = 1
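# A hedged sketch of where_is() usage; the paths shown are illustrative
# and depend on the local system:
#
#     >>> where_is('python')                    # searches os.environ['PATH']
#     '/usr/bin/python'
#     >>> where_is('python', '/opt/bin:/usr/bin')
#     '/usr/bin/python'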
try:
import subprocess
except ImportError:
# The subprocess module doesn't exist in this version of Python,
# so we're going to cobble up something that looks just enough
# like its API for our purposes below.
import new
subprocess = new.module('subprocess')
subprocess.PIPE = 'PIPE'
subprocess.STDOUT = 'STDOUT'
subprocess.mswindows = (sys.platform == 'win32')
try:
import popen2
popen2.Popen3
except AttributeError:
class Popen3:
universal_newlines = 1
def __init__(self, command, **kw):
if sys.platform == 'win32' and command[0] == '"':
command = '"' + command + '"'
(stdin, stdout, stderr) = os.popen3(' ' + command)
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
def close_output(self):
self.stdout.close()
self.resultcode = self.stderr.close()
def wait(self):
resultcode = self.resultcode
if os.WIFEXITED(resultcode):
return os.WEXITSTATUS(resultcode)
elif os.WIFSIGNALED(resultcode):
return os.WTERMSIG(resultcode)
else:
return None
else:
try:
popen2.Popen4
except AttributeError:
# A cribbed Popen4 class, with some retrofitted code from
# the Python 1.5 Popen3 class methods to do certain things
# by hand.
class Popen4(popen2.Popen3):
childerr = None
def __init__(self, cmd, bufsize=-1):
p2cread, p2cwrite = os.pipe()
c2pread, c2pwrite = os.pipe()
self.pid = os.fork()
if self.pid == 0:
# Child
os.dup2(p2cread, 0)
os.dup2(c2pwrite, 1)
os.dup2(c2pwrite, 2)
for i in range(3, popen2.MAXFD):
try:
os.close(i)
except: pass
try:
os.execvp(cmd[0], cmd)
finally:
os._exit(1)
# Shouldn't come here, I guess
os._exit(1)
os.close(p2cread)
self.tochild = os.fdopen(p2cwrite, 'w', bufsize)
os.close(c2pwrite)
self.fromchild = os.fdopen(c2pread, 'r', bufsize)
popen2._active.append(self)
popen2.Popen4 = Popen4
class Popen3(popen2.Popen3, popen2.Popen4):
universal_newlines = 1
def __init__(self, command, **kw):
if kw.get('stderr') == 'STDOUT':
apply(popen2.Popen4.__init__, (self, command, 1))
else:
apply(popen2.Popen3.__init__, (self, command, 1))
self.stdin = self.tochild
self.stdout = self.fromchild
self.stderr = self.childerr
def wait(self, *args, **kw):
resultcode = apply(popen2.Popen3.wait, (self,)+args, kw)
if os.WIFEXITED(resultcode):
return os.WEXITSTATUS(resultcode)
elif os.WIFSIGNALED(resultcode):
return os.WTERMSIG(resultcode)
else:
return None
subprocess.Popen = Popen3
# From Josiah Carlson,
# ASPN : Python Cookbook : Module to allow Asynchronous subprocess use on Windows and Posix platforms
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/440554
PIPE = subprocess.PIPE
if subprocess.mswindows:
from win32file import ReadFile, WriteFile
from win32pipe import PeekNamedPipe
import msvcrt
else:
import select
import fcntl
try: fcntl.F_GETFL
except AttributeError: fcntl.F_GETFL = 3
try: fcntl.F_SETFL
except AttributeError: fcntl.F_SETFL = 4
class Popen(subprocess.Popen):
def recv(self, maxsize=None):
return self._recv('stdout', maxsize)
def recv_err(self, maxsize=None):
return self._recv('stderr', maxsize)
def send_recv(self, input='', maxsize=None):
return self.send(input), self.recv(maxsize), self.recv_err(maxsize)
def get_conn_maxsize(self, which, maxsize):
if maxsize is None:
maxsize = 1024
elif maxsize < 1:
maxsize = 1
return getattr(self, which), maxsize
def _close(self, which):
getattr(self, which).close()
setattr(self, which, None)
if subprocess.mswindows:
def send(self, input):
if not self.stdin:
return None
try:
x = msvcrt.get_osfhandle(self.stdin.fileno())
(errCode, written) = WriteFile(x, input)
except ValueError:
return self._close('stdin')
except (subprocess.pywintypes.error, Exception), why:
if why[0] in (109, errno.ESHUTDOWN):
return self._close('stdin')
raise
return written
def _recv(self, which, maxsize):
conn, maxsize = self.get_conn_maxsize(which, maxsize)
if conn is None:
return None
try:
x = msvcrt.get_osfhandle(conn.fileno())
(read, nAvail, nMessage) = PeekNamedPipe(x, 0)
if maxsize < nAvail:
nAvail = maxsize
if nAvail > 0:
(errCode, read) = ReadFile(x, nAvail, None)
except ValueError:
return self._close(which)
except (subprocess.pywintypes.error, Exception), why:
if why[0] in (109, errno.ESHUTDOWN):
return self._close(which)
raise
#if self.universal_newlines:
# read = self._translate_newlines(read)
return read
else:
def send(self, input):
if not self.stdin:
return None
if not select.select([], [self.stdin], [], 0)[1]:
return 0
try:
written = os.write(self.stdin.fileno(), input)
except OSError, why:
if why[0] == errno.EPIPE: #broken pipe
return self._close('stdin')
raise
return written
def _recv(self, which, maxsize):
conn, maxsize = self.get_conn_maxsize(which, maxsize)
if conn is None:
return None
try:
flags = fcntl.fcntl(conn, fcntl.F_GETFL)
except TypeError:
flags = None
else:
if not conn.closed:
fcntl.fcntl(conn, fcntl.F_SETFL, flags| os.O_NONBLOCK)
try:
if not select.select([conn], [], [], 0)[0]:
return ''
r = conn.read(maxsize)
if not r:
return self._close(which)
#if self.universal_newlines:
# r = self._translate_newlines(r)
return r
finally:
if not conn.closed and not flags is None:
fcntl.fcntl(conn, fcntl.F_SETFL, flags)
disconnect_message = "Other end disconnected!"
def recv_some(p, t=.1, e=1, tr=5, stderr=0):
if tr < 1:
tr = 1
x = time.time()+t
y = []
r = ''
pr = p.recv
if stderr:
pr = p.recv_err
while time.time() < x or r:
r = pr()
if r is None:
if e:
raise Exception(disconnect_message)
else:
break
elif r:
y.append(r)
else:
time.sleep(max((x-time.time())/tr, 0))
return ''.join(y)
# TODO(3.0): rewrite to use memoryview()
def send_all(p, data):
while len(data):
sent = p.send(data)
if sent is None:
raise Exception(disconnect_message)
data = buffer(data, sent)
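# A hedged sketch of the asynchronous Popen/send_all/recv_some helpers
# defined above; the 'cat' command and its echoed output are illustrative:
#
#     p = Popen(['cat'], stdin=PIPE, stdout=PIPE, stderr=PIPE)
#     send_all(p, 'hello\n')
#     data = recv_some(p)        # polls stdout, returns 'hello\n' once echoed
#     p.stdin.close()
#     p.wait()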
try:
object
except NameError:
class object:
pass
class TestCmd(object):
"""Class TestCmd
"""
def __init__(self, description = None,
program = None,
interpreter = None,
workdir = None,
subdir = None,
verbose = None,
match = None,
diff = None,
combine = 0,
universal_newlines = 1):
self._cwd = os.getcwd()
self.description_set(description)
self.program_set(program)
self.interpreter_set(interpreter)
if verbose is None:
try:
verbose = max( 0, int(os.environ.get('TESTCMD_VERBOSE', 0)) )
except ValueError:
verbose = 0
self.verbose_set(verbose)
self.combine = combine
self.universal_newlines = universal_newlines
if match is not None:
self.match_function = match
else:
self.match_function = match_re
if diff is not None:
self.diff_function = diff
else:
try:
difflib
except NameError:
pass
else:
self.diff_function = simple_diff
#self.diff_function = difflib.context_diff
#self.diff_function = difflib.unified_diff
self._dirlist = []
self._preserve = {'pass_test': 0, 'fail_test': 0, 'no_result': 0}
if os.environ.has_key('PRESERVE') and os.environ['PRESERVE'] != '':
self._preserve['pass_test'] = os.environ['PRESERVE']
self._preserve['fail_test'] = os.environ['PRESERVE']
self._preserve['no_result'] = os.environ['PRESERVE']
else:
try:
self._preserve['pass_test'] = os.environ['PRESERVE_PASS']
except KeyError:
pass
try:
self._preserve['fail_test'] = os.environ['PRESERVE_FAIL']
except KeyError:
pass
try:
self._preserve['no_result'] = os.environ['PRESERVE_NO_RESULT']
except KeyError:
pass
self._stdout = []
self._stderr = []
self.status = None
self.condition = 'no_result'
self.workdir_set(workdir)
self.subdir(subdir)
def __del__(self):
self.cleanup()
def __repr__(self):
return "%x" % id(self)
banner_char = '='
banner_width = 80
def banner(self, s, width=None):
if width is None:
width = self.banner_width
return s + self.banner_char * (width - len(s))
if os.name == 'posix':
def escape(self, arg):
"escape shell special characters"
slash = '\\'
special = '"$'
arg = string.replace(arg, slash, slash+slash)
for c in special:
arg = string.replace(arg, c, slash+c)
if re_space.search(arg):
arg = '"' + arg + '"'
return arg
else:
# Windows does not allow special characters in file names
# anyway, so no need for an escape function, we will just quote
# the arg.
def escape(self, arg):
if re_space.search(arg):
arg = '"' + arg + '"'
return arg
def canonicalize(self, path):
if is_List(path):
path = apply(os.path.join, tuple(path))
if not os.path.isabs(path):
path = os.path.join(self.workdir, path)
return path
def chmod(self, path, mode):
"""Changes permissions on the specified file or directory
path name."""
path = self.canonicalize(path)
os.chmod(path, mode)
def cleanup(self, condition = None):
"""Removes any temporary working directories for the specified
TestCmd environment. If the environment variable PRESERVE was
set when the TestCmd environment was created, temporary working
directories are not removed. If any of the environment variables
PRESERVE_PASS, PRESERVE_FAIL, or PRESERVE_NO_RESULT were set
when the TestCmd environment was created, then temporary working
directories are not removed if the test passed, failed, or had
no result, respectively. Temporary working directories are also
preserved for conditions specified via the preserve method.
Typically, this method is not called directly, but is used when
the script exits to clean up temporary working directories as
appropriate for the exit status.
"""
if not self._dirlist:
return
os.chdir(self._cwd)
self.workdir = None
if condition is None:
condition = self.condition
if self._preserve[condition]:
for dir in self._dirlist:
print "Preserved directory", dir
else:
list = self._dirlist[:]
list.reverse()
for dir in list:
self.writable(dir, 1)
shutil.rmtree(dir, ignore_errors = 1)
self._dirlist = []
try:
global _Cleanup
_Cleanup.remove(self)
except (AttributeError, ValueError):
pass
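# A hedged sketch of preserving temporary directories, per the cleanup()
# docstring above; the script name and environment value are illustrative:
#
#     $ PRESERVE=1 python my_test.py      # keep directories for every outcome
#
# or, from within a test script:
#
#     test.preserve('fail_test')          # keep directories only on failure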
def command_args(self, program = None,
interpreter = None,
arguments = None):
if program:
if type(program) == type('') and not os.path.isabs(program):
program = os.path.join(self._cwd, program)
else:
program = self.program
if not interpreter:
interpreter = self.interpreter
if not type(program) in [type([]), type(())]:
program = [program]
cmd = list(program)
if interpreter:
if not type(interpreter) in [type([]), type(())]:
interpreter = [interpreter]
cmd = list(interpreter) + cmd
if arguments:
if type(arguments) == type(''):
arguments = string.split(arguments)
cmd.extend(arguments)
return cmd
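# A hedged illustration of how command_args() assembles the command line;
# the program, interpreter and resulting path are made-up values:
#
#     test = TestCmd(program = 'tool.py', interpreter = 'python')
#     test.command_args(arguments = '-v input.txt')
#     # -> ['python', '/original/cwd/tool.py', '-v', 'input.txt']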
def description_set(self, description):
"""Set the description of the functionality being tested.
"""
self.description = description
try:
difflib
except NameError:
def diff(self, a, b, name, *args, **kw):
print self.banner('Expected %s' % name)
print a
print self.banner('Actual %s' % name)
print b
else:
def diff(self, a, b, name, *args, **kw):
print self.banner(name)
args = (a.splitlines(), b.splitlines()) + args
lines = apply(self.diff_function, args, kw)
for l in lines:
print l
def fail_test(self, condition = 1, function = None, skip = 0):
"""Cause the test to fail.
"""
if not condition:
return
self.condition = 'fail_test'
fail_test(self = self,
condition = condition,
function = function,
skip = skip)
def interpreter_set(self, interpreter):
"""Set the program to be used to interpret the program
under test as a script.
"""
self.interpreter = interpreter
def match(self, lines, matches):
"""Compare actual and expected file contents.
"""
return self.match_function(lines, matches)
def match_exact(self, lines, matches):
"""Compare actual and expected file contents.
"""
return match_exact(lines, matches)
def match_re(self, lines, res):
"""Compare actual and expected file contents.
"""
return match_re(lines, res)
def match_re_dotall(self, lines, res):
"""Compare actual and expected file contents.
"""
return match_re_dotall(lines, res)
def no_result(self, condition = 1, function = None, skip = 0):
"""Report that the test could not be run.
"""
if not condition:
return
self.condition = 'no_result'
no_result(self = self,
condition = condition,
function = function,
skip = skip)
def pass_test(self, condition = 1, function = None):
"""Cause the test to pass.
"""
if not condition:
return
self.condition = 'pass_test'
pass_test(self = self, condition = condition, function = function)
def preserve(self, *conditions):
"""Arrange for the temporary working directories for the
specified TestCmd environment to be preserved for one or more
conditions. If no conditions are specified, arranges for
the temporary working directories to be preserved for all
conditions.
"""
if not conditions:
conditions = ('pass_test', 'fail_test', 'no_result')
for cond in conditions:
self._preserve[cond] = 1
def program_set(self, program):
"""Set the executable program or script to be tested.
"""
if program and not os.path.isabs(program):
program = os.path.join(self._cwd, program)
self.program = program
def read(self, file, mode = 'rb'):
"""Reads and returns the contents of the specified file name.
The file name may be a list, in which case the elements are
concatenated with the os.path.join() method. The file is
assumed to be under the temporary working directory unless it
is an absolute path name. The I/O mode for the file may
be specified; it must begin with an 'r'. The default is
'rb' (binary read).
"""
file = self.canonicalize(file)
if mode[0] != 'r':
raise ValueError, "mode must begin with 'r'"
with open(file, mode) as f:
result = f.read()
return result
def rmdir(self, dir):
"""Removes the specified dir name.
The dir name may be a list, in which case the elements are
concatenated with the os.path.join() method. The dir is
assumed to be under the temporary working directory unless it
is an absolute path name.
The dir must be empty.
"""
dir = self.canonicalize(dir)
os.rmdir(dir)
def start(self, program = None,
interpreter = None,
arguments = None,
universal_newlines = None,
**kw):
"""
Starts a program or script for the test environment.
The specified program will have the original directory
prepended unless it is enclosed in a [list].
"""
cmd = self.command_args(program, interpreter, arguments)
cmd_string = string.join(map(self.escape, cmd), ' ')
if self.verbose:
sys.stderr.write(cmd_string + "\n")
if universal_newlines is None:
universal_newlines = self.universal_newlines
# On Windows, if we make stdin a pipe when we plan to send
# no input, and the test program exits before
# Popen calls msvcrt.open_osfhandle, that call will fail.
# So don't use a pipe for stdin if we don't need one.
stdin = kw.get('stdin', None)
if stdin is not None:
stdin = subprocess.PIPE
combine = kw.get('combine', self.combine)
if combine:
stderr_value = subprocess.STDOUT
else:
stderr_value = subprocess.PIPE
return Popen(cmd,
stdin=stdin,
stdout=subprocess.PIPE,
stderr=stderr_value,
universal_newlines=universal_newlines)
def finish(self, popen, **kw):
"""
Finishes and waits for the process being run under control of
the specified popen argument, recording the exit status,
standard output and error output.
"""
popen.stdin.close()
self.status = popen.wait()
if not self.status:
self.status = 0
self._stdout.append(popen.stdout.read())
if popen.stderr:
stderr = popen.stderr.read()
else:
stderr = ''
self._stderr.append(stderr)
def run(self, program = None,
interpreter = None,
arguments = None,
chdir = None,
stdin = None,
universal_newlines = None):
"""Runs a test of the program or script for the test
environment. Standard output and error output are saved for
future retrieval via the stdout() and stderr() methods.
The specified program will have the original directory
prepended unless it is enclosed in a [list].
"""
if chdir:
oldcwd = os.getcwd()
if not os.path.isabs(chdir):
chdir = os.path.join(self.workpath(chdir))
if self.verbose:
sys.stderr.write("chdir(" + chdir + ")\n")
os.chdir(chdir)
p = self.start(program,
interpreter,
arguments,
universal_newlines,
stdin=stdin)
if stdin:
if is_List(stdin):
for line in stdin:
p.stdin.write(line)
else:
p.stdin.write(stdin)
p.stdin.close()
out = p.stdout.read()
if p.stderr is None:
err = ''
else:
err = p.stderr.read()
try:
close_output = p.close_output
except AttributeError:
p.stdout.close()
if not p.stderr is None:
p.stderr.close()
else:
close_output()
self._stdout.append(out)
self._stderr.append(err)
self.status = p.wait()
if not self.status:
self.status = 0
if chdir:
os.chdir(oldcwd)
if self.verbose >= 2:
write = sys.stdout.write
write('============ STATUS: %d\n' % self.status)
out = self.stdout()
if out or self.verbose >= 3:
write('============ BEGIN STDOUT (len=%d):\n' % len(out))
write(out)
write('============ END STDOUT\n')
err = self.stderr()
if err or self.verbose >= 3:
write('============ BEGIN STDERR (len=%d)\n' % len(err))
write(err)
write('============ END STDERR\n')
def sleep(self, seconds = default_sleep_seconds):
"""Sleeps at least the specified number of seconds. If no
number is specified, sleeps at least the minimum number of
seconds necessary to advance file time stamps on the current
system. Sleeping more seconds is all right.
"""
time.sleep(seconds)
def stderr(self, run = None):
"""Returns the error output from the specified run number.
If there is no specified run number, then returns the error
output of the last run. If the run number is less than zero,
then returns the error output from that many runs back from the
current run.
"""
if not run:
run = len(self._stderr)
elif run < 0:
run = len(self._stderr) + run
run = run - 1
return self._stderr[run]
def stdout(self, run = None):
"""Returns the standard output from the specified run number.
If there is no specified run number, then returns the standard
output of the last run. If the run number is less than zero,
then returns the standard output from that many runs back from
the current run.
"""
if not run:
run = len(self._stdout)
elif run < 0:
run = len(self._stdout) + run
run = run - 1
return self._stdout[run]
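# Hedged illustration of the run-number semantics shared by stdout() and
# stderr(), assuming three runs have completed:
#
#     test.stdout()       # output of the most recent run
#     test.stdout(1)      # output of the first run
#     test.stdout(-1)     # output of the run before the most recent one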
def subdir(self, *subdirs):
"""Create new subdirectories under the temporary working
directory, one for each argument. An argument may be a list,
in which case the list elements are concatenated using the
os.path.join() method. Subdirectories multiple levels deep
must be created using a separate argument for each level:
test.subdir('sub', ['sub', 'dir'], ['sub', 'dir', 'ectory'])
Returns the number of subdirectories actually created.
"""
count = 0
for sub in subdirs:
if sub is None:
continue
if is_List(sub):
sub = apply(os.path.join, tuple(sub))
new = os.path.join(self.workdir, sub)
try:
os.mkdir(new)
except OSError:
pass
else:
count = count + 1
return count
def symlink(self, target, link):
"""Creates a symlink to the specified target.
The link name may be a list, in which case the elements are
concatenated with the os.path.join() method. The link is
assumed to be under the temporary working directory unless it
is an absolute path name. The target is *not* assumed to be
under the temporary working directory.
"""
link = self.canonicalize(link)
os.symlink(target, link)
def tempdir(self, path=None):
"""Creates a temporary directory.
A unique directory name is generated if no path name is specified.
The directory is created, and will be removed when the TestCmd
object is destroyed.
"""
if path is None:
try:
path = tempfile.mktemp(prefix=tempfile.template)
except TypeError:
path = tempfile.mktemp()
os.mkdir(path)
# Symlinks in the path will report things
# differently from os.getcwd(), so chdir there
# and back to fetch the canonical path.
cwd = os.getcwd()
try:
os.chdir(path)
path = os.getcwd()
finally:
os.chdir(cwd)
# Uppercase the drive letter since the case of drive
# letters is pretty much random on win32:
drive,rest = os.path.splitdrive(path)
if drive:
path = string.upper(drive) + rest
#
self._dirlist.append(path)
global _Cleanup
try:
_Cleanup.index(self)
except ValueError:
_Cleanup.append(self)
return path
def touch(self, path, mtime=None):
"""Updates the modification time on the specified file or
directory path name. The default is to update to the
current time if no explicit modification time is specified.
"""
path = self.canonicalize(path)
atime = os.path.getatime(path)
if mtime is None:
mtime = time.time()
os.utime(path, (atime, mtime))
def unlink(self, file):
"""Unlinks the specified file name.
The file name may be a list, in which case the elements are
concatenated with the os.path.join() method. The file is
assumed to be under the temporary working directory unless it
is an absolute path name.
"""
file = self.canonicalize(file)
os.unlink(file)
def verbose_set(self, verbose):
"""Set the verbose level.
"""
self.verbose = verbose
def where_is(self, file, path=None, pathext=None):
"""Find an executable file.
"""
if is_List(file):
file = apply(os.path.join, tuple(file))
if not os.path.isabs(file):
file = where_is(file, path, pathext)
return file
def workdir_set(self, path):
"""Creates a temporary working directory with the specified
path name. If the path is a null string (''), a unique
directory name is created.
"""
if path is not None:
if path == '':
path = None
path = self.tempdir(path)
self.workdir = path
def workpath(self, *args):
"""Returns the absolute path name to a subdirectory or file
within the current temporary working directory. Concatenates
the temporary working directory name with the specified
arguments using the os.path.join() method.
"""
return apply(os.path.join, (self.workdir,) + tuple(args))
def readable(self, top, read=1):
"""Make the specified directory tree readable (read == 1)
or not (read == None).
This method has no effect on Windows systems, which use a
completely different mechanism to control file readability.
"""
if sys.platform == 'win32':
return
if read:
def do_chmod(fname):
try: st = os.stat(fname)
except OSError: pass
else: os.chmod(fname, stat.S_IMODE(st[stat.ST_MODE]|stat.S_IREAD))
else:
def do_chmod(fname):
try: st = os.stat(fname)
except OSError: pass
else: os.chmod(fname, stat.S_IMODE(st[stat.ST_MODE]&~stat.S_IREAD))
if os.path.isfile(top):
# If it's a file, that's easy, just chmod it.
do_chmod(top)
elif read:
# It's a directory and we're trying to turn on read
# permission, so it's also pretty easy, just chmod the
# directory and then chmod every entry on our walk down the
# tree. Because os.path.walk() is top-down, we'll enable
# read permission on any directories that have it disabled
# before os.path.walk() tries to list their contents.
do_chmod(top)
def chmod_entries(arg, dirname, names, do_chmod=do_chmod):
for n in names:
do_chmod(os.path.join(dirname, n))
os.path.walk(top, chmod_entries, None)
else:
# It's a directory and we're trying to turn off read
# permission, which means we have to chmod the directories
# in the tree bottom-up, lest disabling read permission from
# the top down get in the way of being able to get at lower
# parts of the tree. But os.path.walk() visits things top
# down, so we just use an object to collect a list of all
# of the entries in the tree, reverse the list, and then
# chmod the reversed (bottom-up) list.
col = Collector(top)
os.path.walk(top, col, None)
col.entries.reverse()
for d in col.entries: do_chmod(d)
def writable(self, top, write=1):
"""Make the specified directory tree writable (write == 1)
or not (write == None).
"""
if sys.platform == 'win32':
if write:
def do_chmod(fname):
try: os.chmod(fname, stat.S_IWRITE)
except OSError: pass
else:
def do_chmod(fname):
try: os.chmod(fname, stat.S_IREAD)
except OSError: pass
else:
if write:
def do_chmod(fname):
try: st = os.stat(fname)
except OSError: pass
else: os.chmod(fname, stat.S_IMODE(st[stat.ST_MODE]|0200))
else:
def do_chmod(fname):
try: st = os.stat(fname)
except OSError: pass
else: os.chmod(fname, stat.S_IMODE(st[stat.ST_MODE]&~0200))
if os.path.isfile(top):
do_chmod(top)
else:
col = Collector(top)
os.path.walk(top, col, None)
for d in col.entries: do_chmod(d)
def executable(self, top, execute=1):
"""Make the specified directory tree executable (execute == 1)
or not (execute == None).
This method has no effect on Windows systems, which use a
completely different mechanism to control file executability.
"""
if sys.platform == 'win32':
return
if execute:
def do_chmod(fname):
try: st = os.stat(fname)
except OSError: pass
else: os.chmod(fname, stat.S_IMODE(st[stat.ST_MODE]|stat.S_IEXEC))
else:
def do_chmod(fname):
try: st = os.stat(fname)
except OSError: pass
else: os.chmod(fname, stat.S_IMODE(st[stat.ST_MODE]&~stat.S_IEXEC))
if os.path.isfile(top):
# If it's a file, that's easy, just chmod it.
do_chmod(top)
elif execute:
# It's a directory and we're trying to turn on execute
# permission, so it's also pretty easy, just chmod the
# directory and then chmod every entry on our walk down the
# tree. Because os.path.walk() is top-down, we'll enable
# execute permission on any directories that have it disabled
# before os.path.walk() tries to list their contents.
do_chmod(top)
def chmod_entries(arg, dirname, names, do_chmod=do_chmod):
for n in names:
do_chmod(os.path.join(dirname, n))
os.path.walk(top, chmod_entries, None)
else:
# It's a directory and we're trying to turn off execute
# permission, which means we have to chmod the directories
# in the tree bottom-up, lest disabling execute permission from
# the top down get in the way of being able to get at lower
# parts of the tree. But os.path.walk() visits things top
# down, so we just use an object to collect a list of all
# of the entries in the tree, reverse the list, and then
# chmod the reversed (bottom-up) list.
col = Collector(top)
os.path.walk(top, col, None)
col.entries.reverse()
for d in col.entries: do_chmod(d)
def write(self, file, content, mode = 'wb'):
"""Writes the specified content text (second argument) to the
specified file name (first argument). The file name may be
a list, in which case the elements are concatenated with the
os.path.join() method. The file is created under the temporary
working directory. Any subdirectories in the path must already
exist. The I/O mode for the file may be specified; it must
begin with a 'w'. The default is 'wb' (binary write).
"""
file = self.canonicalize(file)
if mode[0] != 'w':
raise ValueError, "mode must begin with 'w'"
with open(file, mode) as f:
f.write(content)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
CubicERP/geraldo | refs/heads/master | site/newsite/django_1_0/django/contrib/webdesign/lorem_ipsum.py | 439 | """
Utility functions for generating "lorem ipsum" Latin text.
"""
import random
COMMON_P = 'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.'
WORDS = ('exercitationem', 'perferendis', 'perspiciatis', 'laborum', 'eveniet',
'sunt', 'iure', 'nam', 'nobis', 'eum', 'cum', 'officiis', 'excepturi',
'odio', 'consectetur', 'quasi', 'aut', 'quisquam', 'vel', 'eligendi',
'itaque', 'non', 'odit', 'tempore', 'quaerat', 'dignissimos',
'facilis', 'neque', 'nihil', 'expedita', 'vitae', 'vero', 'ipsum',
'nisi', 'animi', 'cumque', 'pariatur', 'velit', 'modi', 'natus',
'iusto', 'eaque', 'sequi', 'illo', 'sed', 'ex', 'et', 'voluptatibus',
'tempora', 'veritatis', 'ratione', 'assumenda', 'incidunt', 'nostrum',
'placeat', 'aliquid', 'fuga', 'provident', 'praesentium', 'rem',
'necessitatibus', 'suscipit', 'adipisci', 'quidem', 'possimus',
'voluptas', 'debitis', 'sint', 'accusantium', 'unde', 'sapiente',
'voluptate', 'qui', 'aspernatur', 'laudantium', 'soluta', 'amet',
'quo', 'aliquam', 'saepe', 'culpa', 'libero', 'ipsa', 'dicta',
'reiciendis', 'nesciunt', 'doloribus', 'autem', 'impedit', 'minima',
'maiores', 'repudiandae', 'ipsam', 'obcaecati', 'ullam', 'enim',
'totam', 'delectus', 'ducimus', 'quis', 'voluptates', 'dolores',
'molestiae', 'harum', 'dolorem', 'quia', 'voluptatem', 'molestias',
'magni', 'distinctio', 'omnis', 'illum', 'dolorum', 'voluptatum', 'ea',
'quas', 'quam', 'corporis', 'quae', 'blanditiis', 'atque', 'deserunt',
'laboriosam', 'earum', 'consequuntur', 'hic', 'cupiditate',
'quibusdam', 'accusamus', 'ut', 'rerum', 'error', 'minus', 'eius',
'ab', 'ad', 'nemo', 'fugit', 'officia', 'at', 'in', 'id', 'quos',
'reprehenderit', 'numquam', 'iste', 'fugiat', 'sit', 'inventore',
'beatae', 'repellendus', 'magnam', 'recusandae', 'quod', 'explicabo',
'doloremque', 'aperiam', 'consequatur', 'asperiores', 'commodi',
'optio', 'dolor', 'labore', 'temporibus', 'repellat', 'veniam',
'architecto', 'est', 'esse', 'mollitia', 'nulla', 'a', 'similique',
'eos', 'alias', 'dolore', 'tenetur', 'deleniti', 'porro', 'facere',
'maxime', 'corrupti')
COMMON_WORDS = ('lorem', 'ipsum', 'dolor', 'sit', 'amet', 'consectetur',
'adipisicing', 'elit', 'sed', 'do', 'eiusmod', 'tempor', 'incididunt',
'ut', 'labore', 'et', 'dolore', 'magna', 'aliqua')
def sentence():
"""
Returns a randomly generated sentence of lorem ipsum text.
The first word is capitalized, and the sentence ends in either a period or
question mark. Commas are added at random.
"""
# Determine the number of comma-separated sections and number of words in
# each section for this sentence.
sections = [u' '.join(random.sample(WORDS, random.randint(3, 12))) for i in range(random.randint(1, 5))]
s = u', '.join(sections)
# Convert to sentence case and add end punctuation.
return u'%s%s%s' % (s[0].upper(), s[1:], random.choice('?.'))
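# A hedged example of the kind of value sentence() returns; the exact words
# and punctuation are random, so this output is illustrative only:
#
#     >>> sentence()
#     u'Quo dolore autem, nobis eum cum officiis excepturi.'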
def paragraph():
"""
Returns a randomly generated paragraph of lorem ipsum text.
The paragraph consists of between 1 and 4 sentences, inclusive.
"""
return u' '.join([sentence() for i in range(random.randint(1, 4))])
def paragraphs(count, common=True):
"""
Returns a list of paragraphs as returned by paragraph().
If `common` is True, then the first paragraph will be the standard
'lorem ipsum' paragraph. Otherwise, the first paragraph will be random
Latin text. Either way, subsequent paragraphs will be random Latin text.
"""
paras = []
for i in range(count):
if common and i == 0:
paras.append(COMMON_P)
else:
paras.append(paragraph())
return paras
def words(count, common=True):
"""
Returns a string of `count` lorem ipsum words separated by a single space.
If `common` is True, then the first 19 words will be the standard
'lorem ipsum' words. Otherwise, all words will be selected randomly.
"""
if common:
word_list = list(COMMON_WORDS)
else:
word_list = []
c = len(word_list)
if count > c:
count -= c
while count > 0:
c = min(count, len(WORDS))
count -= c
word_list += random.sample(WORDS, c)
else:
word_list = word_list[:count]
return u' '.join(word_list)
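# Hedged usage sketch for words(); the first call is deterministic (it
# truncates the standard 'lorem ipsum' prefix), the second is random:
#
#     >>> words(5)
#     u'lorem ipsum dolor sit amet'
#     >>> words(5, common=False)    # five words sampled at random from WORDS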
|
drpngx/tensorflow | refs/heads/master | tensorflow/python/kernel_tests/linalg/linear_operator_circulant_test.py | 10 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import spectral_ops_test_util
from tensorflow.python.ops.linalg import linalg
from tensorflow.python.ops.linalg import linear_operator_circulant
from tensorflow.python.ops.linalg import linear_operator_test_util
from tensorflow.python.platform import test
rng = np.random.RandomState(0)
_to_complex = linear_operator_circulant._to_complex
class LinearOperatorCirculantBaseTest(object):
"""Common class for circulant tests."""
@contextlib.contextmanager
def test_session(self, *args, **kwargs):
with test.TestCase.test_session(self, *args, **kwargs) as sess:
with spectral_ops_test_util.fft_kernel_label_map():
yield sess
def _shape_to_spectrum_shape(self, shape):
# If spectrum.shape = batch_shape + [N],
# this creates an operator of shape batch_shape + [N, N]
return shape[:-1]
def _spectrum_to_circulant_1d(self, spectrum, shape, dtype):
"""Creates a circulant matrix from a spectrum.
Intentionally done in an explicit yet inefficient way. This provides a
cross check to the main code that uses fancy reshapes.
Args:
spectrum: Float or complex `Tensor`.
shape: Python list. Desired shape of returned matrix.
dtype: Type to cast the returned matrix to.
Returns:
Circulant (batch) matrix of desired `dtype`.
"""
spectrum = _to_complex(spectrum)
spectrum_shape = self._shape_to_spectrum_shape(shape)
domain_dimension = spectrum_shape[-1]
if not domain_dimension:
return array_ops.zeros(shape, dtype)
# Explicitly compute the action of spectrum on basis vectors.
matrix_rows = []
for m in range(domain_dimension):
x = np.zeros([domain_dimension])
# x is a basis vector.
x[m] = 1.0
fft_x = math_ops.fft(x.astype(np.complex64))
h_convolve_x = math_ops.ifft(spectrum * fft_x)
matrix_rows.append(h_convolve_x)
matrix = array_ops.stack(matrix_rows, axis=-1)
return math_ops.cast(matrix, dtype)
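# A hedged NumPy sketch of the same construction for a concrete 1-D
# spectrum; it is illustrative only and mirrors the TF ops used above:
#
#     spectrum = np.array([3., 1., 1.], dtype=np.complex64)
#     cols = []
#     for m in range(3):
#         x = np.zeros(3)
#         x[m] = 1.0                 # basis vector
#         cols.append(np.fft.ifft(spectrum * np.fft.fft(x)))
#     circulant = np.stack(cols, axis=-1)   # dense 3x3 circulant matrix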
class LinearOperatorCirculantTestSelfAdjointOperator(
LinearOperatorCirculantBaseTest,
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""Test of LinearOperatorCirculant when operator is self-adjoint.
Real spectrum <==> Self adjoint operator.
Note that when the spectrum is real, the operator may still be complex.
"""
@property
def _dtypes_to_test(self):
# This operator will always be complex because, although the spectrum is
# real, the matrix will not be real.
return [dtypes.complex64]
def _operator_and_matrix(self, build_info, dtype, use_placeholder):
shape = build_info.shape
# For this test class, we are creating real spectrums.
# We also want the spectrum to have eigenvalues bounded away from zero.
#
# spectrum is bounded away from zero.
spectrum = linear_operator_test_util.random_sign_uniform(
shape=self._shape_to_spectrum_shape(shape), minval=1., maxval=2.)
# If dtype is complex, cast spectrum to complex. The imaginary part will be
# zero, so the operator will still be self-adjoint.
spectrum = math_ops.cast(spectrum, dtype)
lin_op_spectrum = spectrum
if use_placeholder:
lin_op_spectrum = array_ops.placeholder_with_default(
spectrum, shape=None)
operator = linalg.LinearOperatorCirculant(
lin_op_spectrum, is_self_adjoint=True, input_output_dtype=dtype)
mat = self._spectrum_to_circulant_1d(spectrum, shape, dtype=dtype)
return operator, mat
def test_simple_hermitian_spectrum_gives_operator_with_zero_imag_part(self):
with self.test_session():
spectrum = math_ops.cast([1., 1j, -1j], dtypes.complex64)
operator = linalg.LinearOperatorCirculant(
spectrum, input_output_dtype=dtypes.complex64)
matrix = operator.to_dense()
imag_matrix = math_ops.imag(matrix)
eps = np.finfo(np.float32).eps
np.testing.assert_allclose(0, imag_matrix.eval(), rtol=0, atol=eps * 3)
class LinearOperatorCirculantTestHermitianSpectrum(
LinearOperatorCirculantBaseTest,
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""Test of LinearOperatorCirculant when the spectrum is Hermitian.
Hermitian spectrum <==> Real valued operator. We test both real and complex
dtypes here though. So in some cases the matrix will be complex but with
zero imaginary part.
"""
@property
def _dtypes_to_test(self):
return [dtypes.float32, dtypes.complex64]
def _operator_and_matrix(self, build_info, dtype, use_placeholder):
shape = build_info.shape
# For this test class, we are creating Hermitian spectrums.
# We also want the spectrum to have eigenvalues bounded away from zero.
#
# pre_spectrum is bounded away from zero.
pre_spectrum = linear_operator_test_util.random_uniform(
shape=self._shape_to_spectrum_shape(shape), minval=1., maxval=2.)
pre_spectrum_c = _to_complex(pre_spectrum)
# Real{IFFT[pre_spectrum]}
# = IFFT[EvenPartOf[pre_spectrum]]
# is the IFFT of something that is also bounded away from zero.
# Therefore, FFT[pre_h] would be a well-conditioned spectrum.
pre_h = math_ops.ifft(pre_spectrum_c)
# A spectrum is Hermitian iff it is the DFT of a real convolution kernel.
# So we will make spectrum = FFT[h], for real valued h.
h = math_ops.real(pre_h)
h_c = _to_complex(h)
spectrum = math_ops.fft(h_c)
lin_op_spectrum = spectrum
if use_placeholder:
lin_op_spectrum = array_ops.placeholder_with_default(
spectrum, shape=None)
operator = linalg.LinearOperatorCirculant(
lin_op_spectrum, input_output_dtype=dtype)
mat = self._spectrum_to_circulant_1d(spectrum, shape, dtype=dtype)
return operator, mat
def test_simple_hermitian_spectrum_gives_operator_with_zero_imag_part(self):
with self.test_session():
spectrum = math_ops.cast([1., 1j, -1j], dtypes.complex64)
operator = linalg.LinearOperatorCirculant(
spectrum, input_output_dtype=dtypes.complex64)
matrix = operator.to_dense()
imag_matrix = math_ops.imag(matrix)
eps = np.finfo(np.float32).eps
np.testing.assert_allclose(0, imag_matrix.eval(), rtol=0, atol=eps * 3)
class LinearOperatorCirculantTestNonHermitianSpectrum(
LinearOperatorCirculantBaseTest,
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""Test of LinearOperatorCirculant when the spectrum is not Hermitian.
Non-Hermitian spectrum <==> Complex valued operator.
We test only complex dtypes here.
"""
@property
def _dtypes_to_test(self):
return [dtypes.complex64]
def _operator_and_matrix(self, build_info, dtype, use_placeholder):
shape = build_info.shape
# Will be well conditioned enough to get accurate solves.
spectrum = linear_operator_test_util.random_sign_uniform(
shape=self._shape_to_spectrum_shape(shape),
dtype=dtypes.complex64,
minval=1.,
maxval=2.)
lin_op_spectrum = spectrum
if use_placeholder:
lin_op_spectrum = array_ops.placeholder_with_default(
spectrum, shape=None)
operator = linalg.LinearOperatorCirculant(
lin_op_spectrum, input_output_dtype=dtype)
mat = self._spectrum_to_circulant_1d(spectrum, shape, dtype=dtype)
return operator, mat
def test_simple_hermitian_spectrum_gives_operator_with_zero_imag_part(self):
with self.test_session():
spectrum = math_ops.cast([1., 1j, -1j], dtypes.complex64)
operator = linalg.LinearOperatorCirculant(
spectrum, input_output_dtype=dtypes.complex64)
matrix = operator.to_dense()
imag_matrix = math_ops.imag(matrix)
eps = np.finfo(np.float32).eps
np.testing.assert_allclose(0, imag_matrix.eval(), rtol=0, atol=eps * 3)
def test_simple_positive_real_spectrum_gives_self_adjoint_pos_def_oper(self):
with self.test_session() as sess:
spectrum = math_ops.cast([6., 4, 2], dtypes.complex64)
operator = linalg.LinearOperatorCirculant(
spectrum, input_output_dtype=dtypes.complex64)
matrix, matrix_h = sess.run(
[operator.to_dense(),
linalg.adjoint(operator.to_dense())])
self.assertAllClose(matrix, matrix_h)
operator.assert_positive_definite().run() # Should not fail
operator.assert_self_adjoint().run() # Should not fail
def test_defining_operator_using_real_convolution_kernel(self):
with self.test_session():
convolution_kernel = [1., 2., 1.]
spectrum = math_ops.fft(
math_ops.cast(convolution_kernel, dtypes.complex64))
# spectrum is shape [3] ==> operator is shape [3, 3]
# spectrum is Hermitian ==> operator is real.
operator = linalg.LinearOperatorCirculant(spectrum)
# Allow for complex output so we can make sure it has zero imag part.
self.assertEqual(operator.dtype, dtypes.complex64)
matrix = operator.to_dense().eval()
np.testing.assert_allclose(0, np.imag(matrix), atol=1e-6)
def test_hermitian_spectrum_gives_operator_with_zero_imag_part(self):
with self.test_session():
# Make spectrum the FFT of a real convolution kernel h. This ensures that
# spectrum is Hermitian.
h = linear_operator_test_util.random_normal(shape=(3, 4))
spectrum = math_ops.fft(math_ops.cast(h, dtypes.complex64))
operator = linalg.LinearOperatorCirculant(
spectrum, input_output_dtype=dtypes.complex64)
matrix = operator.to_dense()
imag_matrix = math_ops.imag(matrix)
eps = np.finfo(np.float32).eps
np.testing.assert_allclose(
0, imag_matrix.eval(), rtol=0, atol=eps * 3 * 4)
def test_convolution_kernel_same_as_first_row_of_to_dense(self):
spectrum = [[3., 2., 1.], [2., 1.5, 1.]]
with self.test_session():
operator = linalg.LinearOperatorCirculant(spectrum)
h = operator.convolution_kernel()
c = operator.to_dense()
self.assertAllEqual((2, 3), h.get_shape())
self.assertAllEqual((2, 3, 3), c.get_shape())
self.assertAllClose(h.eval(), c.eval()[:, :, 0])
def test_assert_non_singular_fails_for_singular_operator(self):
spectrum = math_ops.cast([0, 4, 2j + 2], dtypes.complex64)
operator = linalg.LinearOperatorCirculant(spectrum)
with self.test_session():
with self.assertRaisesOpError("Singular operator"):
operator.assert_non_singular().run()
def test_assert_non_singular_does_not_fail_for_non_singular_operator(self):
spectrum = math_ops.cast([-3j, 4, 2j + 2], dtypes.complex64)
operator = linalg.LinearOperatorCirculant(spectrum)
with self.test_session():
operator.assert_non_singular().run() # Should not fail
def test_assert_positive_definite_fails_for_non_positive_definite(self):
spectrum = math_ops.cast([6., 4, 2j], dtypes.complex64)
operator = linalg.LinearOperatorCirculant(spectrum)
with self.test_session():
with self.assertRaisesOpError("Not positive definite"):
operator.assert_positive_definite().run()
def test_assert_positive_definite_does_not_fail_when_pos_def(self):
spectrum = math_ops.cast([6., 4, 2j + 2], dtypes.complex64)
operator = linalg.LinearOperatorCirculant(spectrum)
with self.test_session():
operator.assert_positive_definite().run() # Should not fail
def test_real_spectrum_and_not_self_adjoint_hint_raises(self):
spectrum = [1., 2.]
with self.assertRaisesRegexp(ValueError, "real.*always.*self-adjoint"):
linalg.LinearOperatorCirculant(spectrum, is_self_adjoint=False)
def test_real_spectrum_auto_sets_is_self_adjoint_to_true(self):
spectrum = [1., 2.]
operator = linalg.LinearOperatorCirculant(spectrum)
self.assertTrue(operator.is_self_adjoint)
class LinearOperatorCirculant2DBaseTest(object):
"""Common class for 2D circulant tests."""
@contextlib.contextmanager
def test_session(self, *args, **kwargs):
with test.TestCase.test_session(self, *args, **kwargs) as sess:
with spectral_ops_test_util.fft_kernel_label_map():
yield sess
@property
def _operator_build_infos(self):
build_info = linear_operator_test_util.OperatorBuildInfo
# non-batch operators (n, n) and batch operators.
return [
build_info((0, 0)),
build_info((1, 1)),
build_info((1, 6, 6)),
build_info((3, 4, 4)),
build_info((2, 1, 3, 3))
]
def _shape_to_spectrum_shape(self, shape):
"""Get a spectrum shape that will make an operator of desired shape."""
# This 2D block circulant operator takes a spectrum of shape
# batch_shape + [N0, N1],
# and creates an operator of shape
# batch_shape + [N0*N1, N0*N1]
if shape == (0, 0):
return (0, 0)
elif shape == (1, 1):
return (1, 1)
elif shape == (1, 6, 6):
return (1, 2, 3)
elif shape == (3, 4, 4):
return (3, 2, 2)
elif shape == (2, 1, 3, 3):
return (2, 1, 3, 1)
else:
raise ValueError("Unhandled shape: %s" % shape)
def _spectrum_to_circulant_2d(self, spectrum, shape, dtype):
"""Creates a block circulant matrix from a spectrum.
Intentionally done in an explicit yet inefficient way. This provides a
cross-check against the main code that uses fancy reshapes.
Args:
spectrum: Float or complex `Tensor`.
shape: Python list. Desired shape of returned matrix.
dtype: Type to cast the returned matrix to.
Returns:
Block circulant (batch) matrix of desired `dtype`.
"""
spectrum = _to_complex(spectrum)
spectrum_shape = self._shape_to_spectrum_shape(shape)
domain_dimension = spectrum_shape[-1]
if not domain_dimension:
return array_ops.zeros(shape, dtype)
block_shape = spectrum_shape[-2:]
# Explicitly compute the action of spectrum on basis vectors.
matrix_rows = []
for n0 in range(block_shape[0]):
for n1 in range(block_shape[1]):
x = np.zeros(block_shape)
# x is a basis vector.
x[n0, n1] = 1.0
fft_x = math_ops.fft2d(x.astype(np.complex64))
h_convolve_x = math_ops.ifft2d(spectrum * fft_x)
# We want the flat version of the action of the operator on a basis
# vector, not the block version.
h_convolve_x = array_ops.reshape(h_convolve_x, shape[:-1])
matrix_rows.append(h_convolve_x)
matrix = array_ops.stack(matrix_rows, axis=-1)
return math_ops.cast(matrix, dtype)
class LinearOperatorCirculant2DTestHermitianSpectrum(
LinearOperatorCirculant2DBaseTest,
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""Test of LinearOperatorCirculant2D when the spectrum is Hermitian.
Hermitian spectrum <==> Real valued operator. We test both real and complex
dtypes here though. So in some cases the matrix will be complex but with
zero imaginary part.
"""
@property
def _dtypes_to_test(self):
return [dtypes.float32, dtypes.complex64]
def _operator_and_matrix(self, build_info, dtype, use_placeholder):
shape = build_info.shape
# For this test class, we are creating Hermitian spectra.
# We also want the spectrum to have eigenvalues bounded away from zero.
#
# pre_spectrum is bounded away from zero.
pre_spectrum = linear_operator_test_util.random_uniform(
shape=self._shape_to_spectrum_shape(shape), minval=1., maxval=2.)
pre_spectrum_c = _to_complex(pre_spectrum)
# Real{IFFT[pre_spectrum]}
# = IFFT[EvenPartOf[pre_spectrum]]
# is the IFFT of something that is also bounded away from zero.
# Therefore, FFT[pre_h] would be a well-conditioned spectrum.
pre_h = math_ops.ifft2d(pre_spectrum_c)
# A spectrum is Hermitian iff it is the DFT of a real convolution kernel.
# So we will make spectrum = FFT[h], for real valued h.
h = math_ops.real(pre_h)
h_c = _to_complex(h)
spectrum = math_ops.fft2d(h_c)
lin_op_spectrum = spectrum
if use_placeholder:
lin_op_spectrum = array_ops.placeholder_with_default(
spectrum, shape=None)
operator = linalg.LinearOperatorCirculant2D(
lin_op_spectrum, input_output_dtype=dtype)
mat = self._spectrum_to_circulant_2d(spectrum, shape, dtype=dtype)
return operator, mat
class LinearOperatorCirculant2DTestNonHermitianSpectrum(
LinearOperatorCirculant2DBaseTest,
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""Test of LinearOperatorCirculant when the spectrum is not Hermitian.
Non-Hermitian spectrum <==> Complex valued operator.
We test only complex dtypes here.
"""
@property
def _dtypes_to_test(self):
return [dtypes.complex64]
def _operator_and_matrix(self, build_info, dtype, use_placeholder):
shape = build_info.shape
# Will be well conditioned enough to get accurate solves.
spectrum = linear_operator_test_util.random_sign_uniform(
shape=self._shape_to_spectrum_shape(shape),
dtype=dtype,
minval=1.,
maxval=2.)
lin_op_spectrum = spectrum
if use_placeholder:
lin_op_spectrum = array_ops.placeholder_with_default(
spectrum, shape=None)
operator = linalg.LinearOperatorCirculant2D(
lin_op_spectrum, input_output_dtype=dtype)
mat = self._spectrum_to_circulant_2d(spectrum, shape, dtype=dtype)
return operator, mat
def test_real_hermitian_spectrum_gives_real_symmetric_operator(self):
with self.test_session() as sess:
# This is a real and hermitian spectrum.
spectrum = [[1., 2., 2.], [3., 4., 4.], [3., 4., 4.]]
operator = linalg.LinearOperatorCirculant(spectrum)
matrix_tensor = operator.to_dense()
self.assertEqual(matrix_tensor.dtype,
linear_operator_circulant._DTYPE_COMPLEX)
matrix_t = array_ops.matrix_transpose(matrix_tensor)
imag_matrix = math_ops.imag(matrix_tensor)
matrix, matrix_transpose, imag_matrix = sess.run(
[matrix_tensor, matrix_t, imag_matrix])
np.testing.assert_allclose(0, imag_matrix, atol=1e-6)
self.assertAllClose(matrix, matrix_transpose, atol=0)
def test_real_spectrum_gives_self_adjoint_operator(self):
with self.test_session() as sess:
# This is a real and hermitian spectrum.
spectrum = linear_operator_test_util.random_normal(
shape=(3, 3), dtype=dtypes.float32)
operator = linalg.LinearOperatorCirculant2D(spectrum)
matrix_tensor = operator.to_dense()
self.assertEqual(matrix_tensor.dtype,
linear_operator_circulant._DTYPE_COMPLEX)
matrix_h = linalg.adjoint(matrix_tensor)
matrix, matrix_h = sess.run([matrix_tensor, matrix_h])
self.assertAllClose(matrix, matrix_h, atol=0)
def test_assert_non_singular_fails_for_singular_operator(self):
spectrum = math_ops.cast([[0, 4], [2j + 2, 3.]], dtypes.complex64)
operator = linalg.LinearOperatorCirculant2D(spectrum)
with self.test_session():
with self.assertRaisesOpError("Singular operator"):
operator.assert_non_singular().run()
def test_assert_non_singular_does_not_fail_for_non_singular_operator(self):
spectrum = math_ops.cast([[-3j, 4], [2j + 2, 3.]], dtypes.complex64)
operator = linalg.LinearOperatorCirculant2D(spectrum)
with self.test_session():
operator.assert_non_singular().run() # Should not fail
def test_assert_positive_definite_fails_for_non_positive_definite(self):
spectrum = math_ops.cast([[6., 4], [2j, 3.]], dtypes.complex64)
operator = linalg.LinearOperatorCirculant2D(spectrum)
with self.test_session():
with self.assertRaisesOpError("Not positive definite"):
operator.assert_positive_definite().run()
def test_assert_positive_definite_does_not_fail_when_pos_def(self):
spectrum = math_ops.cast([[6., 4], [2j + 2, 3.]], dtypes.complex64)
operator = linalg.LinearOperatorCirculant2D(spectrum)
with self.test_session():
operator.assert_positive_definite().run() # Should not fail
def test_real_spectrum_and_not_self_adjoint_hint_raises(self):
spectrum = [[1., 2.], [3., 4]]
with self.assertRaisesRegexp(ValueError, "real.*always.*self-adjoint"):
linalg.LinearOperatorCirculant2D(spectrum, is_self_adjoint=False)
def test_real_spectrum_auto_sets_is_self_adjoint_to_true(self):
spectrum = [[1., 2.], [3., 4]]
operator = linalg.LinearOperatorCirculant2D(spectrum)
self.assertTrue(operator.is_self_adjoint)
def test_invalid_dtype_raises(self):
spectrum = array_ops.constant(rng.rand(2, 2, 2))
with self.assertRaisesRegexp(TypeError, "must have dtype"):
linalg.LinearOperatorCirculant2D(spectrum)
def test_invalid_rank_raises(self):
spectrum = array_ops.constant(np.float32(rng.rand(2)))
with self.assertRaisesRegexp(ValueError, "must have at least 2 dimensions"):
linalg.LinearOperatorCirculant2D(spectrum)
class LinearOperatorCirculant3DTest(test.TestCase):
"""Simple test of the 3D case. See also the 1D and 2D tests."""
@contextlib.contextmanager
def test_session(self, *args, **kwargs):
with test.TestCase.test_session(self, *args, **kwargs) as sess:
with spectral_ops_test_util.fft_kernel_label_map():
yield sess
def test_real_spectrum_gives_self_adjoint_operator(self):
with self.test_session() as sess:
# This is a real and hermitian spectrum.
spectrum = linear_operator_test_util.random_normal(
shape=(2, 2, 3, 5), dtype=dtypes.float32)
operator = linalg.LinearOperatorCirculant3D(spectrum)
self.assertAllEqual((2, 2 * 3 * 5, 2 * 3 * 5), operator.shape)
matrix_tensor = operator.to_dense()
self.assertEqual(matrix_tensor.dtype,
linear_operator_circulant._DTYPE_COMPLEX)
matrix_h = linalg.adjoint(matrix_tensor)
matrix, matrix_h = sess.run([matrix_tensor, matrix_h])
self.assertAllEqual((2, 2 * 3 * 5, 2 * 3 * 5), matrix.shape)
self.assertAllClose(matrix, matrix_h)
def test_defining_operator_using_real_convolution_kernel(self):
with self.test_session():
convolution_kernel = linear_operator_test_util.random_normal(
shape=(2, 2, 3, 5), dtype=dtypes.float32)
# Convolution kernel is real ==> spectrum is Hermitian.
spectrum = math_ops.fft3d(
math_ops.cast(convolution_kernel, dtypes.complex64))
# spectrum is Hermitian ==> operator is real.
operator = linalg.LinearOperatorCirculant3D(spectrum)
self.assertAllEqual((2, 2 * 3 * 5, 2 * 3 * 5), operator.shape)
# Allow for complex output so we can make sure it has zero imag part.
self.assertEqual(operator.dtype, dtypes.complex64)
matrix = operator.to_dense().eval()
self.assertAllEqual((2, 2 * 3 * 5, 2 * 3 * 5), matrix.shape)
np.testing.assert_allclose(0, np.imag(matrix), atol=1e-6)
def test_defining_spd_operator_by_taking_real_part(self):
with self.test_session() as sess:
# S is real and positive.
s = linear_operator_test_util.random_uniform(
shape=(10, 2, 3, 4), dtype=dtypes.float32, minval=1., maxval=2.)
# Let S = S1 + S2, the Hermitian and anti-hermitian parts.
# S1 = 0.5 * (S + S^H), S2 = 0.5 * (S - S^H),
# where ^H is the Hermitian transpose of the function:
# f(n0, n1, n2)^H := ComplexConjugate[f(N0-n0, N1-n1, N2-n2)].
# We want to isolate S1, since
# S1 is Hermitian by construction
# S1 is real since S is
# S1 is positive since it is the sum of two positive kernels
# IDFT[S] = IDFT[S1] + IDFT[S2]
# = H1 + H2
# where H1 is real since it is Hermitian,
# and H2 is imaginary since it is anti-Hermitian.
ifft_s = math_ops.ifft3d(math_ops.cast(s, dtypes.complex64))
# Throw away H2, keep H1.
real_ifft_s = math_ops.real(ifft_s)
# This is the perfect spectrum!
# spectrum = DFT[H1]
# = S1,
fft_real_ifft_s = math_ops.fft3d(
math_ops.cast(real_ifft_s, dtypes.complex64))
# S1 is Hermitian ==> operator is real.
# S1 is real ==> operator is self-adjoint.
# S1 is positive ==> operator is positive-definite.
operator = linalg.LinearOperatorCirculant3D(fft_real_ifft_s)
# Allow for complex output so we can check operator has zero imag part.
self.assertEqual(operator.dtype, dtypes.complex64)
matrix, matrix_t = sess.run([
operator.to_dense(),
array_ops.matrix_transpose(operator.to_dense())
])
operator.assert_positive_definite().run() # Should not fail.
np.testing.assert_allclose(0, np.imag(matrix), atol=1e-6)
self.assertAllClose(matrix, matrix_t)
# Just to test the theory, get S2 as well.
# This should create an imaginary operator.
# S2 is anti-Hermitian ==> operator is imaginary.
# S2 is real ==> operator is self-adjoint.
imag_ifft_s = math_ops.imag(ifft_s)
fft_imag_ifft_s = math_ops.fft3d(
1j * math_ops.cast(imag_ifft_s, dtypes.complex64))
operator_imag = linalg.LinearOperatorCirculant3D(fft_imag_ifft_s)
matrix, matrix_h = sess.run([
operator_imag.to_dense(),
array_ops.matrix_transpose(math_ops.conj(operator_imag.to_dense()))
])
self.assertAllClose(matrix, matrix_h)
np.testing.assert_allclose(0, np.real(matrix), atol=1e-7)
if __name__ == "__main__":
test.main()
|
Elico-Corp/odoo_OCB | refs/heads/9.0 | addons/event_sale/models/sale_order.py | 26 | # -*- coding: utf-8 -*-
from openerp import api
from openerp.osv import fields, osv
class sale_order(osv.osv):
_inherit = "sale.order"
def action_confirm(self, cr, uid, ids, context=None):
res = super(sale_order, self).action_confirm(cr, uid, ids, context=context)
for order in self.browse(cr, uid, ids, context=context):
redirect_to_event_registration, so_id = any(line.event_id for line in order.order_line), order.id
order.order_line._update_registrations(confirm=True)
if redirect_to_event_registration:
event_ctx = dict(context, default_sale_order_id=so_id)
return self.pool.get('ir.actions.act_window').for_xml_id(cr, uid, 'event_sale', 'action_sale_order_event_registration', event_ctx)
else:
return res
class sale_order_line(osv.osv):
_inherit = 'sale.order.line'
_columns = {
'event_id': fields.many2one(
'event.event', 'Event',
help="Choose an event and it will automatically create a registration for this event."),
'event_ticket_id': fields.many2one(
'event.event.ticket', 'Event Ticket',
help="Choose an event ticket and it will automatically create a registration for this event ticket."),
# those 2 fields are used for dynamic domains and filled by onchange
# TDE: really necessary ? ...
'event_type_id': fields.related('product_id', 'event_type_id', type='many2one', relation="event.type", string="Event Type", readonly=True),
'event_ok': fields.related('product_id', 'event_ok', string='event_ok', type='boolean', readonly=True),
}
def _prepare_order_line_invoice_line(self, cr, uid, line, account_id=False, context=None):
res = super(sale_order_line, self)._prepare_order_line_invoice_line(cr, uid, line, account_id=account_id, context=context)
if line.event_id:
event = self.pool['event.event'].read(cr, uid, line.event_id.id, ['name'], context=context)
res['name'] = '%s: %s' % (res.get('name', ''), event['name'])
return res
@api.onchange('product_id')
def product_id_change_event(self):
if self.product_id.event_ok:
values = dict(event_type_id=self.product_id.event_type_id.id,
event_ok=self.product_id.event_ok)
else:
values = dict(event_type_id=False, event_ok=False)
self.update(values)
@api.multi
def _update_registrations(self, confirm=True, registration_data=None):
""" Create or update registrations linked to a sale order line. A sale
order line has a product_uom_qty attribute that will be the number of
registrations linked to this line. This method updates existing registrations
and creates new ones for any that are missing. """
Registration = self.env['event.registration']
registrations = Registration.search([('sale_order_line_id', 'in', self.ids)])
for so_line in [l for l in self if l.event_id]:
existing_registrations = registrations.filtered(lambda self: self.sale_order_line_id.id == so_line.id)
if confirm:
existing_registrations.filtered(lambda self: self.state != 'open').confirm_registration()
else:
existing_registrations.filtered(lambda self: self.state == 'cancel').do_draft()
for count in range(int(so_line.product_uom_qty) - len(existing_registrations)):
registration = {}
if registration_data:
registration = registration_data.pop()
# TDE CHECK: auto confirmation
registration['sale_order_line_id'] = so_line
self.env['event.registration'].with_context(registration_force_draft=True).create(
Registration._prepare_attendee_values(registration))
return True
def onchange_event_ticket_id(self, cr, uid, ids, event_ticket_id=False, context=None):
price = event_ticket_id and self.pool["event.event.ticket"].browse(cr, uid, event_ticket_id, context=context).price or False
return {'value': {'price_unit': price}}
|
imoseyon/leanKernel-d2usc-deprecated | refs/heads/lk-tw | external/webkit/Tools/Scripts/webkitpy/tool/comments.py | 148 | # Copyright (c) 2009 Google Inc. All rights reserved.
# Copyright (c) 2009 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# A tool for automating dealing with bugzilla, posting patches, committing
# patches, etc.
from webkitpy.common.config import urls
def bug_comment_from_svn_revision(svn_revision):
return "Committed r%s: <%s>" % (svn_revision, urls.view_revision_url(svn_revision))
def bug_comment_from_commit_text(scm, commit_text):
svn_revision = scm.svn_revision_from_commit_text(commit_text)
return bug_comment_from_svn_revision(svn_revision)
|
krisys/SpojBot | refs/heads/master | src/spojbot/bot/views.py | 1 | from django.contrib.auth.decorators import login_required
from django.shortcuts import *
from models import *
from forms import SpojUserForm, CodeGroupForm
from django.core.mail import send_mail
from django.utils import simplejson as json
from django.views.decorators.csrf import csrf_exempt
class Story(object):
def __init__(self, user, submission=None):
self.user = user
self.submissions = []
if submission:
self.submissions.append(submission)
self.count = len(self.submissions)
def duration(self):
if len(self.submissions) == 1:
return self.submissions[0].timestamp.strftime('%d %b %Y')
else:
start = self.submissions[-1].timestamp.strftime('%d %b %Y')
end = self.submissions[0].timestamp.strftime('%d %b %Y')
return start + ' - ' + end
def format_feed(feed):
story = []
for item in feed:
if story:
if item.user == story[-1].user:
story[-1].submissions.append(item)
story[-1].count = len(story[-1].submissions)
else:
story.append(Story(user=item.user, submission=item))
else:
story.append(Story(user=item.user, submission=item))
return story
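# For example, a feed ordered newest-first as [A, A, B, A] collapses into three
# stories: one for A with two submissions, one for B, and another for A.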
def index(request, template_name='index.html'):
if request.user.is_authenticated():
return HttpResponseRedirect('/spoj')
return render_to_response(template_name, locals(),
context_instance=RequestContext(request))
def update_jobs(request):
belongs_to = GroupMember.objects.filter(user_email=request.user.email,
user=None)
for group in belongs_to:
group.user = request.user
group.receive_emails = True
group.save()
belongs_to = GroupMember.objects.filter(user=request.user)
if not belongs_to:
group = CodeGroup.objects.create(name='My group', notifications=1)
GroupMember.objects.create(user_email=request.user.email,
user=request.user, group=group, is_owner=True, receive_emails=True)
@login_required
def spoj(request, template_name='spoj.html'):
update_jobs(request)
groups = [x.group for x in GroupMember.objects.filter(user=request.user)]
users = [request.user]
users += [x.user for x in GroupMember.objects.filter(group__in=groups)]
feed = Submission.objects.filter(user__in=users).order_by('-timestamp')[:300]
feed = format_feed(feed)
suggested_problems = ProblemSuggestion.objects.filter(user=request.user)
solved_by_me = SpojProblem.objects.filter(submission__user=request.user)
friend_suggestions = UserSuggestion.objects.filter(group__in=groups)
friend_suggestions = friend_suggestions.exclude(problem__in=solved_by_me)
todo = suggested_problems.exclude(problem__in=solved_by_me)
return render_to_response(template_name, locals(),
context_instance=RequestContext(request))
@login_required
def config(request, template_name='settings.html'):
user, created = SpojUser.objects.get_or_create(user=request.user)
form = SpojUserForm(request.POST or None, instance=user)
if form.is_valid():
form.save()
user.fetch_spoj_data()
return render_to_response(template_name, locals(),
context_instance=RequestContext(request))
@login_required
def create_group(request):
group = CodeGroup.objects.create(name=request.POST['group'])
GroupMember.objects.create(user_email=request.user.email,
user=request.user, group=group, is_owner=True, receive_emails=True)
return HttpResponseRedirect("/group/%d/" % (group.id))
def user_belongs_to_group(user, group_members):
for member in group_members:
if member.user == user:
return True
return False
@login_required
def view_group(request, id, template_name="group.html"):
try:
group = CodeGroup.objects.get(id=id)
group_members = GroupMember.objects.filter(group=group)
group_members = group_members.order_by('user__spojuser__rank')
if request.user.is_superuser:
pass
elif not user_belongs_to_group(request.user, group_members):
return HttpResponseRedirect("/")
except:
return HttpResponseRedirect("/")
groups = [x.group for x in GroupMember.objects.filter(user=request.user)]
group_users = []
for member in group_members:
if member.is_owner and member.user == request.user:
is_owner = True
group_users.append(member.user)
feed = Submission.objects.filter(user__in=group_users).order_by(
'-timestamp')[:300]
feed = format_feed(feed)
return render_to_response(template_name, locals(),
context_instance=RequestContext(request))
def get_or_none(model, **kwargs):
try:
return model.objects.get(**kwargs)
except model.DoesNotExist:
return None
def validateEmail(email):
from django.core.validators import validate_email
from django.core.exceptions import ValidationError
try:
validate_email(email)
return True
except ValidationError:
return False
@login_required
def view_group_members(request, id, template_name="group_members.html"):
try:
group = CodeGroup.objects.get(id=id)
group_members = GroupMember.objects.filter(group=group)
if not user_belongs_to_group(request.user, group_members):
return HttpResponseRedirect("/")
current_user = GroupMember.objects.get(user=request.user, group=group)
if not current_user.is_owner:
return HttpResponseRedirect("/")
if request.POST:
email = request.POST['email']
if validateEmail(email):
user = get_or_none(User, email=email)
g = GroupMember.objects.create(user_email=email,
user=user, group=group)
group_members = GroupMember.objects.filter(group=group)
if not user:
try:
subject = 'I just added you to my SpojBot Group!'
content = 'Check this out.. This site emails one problem everyday to all members of the group. http://www.spojbot.com '
send_mail(subject, content, '%s <[email protected]>' % (request.user.get_full_name()), [email], fail_silently=False)
except:
pass
else:
g.receive_emails = True
g.save()
form = CodeGroupForm(request.POST or None, instance=group)
if form.is_valid():
form.save()
except:
return HttpResponseRedirect("/")
return render_to_response(template_name, locals(),
context_instance=RequestContext(request))
@login_required
def delete_member(request, id, template_name="delete_member.html"):
member = GroupMember.objects.get(id=id)
group = member.group
current_user = GroupMember.objects.get(user=request.user,
group=group)
if not current_user.is_owner:
return HttpResponseRedirect("/")
if request.POST:
member.delete()
return HttpResponseRedirect("/group/%d/" % group.id)
return render_to_response(template_name, locals(),
context_instance=RequestContext(request))
@login_required
@csrf_exempt
def delete_group(request):
response = {'status': 'Error'}
group = CodeGroup.objects.get(id=request.POST['id'])
current_user = GroupMember.objects.get(user=request.user, group=group)
if current_user.is_owner:
group.delete()
response['status'] = 'OK'
return HttpResponse(json.dumps(response))
return HttpResponse(json.dumps(response))
@login_required
@csrf_exempt
def leave_group(request):
response = {'status': 'Error'}
try:
group = CodeGroup.objects.get(id=request.POST['id'])
current_user = GroupMember.objects.get(user=request.user, group=group)
current_user.delete()
response['status'] = 'OK'
except:
pass
return HttpResponse(json.dumps(response))
@login_required
def suggest_problem(request):
response = {'status': 'Error'}
try:
group = CodeGroup.objects.get(id=request.GET.get('id'))
current_user = GroupMember.objects.get(user=request.user, group=group)
if current_user:
# belongs to this group
problem = request.GET.get('problem')
if '/' in problem:
return HttpResponse(json.dumps(response))
problem, created = SpojProblem.objects.get_or_create(
problem=problem)
if not created:
problem.source = 'user_suggestion'
problem.save()
try:
UserSuggestion.objects.get(group=group, problem=problem)
except:
UserSuggestion.objects.get_or_create(group=group,
problem=problem, user=request.user)
response['status'] = 'OK'
return HttpResponse(json.dumps(response))
except:
pass
return HttpResponse(json.dumps(response))
|
Avinash-Raj/appengine-django-skeleton | refs/heads/master | lib/django/conf/locale/es/formats.py | 504 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = r'j \d\e F \d\e Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = r'j \d\e F \d\e Y \a \l\a\s H:i'
YEAR_MONTH_FORMAT = r'F \d\e Y'
MONTH_DAY_FORMAT = r'j \d\e F'
SHORT_DATE_FORMAT = 'd/m/Y'
SHORT_DATETIME_FORMAT = 'd/m/Y H:i'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
# '31/12/2009', '31/12/09'
'%d/%m/%Y', '%d/%m/%y'
]
DATETIME_INPUT_FORMATS = [
'%d/%m/%Y %H:%M:%S',
'%d/%m/%Y %H:%M:%S.%f',
'%d/%m/%Y %H:%M',
'%d/%m/%y %H:%M:%S',
'%d/%m/%y %H:%M:%S.%f',
'%d/%m/%y %H:%M',
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
|
robbiet480/home-assistant | refs/heads/dev | tests/components/ffmpeg/test_sensor.py | 15 | """The tests for Home Assistant ffmpeg binary sensor."""
from unittest.mock import patch
from homeassistant.setup import setup_component
from tests.common import assert_setup_component, get_test_home_assistant, mock_coro
class TestFFmpegNoiseSetup:
"""Test class for ffmpeg."""
def setup_method(self):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.config = {
"binary_sensor": {"platform": "ffmpeg_noise", "input": "testinputvideo"}
}
def teardown_method(self):
"""Stop everything that was started."""
self.hass.stop()
def test_setup_component(self):
"""Set up ffmpeg component."""
with assert_setup_component(1, "binary_sensor"):
setup_component(self.hass, "binary_sensor", self.config)
self.hass.block_till_done()
assert self.hass.data["ffmpeg"].binary == "ffmpeg"
assert self.hass.states.get("binary_sensor.ffmpeg_noise") is not None
@patch("haffmpeg.sensor.SensorNoise.open_sensor", return_value=mock_coro())
def test_setup_component_start(self, mock_start):
"""Set up ffmpeg component."""
with assert_setup_component(1, "binary_sensor"):
setup_component(self.hass, "binary_sensor", self.config)
self.hass.block_till_done()
assert self.hass.data["ffmpeg"].binary == "ffmpeg"
assert self.hass.states.get("binary_sensor.ffmpeg_noise") is not None
self.hass.start()
assert mock_start.called
entity = self.hass.states.get("binary_sensor.ffmpeg_noise")
assert entity.state == "unavailable"
@patch("haffmpeg.sensor.SensorNoise")
def test_setup_component_start_callback(self, mock_ffmpeg):
"""Set up ffmpeg component."""
with assert_setup_component(1, "binary_sensor"):
setup_component(self.hass, "binary_sensor", self.config)
self.hass.block_till_done()
assert self.hass.data["ffmpeg"].binary == "ffmpeg"
assert self.hass.states.get("binary_sensor.ffmpeg_noise") is not None
self.hass.start()
entity = self.hass.states.get("binary_sensor.ffmpeg_noise")
assert entity.state == "off"
self.hass.add_job(mock_ffmpeg.call_args[0][2], True)
self.hass.block_till_done()
entity = self.hass.states.get("binary_sensor.ffmpeg_noise")
assert entity.state == "on"
class TestFFmpegMotionSetup:
"""Test class for ffmpeg."""
def setup_method(self):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.config = {
"binary_sensor": {"platform": "ffmpeg_motion", "input": "testinputvideo"}
}
def teardown_method(self):
"""Stop everything that was started."""
self.hass.stop()
def test_setup_component(self):
"""Set up ffmpeg component."""
with assert_setup_component(1, "binary_sensor"):
setup_component(self.hass, "binary_sensor", self.config)
self.hass.block_till_done()
assert self.hass.data["ffmpeg"].binary == "ffmpeg"
assert self.hass.states.get("binary_sensor.ffmpeg_motion") is not None
@patch("haffmpeg.sensor.SensorMotion.open_sensor", return_value=mock_coro())
def test_setup_component_start(self, mock_start):
"""Set up ffmpeg component."""
with assert_setup_component(1, "binary_sensor"):
setup_component(self.hass, "binary_sensor", self.config)
self.hass.block_till_done()
assert self.hass.data["ffmpeg"].binary == "ffmpeg"
assert self.hass.states.get("binary_sensor.ffmpeg_motion") is not None
self.hass.start()
assert mock_start.called
entity = self.hass.states.get("binary_sensor.ffmpeg_motion")
assert entity.state == "unavailable"
@patch("haffmpeg.sensor.SensorMotion")
def test_setup_component_start_callback(self, mock_ffmpeg):
"""Set up ffmpeg component."""
with assert_setup_component(1, "binary_sensor"):
setup_component(self.hass, "binary_sensor", self.config)
self.hass.block_till_done()
assert self.hass.data["ffmpeg"].binary == "ffmpeg"
assert self.hass.states.get("binary_sensor.ffmpeg_motion") is not None
self.hass.start()
entity = self.hass.states.get("binary_sensor.ffmpeg_motion")
assert entity.state == "off"
self.hass.add_job(mock_ffmpeg.call_args[0][2], True)
self.hass.block_till_done()
entity = self.hass.states.get("binary_sensor.ffmpeg_motion")
assert entity.state == "on"
|
pabloborrego93/edx-platform | refs/heads/master | openedx/core/djangoapps/content/block_structure/apps.py | 39 | """
Configuration for block_structure djangoapp
"""
from django.apps import AppConfig
class BlockStructureConfig(AppConfig):
"""
block_structure django app.
"""
name = u'openedx.core.djangoapps.content.block_structure'
def ready(self):
"""
Define tasks to perform at app loading time
* Connect signal handlers
* Register celery tasks
These happen at import time. Hence the unused imports
"""
from . import signals, tasks # pylint: disable=unused-variable
|
zxteloiv/ShadowVPN | refs/heads/master | tools/gen_foreign_sh.py | 169 | #!/usr/bin/env python3
#
# Copyright (c) 2014 clowwindy
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from ipaddress import ip_network
import sys
print('''#!/bin/sh
tun=tun0
add_or_delete=add
if [ "$1" == "down" ] || [ "$1" == "del" ]; then
add_or_delete=del
fi
''')
for line in sys.stdin:
line = line.strip()
if not line:
continue
elif line.startswith('#'):
continue
subnet = ip_network(line)
print('route $add_or_delete -net %s netmask %s $tun' %
(subnet.network_address, subnet.netmask))
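# For example, feeding the line "192.168.0.0/24" on stdin emits:
# route $add_or_delete -net 192.168.0.0 netmask 255.255.255.0 $tun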
|
SangDeukLee/futsal | refs/heads/develop | public/assets/node_js/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/common.py | 366 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import with_statement
import collections
import errno
import filecmp
import os.path
import re
import tempfile
import sys
# A minimal memoizing decorator. It'll blow up if the args aren't immutable,
# among other "problems".
class memoize(object):
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
try:
return self.cache[args]
except KeyError:
result = self.func(*args)
self.cache[args] = result
return result
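# For example, memoize caches results keyed by the (hashable) argument tuple:
#   @memoize
#   def fib(n):
#     return n if n < 2 else fib(n - 1) + fib(n - 2)
# Repeated calls with the same n are then served from the cache.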
class GypError(Exception):
"""Error class representing an error, which is to be presented
to the user. The main entry point will catch and display this.
"""
pass
def ExceptionAppend(e, msg):
"""Append a message to the given exception's message."""
if not e.args:
e.args = (msg,)
elif len(e.args) == 1:
e.args = (str(e.args[0]) + ' ' + msg,)
else:
e.args = (str(e.args[0]) + ' ' + msg,) + e.args[1:]
def FindQualifiedTargets(target, qualified_list):
"""
Given a list of qualified targets, return the qualified targets for the
specified |target|.
"""
return [t for t in qualified_list if ParseQualifiedTarget(t)[1] == target]
def ParseQualifiedTarget(target):
# Splits a qualified target into a build file, target name and toolset.
# NOTE: rsplit is used to disambiguate the Windows drive letter separator.
target_split = target.rsplit(':', 1)
if len(target_split) == 2:
[build_file, target] = target_split
else:
build_file = None
target_split = target.rsplit('#', 1)
if len(target_split) == 2:
[target, toolset] = target_split
else:
toolset = None
return [build_file, target, toolset]
def ResolveTarget(build_file, target, toolset):
# This function resolves a target into a canonical form:
# - a fully defined build file, either absolute or relative to the current
# directory
# - a target name
# - a toolset
#
# build_file is the file relative to which 'target' is defined.
# target is the qualified target.
# toolset is the default toolset for that target.
[parsed_build_file, target, parsed_toolset] = ParseQualifiedTarget(target)
if parsed_build_file:
if build_file:
# If a relative path, parsed_build_file is relative to the directory
# containing build_file. If build_file is not in the current directory,
# parsed_build_file is not a usable path as-is. Resolve it by
# interpreting it as relative to build_file. If parsed_build_file is
# absolute, it is usable as a path regardless of the current directory,
# and os.path.join will return it as-is.
build_file = os.path.normpath(os.path.join(os.path.dirname(build_file),
parsed_build_file))
# Further (to handle cases like ../cwd), make it relative to cwd)
if not os.path.isabs(build_file):
build_file = RelativePath(build_file, '.')
else:
build_file = parsed_build_file
if parsed_toolset:
toolset = parsed_toolset
return [build_file, target, toolset]
def BuildFile(fully_qualified_target):
# Extracts the build file from the fully qualified target.
return ParseQualifiedTarget(fully_qualified_target)[0]
def GetEnvironFallback(var_list, default):
"""Look up a key in the environment, with fallback to secondary keys
and finally falling back to a default value."""
for var in var_list:
if var in os.environ:
return os.environ[var]
return default
def QualifiedTarget(build_file, target, toolset):
# "Qualified" means the file that a target was defined in and the target
# name, separated by a colon, suffixed by a # and the toolset name:
# /path/to/file.gyp:target_name#toolset
fully_qualified = build_file + ':' + target
if toolset:
fully_qualified = fully_qualified + '#' + toolset
return fully_qualified
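# For example, QualifiedTarget('foo/bar.gyp', 'baz', 'host') returns
# 'foo/bar.gyp:baz#host', and ParseQualifiedTarget() splits that back into
# ['foo/bar.gyp', 'baz', 'host'].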
@memoize
def RelativePath(path, relative_to):
# Assuming both |path| and |relative_to| are relative to the current
# directory, returns a relative path that identifies path relative to
# relative_to.
# Convert to normalized (and therefore absolute paths).
path = os.path.realpath(path)
relative_to = os.path.realpath(relative_to)
# On Windows, we can't create a relative path to a different drive, so just
# use the absolute path.
if sys.platform == 'win32':
if (os.path.splitdrive(path)[0].lower() !=
os.path.splitdrive(relative_to)[0].lower()):
return path
# Split the paths into components.
path_split = path.split(os.path.sep)
relative_to_split = relative_to.split(os.path.sep)
# Determine how much of the prefix the two paths share.
prefix_len = len(os.path.commonprefix([path_split, relative_to_split]))
# Put enough ".." components to back up out of relative_to to the common
# prefix, and then append the part of path_split after the common prefix.
relative_split = [os.path.pardir] * (len(relative_to_split) - prefix_len) + \
path_split[prefix_len:]
if len(relative_split) == 0:
# The paths were the same.
return ''
# Turn it back into a string and we're done.
return os.path.join(*relative_split)
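# For example, assuming no symlinks are involved, RelativePath('/tmp/a/b/c',
# '/tmp/a/d') returns '../b/c'.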
@memoize
def InvertRelativePath(path, toplevel_dir=None):
"""Given a path like foo/bar that is relative to toplevel_dir, return
the inverse relative path back to the toplevel_dir.
E.g. os.path.normpath(os.path.join(path, InvertRelativePath(path)))
should always produce the empty string, unless the path contains symlinks.
"""
if not path:
return path
toplevel_dir = '.' if toplevel_dir is None else toplevel_dir
return RelativePath(toplevel_dir, os.path.join(toplevel_dir, path))
def FixIfRelativePath(path, relative_to):
# Like RelativePath but returns |path| unchanged if it is absolute.
if os.path.isabs(path):
return path
return RelativePath(path, relative_to)
def UnrelativePath(path, relative_to):
# Assuming that |relative_to| is relative to the current directory, and |path|
# is a path relative to the dirname of |relative_to|, returns a path that
# identifies |path| relative to the current directory.
rel_dir = os.path.dirname(relative_to)
return os.path.normpath(os.path.join(rel_dir, path))
# re objects used by EncodePOSIXShellArgument. See IEEE 1003.1 XCU.2.2 at
# http://www.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_02
# and the documentation for various shells.
# _quote is a pattern that should match any argument that needs to be quoted
# with double-quotes by EncodePOSIXShellArgument. It matches the following
# characters appearing anywhere in an argument:
# \t, \n, space parameter separators
# # comments
# $ expansions (quoted to always expand within one argument)
# % called out by IEEE 1003.1 XCU.2.2
# & job control
# ' quoting
# (, ) subshell execution
# *, ?, [ pathname expansion
# ; command delimiter
# <, >, | redirection
# = assignment
# {, } brace expansion (bash)
# ~ tilde expansion
# It also matches the empty string, because "" (or '') is the only way to
# represent an empty string literal argument to a POSIX shell.
#
# This does not match the characters in _escape, because those need to be
# backslash-escaped regardless of whether they appear in a double-quoted
# string.
_quote = re.compile('[\t\n #$%&\'()*;<=>?[{|}~]|^$')
# _escape is a pattern that should match any character that needs to be
# escaped with a backslash, whether or not the argument matched the _quote
# pattern. _escape is used with re.sub to backslash anything in _escape's
# first match group, hence the (parentheses) in the regular expression.
#
# _escape matches the following characters appearing anywhere in an argument:
# " to prevent POSIX shells from interpreting this character for quoting
# \ to prevent POSIX shells from interpreting this character for escaping
# ` to prevent POSIX shells from interpreting this character for command
# substitution
# Missing from this list is $, because the desired behavior of
# EncodePOSIXShellArgument is to permit parameter (variable) expansion.
#
# Also missing from this list is !, which bash will interpret as the history
# expansion character when history is enabled. bash does not enable history
# by default in non-interactive shells, so this is not thought to be a problem.
# ! was omitted from this list because bash interprets "\!" as a literal string
# including the backslash character (avoiding history expansion but retaining
# the backslash), which would not be correct for argument encoding. Handling
# this case properly would also be problematic because bash allows the history
# character to be changed with the histchars shell variable. Fortunately,
# as history is not enabled in non-interactive shells and
# EncodePOSIXShellArgument is only expected to encode for non-interactive
# shells, there is no room for error here by ignoring !.
_escape = re.compile(r'(["\\`])')
def EncodePOSIXShellArgument(argument):
"""Encodes |argument| suitably for consumption by POSIX shells.
argument may be quoted and escaped as necessary to ensure that POSIX shells
treat the returned value as a literal representing the argument passed to
this function. Parameter (variable) expansions beginning with $ are allowed
to remain intact without escaping the $, to allow the argument to contain
references to variables to be expanded by the shell.
"""
if not isinstance(argument, str):
argument = str(argument)
if _quote.search(argument):
quote = '"'
else:
quote = ''
encoded = quote + re.sub(_escape, r'\\\1', argument) + quote
return encoded
def EncodePOSIXShellList(list):
"""Encodes |list| suitably for consumption by POSIX shells.
Returns EncodePOSIXShellArgument for each item in list, and joins them
together using the space character as an argument separator.
"""
encoded_arguments = []
for argument in list:
encoded_arguments.append(EncodePOSIXShellArgument(argument))
return ' '.join(encoded_arguments)
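# For example, EncodePOSIXShellArgument('hello world') returns '"hello world"' and
# EncodePOSIXShellArgument('$HOME') returns '"$HOME"' (the $ is kept so the shell
# can still expand it), so EncodePOSIXShellList(['echo', 'a b', '$HOME']) returns
# 'echo "a b" "$HOME"'.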
def DeepDependencyTargets(target_dicts, roots):
"""Returns the recursive list of target dependencies."""
dependencies = set()
pending = set(roots)
while pending:
# Pluck out one.
r = pending.pop()
# Skip if visited already.
if r in dependencies:
continue
# Add it.
dependencies.add(r)
# Add its children.
spec = target_dicts[r]
pending.update(set(spec.get('dependencies', [])))
pending.update(set(spec.get('dependencies_original', [])))
return list(dependencies - set(roots))
def BuildFileTargets(target_list, build_file):
"""From a target_list, returns the subset from the specified build_file.
"""
return [p for p in target_list if BuildFile(p) == build_file]
def AllTargets(target_list, target_dicts, build_file):
"""Returns all targets (direct and dependencies) for the specified build_file.
"""
bftargets = BuildFileTargets(target_list, build_file)
deptargets = DeepDependencyTargets(target_dicts, bftargets)
return bftargets + deptargets
def WriteOnDiff(filename):
"""Write to a file only if the new contents differ.
Arguments:
filename: name of the file to potentially write to.
Returns:
A file like object which will write to temporary file and only overwrite
the target if it differs (on close).
"""
class Writer(object):
"""Wrapper around file which only covers the target if it differs."""
def __init__(self):
# Pick temporary file.
tmp_fd, self.tmp_path = tempfile.mkstemp(
suffix='.tmp',
prefix=os.path.split(filename)[1] + '.gyp.',
dir=os.path.split(filename)[0])
try:
self.tmp_file = os.fdopen(tmp_fd, 'wb')
except Exception:
# Don't leave turds behind.
os.unlink(self.tmp_path)
raise
def __getattr__(self, attrname):
# Delegate everything else to self.tmp_file
return getattr(self.tmp_file, attrname)
def close(self):
try:
# Close tmp file.
self.tmp_file.close()
# Determine if different.
same = False
try:
same = filecmp.cmp(self.tmp_path, filename, False)
except OSError, e:
if e.errno != errno.ENOENT:
raise
if same:
# The new file is identical to the old one, just get rid of the new
# one.
os.unlink(self.tmp_path)
else:
# The new file is different from the old one, or there is no old one.
# Rename the new file to the permanent name.
#
# tempfile.mkstemp uses an overly restrictive mode, resulting in a
# file that can only be read by the owner, regardless of the umask.
# There's no reason to not respect the umask here, which means that
# an extra hoop is required to fetch it and reset the new file's mode.
#
# No way to get the umask without setting a new one? Set a safe one
# and then set it back to the old value.
umask = os.umask(077)
os.umask(umask)
os.chmod(self.tmp_path, 0666 & ~umask)
if sys.platform == 'win32' and os.path.exists(filename):
# NOTE: on windows (but not cygwin) rename will not replace an
# existing file, so it must be preceded with a remove. Sadly there
# is no way to make the switch atomic.
os.remove(filename)
os.rename(self.tmp_path, filename)
except Exception:
# Don't leave turds behind.
os.unlink(self.tmp_path)
raise
return Writer()
def EnsureDirExists(path):
"""Make sure the directory for |path| exists."""
try:
os.makedirs(os.path.dirname(path))
except OSError:
pass
def GetFlavor(params):
"""Returns |params.flavor| if it's set, the system's default flavor else."""
flavors = {
'cygwin': 'win',
'win32': 'win',
'darwin': 'mac',
}
if 'flavor' in params:
return params['flavor']
if sys.platform in flavors:
return flavors[sys.platform]
if sys.platform.startswith('sunos'):
return 'solaris'
if sys.platform.startswith('freebsd'):
return 'freebsd'
if sys.platform.startswith('openbsd'):
return 'openbsd'
if sys.platform.startswith('aix'):
return 'aix'
return 'linux'
def CopyTool(flavor, out_path):
"""Finds (flock|mac|win)_tool.gyp in the gyp directory and copies it
to |out_path|."""
# aix and solaris just need flock emulation. mac and win use more complicated
# support scripts.
prefix = {
'aix': 'flock',
'solaris': 'flock',
'mac': 'mac',
'win': 'win'
}.get(flavor, None)
if not prefix:
return
# Slurp input file.
source_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), '%s_tool.py' % prefix)
with open(source_path) as source_file:
source = source_file.readlines()
# Add header and write it out.
tool_path = os.path.join(out_path, 'gyp-%s-tool' % prefix)
with open(tool_path, 'w') as tool_file:
tool_file.write(
''.join([source[0], '# Generated by gyp. Do not edit.\n'] + source[1:]))
# Make file executable.
os.chmod(tool_path, 0755)
# From Alex Martelli,
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560
# ASPN: Python Cookbook: Remove duplicates from a sequence
# First comment, dated 2001/10/13.
# (Also in the printed Python Cookbook.)
def uniquer(seq, idfun=None):
if idfun is None:
idfun = lambda x: x
seen = {}
result = []
for item in seq:
marker = idfun(item)
if marker in seen: continue
seen[marker] = 1
result.append(item)
return result
# Based on http://code.activestate.com/recipes/576694/.
class OrderedSet(collections.MutableSet):
def __init__(self, iterable=None):
self.end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.map = {} # key --> [key, prev, next]
if iterable is not None:
self |= iterable
def __len__(self):
return len(self.map)
def __contains__(self, key):
return key in self.map
def add(self, key):
if key not in self.map:
end = self.end
curr = end[1]
curr[2] = end[1] = self.map[key] = [key, curr, end]
def discard(self, key):
if key in self.map:
key, prev_item, next_item = self.map.pop(key)
prev_item[2] = next_item
next_item[1] = prev_item
def __iter__(self):
end = self.end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
def __reversed__(self):
end = self.end
curr = end[1]
while curr is not end:
yield curr[0]
curr = curr[1]
# The second argument is an addition that causes a pylint warning.
def pop(self, last=True): # pylint: disable=W0221
if not self:
raise KeyError('set is empty')
key = self.end[1][0] if last else self.end[2][0]
self.discard(key)
return key
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self))
def __eq__(self, other):
if isinstance(other, OrderedSet):
return len(self) == len(other) and list(self) == list(other)
return set(self) == set(other)
# Extensions to the recipe.
def update(self, iterable):
for i in iterable:
if i not in self:
self.add(i)
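# For example, OrderedSet(['b', 'a', 'b', 'c']) iterates as b, a, c: duplicates
# are dropped and first-insertion order is preserved, unlike a builtin set.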
class CycleError(Exception):
"""An exception raised when an unexpected cycle is detected."""
def __init__(self, nodes):
self.nodes = nodes
def __str__(self):
return 'CycleError: cycle involving: ' + str(self.nodes)
def TopologicallySorted(graph, get_edges):
r"""Topologically sort based on a user provided edge definition.
Args:
graph: A list of node names.
get_edges: A function mapping from node name to a hashable collection
of node names which this node has outgoing edges to.
Returns:
A list containing all of the node in graph in topological order.
It is assumed that calling get_edges once for each node and caching is
cheaper than repeatedly calling get_edges.
Raises:
CycleError in the event of a cycle.
Example:
graph = {'a': '$(b) $(c)', 'b': 'hi', 'c': '$(b)'}
def GetEdges(node):
return re.findall(r'\$\(([^)]+)\)', graph[node])
print TopologicallySorted(graph.keys(), GetEdges)
==>
['a', 'c', 'b']
"""
get_edges = memoize(get_edges)
visited = set()
visiting = set()
ordered_nodes = []
def Visit(node):
if node in visiting:
raise CycleError(visiting)
if node in visited:
return
visited.add(node)
visiting.add(node)
for neighbor in get_edges(node):
Visit(neighbor)
visiting.remove(node)
ordered_nodes.insert(0, node)
for node in sorted(graph):
Visit(node)
return ordered_nodes
def CrossCompileRequested():
# TODO: figure out how to not build extra host objects in the
# non-cross-compile case when this is enabled, and enable unconditionally.
return (os.environ.get('GYP_CROSSCOMPILE') or
os.environ.get('AR_host') or
os.environ.get('CC_host') or
os.environ.get('CXX_host') or
os.environ.get('AR_target') or
os.environ.get('CC_target') or
os.environ.get('CXX_target'))
|
natebeacham/saml2 | refs/heads/master | tests/conftest.py | 6 | import os
#TODO: On my system this function seems to be returning an incorrect location
def pytest_funcarg__xmlsec(request):
for path in os.environ["PATH"].split(":"):
fil = os.path.join(path, "xmlsec1")
if os.access(fil,os.X_OK):
return fil
raise Exception("Can't find xmlsec1")
def pytest_funcarg__AVA(request):
return [
{
"surName": ["Jeter"],
"givenName": ["Derek"],
},
{
"surName": ["Howard"],
"givenName": ["Ryan"],
},
{
"surName": ["Suzuki"],
"givenName": ["Ischiro"],
},
{
"surName": ["Hedberg"],
"givenName": ["Roland"],
},
]
|
piotrpawlaczek/suds-jurko | refs/heads/master | suds/mx/typer.py | 9 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( [email protected] )
"""
Provides sx typing classes.
"""
from suds import *
from suds.mx import *
from suds.sax import Namespace as NS
from suds.sax.text import Text
class Typer:
"""
Provides XML node typing as either automatic or manual.
@cvar types: A dict of class to xs type mapping.
@type types: dict
"""
types = {
int : ('int', NS.xsdns),
long : ('long', NS.xsdns),
float : ('float', NS.xsdns),
str : ('string', NS.xsdns),
unicode : ('string', NS.xsdns),
Text : ('string', NS.xsdns),
bool : ('boolean', NS.xsdns),
}
@classmethod
def auto(cls, node, value=None):
"""
Automatically set the node's xsi:type attribute based on either I{value}'s
class or the class of the node's text. When I{value} is an unmapped class,
the default type (xs:any) is set.
@param node: An XML node
@type node: L{sax.element.Element}
@param value: An object that is or would be the node's text.
@type value: I{any}
@return: The specified node.
@rtype: L{sax.element.Element}
"""
if value is None:
value = node.getText()
if isinstance(value, Object):
known = cls.known(value)
if known.name is None:
return node
tm = (known.name, known.namespace())
else:
tm = cls.types.get(value.__class__, cls.types.get(str))
cls.manual(node, *tm)
return node
@classmethod
def manual(cls, node, tval, ns=None):
"""
Set the node's xsi:type attribute based on either I{value}'s
class or the class of the node's text. Then adds the referenced
prefix(s) to the node's prefix mapping.
@param node: An XML node
@type node: L{sax.element.Element}
@param tval: The name of the schema type.
@type tval: str
@param ns: The XML namespace of I{tval}.
@type ns: (prefix, uri)
@return: The specified node.
@rtype: L{sax.element.Element}
"""
xta = ':'.join((NS.xsins[0], 'type'))
node.addPrefix(NS.xsins[0], NS.xsins[1])
if ns is None:
node.set(xta, tval)
else:
ns = cls.genprefix(node, ns)
qname = ':'.join((ns[0], tval))
node.set(xta, qname)
node.addPrefix(ns[0], ns[1])
return node
@classmethod
def genprefix(cls, node, ns):
"""
Generate a prefix.
@param node: An XML node on which the prefix will be used.
@type node: L{sax.element.Element}
@param ns: A namespace needing an unique prefix.
@type ns: (prefix, uri)
@return: The I{ns} with a new prefix.
"""
for n in range(1, 1024):
p = 'ns%d' % n
u = node.resolvePrefix(p, default=None)
if u is None or u == ns[1]:
return (p, ns[1])
raise Exception('auto prefix, exhausted')
@classmethod
def known(cls, object):
try:
md = object.__metadata__
known = md.sxtype
return known
except:
pass
|
pkexcellent/luigi | refs/heads/master | luigi/contrib/scalding.py | 22 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
import re
import subprocess
from luigi import six
import luigi.configuration
import luigi.contrib.hadoop
import luigi.contrib.hadoop_jar
import luigi.contrib.hdfs
from luigi import LocalTarget
from luigi.task import flatten
logger = logging.getLogger('luigi-interface')
"""
Scalding support for Luigi.
Example configuration section in luigi.cfg::
[scalding]
# scala home directory, which should include a lib subdir with scala jars.
scala-home: /usr/share/scala
# scalding home directory, which should include a lib subdir with
# scalding-*-assembly-* jars as built from the official Twitter build script.
scalding-home: /usr/share/scalding
# provided dependencies, e.g. jars required for compiling but not executing
# scalding jobs. Currently requred jars:
# org.apache.hadoop/hadoop-core/0.20.2
# org.slf4j/slf4j-log4j12/1.6.6
# log4j/log4j/1.2.15
# commons-httpclient/commons-httpclient/3.1
# commons-cli/commons-cli/1.2
# org.apache.zookeeper/zookeeper/3.3.4
scalding-provided: /usr/share/scalding/provided
# additional jars required.
scalding-libjars: /usr/share/scalding/libjars
"""
class ScaldingJobRunner(luigi.contrib.hadoop.JobRunner):
"""
JobRunner for `pyscald` commands. Used to run a ScaldingJobTask.
"""
def __init__(self):
conf = luigi.configuration.get_config()
default = os.environ.get('SCALA_HOME', '/usr/share/scala')
self.scala_home = conf.get('scalding', 'scala-home', default)
default = os.environ.get('SCALDING_HOME', '/usr/share/scalding')
self.scalding_home = conf.get('scalding', 'scalding-home', default)
self.provided_dir = conf.get(
'scalding', 'scalding-provided', os.path.join(default, 'provided'))
self.libjars_dir = conf.get(
'scalding', 'scalding-libjars', os.path.join(default, 'libjars'))
self.tmp_dir = LocalTarget(is_tmp=True)
def _get_jars(self, path):
return [os.path.join(path, j) for j in os.listdir(path)
if j.endswith('.jar')]
def get_scala_jars(self, include_compiler=False):
lib_dir = os.path.join(self.scala_home, 'lib')
jars = [os.path.join(lib_dir, 'scala-library.jar')]
# additional jar for scala 2.10 only
reflect = os.path.join(lib_dir, 'scala-reflect.jar')
if os.path.exists(reflect):
jars.append(reflect)
if include_compiler:
jars.append(os.path.join(lib_dir, 'scala-compiler.jar'))
return jars
def get_scalding_jars(self):
lib_dir = os.path.join(self.scalding_home, 'lib')
return self._get_jars(lib_dir)
def get_scalding_core(self):
lib_dir = os.path.join(self.scalding_home, 'lib')
for j in os.listdir(lib_dir):
if j.startswith('scalding-core-'):
p = os.path.join(lib_dir, j)
logger.debug('Found scalding-core: %s', p)
return p
raise luigi.contrib.hadoop.HadoopJobError('Could not find scalding-core.')
def get_provided_jars(self):
return self._get_jars(self.provided_dir)
def get_libjars(self):
return self._get_jars(self.libjars_dir)
def get_tmp_job_jar(self, source):
job_name = os.path.basename(os.path.splitext(source)[0])
return os.path.join(self.tmp_dir.path, job_name + '.jar')
def get_build_dir(self, source):
build_dir = os.path.join(self.tmp_dir.path, 'build')
return build_dir
def get_job_class(self, source):
# find name of the job class
# usually the one that matches file name or last class that extends Job
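        # e.g. a source line like "class WordCount(args: Args) extends Job(args)"
        # yields job_class == 'WordCount' (hypothetical class name, for illustration)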
job_name = os.path.splitext(os.path.basename(source))[0]
package = None
job_class = None
for l in open(source).readlines():
p = re.search(r'package\s+([^\s\(]+)', l)
if p:
package = p.groups()[0]
p = re.search(r'class\s+([^\s\(]+).*extends\s+.*Job', l)
if p:
job_class = p.groups()[0]
if job_class == job_name:
break
if job_class:
if package:
job_class = package + '.' + job_class
logger.debug('Found scalding job class: %s', job_class)
return job_class
else:
            raise luigi.contrib.hadoop.HadoopJobError('Could not find scalding job class.')
def build_job_jar(self, job):
job_jar = job.jar()
if job_jar:
if not os.path.exists(job_jar):
logger.error("Can't find jar: %s, full path %s", job_jar, os.path.abspath(job_jar))
raise Exception("job jar does not exist")
if not job.job_class():
logger.error("Undefined job_class()")
raise Exception("Undefined job_class()")
return job_jar
job_src = job.source()
if not job_src:
logger.error("Both source() and jar() undefined")
raise Exception("Both source() and jar() undefined")
if not os.path.exists(job_src):
logger.error("Can't find source: %s, full path %s", job_src, os.path.abspath(job_src))
raise Exception("job source does not exist")
job_src = job.source()
job_jar = self.get_tmp_job_jar(job_src)
build_dir = self.get_build_dir(job_src)
if not os.path.exists(build_dir):
os.makedirs(build_dir)
classpath = ':'.join(filter(None,
self.get_scalding_jars() +
self.get_provided_jars() +
self.get_libjars() +
job.extra_jars()))
scala_cp = ':'.join(self.get_scala_jars(include_compiler=True))
# compile scala source
arglist = ['java', '-cp', scala_cp, 'scala.tools.nsc.Main',
'-classpath', classpath,
'-d', build_dir, job_src]
logger.info('Compiling scala source: %s', ' '.join(arglist))
subprocess.check_call(arglist)
# build job jar file
arglist = ['jar', 'cf', job_jar, '-C', build_dir, '.']
logger.info('Building job jar: %s', ' '.join(arglist))
subprocess.check_call(arglist)
return job_jar
def run_job(self, job):
job_jar = self.build_job_jar(job)
jars = [job_jar] + self.get_libjars() + job.extra_jars()
scalding_core = self.get_scalding_core()
libjars = ','.join(filter(None, jars))
arglist = luigi.contrib.hdfs.load_hadoop_cmd() + ['jar', scalding_core, '-libjars', libjars]
arglist += ['-D%s' % c for c in job.jobconfs()]
job_class = job.job_class() or self.get_job_class(job.source())
arglist += [job_class, '--hdfs']
# scalding does not parse argument with '=' properly
arglist += ['--name', job.task_id.replace('=', ':')]
(tmp_files, job_args) = luigi.contrib.hadoop_jar.fix_paths(job)
arglist += job_args
env = os.environ.copy()
jars.append(scalding_core)
hadoop_cp = ':'.join(filter(None, jars))
env['HADOOP_CLASSPATH'] = hadoop_cp
logger.info("Submitting Hadoop job: HADOOP_CLASSPATH=%s %s",
hadoop_cp, ' '.join(arglist))
luigi.contrib.hadoop.run_and_track_hadoop_job(arglist, env=env)
for a, b in tmp_files:
a.move(b)
class ScaldingJobTask(luigi.contrib.hadoop.BaseHadoopJobTask):
"""
    A job task for Scalding that defines a scala source and (optional) main method.
requires() should return a dictionary where the keys are Scalding argument
names and values are sub tasks or lists of subtasks.
For example:
.. code-block:: python
{'input1': A, 'input2': C} => --input1 <Aoutput> --input2 <Coutput>
{'input1': [A, B], 'input2': [C]} => --input1 <Aoutput> <Boutput> --input2 <Coutput>
"""
def relpath(self, current_file, rel_path):
"""
Compute path given current file and relative path.
"""
script_dir = os.path.dirname(os.path.abspath(current_file))
rel_path = os.path.abspath(os.path.join(script_dir, rel_path))
return rel_path
def source(self):
"""
Path to the scala source for this Scalding Job
Either one of source() or jar() must be specified.
"""
return None
def jar(self):
"""
Path to the jar file for this Scalding Job
Either one of source() or jar() must be specified.
"""
return None
def extra_jars(self):
"""
Extra jars for building and running this Scalding Job.
"""
return []
def job_class(self):
"""
optional main job class for this Scalding Job.
"""
return None
def job_runner(self):
return ScaldingJobRunner()
def atomic_output(self):
"""
If True, then rewrite output arguments to be temp locations and
atomically move them into place after the job finishes.
"""
return True
def requires(self):
return {}
def job_args(self):
"""
Extra arguments to pass to the Scalding job.
"""
return []
def args(self):
"""
Returns an array of args to pass to the job.
"""
arglist = []
for k, v in six.iteritems(self.requires_hadoop()):
arglist.append('--' + k)
arglist.extend([t.output().path for t in flatten(v)])
arglist.extend(['--output', self.output()])
arglist.extend(self.job_args())
return arglist
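# Example sketch (illustrative only -- the Scala source path, task names and
# output location below are hypothetical, not part of luigi):
#
#   class WordCount(ScaldingJobTask):
#       def source(self):
#           return self.relpath(__file__, 'scala/WordCount.scala')
#       def requires(self):
#           # becomes: --input <output path of InputTextFiles>
#           return {'input': InputTextFiles()}
#       def output(self):
#           return luigi.contrib.hdfs.HdfsTarget('/tmp/wordcount-output')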
|
yohanko88/gem5-DC | refs/heads/master | src/arch/x86/isa/insts/general_purpose/no_operation.py | 91 | # Copyright (c) 2007-2008 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2008 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
def macroop NOP
{
fault "NoFault"
};
def macroop HINT_NOP
{
fault "NoFault"
};
'''
|
ajnirp/servo | refs/heads/master | tests/wpt/web-platform-tests/tools/py/testing/io_/__init__.py | 9480 | #
|
matthewoliver/swift | refs/heads/master | swift/common/middleware/x_profile/profile_model.py | 52 | # Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import json
import os
import pstats
import tempfile
import time
from swift import gettext_ as _
from swift.common.middleware.x_profile.exceptions import ODFLIBNotInstalled
ODFLIB_INSTALLED = True
try:
from odf.opendocument import OpenDocumentSpreadsheet
from odf.table import Table, TableRow, TableCell
from odf.text import P
except ImportError:
ODFLIB_INSTALLED = False
class Stats2(pstats.Stats):
def __init__(self, *args, **kwds):
pstats.Stats.__init__(self, *args, **kwds)
def func_to_dict(self, func):
return {'module': func[0], 'line': func[1], 'function': func[2]}
def func_std_string(self, func):
return pstats.func_std_string(func)
def to_json(self, *selection):
d = dict()
d['files'] = [f for f in self.files]
d['prim_calls'] = (self.prim_calls)
d['total_calls'] = (self.total_calls)
if hasattr(self, 'sort_type'):
d['sort_type'] = self.sort_type
else:
d['sort_type'] = 'random'
d['total_tt'] = (self.total_tt)
if self.fcn_list:
stat_list = self.fcn_list[:]
else:
stat_list = self.stats.keys()
for s in selection:
stat_list, __ = self.eval_print_amount(s, stat_list, '')
self.calc_callees()
function_calls = []
for func in stat_list:
cc, nc, tt, ct, callers = self.stats[func]
fdict = dict()
fdict.update(self.func_to_dict(func))
fdict.update({'cc': (cc), 'nc': (nc), 'tt': (tt),
'ct': (ct)})
if self.all_callees:
fdict.update({'callees': []})
for key in self.all_callees[func]:
cee = self.func_to_dict(key)
metric = self.all_callees[func][key]
# FIXME: eventlet profiler don't provide full list of
# the metrics
if type(metric) is tuple:
cc1, nc1, tt1, ct1 = metric
cee.update({'cc': cc1, 'nc': nc1, 'tt': tt1,
'ct': ct1})
else:
cee['nc'] = metric
fdict['callees'].append(cee)
cer = []
for caller in callers:
fd = self.func_to_dict(caller)
metric2 = callers[caller]
if isinstance(metric2, tuple):
cc2, nc2, tt2, ct2 = metric2
fd.update({'cc': cc2, 'nc': nc2, 'tt': tt2, 'ct': ct2})
else:
fd.update({'nc': metric2})
cer.append(fd)
fdict.update({'callers': cer})
function_calls.append(fdict)
d['stats'] = function_calls
return json.dumps(d, indent=2)
def to_csv(self, *selection):
if self.fcn_list:
stat_list = self.fcn_list[:]
order_text = "Ordered by: " + self.sort_type + '\r\n'
else:
stat_list = self.stats.keys()
order_text = "Random listing order was used\r\n"
for s in selection:
stat_list, __ = self.eval_print_amount(s, stat_list, '')
csv = '%d function calls (%d primitive calls) in %.6f seconds.' % (
self.total_calls, self.prim_calls, self.total_tt)
csv = csv + order_text + 'call count(nc), primitive call count(cc), \
total time(tt), time per call, \
cumulative time(ct), time per call, \
function\r\n'
for func in stat_list:
cc, nc, tt, ct, __ = self.stats[func]
tpc = '' if nc == 0 else '%3f' % (tt / nc)
cpc = '' if cc == 0 else '%3f' % (ct / cc)
fn = '%s:%d(%s)' % (func[0], func[1], func[2])
csv = csv + '%d,%d,%3f,%s,%3f,%s,%s\r\n' % (
nc, cc, tt, tpc, ct, cpc, fn)
return csv
def to_ods(self, *selection):
if not ODFLIB_INSTALLED:
raise ODFLIBNotInstalled(_('odfpy not installed.'))
if self.fcn_list:
stat_list = self.fcn_list[:]
order_text = " Ordered by: " + self.sort_type + '\n'
else:
stat_list = self.stats.keys()
order_text = " Random listing order was used\n"
for s in selection:
stat_list, __ = self.eval_print_amount(s, stat_list, '')
spreadsheet = OpenDocumentSpreadsheet()
table = Table(name="Profile")
for fn in self.files:
tcf = TableCell()
tcf.addElement(P(text=fn))
trf = TableRow()
trf.addElement(tcf)
table.addElement(trf)
tc_summary = TableCell()
summary_text = '%d function calls (%d primitive calls) in %.6f \
seconds' % (self.total_calls, self.prim_calls,
self.total_tt)
tc_summary.addElement(P(text=summary_text))
tr_summary = TableRow()
tr_summary.addElement(tc_summary)
table.addElement(tr_summary)
tc_order = TableCell()
tc_order.addElement(P(text=order_text))
tr_order = TableRow()
tr_order.addElement(tc_order)
table.addElement(tr_order)
tr_header = TableRow()
tc_cc = TableCell()
tc_cc.addElement(P(text='Total Call Count'))
tr_header.addElement(tc_cc)
tc_pc = TableCell()
tc_pc.addElement(P(text='Primitive Call Count'))
tr_header.addElement(tc_pc)
tc_tt = TableCell()
tc_tt.addElement(P(text='Total Time(seconds)'))
tr_header.addElement(tc_tt)
tc_pc = TableCell()
tc_pc.addElement(P(text='Time Per call(seconds)'))
tr_header.addElement(tc_pc)
tc_ct = TableCell()
tc_ct.addElement(P(text='Cumulative Time(seconds)'))
tr_header.addElement(tc_ct)
tc_pt = TableCell()
tc_pt.addElement(P(text='Cumulative Time per call(seconds)'))
tr_header.addElement(tc_pt)
tc_nfl = TableCell()
tc_nfl.addElement(P(text='filename:lineno(function)'))
tr_header.addElement(tc_nfl)
table.addElement(tr_header)
for func in stat_list:
cc, nc, tt, ct, __ = self.stats[func]
tr_header = TableRow()
tc_nc = TableCell()
tc_nc.addElement(P(text=nc))
tr_header.addElement(tc_nc)
tc_pc = TableCell()
tc_pc.addElement(P(text=cc))
tr_header.addElement(tc_pc)
tc_tt = TableCell()
tc_tt.addElement(P(text=tt))
tr_header.addElement(tc_tt)
tc_tpc = TableCell()
tc_tpc.addElement(P(text=(None if nc == 0 else float(tt) / nc)))
tr_header.addElement(tc_tpc)
tc_ct = TableCell()
tc_ct.addElement(P(text=ct))
tr_header.addElement(tc_ct)
tc_tpt = TableCell()
tc_tpt.addElement(P(text=(None if cc == 0 else float(ct) / cc)))
tr_header.addElement(tc_tpt)
tc_nfl = TableCell()
tc_nfl.addElement(P(text=func))
tr_header.addElement(tc_nfl)
table.addElement(tr_header)
spreadsheet.spreadsheet.addElement(table)
with tempfile.TemporaryFile() as tmp_ods:
spreadsheet.write(tmp_ods)
tmp_ods.seek(0)
data = tmp_ods.read()
return data
class ProfileLog(object):
def __init__(self, log_filename_prefix, dump_timestamp):
self.log_filename_prefix = log_filename_prefix
self.dump_timestamp = dump_timestamp
def get_all_pids(self):
profile_ids = [l.replace(self.log_filename_prefix, '') for l
in glob.glob(self.log_filename_prefix + '*')
if not l.endswith('.tmp')]
return sorted(profile_ids, reverse=True)
def get_logfiles(self, id_or_name):
# The first file with timestamp in the sorted log_files
# (PREFIX)(PROCESS_ID)-(TIMESTAMP)
if id_or_name in ['all']:
if self.dump_timestamp:
latest_dict = {}
for pid in self.get_all_pids():
[process_id, __] = pid.split('-')
if process_id not in latest_dict.keys():
latest_dict[process_id] = self.log_filename_prefix +\
pid
log_files = latest_dict.values()
else:
log_files = [l for l in glob.glob(self.log_filename_prefix
+ '*') if not l.endswith('.tmp')]
else:
pid = str(os.getpid()) if id_or_name in [None, '', 'current']\
else id_or_name
log_files = [l for l in glob.glob(self.log_filename_prefix +
pid + '*') if not l.endswith('.tmp')]
if len(log_files) > 0:
log_files = sorted(log_files, reverse=True)[0:1]
return log_files
def dump_profile(self, profiler, pid):
if self.log_filename_prefix:
pfn = self.log_filename_prefix + str(pid)
if self.dump_timestamp:
pfn = pfn + "-" + str(time.time())
tmpfn = pfn + ".tmp"
profiler.dump_stats(tmpfn)
os.rename(tmpfn, pfn)
return pfn
def clear(self, id_or_name):
log_files = self.get_logfiles(id_or_name)
for l in log_files:
os.path.exists(l) and os.remove(l)
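# Example sketch (illustrative only -- the profiled call and dump path are
# hypothetical): wrap a cProfile dump in Stats2 to export it as JSON.
#
#   import cProfile
#   profiler = cProfile.Profile()
#   profiler.runcall(sorted, range(1000))
#   profiler.dump_stats('/tmp/example.prof')
#   report = Stats2('/tmp/example.prof').sort_stats('cumulative').to_json()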
|
googleapis/googleapis-gen | refs/heads/master | google/cloud/aiplatform/v1beta1/aiplatform-v1beta1-py/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_extraction.py | 3 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.cloud.aiplatform.v1beta1.schema.predict.instance',
manifest={
'TextExtractionPredictionInstance',
},
)
class TextExtractionPredictionInstance(proto.Message):
r"""Prediction input format for Text Extraction.
Attributes:
content (str):
The text snippet to make the predictions on.
mime_type (str):
The MIME type of the text snippet. The
supported MIME types are listed below.
- text/plain
key (str):
This field is only used for batch prediction.
If a key is provided, the batch prediction
result will by mapped to this key. If omitted,
then the batch prediction result will contain
the entire input instance. AI Platform will not
check if keys in the request are duplicates, so
it is up to the caller to ensure the keys are
unique.
"""
content = proto.Field(
proto.STRING,
number=1,
)
mime_type = proto.Field(
proto.STRING,
number=2,
)
key = proto.Field(
proto.STRING,
number=3,
)
__all__ = tuple(sorted(__protobuf__.manifest))
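# Example sketch (illustrative values only): proto-plus messages accept the
# documented fields as keyword arguments.
#
#   instance = TextExtractionPredictionInstance(
#       content='The quick brown fox jumps over the lazy dog.',
#       mime_type='text/plain',
#       key='doc-1',
#   )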
|
danguria/linux-kernel-study | refs/heads/master | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py | 886 | # Util.py - Python extension for perf trace, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <[email protected]>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
NSECS_PER_SEC = 1000000000
def avg(total, n):
return total / n
def nsecs(secs, nsecs):
return secs * NSECS_PER_SEC + nsecs
def nsecs_secs(nsecs):
return nsecs / NSECS_PER_SEC
def nsecs_nsecs(nsecs):
return nsecs % NSECS_PER_SEC
def nsecs_str(nsecs):
str = "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs)),
return str
def clear_term():
print("\x1b[H\x1b[2J")
|
linvictor88/vse-lbaas-driver | refs/heads/master | quantum/api/api_common.py | 3 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Citrix System.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import urllib
from oslo.config import cfg
from webob import exc
from quantum.common import constants
from quantum.common import exceptions
from quantum.openstack.common import log as logging
LOG = logging.getLogger(__name__)
def get_filters(request, attr_info, skips=[]):
"""Extracts the filters from the request string.
Returns a dict of lists for the filters:
check=a&check=b&name=Bob&
becomes:
{'check': [u'a', u'b'], 'name': [u'Bob']}
"""
res = {}
for key, values in request.GET.dict_of_lists().iteritems():
if key in skips:
continue
values = [v for v in values if v]
key_attr_info = attr_info.get(key, {})
if 'convert_list_to' in key_attr_info:
values = key_attr_info['convert_list_to'](values)
elif 'convert_to' in key_attr_info:
convert_to = key_attr_info['convert_to']
values = [convert_to(v) for v in values]
if values:
res[key] = values
return res
def get_previous_link(request, items, id_key):
params = request.GET.copy()
params.pop('marker', None)
if items:
marker = items[0][id_key]
params['marker'] = marker
params['page_reverse'] = True
return "%s?%s" % (request.path_url, urllib.urlencode(params))
def get_next_link(request, items, id_key):
params = request.GET.copy()
params.pop('marker', None)
if items:
marker = items[-1][id_key]
params['marker'] = marker
params.pop('page_reverse', None)
return "%s?%s" % (request.path_url, urllib.urlencode(params))
def get_limit_and_marker(request):
"""Return marker, limit tuple from request.
:param request: `wsgi.Request` possibly containing 'marker' and 'limit'
GET variables. 'marker' is the id of the last element
the client has seen, and 'limit' is the maximum number
of items to return. If limit == 0, it means we needn't
pagination, then return None.
"""
max_limit = _get_pagination_max_limit()
limit = _get_limit_param(request, max_limit)
if max_limit > 0:
limit = min(max_limit, limit) or max_limit
if not limit:
return None, None
marker = request.GET.get('marker', None)
return limit, marker
def _get_pagination_max_limit():
max_limit = -1
if (cfg.CONF.pagination_max_limit.lower() !=
constants.PAGINATION_INFINITE):
try:
max_limit = int(cfg.CONF.pagination_max_limit)
if max_limit == 0:
raise ValueError()
except ValueError:
LOG.warn(_("Invalid value for pagination_max_limit: %s. It "
"should be an integer greater to 0"),
cfg.CONF.pagination_max_limit)
return max_limit
def _get_limit_param(request, max_limit):
"""Extract integer limit from request or fail."""
try:
limit = int(request.GET.get('limit', 0))
if limit >= 0:
return limit
except ValueError:
pass
msg = _("Limit must be an integer 0 or greater and not '%d'")
raise exceptions.BadRequest(resource='limit', msg=msg)
def list_args(request, arg):
"""Extracts the list of arg from request."""
return [v for v in request.GET.getall(arg) if v]
def get_sorts(request, attr_info):
"""Extract sort_key and sort_dir from request.
Return as: [(key1, value1), (key2, value2)]
"""
sort_keys = list_args(request, "sort_key")
sort_dirs = list_args(request, "sort_dir")
if len(sort_keys) != len(sort_dirs):
msg = _("The number of sort_keys and sort_dirs must be same")
raise exc.HTTPBadRequest(explanation=msg)
valid_dirs = [constants.SORT_DIRECTION_ASC, constants.SORT_DIRECTION_DESC]
absent_keys = [x for x in sort_keys if x not in attr_info]
if absent_keys:
msg = _("%s is invalid attribute for sort_keys") % absent_keys
raise exc.HTTPBadRequest(explanation=msg)
invalid_dirs = [x for x in sort_dirs if x not in valid_dirs]
if invalid_dirs:
msg = (_("%(invalid_dirs)s is invalid value for sort_dirs, "
"valid value is '%(asc)s' and '%(desc)s'") %
{'invalid_dirs': invalid_dirs,
'asc': constants.SORT_DIRECTION_ASC,
'desc': constants.SORT_DIRECTION_DESC})
raise exc.HTTPBadRequest(explanation=msg)
return zip(sort_keys,
[x == constants.SORT_DIRECTION_ASC for x in sort_dirs])
def get_page_reverse(request):
data = request.GET.get('page_reverse', 'False')
return data.lower() == "true"
def get_pagination_links(request, items, limit,
marker, page_reverse, key="id"):
key = key if key else 'id'
links = []
if not limit:
return links
if not (len(items) < limit and not page_reverse):
links.append({"rel": "next",
"href": get_next_link(request, items,
key)})
if not (len(items) < limit and page_reverse):
links.append({"rel": "previous",
"href": get_previous_link(request, items,
key)})
return links
class PaginationHelper(object):
def __init__(self, request, primary_key='id'):
self.request = request
self.primary_key = primary_key
def update_fields(self, original_fields, fields_to_add):
pass
def update_args(self, args):
pass
def paginate(self, items):
return items
def get_links(self, items):
return {}
class PaginationEmulatedHelper(PaginationHelper):
def __init__(self, request, primary_key='id'):
super(PaginationEmulatedHelper, self).__init__(request, primary_key)
self.limit, self.marker = get_limit_and_marker(request)
self.page_reverse = get_page_reverse(request)
def update_fields(self, original_fields, fields_to_add):
if not original_fields:
return
if self.primary_key not in original_fields:
original_fields.append(self.primary_key)
fields_to_add.append(self.primary_key)
def paginate(self, items):
if not self.limit:
return items
i = -1
if self.marker:
for item in items:
i = i + 1
if item[self.primary_key] == self.marker:
break
if self.page_reverse:
return items[i - self.limit:i]
return items[i + 1:i + self.limit + 1]
def get_links(self, items):
return get_pagination_links(
self.request, items, self.limit, self.marker,
self.page_reverse, self.primary_key)
class PaginationNativeHelper(PaginationEmulatedHelper):
def update_args(self, args):
if self.primary_key not in dict(args.get('sorts', [])).keys():
args.setdefault('sorts', []).append((self.primary_key, True))
args.update({'limit': self.limit, 'marker': self.marker,
'page_reverse': self.page_reverse})
def paginate(self, items):
return items
class NoPaginationHelper(PaginationHelper):
pass
class SortingHelper(object):
def __init__(self, request, attr_info):
pass
def update_args(self, args):
pass
def update_fields(self, original_fields, fields_to_add):
pass
def sort(self, items):
return items
class SortingEmulatedHelper(SortingHelper):
def __init__(self, request, attr_info):
super(SortingEmulatedHelper, self).__init__(request, attr_info)
self.sort_dict = get_sorts(request, attr_info)
def update_fields(self, original_fields, fields_to_add):
if not original_fields:
return
for key in dict(self.sort_dict).keys():
if key not in original_fields:
original_fields.append(key)
fields_to_add.append(key)
def sort(self, items):
def cmp_func(obj1, obj2):
for key, direction in self.sort_dict:
ret = cmp(obj1[key], obj2[key])
if ret:
return ret * (1 if direction else -1)
return 0
return sorted(items, cmp=cmp_func)
class SortingNativeHelper(SortingHelper):
def __init__(self, request, attr_info):
self.sort_dict = get_sorts(request, attr_info)
def update_args(self, args):
args['sorts'] = self.sort_dict
class NoSortingHelper(SortingHelper):
pass
class QuantumController(object):
"""Base controller class for Quantum API."""
# _resource_name will be redefined in sub concrete controller
_resource_name = None
def __init__(self, plugin):
self._plugin = plugin
super(QuantumController, self).__init__()
def _prepare_request_body(self, body, params):
"""Verifies required parameters are in request body.
Sets default value for missing optional parameters.
Body argument must be the deserialized body.
"""
try:
if body is None:
# Initialize empty resource for setting default value
body = {self._resource_name: {}}
data = body[self._resource_name]
except KeyError:
# raise if _resource_name is not in req body.
raise exc.HTTPBadRequest(_("Unable to find '%s' in request body") %
self._resource_name)
for param in params:
param_name = param['param-name']
param_value = data.get(param_name)
# If the parameter wasn't found and it was required, return 400
if param_value is None and param['required']:
msg = (_("Failed to parse request. "
"Parameter '%s' not specified") % param_name)
LOG.error(msg)
raise exc.HTTPBadRequest(msg)
data[param_name] = param_value or param.get('default-value')
return body
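# Example sketch (illustrative only -- uses webob's Request.blank test helper,
# which is not part of this module):
#
#   from webob import Request
#   req = Request.blank('/v2.0/ports?name=Bob&check=a&check=b')
#   get_filters(req, attr_info={})
#   # -> {'name': [u'Bob'], 'check': [u'a', u'b']}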
|
ivandevp/django | refs/heads/master | tests/migrations/test_migrations_no_default/0001_initial.py | 381 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='SillyModel',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('silly_field', models.BooleanField(default=False)),
],
options={
},
bases=(models.Model,),
),
]
|
JacobJacob/volatility | refs/heads/master | volatility/plugins/malware/__init__.py | 12133432 | |
sasukeh/neutron | refs/heads/master | neutron/api/__init__.py | 12133432 | |
gangadhar-kadam/nassimlib | refs/heads/master | core/doctype/db_sync/__init__.py | 12133432 | |
flatherskevin/TGML | refs/heads/master | Tgml.py | 1 | """
Last Edited By: Kevin Flathers
Date Last Edited: 06/26/2017
Author: Kevin Flathers
Date Created: 05/27/2017
Purpose:
"""
from lxml import etree
import os
from os.path import splitext
from .errors import *
class Tgml:
DEFAULT_PROPERTIES = {
'Id': '',
'Name': '',
'Background': '#FFFFFF',
'Stretch': 'Uniform',
'UseGlobalScripts': 'False',
'DisablePanAndZoom': 'False',
'GridSize': '10',
'Height': '600',
'Width': '800'
}
SUPPORTED_CHILDREN = (
'Animate',
'AnimatedImage',
'Arc',
'Bind',
'Chord',
'Component',
'Curve',
'Ellipse',
'Expose',
'Group',
'Image',
'Layer',
'Line',
'Metadata',
'Path',
'Pie',
'Polygon',
'Polyline',
'Rectangle',
'Script',
'TargetArea',
'Text',
'TextBox'
)
def __init__(self, obj_in, input_type='file'):
self.obj_in = obj_in
#obj_in is a tgml file
if input_type == 'file':
self.read_tgml_file(self.obj_in)
#obj_in is the immediate child of the Tgml tag in a file
#Helps with creating objects from dependency files
elif input_type == 'child':
self.element = self.read_tgml_file(self.obj_in)
self.element = self.element[0]
#obj_in is an etree.Element object
elif input_type == 'element':
self.element = self.obj_in
#obj_in is a string
elif input_type == 'blank':
self.element = etree.Element(obj_in)
else:
raise BadInputObject('Input type does not exist')
self.__properties = {
'Id': '',
'Name': '',
'Background': '#FFFFFF',
'Stretch': 'Uniform',
'UseGlobalScripts': 'False',
'DisablePanAndZoom': 'False',
'GridSize': '10',
'Height': '600',
'Width': '800'
}
self.__exposed_properties = {}
@property
def properties(self):
return self.__properties
@properties.setter
def properties(self, value):
self.__properties = value
@property
def exposed_properties(self):
        return self.__exposed_properties
@exposed_properties.setter
def exposed_properties(self, value):
        self.__exposed_properties = value
def __call__(self):
return self.element
#Fixes CDATA issue with all scripts under 'element'
def fix_cdata(self):
for script in self.element.xpath('.//Script'):
script.text = etree.CDATA(script.text)
for text in self.element.xpath('.//Text'):
content = None
if text.get('Content') not in ['', None]:
content = text.get('Content')
else:
if str(text.text).isspace():
content = ''
else:
content = text.text
text.text = etree.CDATA(content)
etree.strip_attributes(text, 'Content')
for text_box in self.element.xpath('.//TextBox'):
if text_box.get('Content') not in ['', None]:
content = text_box.get('Content')
else:
if str(text_box.text).isspace():
content = ''
else:
content = text_box.text
text_box.text = etree.CDATA(content)
etree.strip_attributes(text_box, 'Content')
#Set a value to all exposed attributes nested beneath an element
def set_exposed_properties(self, exposed_properties):
for key in exposed_properties.keys():
for item in self.element.xpath('.//*'):
if item.get('ExposedAttribute') != '':
expose = str(item.get('Name')).replace(' ', '')
if expose == key:
item.getparent().set(item.get('ExposedAttribute'), exposed_properties[key])
#Compiles the Tgml object
def compile(self, validate_element=True):
try:
if validate_element:
self.validate_element()
self.set_properties(self.properties)
self.set_exposed_properties(self.exposed_properties)
self.fix_cdata()
except Exception as err:
print(err)
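    # Usage sketch (illustrative only -- the file names and exposed property
    # below are hypothetical):
    #   snippet = Tgml('widgets/fan.tgml')
    #   snippet.properties['Name'] = 'SupplyFan'
    #   snippet.exposed_properties = {'Label': 'AHU-1 Supply Fan'}
    #   snippet.compile()
    #   snippet.write_to_file('build', 'SupplyFan')   # -> build/SupplyFan.tgml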
#Transforms the Tgml object
def transform(self):
raise NotImplementedError
#Sets Tgml properties
#Takes in a dictionary
def set_properties(self, properties):
for key in properties.keys():
self.element.set(key, properties[key])
#Check if property is set to a value, then set it to a new value
def checkset_property(self, attribute, value, value_check=[None,'','None']):
if(str(self.element.get(attribute)) in value_check):
self.element.set(attribute, value)
#Return etree object from file
def read_from_file(self, file):
with open(file, 'r') as file:
return etree.fromstring(file.read())
    #Checks the file, sets self.element to the parsed etree object (or a blank <Tgml> element on error), and returns it
    def read_tgml_file(self, file):
        if(self.validate_file(file)):
            self.element = self.read_from_file(file)
        else:
            self.element = etree.Element('Tgml')
        return self.element
#Writes content to a file
def write_to_file(self, directory, name):
with open(os.path.join(directory, name + '.tgml'), 'w') as save_file:
save_file.write(etree.tostring(self.element).decode('utf-8'))
#Checks to see if a file is properly setup for use as a TGML
def validate_file(self, file, extension_check=True, tag_check=False):
try:
if extension_check:
if(str(splitext(file)[1]) != '.tgml'):
raise BadTgmlFileError('File is not a .tgml file')
if tag_check:
                if(self.element.tag != 'Tgml'):
                    raise BadTgmlFileError('TGML file does not contain a <Tgml> or <Tgml/> tag at the beginning / end of the file')
return 1
except Exception as err:
print(err)
return 0
#Checks that element contents are supported
def validate_element(self, children_check=False):
if children_check:
for child in self.element.xpath('./*'):
if child.tag not in self.SUPPORTED_CHILDREN:
raise BadElementChildError('Element does not support this child: ' + str(child.tag)) |
grevutiu-gabriel/sympy | refs/heads/master | sympy/core/operations.py | 40 | from __future__ import print_function, division
from sympy.core.sympify import _sympify, sympify
from sympy.core.basic import Basic, _aresame
from sympy.core.cache import cacheit
from sympy.core.compatibility import ordered, range
from sympy.core.logic import fuzzy_and
from sympy.core.evaluate import global_evaluate
class AssocOp(Basic):
""" Associative operations, can separate noncommutative and
commutative parts.
(a op b) op c == a op (b op c) == a op b op c.
Base class for Add and Mul.
This is an abstract base class, concrete derived classes must define
the attribute `identity`.
"""
# for performance reason, we don't let is_commutative go to assumptions,
# and keep it right here
__slots__ = ['is_commutative']
@cacheit
def __new__(cls, *args, **options):
from sympy import Order
args = list(map(_sympify, args))
args = [a for a in args if a is not cls.identity]
if not options.pop('evaluate', global_evaluate[0]):
return cls._from_args(args)
if len(args) == 0:
return cls.identity
if len(args) == 1:
return args[0]
c_part, nc_part, order_symbols = cls.flatten(args)
is_commutative = not nc_part
obj = cls._from_args(c_part + nc_part, is_commutative)
if order_symbols is not None:
return Order(obj, *order_symbols)
return obj
@classmethod
def _from_args(cls, args, is_commutative=None):
"""Create new instance with already-processed args"""
if len(args) == 0:
return cls.identity
elif len(args) == 1:
return args[0]
obj = super(AssocOp, cls).__new__(cls, *args)
if is_commutative is None:
is_commutative = fuzzy_and(a.is_commutative for a in args)
obj.is_commutative = is_commutative
return obj
def _new_rawargs(self, *args, **kwargs):
"""Create new instance of own class with args exactly as provided by
caller but returning the self class identity if args is empty.
This is handy when we want to optimize things, e.g.
>>> from sympy import Mul, S
>>> from sympy.abc import x, y
>>> e = Mul(3, x, y)
>>> e.args
(3, x, y)
>>> Mul(*e.args[1:])
x*y
>>> e._new_rawargs(*e.args[1:]) # the same as above, but faster
x*y
Note: use this with caution. There is no checking of arguments at
all. This is best used when you are rebuilding an Add or Mul after
simply removing one or more terms. If modification which result,
for example, in extra 1s being inserted (as when collecting an
expression's numerators and denominators) they will not show up in
the result but a Mul will be returned nonetheless:
>>> m = (x*y)._new_rawargs(S.One, x); m
x
>>> m == x
False
>>> m.is_Mul
True
Another issue to be aware of is that the commutativity of the result
is based on the commutativity of self. If you are rebuilding the
terms that came from a commutative object then there will be no
problem, but if self was non-commutative then what you are
rebuilding may now be commutative.
Although this routine tries to do as little as possible with the
input, getting the commutativity right is important, so this level
of safety is enforced: commutativity will always be recomputed if
self is non-commutative and kwarg `reeval=False` has not been
passed.
"""
if kwargs.pop('reeval', True) and self.is_commutative is False:
is_commutative = None
else:
is_commutative = self.is_commutative
return self._from_args(args, is_commutative)
@classmethod
def flatten(cls, seq):
"""Return seq so that none of the elements are of type `cls`. This is
the vanilla routine that will be used if a class derived from AssocOp
does not define its own flatten routine."""
# apply associativity, no commutativity property is used
new_seq = []
while seq:
o = seq.pop()
if o.__class__ is cls: # classes must match exactly
seq.extend(o.args)
else:
new_seq.append(o)
# c_part, nc_part, order_symbols
return [], new_seq, None
def _matches_commutative(self, expr, repl_dict={}, old=False):
"""
Matches Add/Mul "pattern" to an expression "expr".
repl_dict ... a dictionary of (wild: expression) pairs, that get
returned with the results
This function is the main workhorse for Add/Mul.
For instance:
>>> from sympy import symbols, Wild, sin
>>> a = Wild("a")
>>> b = Wild("b")
>>> c = Wild("c")
>>> x, y, z = symbols("x y z")
>>> (a+sin(b)*c)._matches_commutative(x+sin(y)*z)
{a_: x, b_: y, c_: z}
In the example above, "a+sin(b)*c" is the pattern, and "x+sin(y)*z" is
the expression.
The repl_dict contains parts that were already matched. For example
here:
>>> (x+sin(b)*c)._matches_commutative(x+sin(y)*z, repl_dict={a: x})
{a_: x, b_: y, c_: z}
the only function of the repl_dict is to return it in the
result, e.g. if you omit it:
>>> (x+sin(b)*c)._matches_commutative(x+sin(y)*z)
{b_: y, c_: z}
the "a: x" is not returned in the result, but otherwise it is
equivalent.
"""
# make sure expr is Expr if pattern is Expr
from .expr import Add, Expr
from sympy import Mul
if isinstance(self, Expr) and not isinstance(expr, Expr):
return None
# handle simple patterns
if self == expr:
return repl_dict
d = self._matches_simple(expr, repl_dict)
if d is not None:
return d
# eliminate exact part from pattern: (2+a+w1+w2).matches(expr) -> (w1+w2).matches(expr-a-2)
from .function import WildFunction
from .symbol import Wild
wild_part = []
exact_part = []
for p in ordered(self.args):
if p.has(Wild, WildFunction) and (not expr.has(p)):
# not all Wild should stay Wilds, for example:
# (w2+w3).matches(w1) -> (w1+w3).matches(w1) -> w3.matches(0)
wild_part.append(p)
else:
exact_part.append(p)
if exact_part:
exact = self.func(*exact_part)
free = expr.free_symbols
if free and (exact.free_symbols - free):
# there are symbols in the exact part that are not
# in the expr; but if there are no free symbols, let
# the matching continue
return None
newpattern = self.func(*wild_part)
newexpr = self._combine_inverse(expr, exact)
if not old and (expr.is_Add or expr.is_Mul):
if newexpr.count_ops() > expr.count_ops():
return None
return newpattern.matches(newexpr, repl_dict)
# now to real work ;)
i = 0
saw = set()
while expr not in saw:
saw.add(expr)
expr_list = (self.identity,) + tuple(ordered(self.make_args(expr)))
for last_op in reversed(expr_list):
for w in reversed(wild_part):
d1 = w.matches(last_op, repl_dict)
if d1 is not None:
d2 = self.xreplace(d1).matches(expr, d1)
if d2 is not None:
return d2
if i == 0:
if self.is_Mul:
# make e**i look like Mul
if expr.is_Pow and expr.exp.is_Integer:
if expr.exp > 0:
expr = Mul(*[expr.base, expr.base**(expr.exp - 1)], evaluate=False)
else:
expr = Mul(*[1/expr.base, expr.base**(expr.exp + 1)], evaluate=False)
i += 1
continue
elif self.is_Add:
# make i*e look like Add
c, e = expr.as_coeff_Mul()
if abs(c) > 1:
if c > 0:
expr = Add(*[e, (c - 1)*e], evaluate=False)
else:
expr = Add(*[-e, (c + 1)*e], evaluate=False)
i += 1
continue
# try collection on non-Wild symbols
from sympy.simplify.radsimp import collect
was = expr
did = set()
for w in reversed(wild_part):
c, w = w.as_coeff_mul(Wild)
free = c.free_symbols - did
if free:
did.update(free)
expr = collect(expr, free)
if expr != was:
i += 0
continue
break # if we didn't continue, there is nothing more to do
return
def _has_matcher(self):
"""Helper for .has()"""
def _ncsplit(expr):
# this is not the same as args_cnc because here
# we don't assume expr is a Mul -- hence deal with args --
# and always return a set.
cpart, ncpart = [], []
for arg in expr.args:
if arg.is_commutative:
cpart.append(arg)
else:
ncpart.append(arg)
return set(cpart), ncpart
c, nc = _ncsplit(self)
cls = self.__class__
def is_in(expr):
if expr == self:
return True
elif not isinstance(expr, Basic):
return False
elif isinstance(expr, cls):
_c, _nc = _ncsplit(expr)
if (c & _c) == c:
if not nc:
return True
elif len(nc) <= len(_nc):
for i in range(len(_nc) - len(nc)):
if _nc[i:i + len(nc)] == nc:
return True
return False
return is_in
def _eval_evalf(self, prec):
"""
Evaluate the parts of self that are numbers; if the whole thing
was a number with no functions it would have been evaluated, but
it wasn't so we must judiciously extract the numbers and reconstruct
the object. This is *not* simply replacing numbers with evaluated
numbers. Nunmbers should be handled in the largest pure-number
expression as possible. So the code below separates ``self`` into
number and non-number parts and evaluates the number parts and
walks the args of the non-number part recursively (doing the same
thing).
"""
from sympy import Symbol
from sympy.core.function import AppliedUndef
x, tail = self.as_independent(Symbol, AppliedUndef)
if tail is not self.identity:
# here, we have a number so we just call to _evalf with prec;
# prec is not the same as n, it is the binary precision so
# that's why we don't call to evalf.
x = x._evalf(prec) if x is not self.identity else self.identity
args = []
for a in self.func.make_args(tail):
# here we call to _eval_evalf since we don't know what we
# are dealing with and all other _eval_evalf routines should
# be doing the same thing (i.e. taking binary prec and
# finding the evalf-able args)
newa = a._eval_evalf(prec)
if newa is None:
args.append(a)
else:
args.append(newa)
if not _aresame(tuple(args), self.func.make_args(tail)):
tail = self.func(*args)
return self.func(x, tail)
# this is the same as above, but there were no pure-number args to
# deal with
args = []
for a in self.args:
newa = a._eval_evalf(prec)
if newa is None:
args.append(a)
else:
args.append(newa)
if not _aresame(tuple(args), self.args):
return self.func(*args)
return self
@classmethod
def make_args(cls, expr):
"""
Return a sequence of elements `args` such that cls(*args) == expr
>>> from sympy import Symbol, Mul, Add
>>> x, y = map(Symbol, 'xy')
>>> Mul.make_args(x*y)
(x, y)
>>> Add.make_args(x*y)
(x*y,)
>>> set(Add.make_args(x*y + y)) == set([y, x*y])
True
"""
if isinstance(expr, cls):
return expr.args
else:
return (expr,)
class ShortCircuit(Exception):
pass
class LatticeOp(AssocOp):
"""
Join/meet operations of an algebraic lattice[1].
These binary operations are associative (op(op(a, b), c) = op(a, op(b, c))),
commutative (op(a, b) = op(b, a)) and idempotent (op(a, a) = op(a) = a).
Common examples are AND, OR, Union, Intersection, max or min. They have an
identity element (op(identity, a) = a) and an absorbing element
conventionally called zero (op(zero, a) = zero).
This is an abstract base class, concrete derived classes must declare
attributes zero and identity. All defining properties are then respected.
>>> from sympy import Integer
>>> from sympy.core.operations import LatticeOp
>>> class my_join(LatticeOp):
... zero = Integer(0)
... identity = Integer(1)
>>> my_join(2, 3) == my_join(3, 2)
True
>>> my_join(2, my_join(3, 4)) == my_join(2, 3, 4)
True
>>> my_join(0, 1, 4, 2, 3, 4)
0
>>> my_join(1, 2)
2
References:
[1] - http://en.wikipedia.org/wiki/Lattice_%28order%29
"""
is_commutative = True
def __new__(cls, *args, **options):
args = (_sympify(arg) for arg in args)
try:
_args = frozenset(cls._new_args_filter(args))
except ShortCircuit:
return sympify(cls.zero)
if not _args:
return sympify(cls.identity)
elif len(_args) == 1:
return set(_args).pop()
else:
# XXX in almost every other case for __new__, *_args is
# passed along, but the expectation here is for _args
obj = super(AssocOp, cls).__new__(cls, _args)
obj._argset = _args
return obj
@classmethod
def _new_args_filter(cls, arg_sequence, call_cls=None):
"""Generator filtering args"""
ncls = call_cls or cls
for arg in arg_sequence:
if arg == ncls.zero:
raise ShortCircuit(arg)
elif arg == ncls.identity:
continue
elif arg.func == ncls:
for x in arg.args:
yield x
else:
yield arg
@classmethod
def make_args(cls, expr):
"""
Return a sequence of elements `args` such that cls(*args) == expr
>>> from sympy import Symbol, Mul, Add
>>> x, y = map(Symbol, 'xy')
>>> Mul.make_args(x*y)
(x, y)
>>> Add.make_args(x*y)
(x*y,)
>>> set(Add.make_args(x*y + y)) == set([y, x*y])
True
"""
if isinstance(expr, cls):
return expr._argset
else:
return frozenset([expr])
@property
@cacheit
def args(self):
return tuple(ordered(self._argset))
@staticmethod
def _compare_pretty(a, b):
return (str(a) > str(b)) - (str(a) < str(b))
|
kauser-cse-buet/CerebralCortex | refs/heads/master | cerebralcortex/data_processor/preprocessor/parser.py | 3 | # Copyright (c) 2017, MD2K Center of Excellence
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from datetime import datetime
import pytz
from cerebralcortex.kernel.datatypes.datapoint import DataPoint
def data_processor(input_string):
try:
[val, ts] = input_string.split(' ')
timestamp = datetime.fromtimestamp(float(ts) / 1000.0, pytz.timezone('US/Central'))
return DataPoint.from_tuple(start_time=timestamp, sample=float(val))
except ValueError:
# Skip bad values and filter them later
# print("ValueError: " + str(input))
return
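# Example sketch (illustrative values): each input line is "<sample> <epoch milliseconds>".
#
#   dp = data_processor('72.5 1497916800000')
#   # -> DataPoint with sample 72.5 and a timezone-aware start_time in US/Central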
|
laiqiqi886/kbengine | refs/heads/master | kbe/src/lib/python/Lib/poplib.py | 74 | """A POP3 client class.
Based on the J. Myers POP3 draft, Jan. 96
"""
# Author: David Ascher <[email protected]>
# [heavily stealing from nntplib.py]
# Updated: Piers Lauder <[email protected]> [Jul '97]
# String method conversion and test jig improvements by ESR, February 2001.
# Added the POP3_SSL class. Methods loosely based on IMAP_SSL. Hector Urtubia <[email protected]> Aug 2003
# Example (see the test function at the end of this file)
# Imports
import errno
import re
import socket
try:
import ssl
HAVE_SSL = True
except ImportError:
HAVE_SSL = False
__all__ = ["POP3","error_proto"]
# Exception raised when an error or invalid response is received:
class error_proto(Exception): pass
# Standard Port
POP3_PORT = 110
# POP SSL PORT
POP3_SSL_PORT = 995
# Line terminators (we always output CRLF, but accept any of CRLF, LFCR, LF)
CR = b'\r'
LF = b'\n'
CRLF = CR+LF
# maximal line length when calling readline(). This is to prevent
# reading arbitrary length lines. RFC 1939 limits POP3 line length to
# 512 characters, including CRLF. We have selected 2048 just to be on
# the safe side.
_MAXLINE = 2048
class POP3:
"""This class supports both the minimal and optional command sets.
Arguments can be strings or integers (where appropriate)
(e.g.: retr(1) and retr('1') both work equally well.
Minimal Command Set:
USER name user(name)
PASS string pass_(string)
STAT stat()
LIST [msg] list(msg = None)
RETR msg retr(msg)
DELE msg dele(msg)
NOOP noop()
RSET rset()
QUIT quit()
Optional Commands (some servers support these):
RPOP name rpop(name)
APOP name digest apop(name, digest)
TOP msg n top(msg, n)
UIDL [msg] uidl(msg = None)
CAPA capa()
STLS stls()
Raises one exception: 'error_proto'.
Instantiate with:
POP3(hostname, port=110)
NB: the POP protocol locks the mailbox from user
authorization until QUIT, so be sure to get in, suck
the messages, and quit, each time you access the
mailbox.
POP is a line-based protocol, which means large mail
messages consume lots of python cycles reading them
line-by-line.
If it's available on your mail server, use IMAP4
instead, it doesn't suffer from the two problems
above.
"""
encoding = 'UTF-8'
def __init__(self, host, port=POP3_PORT,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
self.host = host
self.port = port
self._tls_established = False
self.sock = self._create_socket(timeout)
self.file = self.sock.makefile('rb')
self._debugging = 0
self.welcome = self._getresp()
def _create_socket(self, timeout):
return socket.create_connection((self.host, self.port), timeout)
def _putline(self, line):
if self._debugging > 1: print('*put*', repr(line))
self.sock.sendall(line + CRLF)
# Internal: send one command to the server (through _putline())
def _putcmd(self, line):
if self._debugging: print('*cmd*', repr(line))
line = bytes(line, self.encoding)
self._putline(line)
# Internal: return one line from the server, stripping CRLF.
# This is where all the CPU time of this module is consumed.
# Raise error_proto('-ERR EOF') if the connection is closed.
def _getline(self):
line = self.file.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise error_proto('line too long')
if self._debugging > 1: print('*get*', repr(line))
if not line: raise error_proto('-ERR EOF')
octets = len(line)
# server can send any combination of CR & LF
# however, 'readline()' returns lines ending in LF
# so only possibilities are ...LF, ...CRLF, CR...LF
if line[-2:] == CRLF:
return line[:-2], octets
        if line[:1] == CR:
return line[1:-1], octets
return line[:-1], octets
# Internal: get a response from the server.
# Raise 'error_proto' if the response doesn't start with '+'.
def _getresp(self):
resp, o = self._getline()
if self._debugging > 1: print('*resp*', repr(resp))
if not resp.startswith(b'+'):
raise error_proto(resp)
return resp
# Internal: get a response plus following text from the server.
def _getlongresp(self):
resp = self._getresp()
list = []; octets = 0
line, o = self._getline()
while line != b'.':
if line.startswith(b'..'):
o = o-1
line = line[1:]
octets = octets + o
list.append(line)
line, o = self._getline()
return resp, list, octets
# Internal: send a command and get the response
def _shortcmd(self, line):
self._putcmd(line)
return self._getresp()
# Internal: send a command and get the response plus following text
def _longcmd(self, line):
self._putcmd(line)
return self._getlongresp()
# These can be useful:
def getwelcome(self):
return self.welcome
def set_debuglevel(self, level):
self._debugging = level
# Here are all the POP commands:
def user(self, user):
"""Send user name, return response
(should indicate password required).
"""
return self._shortcmd('USER %s' % user)
def pass_(self, pswd):
"""Send password, return response
(response includes message count, mailbox size).
NB: mailbox is locked by server from here to 'quit()'
"""
return self._shortcmd('PASS %s' % pswd)
def stat(self):
"""Get mailbox status.
Result is tuple of 2 ints (message count, mailbox size)
"""
retval = self._shortcmd('STAT')
rets = retval.split()
if self._debugging: print('*stat*', repr(rets))
numMessages = int(rets[1])
sizeMessages = int(rets[2])
return (numMessages, sizeMessages)
def list(self, which=None):
"""Request listing, return result.
Result without a message number argument is in form
['response', ['mesg_num octets', ...], octets].
Result when a message number argument is given is a
single response: the "scan listing" for that message.
"""
if which is not None:
return self._shortcmd('LIST %s' % which)
return self._longcmd('LIST')
def retr(self, which):
"""Retrieve whole message number 'which'.
Result is in form ['response', ['line', ...], octets].
"""
return self._longcmd('RETR %s' % which)
def dele(self, which):
"""Delete message number 'which'.
Result is 'response'.
"""
return self._shortcmd('DELE %s' % which)
def noop(self):
"""Does nothing.
One supposes the response indicates the server is alive.
"""
return self._shortcmd('NOOP')
def rset(self):
"""Unmark all messages marked for deletion."""
return self._shortcmd('RSET')
def quit(self):
"""Signoff: commit changes on server, unlock mailbox, close connection."""
resp = self._shortcmd('QUIT')
self.close()
return resp
def close(self):
"""Close the connection without assuming anything about it."""
if self.file is not None:
self.file.close()
if self.sock is not None:
try:
self.sock.shutdown(socket.SHUT_RDWR)
except OSError as e:
# The server might already have closed the connection
if e.errno != errno.ENOTCONN:
raise
finally:
self.sock.close()
self.file = self.sock = None
#__del__ = quit
# optional commands:
def rpop(self, user):
"""Not sure what this does."""
return self._shortcmd('RPOP %s' % user)
timestamp = re.compile(br'\+OK.*(<[^>]+>)')
def apop(self, user, password):
"""Authorisation
- only possible if server has supplied a timestamp in initial greeting.
Args:
user - mailbox user;
password - mailbox password.
NB: mailbox is locked by server from here to 'quit()'
"""
secret = bytes(password, self.encoding)
m = self.timestamp.match(self.welcome)
if not m:
raise error_proto('-ERR APOP not supported by server')
import hashlib
digest = m.group(1)+secret
digest = hashlib.md5(digest).hexdigest()
return self._shortcmd('APOP %s %s' % (user, digest))
def top(self, which, howmuch):
"""Retrieve message header of message number 'which'
and first 'howmuch' lines of message body.
Result is in form ['response', ['line', ...], octets].
"""
return self._longcmd('TOP %s %s' % (which, howmuch))
def uidl(self, which=None):
"""Return message digest (unique id) list.
If 'which', result contains unique id for that message
in the form 'response mesgnum uid', otherwise result is
the list ['response', ['mesgnum uid', ...], octets]
"""
if which is not None:
return self._shortcmd('UIDL %s' % which)
return self._longcmd('UIDL')
def capa(self):
"""Return server capabilities (RFC 2449) as a dictionary
>>> c=poplib.POP3('localhost')
>>> c.capa()
{'IMPLEMENTATION': ['Cyrus', 'POP3', 'server', 'v2.2.12'],
'TOP': [], 'LOGIN-DELAY': ['0'], 'AUTH-RESP-CODE': [],
'EXPIRE': ['NEVER'], 'USER': [], 'STLS': [], 'PIPELINING': [],
'UIDL': [], 'RESP-CODES': []}
>>>
Really, according to RFC 2449, the cyrus folks should avoid
having the implementation split into multiple arguments...
"""
def _parsecap(line):
lst = line.decode('ascii').split()
return lst[0], lst[1:]
caps = {}
try:
resp = self._longcmd('CAPA')
rawcaps = resp[1]
for capline in rawcaps:
capnm, capargs = _parsecap(capline)
caps[capnm] = capargs
except error_proto as _err:
raise error_proto('-ERR CAPA not supported by server')
return caps
def stls(self, context=None):
"""Start a TLS session on the active connection as specified in RFC 2595.
context - a ssl.SSLContext
"""
if not HAVE_SSL:
raise error_proto('-ERR TLS support missing')
if self._tls_established:
raise error_proto('-ERR TLS session already established')
caps = self.capa()
if not 'STLS' in caps:
raise error_proto('-ERR STLS not supported by server')
if context is None:
context = ssl._create_stdlib_context()
resp = self._shortcmd('STLS')
server_hostname = self.host if ssl.HAS_SNI else None
self.sock = context.wrap_socket(self.sock,
server_hostname=server_hostname)
self.file = self.sock.makefile('rb')
self._tls_established = True
return resp
if HAVE_SSL:
class POP3_SSL(POP3):
"""POP3 client class over SSL connection
Instantiate with: POP3_SSL(hostname, port=995, keyfile=None, certfile=None,
context=None)
hostname - the hostname of the pop3 over ssl server
port - port number
keyfile - PEM formatted file that contains your private key
certfile - PEM formatted certificate chain file
context - a ssl.SSLContext
See the methods of the parent class POP3 for more documentation.
"""
def __init__(self, host, port=POP3_SSL_PORT, keyfile=None, certfile=None,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT, context=None):
if context is not None and keyfile is not None:
raise ValueError("context and keyfile arguments are mutually "
"exclusive")
if context is not None and certfile is not None:
raise ValueError("context and certfile arguments are mutually "
"exclusive")
self.keyfile = keyfile
self.certfile = certfile
if context is None:
context = ssl._create_stdlib_context(certfile=certfile,
keyfile=keyfile)
self.context = context
POP3.__init__(self, host, port, timeout)
def _create_socket(self, timeout):
sock = POP3._create_socket(self, timeout)
server_hostname = self.host if ssl.HAS_SNI else None
sock = self.context.wrap_socket(sock,
server_hostname=server_hostname)
return sock
def stls(self, keyfile=None, certfile=None, context=None):
"""The method unconditionally raises an exception since the
STLS command doesn't make any sense on an already established
SSL/TLS session.
"""
raise error_proto('-ERR TLS session already established')
__all__.append("POP3_SSL")
if __name__ == "__main__":
import sys
a = POP3(sys.argv[1])
print(a.getwelcome())
a.user(sys.argv[2])
a.pass_(sys.argv[3])
a.list()
(numMsgs, totalSize) = a.stat()
for i in range(1, numMsgs + 1):
(header, msg, octets) = a.retr(i)
print("Message %d:" % i)
for line in msg:
print(' ' + line)
print('-----------------------')
a.quit()
|
Fale/ansible | refs/heads/devel | hacking/build_library/build_ansible/command_plugins/collection_meta.py | 35 | # coding: utf-8
# Copyright: (c) 2019, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import os.path
import pathlib
import yaml
from ansible.module_utils.six import string_types
from ansible.module_utils._text import to_bytes
from antsibull.jinja2.environment import doc_environment
# Pylint doesn't understand Python3 namespace modules.
from ..change_detection import update_file_if_different # pylint: disable=relative-beyond-top-level
from ..commands import Command # pylint: disable=relative-beyond-top-level
DEFAULT_TEMPLATE_FILE = 'collections_galaxy_meta.rst.j2'
DEFAULT_TEMPLATE_DIR = pathlib.Path(__file__).parents[4] / 'docs/templates'
def normalize_options(options):
"""Normalize the options to make for easy templating"""
for opt in options:
if isinstance(opt['description'], string_types):
opt['description'] = [opt['description']]
class DocumentCollectionMeta(Command):
name = 'collection-meta'
@classmethod
def init_parser(cls, add_parser):
parser = add_parser(cls.name, description='Generate collection galaxy.yml documentation from shared metadata')
parser.add_argument("-t", "--template-file", action="store", dest="template_file",
default=DEFAULT_TEMPLATE_FILE,
help="Jinja2 template to use for the config")
parser.add_argument("-T", "--template-dir", action="store", dest="template_dir",
default=str(DEFAULT_TEMPLATE_DIR),
help="directory containing Jinja2 templates")
parser.add_argument("-o", "--output-dir", action="store", dest="output_dir", default='/tmp/',
help="Output directory for rst files")
parser.add_argument("collection_defs", metavar="COLLECTION-OPTION-DEFINITIONS.yml", type=str,
help="Source for collection metadata option docs")
@staticmethod
def main(args):
output_dir = os.path.abspath(args.output_dir)
template_file_full_path = os.path.abspath(os.path.join(args.template_dir, args.template_file))
template_file = os.path.basename(template_file_full_path)
template_dir = os.path.dirname(template_file_full_path)
with open(args.collection_defs) as f:
options = yaml.safe_load(f)
normalize_options(options)
env = doc_environment(template_dir)
template = env.get_template(template_file)
output_name = os.path.join(output_dir, template_file.replace('.j2', ''))
temp_vars = {'options': options}
data = to_bytes(template.render(temp_vars))
update_file_if_different(output_name, data)
return 0
|
axbaretto/beam | refs/heads/master | sdks/python/.tox/lint/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/shutil.py | 395 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""Utility functions for copying and archiving files and directory trees.
XXX The functions here don't copy the resource fork or other metadata on Mac.
"""
import os
import sys
import stat
from os.path import abspath
import fnmatch
import collections
import errno
from . import tarfile
try:
import bz2
_BZ2_SUPPORTED = True
except ImportError:
_BZ2_SUPPORTED = False
try:
from pwd import getpwnam
except ImportError:
getpwnam = None
try:
from grp import getgrnam
except ImportError:
getgrnam = None
__all__ = ["copyfileobj", "copyfile", "copymode", "copystat", "copy", "copy2",
"copytree", "move", "rmtree", "Error", "SpecialFileError",
"ExecError", "make_archive", "get_archive_formats",
"register_archive_format", "unregister_archive_format",
"get_unpack_formats", "register_unpack_format",
"unregister_unpack_format", "unpack_archive", "ignore_patterns"]
class Error(EnvironmentError):
pass
class SpecialFileError(EnvironmentError):
"""Raised when trying to do a kind of operation (e.g. copying) which is
not supported on a special file (e.g. a named pipe)"""
class ExecError(EnvironmentError):
"""Raised when a command could not be executed"""
class ReadError(EnvironmentError):
"""Raised when an archive cannot be read"""
class RegistryError(Exception):
"""Raised when a registry operation with the archiving
and unpacking registries fails"""
try:
WindowsError
except NameError:
WindowsError = None
def copyfileobj(fsrc, fdst, length=16*1024):
"""copy data from file-like object fsrc to file-like object fdst"""
while 1:
buf = fsrc.read(length)
if not buf:
break
fdst.write(buf)
def _samefile(src, dst):
# Macintosh, Unix.
if hasattr(os.path, 'samefile'):
try:
return os.path.samefile(src, dst)
except OSError:
return False
# All other platforms: check for same pathname.
return (os.path.normcase(os.path.abspath(src)) ==
os.path.normcase(os.path.abspath(dst)))
def copyfile(src, dst):
"""Copy data from src to dst"""
if _samefile(src, dst):
raise Error("`%s` and `%s` are the same file" % (src, dst))
for fn in [src, dst]:
try:
st = os.stat(fn)
except OSError:
# File most likely does not exist
pass
else:
# XXX What about other special files? (sockets, devices...)
if stat.S_ISFIFO(st.st_mode):
raise SpecialFileError("`%s` is a named pipe" % fn)
with open(src, 'rb') as fsrc:
with open(dst, 'wb') as fdst:
copyfileobj(fsrc, fdst)
def copymode(src, dst):
"""Copy mode bits from src to dst"""
if hasattr(os, 'chmod'):
st = os.stat(src)
mode = stat.S_IMODE(st.st_mode)
os.chmod(dst, mode)
def copystat(src, dst):
"""Copy all stat info (mode bits, atime, mtime, flags) from src to dst"""
st = os.stat(src)
mode = stat.S_IMODE(st.st_mode)
if hasattr(os, 'utime'):
os.utime(dst, (st.st_atime, st.st_mtime))
if hasattr(os, 'chmod'):
os.chmod(dst, mode)
if hasattr(os, 'chflags') and hasattr(st, 'st_flags'):
try:
os.chflags(dst, st.st_flags)
except OSError as why:
if (not hasattr(errno, 'EOPNOTSUPP') or
why.errno != errno.EOPNOTSUPP):
raise
def copy(src, dst):
"""Copy data and mode bits ("cp src dst").
The destination may be a directory.
"""
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
copyfile(src, dst)
copymode(src, dst)
def copy2(src, dst):
"""Copy data and all stat info ("cp -p src dst").
The destination may be a directory.
"""
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
copyfile(src, dst)
copystat(src, dst)
def ignore_patterns(*patterns):
"""Function that can be used as copytree() ignore parameter.
Patterns is a sequence of glob-style patterns
that are used to exclude files"""
def _ignore_patterns(path, names):
ignored_names = []
for pattern in patterns:
ignored_names.extend(fnmatch.filter(names, pattern))
return set(ignored_names)
return _ignore_patterns
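# Usage sketch for ignore_patterns() with copytree() (directory names are
# hypothetical): skip compiled files and VCS metadata while copying a tree:
#
#     copytree('src_dir', 'dst_dir', ignore=ignore_patterns('*.pyc', '.git'))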
def copytree(src, dst, symlinks=False, ignore=None, copy_function=copy2,
ignore_dangling_symlinks=False):
"""Recursively copy a directory tree.
The destination directory must not already exist.
If exception(s) occur, an Error is raised with a list of reasons.
If the optional symlinks flag is true, symbolic links in the
source tree result in symbolic links in the destination tree; if
it is false, the contents of the files pointed to by symbolic
links are copied. If the file pointed by the symlink doesn't
exist, an exception will be added in the list of errors raised in
an Error exception at the end of the copy process.
You can set the optional ignore_dangling_symlinks flag to true if you
want to silence this exception. Notice that this has no effect on
platforms that don't support os.symlink.
The optional ignore argument is a callable. If given, it
is called with the `src` parameter, which is the directory
being visited by copytree(), and `names` which is the list of
`src` contents, as returned by os.listdir():
callable(src, names) -> ignored_names
Since copytree() is called recursively, the callable will be
called once for each directory that is copied. It returns a
list of names relative to the `src` directory that should
not be copied.
The optional copy_function argument is a callable that will be used
to copy each file. It will be called with the source path and the
destination path as arguments. By default, copy2() is used, but any
function that supports the same signature (like copy()) can be used.
"""
names = os.listdir(src)
if ignore is not None:
ignored_names = ignore(src, names)
else:
ignored_names = set()
os.makedirs(dst)
errors = []
for name in names:
if name in ignored_names:
continue
srcname = os.path.join(src, name)
dstname = os.path.join(dst, name)
try:
if os.path.islink(srcname):
linkto = os.readlink(srcname)
if symlinks:
os.symlink(linkto, dstname)
else:
# ignore dangling symlink if the flag is on
if not os.path.exists(linkto) and ignore_dangling_symlinks:
continue
# otherwise let the copy occurs. copy2 will raise an error
copy_function(srcname, dstname)
elif os.path.isdir(srcname):
copytree(srcname, dstname, symlinks, ignore, copy_function)
else:
# Will raise a SpecialFileError for unsupported file types
copy_function(srcname, dstname)
# catch the Error from the recursive copytree so that we can
# continue with other files
except Error as err:
errors.extend(err.args[0])
except EnvironmentError as why:
errors.append((srcname, dstname, str(why)))
try:
copystat(src, dst)
except OSError as why:
if WindowsError is not None and isinstance(why, WindowsError):
# Copying file access times may fail on Windows
pass
else:
            errors.append((src, dst, str(why)))
if errors:
raise Error(errors)
def rmtree(path, ignore_errors=False, onerror=None):
"""Recursively delete a directory tree.
If ignore_errors is set, errors are ignored; otherwise, if onerror
is set, it is called to handle the error with arguments (func,
path, exc_info) where func is os.listdir, os.remove, or os.rmdir;
path is the argument to that function that caused it to fail; and
exc_info is a tuple returned by sys.exc_info(). If ignore_errors
is false and onerror is None, an exception is raised.
"""
if ignore_errors:
def onerror(*args):
pass
elif onerror is None:
def onerror(*args):
raise
try:
if os.path.islink(path):
# symlinks to directories are forbidden, see bug #1669
raise OSError("Cannot call rmtree on a symbolic link")
except OSError:
onerror(os.path.islink, path, sys.exc_info())
# can't continue even if onerror hook returns
return
names = []
try:
names = os.listdir(path)
except os.error:
onerror(os.listdir, path, sys.exc_info())
for name in names:
fullname = os.path.join(path, name)
try:
mode = os.lstat(fullname).st_mode
except os.error:
mode = 0
if stat.S_ISDIR(mode):
rmtree(fullname, ignore_errors, onerror)
else:
try:
os.remove(fullname)
except os.error:
onerror(os.remove, fullname, sys.exc_info())
try:
os.rmdir(path)
except os.error:
onerror(os.rmdir, path, sys.exc_info())
def _basename(path):
# A basename() variant which first strips the trailing slash, if present.
# Thus we always get the last component of the path, even for directories.
return os.path.basename(path.rstrip(os.path.sep))
def move(src, dst):
"""Recursively move a file or directory to another location. This is
similar to the Unix "mv" command.
If the destination is a directory or a symlink to a directory, the source
is moved inside the directory. The destination path must not already
exist.
If the destination already exists but is not a directory, it may be
overwritten depending on os.rename() semantics.
If the destination is on our current filesystem, then rename() is used.
Otherwise, src is copied to the destination and then removed.
A lot more could be done here... A look at a mv.c shows a lot of
the issues this implementation glosses over.
"""
real_dst = dst
if os.path.isdir(dst):
if _samefile(src, dst):
# We might be on a case insensitive filesystem,
# perform the rename anyway.
os.rename(src, dst)
return
real_dst = os.path.join(dst, _basename(src))
if os.path.exists(real_dst):
raise Error("Destination path '%s' already exists" % real_dst)
try:
os.rename(src, real_dst)
except OSError:
if os.path.isdir(src):
if _destinsrc(src, dst):
raise Error("Cannot move a directory '%s' into itself '%s'." % (src, dst))
copytree(src, real_dst, symlinks=True)
rmtree(src)
else:
copy2(src, real_dst)
os.unlink(src)
def _destinsrc(src, dst):
src = abspath(src)
dst = abspath(dst)
if not src.endswith(os.path.sep):
src += os.path.sep
if not dst.endswith(os.path.sep):
dst += os.path.sep
return dst.startswith(src)
def _get_gid(name):
"""Returns a gid, given a group name."""
if getgrnam is None or name is None:
return None
try:
result = getgrnam(name)
except KeyError:
result = None
if result is not None:
return result[2]
return None
def _get_uid(name):
"""Returns an uid, given a user name."""
if getpwnam is None or name is None:
return None
try:
result = getpwnam(name)
except KeyError:
result = None
if result is not None:
return result[2]
return None
def _make_tarball(base_name, base_dir, compress="gzip", verbose=0, dry_run=0,
owner=None, group=None, logger=None):
"""Create a (possibly compressed) tar file from all the files under
'base_dir'.
'compress' must be "gzip" (the default), "bzip2", or None.
'owner' and 'group' can be used to define an owner and a group for the
archive that is being built. If not provided, the current owner and group
will be used.
The output tar file will be named 'base_name' + ".tar", possibly plus
the appropriate compression extension (".gz", or ".bz2").
Returns the output filename.
"""
tar_compression = {'gzip': 'gz', None: ''}
compress_ext = {'gzip': '.gz'}
if _BZ2_SUPPORTED:
tar_compression['bzip2'] = 'bz2'
compress_ext['bzip2'] = '.bz2'
# flags for compression program, each element of list will be an argument
if compress is not None and compress not in compress_ext:
raise ValueError("bad value for 'compress', or compression format not "
"supported : {0}".format(compress))
archive_name = base_name + '.tar' + compress_ext.get(compress, '')
archive_dir = os.path.dirname(archive_name)
if not os.path.exists(archive_dir):
if logger is not None:
logger.info("creating %s", archive_dir)
if not dry_run:
os.makedirs(archive_dir)
# creating the tarball
if logger is not None:
logger.info('Creating tar archive')
uid = _get_uid(owner)
gid = _get_gid(group)
def _set_uid_gid(tarinfo):
if gid is not None:
tarinfo.gid = gid
tarinfo.gname = group
if uid is not None:
tarinfo.uid = uid
tarinfo.uname = owner
return tarinfo
if not dry_run:
tar = tarfile.open(archive_name, 'w|%s' % tar_compression[compress])
try:
tar.add(base_dir, filter=_set_uid_gid)
finally:
tar.close()
return archive_name
def _call_external_zip(base_dir, zip_filename, verbose=False, dry_run=False):
# XXX see if we want to keep an external call here
if verbose:
zipoptions = "-r"
else:
zipoptions = "-rq"
from distutils.errors import DistutilsExecError
from distutils.spawn import spawn
try:
spawn(["zip", zipoptions, zip_filename, base_dir], dry_run=dry_run)
except DistutilsExecError:
# XXX really should distinguish between "couldn't find
# external 'zip' command" and "zip failed".
raise ExecError("unable to create zip file '%s': "
"could neither import the 'zipfile' module nor "
"find a standalone zip utility") % zip_filename
def _make_zipfile(base_name, base_dir, verbose=0, dry_run=0, logger=None):
"""Create a zip file from all the files under 'base_dir'.
The output zip file will be named 'base_name' + ".zip". Uses either the
"zipfile" Python module (if available) or the InfoZIP "zip" utility
(if installed and found on the default search path). If neither tool is
available, raises ExecError. Returns the name of the output zip
file.
"""
zip_filename = base_name + ".zip"
archive_dir = os.path.dirname(base_name)
if not os.path.exists(archive_dir):
if logger is not None:
logger.info("creating %s", archive_dir)
if not dry_run:
os.makedirs(archive_dir)
# If zipfile module is not available, try spawning an external 'zip'
# command.
try:
import zipfile
except ImportError:
zipfile = None
if zipfile is None:
_call_external_zip(base_dir, zip_filename, verbose, dry_run)
else:
if logger is not None:
logger.info("creating '%s' and adding '%s' to it",
zip_filename, base_dir)
if not dry_run:
zip = zipfile.ZipFile(zip_filename, "w",
compression=zipfile.ZIP_DEFLATED)
for dirpath, dirnames, filenames in os.walk(base_dir):
for name in filenames:
path = os.path.normpath(os.path.join(dirpath, name))
if os.path.isfile(path):
zip.write(path, path)
if logger is not None:
logger.info("adding '%s'", path)
zip.close()
return zip_filename
_ARCHIVE_FORMATS = {
'gztar': (_make_tarball, [('compress', 'gzip')], "gzip'ed tar-file"),
'bztar': (_make_tarball, [('compress', 'bzip2')], "bzip2'ed tar-file"),
'tar': (_make_tarball, [('compress', None)], "uncompressed tar file"),
'zip': (_make_zipfile, [], "ZIP file"),
}
if _BZ2_SUPPORTED:
_ARCHIVE_FORMATS['bztar'] = (_make_tarball, [('compress', 'bzip2')],
"bzip2'ed tar-file")
def get_archive_formats():
"""Returns a list of supported formats for archiving and unarchiving.
Each element of the returned sequence is a tuple (name, description)
"""
formats = [(name, registry[2]) for name, registry in
_ARCHIVE_FORMATS.items()]
formats.sort()
return formats
def register_archive_format(name, function, extra_args=None, description=''):
"""Registers an archive format.
name is the name of the format. function is the callable that will be
used to create archives. If provided, extra_args is a sequence of
(name, value) tuples that will be passed as arguments to the callable.
description can be provided to describe the format, and will be returned
by the get_archive_formats() function.
"""
if extra_args is None:
extra_args = []
if not isinstance(function, collections.Callable):
raise TypeError('The %s object is not callable' % function)
if not isinstance(extra_args, (tuple, list)):
raise TypeError('extra_args needs to be a sequence')
for element in extra_args:
        if not isinstance(element, (tuple, list)) or len(element) != 2:
raise TypeError('extra_args elements are : (arg_name, value)')
_ARCHIVE_FORMATS[name] = (function, extra_args, description)
def unregister_archive_format(name):
del _ARCHIVE_FORMATS[name]
def make_archive(base_name, format, root_dir=None, base_dir=None, verbose=0,
dry_run=0, owner=None, group=None, logger=None):
"""Create an archive file (eg. zip or tar).
'base_name' is the name of the file to create, minus any format-specific
extension; 'format' is the archive format: one of "zip", "tar", "bztar"
or "gztar".
'root_dir' is a directory that will be the root directory of the
archive; ie. we typically chdir into 'root_dir' before creating the
archive. 'base_dir' is the directory where we start archiving from;
ie. 'base_dir' will be the common prefix of all files and
directories in the archive. 'root_dir' and 'base_dir' both default
to the current directory. Returns the name of the archive file.
'owner' and 'group' are used when creating a tar archive. By default,
uses the current owner and group.
"""
save_cwd = os.getcwd()
if root_dir is not None:
if logger is not None:
logger.debug("changing into '%s'", root_dir)
base_name = os.path.abspath(base_name)
if not dry_run:
os.chdir(root_dir)
if base_dir is None:
base_dir = os.curdir
kwargs = {'dry_run': dry_run, 'logger': logger}
try:
format_info = _ARCHIVE_FORMATS[format]
except KeyError:
raise ValueError("unknown archive format '%s'" % format)
func = format_info[0]
for arg, val in format_info[1]:
kwargs[arg] = val
if format != 'zip':
kwargs['owner'] = owner
kwargs['group'] = group
try:
filename = func(base_name, base_dir, **kwargs)
finally:
if root_dir is not None:
if logger is not None:
logger.debug("changing back to '%s'", save_cwd)
os.chdir(save_cwd)
return filename
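# Usage sketch for make_archive() (paths are hypothetical): build a gzip'ed
# tarball of ./project, rooted at the current directory:
#
#     archive = make_archive('/tmp/out/project-backup', 'gztar',
#                            root_dir='.', base_dir='project')
#     # archive == '/tmp/out/project-backup.tar.gz'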
def get_unpack_formats():
"""Returns a list of supported formats for unpacking.
Each element of the returned sequence is a tuple
(name, extensions, description)
"""
formats = [(name, info[0], info[3]) for name, info in
_UNPACK_FORMATS.items()]
formats.sort()
return formats
def _check_unpack_options(extensions, function, extra_args):
"""Checks what gets registered as an unpacker."""
# first make sure no other unpacker is registered for this extension
existing_extensions = {}
for name, info in _UNPACK_FORMATS.items():
for ext in info[0]:
existing_extensions[ext] = name
for extension in extensions:
if extension in existing_extensions:
msg = '%s is already registered for "%s"'
raise RegistryError(msg % (extension,
existing_extensions[extension]))
if not isinstance(function, collections.Callable):
raise TypeError('The registered function must be a callable')
def register_unpack_format(name, extensions, function, extra_args=None,
description=''):
"""Registers an unpack format.
`name` is the name of the format. `extensions` is a list of extensions
corresponding to the format.
`function` is the callable that will be
used to unpack archives. The callable will receive archives to unpack.
If it's unable to handle an archive, it needs to raise a ReadError
exception.
If provided, `extra_args` is a sequence of
(name, value) tuples that will be passed as arguments to the callable.
description can be provided to describe the format, and will be returned
by the get_unpack_formats() function.
"""
if extra_args is None:
extra_args = []
_check_unpack_options(extensions, function, extra_args)
_UNPACK_FORMATS[name] = extensions, function, extra_args, description
def unregister_unpack_format(name):
"""Removes the pack format from the registry."""
del _UNPACK_FORMATS[name]
def _ensure_directory(path):
"""Ensure that the parent directory of `path` exists"""
dirname = os.path.dirname(path)
if not os.path.isdir(dirname):
os.makedirs(dirname)
def _unpack_zipfile(filename, extract_dir):
"""Unpack zip `filename` to `extract_dir`
"""
try:
import zipfile
except ImportError:
raise ReadError('zlib not supported, cannot unpack this archive.')
if not zipfile.is_zipfile(filename):
raise ReadError("%s is not a zip file" % filename)
zip = zipfile.ZipFile(filename)
try:
for info in zip.infolist():
name = info.filename
# don't extract absolute paths or ones with .. in them
if name.startswith('/') or '..' in name:
continue
target = os.path.join(extract_dir, *name.split('/'))
if not target:
continue
_ensure_directory(target)
if not name.endswith('/'):
# file
data = zip.read(info.filename)
f = open(target, 'wb')
try:
f.write(data)
finally:
f.close()
del data
finally:
zip.close()
def _unpack_tarfile(filename, extract_dir):
"""Unpack tar/tar.gz/tar.bz2 `filename` to `extract_dir`
"""
try:
tarobj = tarfile.open(filename)
except tarfile.TarError:
raise ReadError(
"%s is not a compressed or uncompressed tar file" % filename)
try:
tarobj.extractall(extract_dir)
finally:
tarobj.close()
_UNPACK_FORMATS = {
'gztar': (['.tar.gz', '.tgz'], _unpack_tarfile, [], "gzip'ed tar-file"),
'tar': (['.tar'], _unpack_tarfile, [], "uncompressed tar file"),
'zip': (['.zip'], _unpack_zipfile, [], "ZIP file")
}
if _BZ2_SUPPORTED:
_UNPACK_FORMATS['bztar'] = (['.bz2'], _unpack_tarfile, [],
"bzip2'ed tar-file")
def _find_unpack_format(filename):
for name, info in _UNPACK_FORMATS.items():
for extension in info[0]:
if filename.endswith(extension):
return name
return None
def unpack_archive(filename, extract_dir=None, format=None):
"""Unpack an archive.
`filename` is the name of the archive.
`extract_dir` is the name of the target directory, where the archive
is unpacked. If not provided, the current working directory is used.
`format` is the archive format: one of "zip", "tar", or "gztar". Or any
other registered format. If not provided, unpack_archive will use the
filename extension and see if an unpacker was registered for that
extension.
In case none is found, a ValueError is raised.
"""
if extract_dir is None:
extract_dir = os.getcwd()
if format is not None:
try:
format_info = _UNPACK_FORMATS[format]
except KeyError:
raise ValueError("Unknown unpack format '{0}'".format(format))
func = format_info[1]
func(filename, extract_dir, **dict(format_info[2]))
else:
# we need to look at the registered unpackers supported extensions
format = _find_unpack_format(filename)
if format is None:
raise ReadError("Unknown archive format '{0}'".format(filename))
func = _UNPACK_FORMATS[format][1]
kwargs = dict(_UNPACK_FORMATS[format][2])
func(filename, extract_dir, **kwargs)
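# Usage sketch for unpack_archive() (paths are hypothetical): the format is
# normally inferred from the file extension, but can be forced explicitly:
#
#     unpack_archive('project-backup.tar.gz', extract_dir='/tmp/restore')
#     unpack_archive('payload.bin', extract_dir='/tmp/restore', format='zip')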
|
martynovp/edx-platform | refs/heads/master | common/djangoapps/track/utils.py | 230 | """Utility functions and classes for track backends"""
from datetime import datetime, date
import json
from pytz import UTC
class DateTimeJSONEncoder(json.JSONEncoder):
"""JSON encoder aware of datetime.datetime and datetime.date objects"""
def default(self, obj): # pylint: disable=method-hidden
"""
        Serialize datetime and date objects to ISO format.
        datetime objects are converted to UTC.
"""
if isinstance(obj, datetime):
if obj.tzinfo is None:
                # Localize naive datetime objects to UTC
obj = UTC.localize(obj)
else:
# Convert to UTC datetime objects from other timezones
obj = obj.astimezone(UTC)
return obj.isoformat()
elif isinstance(obj, date):
return obj.isoformat()
return super(DateTimeJSONEncoder, self).default(obj)
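# Usage sketch (assumes a timezone-naive datetime; the encoded value follows
# from the UTC localization above):
#
#     json.dumps({'ts': datetime(2015, 1, 1, 12, 0)}, cls=DateTimeJSONEncoder)
#     # -> '{"ts": "2015-01-01T12:00:00+00:00"}'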
|
ramadhane/odoo | refs/heads/8.0 | addons/survey/controllers/main.py | 189 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import json
import logging
import werkzeug
import werkzeug.utils
from datetime import datetime
from math import ceil
from openerp import SUPERUSER_ID
from openerp.addons.web import http
from openerp.addons.web.http import request
from openerp.tools.misc import DEFAULT_SERVER_DATETIME_FORMAT as DTF, ustr
_logger = logging.getLogger(__name__)
class WebsiteSurvey(http.Controller):
## HELPER METHODS ##
def _check_bad_cases(self, cr, uid, request, survey_obj, survey, user_input_obj, context=None):
# In case of bad survey, redirect to surveys list
if survey_obj.exists(cr, SUPERUSER_ID, survey.id, context=context) == []:
return werkzeug.utils.redirect("/survey/")
# In case of auth required, block public user
if survey.auth_required and uid == request.website.user_id.id:
return request.website.render("survey.auth_required", {'survey': survey})
# In case of non open surveys
if survey.stage_id.closed:
return request.website.render("survey.notopen")
        # If there are no pages
if not survey.page_ids:
return request.website.render("survey.nopages")
# Everything seems to be ok
return None
def _check_deadline(self, cr, uid, user_input, context=None):
        '''Prevent opening of the survey if the deadline has passed
! This will NOT disallow access to users who have already partially filled the survey !'''
if user_input.deadline:
dt_deadline = datetime.strptime(user_input.deadline, DTF)
dt_now = datetime.now()
if dt_now > dt_deadline: # survey is not open anymore
return request.website.render("survey.notopen")
return None
## ROUTES HANDLERS ##
# Survey start
@http.route(['/survey/start/<model("survey.survey"):survey>',
'/survey/start/<model("survey.survey"):survey>/<string:token>'],
type='http', auth='public', website=True)
def start_survey(self, survey, token=None, **post):
cr, uid, context = request.cr, request.uid, request.context
survey_obj = request.registry['survey.survey']
user_input_obj = request.registry['survey.user_input']
# Test mode
if token and token == "phantom":
_logger.info("[survey] Phantom mode")
user_input_id = user_input_obj.create(cr, uid, {'survey_id': survey.id, 'test_entry': True}, context=context)
user_input = user_input_obj.browse(cr, uid, [user_input_id], context=context)[0]
data = {'survey': survey, 'page': None, 'token': user_input.token}
return request.website.render('survey.survey_init', data)
# END Test mode
# Controls if the survey can be displayed
errpage = self._check_bad_cases(cr, uid, request, survey_obj, survey, user_input_obj, context=context)
if errpage:
return errpage
# Manual surveying
if not token:
vals = {'survey_id': survey.id}
if request.website.user_id.id != uid:
vals['partner_id'] = request.registry['res.users'].browse(cr, uid, uid, context=context).partner_id.id
user_input_id = user_input_obj.create(cr, uid, vals, context=context)
user_input = user_input_obj.browse(cr, uid, [user_input_id], context=context)[0]
else:
try:
user_input_id = user_input_obj.search(cr, SUPERUSER_ID, [('token', '=', token)], context=context)[0]
except IndexError: # Invalid token
return request.website.render("website.403")
else:
user_input = user_input_obj.browse(cr, SUPERUSER_ID, [user_input_id], context=context)[0]
# Do not open expired survey
errpage = self._check_deadline(cr, uid, user_input, context=context)
if errpage:
return errpage
# Select the right page
if user_input.state == 'new': # Intro page
data = {'survey': survey, 'page': None, 'token': user_input.token}
return request.website.render('survey.survey_init', data)
else:
return request.redirect('/survey/fill/%s/%s' % (survey.id, user_input.token))
# Survey displaying
@http.route(['/survey/fill/<model("survey.survey"):survey>/<string:token>',
'/survey/fill/<model("survey.survey"):survey>/<string:token>/<string:prev>'],
type='http', auth='public', website=True)
def fill_survey(self, survey, token, prev=None, **post):
        '''Display and validate a survey'''
cr, uid, context = request.cr, request.uid, request.context
survey_obj = request.registry['survey.survey']
user_input_obj = request.registry['survey.user_input']
# Controls if the survey can be displayed
errpage = self._check_bad_cases(cr, uid, request, survey_obj, survey, user_input_obj, context=context)
if errpage:
return errpage
# Load the user_input
try:
user_input_id = user_input_obj.search(cr, SUPERUSER_ID, [('token', '=', token)])[0]
except IndexError: # Invalid token
return request.website.render("website.403")
else:
user_input = user_input_obj.browse(cr, SUPERUSER_ID, [user_input_id], context=context)[0]
# Do not display expired survey (even if some pages have already been
# displayed -- There's a time for everything!)
errpage = self._check_deadline(cr, uid, user_input, context=context)
if errpage:
return errpage
# Select the right page
if user_input.state == 'new': # First page
page, page_nr, last = survey_obj.next_page(cr, uid, user_input, 0, go_back=False, context=context)
data = {'survey': survey, 'page': page, 'page_nr': page_nr, 'token': user_input.token}
if last:
data.update({'last': True})
return request.website.render('survey.survey', data)
elif user_input.state == 'done': # Display success message
return request.website.render('survey.sfinished', {'survey': survey,
'token': token,
'user_input': user_input})
elif user_input.state == 'skip':
flag = (True if prev and prev == 'prev' else False)
page, page_nr, last = survey_obj.next_page(cr, uid, user_input, user_input.last_displayed_page_id.id, go_back=flag, context=context)
            # Special case: if you click "previous" from the last page, then leave the survey and reopen it from the URL, avoid a crash
if not page:
page, page_nr, last = survey_obj.next_page(cr, uid, user_input, user_input.last_displayed_page_id.id, go_back=True, context=context)
data = {'survey': survey, 'page': page, 'page_nr': page_nr, 'token': user_input.token}
if last:
data.update({'last': True})
return request.website.render('survey.survey', data)
else:
return request.website.render("website.403")
# AJAX prefilling of a survey
@http.route(['/survey/prefill/<model("survey.survey"):survey>/<string:token>',
'/survey/prefill/<model("survey.survey"):survey>/<string:token>/<model("survey.page"):page>'],
type='http', auth='public', website=True)
def prefill(self, survey, token, page=None, **post):
cr, uid, context = request.cr, request.uid, request.context
user_input_line_obj = request.registry['survey.user_input_line']
ret = {}
# Fetch previous answers
if page:
ids = user_input_line_obj.search(cr, SUPERUSER_ID, [('user_input_id.token', '=', token), ('page_id', '=', page.id)], context=context)
else:
ids = user_input_line_obj.search(cr, SUPERUSER_ID, [('user_input_id.token', '=', token)], context=context)
previous_answers = user_input_line_obj.browse(cr, uid, ids, context=context)
# Return non empty answers in a JSON compatible format
for answer in previous_answers:
if not answer.skipped:
answer_tag = '%s_%s_%s' % (answer.survey_id.id, answer.page_id.id, answer.question_id.id)
answer_value = None
if answer.answer_type == 'free_text':
answer_value = answer.value_free_text
elif answer.answer_type == 'text' and answer.question_id.type == 'textbox':
answer_value = answer.value_text
elif answer.answer_type == 'text' and answer.question_id.type != 'textbox':
# here come comment answers for matrices, simple choice and multiple choice
answer_tag = "%s_%s" % (answer_tag, 'comment')
answer_value = answer.value_text
elif answer.answer_type == 'number':
answer_value = answer.value_number.__str__()
elif answer.answer_type == 'date':
answer_value = answer.value_date
elif answer.answer_type == 'suggestion' and not answer.value_suggested_row:
answer_value = answer.value_suggested.id
elif answer.answer_type == 'suggestion' and answer.value_suggested_row:
answer_tag = "%s_%s" % (answer_tag, answer.value_suggested_row.id)
answer_value = answer.value_suggested.id
if answer_value:
dict_soft_update(ret, answer_tag, answer_value)
else:
_logger.warning("[survey] No answer has been found for question %s marked as non skipped" % answer_tag)
return json.dumps(ret)
# AJAX scores loading for quiz correction mode
@http.route(['/survey/scores/<model("survey.survey"):survey>/<string:token>'],
type='http', auth='public', website=True)
def get_scores(self, survey, token, page=None, **post):
cr, uid, context = request.cr, request.uid, request.context
user_input_line_obj = request.registry['survey.user_input_line']
ret = {}
# Fetch answers
ids = user_input_line_obj.search(cr, SUPERUSER_ID, [('user_input_id.token', '=', token)], context=context)
previous_answers = user_input_line_obj.browse(cr, uid, ids, context=context)
# Compute score for each question
for answer in previous_answers:
tmp_score = ret.get(answer.question_id.id, 0.0)
ret.update({answer.question_id.id: tmp_score + answer.quizz_mark})
return json.dumps(ret)
# AJAX submission of a page
@http.route(['/survey/submit/<model("survey.survey"):survey>'],
type='http', methods=['POST'], auth='public', website=True)
def submit(self, survey, **post):
_logger.debug('Incoming data: %s', post)
page_id = int(post['page_id'])
cr, uid, context = request.cr, request.uid, request.context
survey_obj = request.registry['survey.survey']
questions_obj = request.registry['survey.question']
questions_ids = questions_obj.search(cr, uid, [('page_id', '=', page_id)], context=context)
questions = questions_obj.browse(cr, uid, questions_ids, context=context)
# Answer validation
errors = {}
for question in questions:
answer_tag = "%s_%s_%s" % (survey.id, page_id, question.id)
errors.update(questions_obj.validate_question(cr, uid, question, post, answer_tag, context=context))
ret = {}
if (len(errors) != 0):
# Return errors messages to webpage
ret['errors'] = errors
else:
# Store answers into database
user_input_obj = request.registry['survey.user_input']
user_input_line_obj = request.registry['survey.user_input_line']
try:
user_input_id = user_input_obj.search(cr, SUPERUSER_ID, [('token', '=', post['token'])], context=context)[0]
            except (KeyError, IndexError):  # Invalid or missing token
return request.website.render("website.403")
user_input = user_input_obj.browse(cr, SUPERUSER_ID, user_input_id, context=context)
user_id = uid if user_input.type != 'link' else SUPERUSER_ID
for question in questions:
answer_tag = "%s_%s_%s" % (survey.id, page_id, question.id)
user_input_line_obj.save_lines(cr, user_id, user_input_id, question, post, answer_tag, context=context)
go_back = post['button_submit'] == 'previous'
next_page, _, last = survey_obj.next_page(cr, uid, user_input, page_id, go_back=go_back, context=context)
vals = {'last_displayed_page_id': page_id}
if next_page is None and not go_back:
vals.update({'state': 'done'})
else:
vals.update({'state': 'skip'})
user_input_obj.write(cr, user_id, user_input_id, vals, context=context)
ret['redirect'] = '/survey/fill/%s/%s' % (survey.id, post['token'])
if go_back:
ret['redirect'] += '/prev'
return json.dumps(ret)
# Printing routes
@http.route(['/survey/print/<model("survey.survey"):survey>',
'/survey/print/<model("survey.survey"):survey>/<string:token>'],
type='http', auth='public', website=True)
def print_survey(self, survey, token=None, **post):
        '''Display a survey in printable view; if <token> is set, it will
grab the answers of the user_input_id that has <token>.'''
return request.website.render('survey.survey_print',
{'survey': survey,
'token': token,
'page_nr': 0,
'quizz_correction': True if survey.quizz_mode and token else False})
@http.route(['/survey/results/<model("survey.survey"):survey>'],
type='http', auth='user', website=True)
def survey_reporting(self, survey, token=None, **post):
'''Display survey Results & Statistics for given survey.'''
        result_template = 'survey.result'
current_filters = []
filter_display_data = []
filter_finish = False
survey_obj = request.registry['survey.survey']
if not survey.user_input_ids or not [input_id.id for input_id in survey.user_input_ids if input_id.state != 'new']:
result_template = 'survey.no_result'
if 'finished' in post:
post.pop('finished')
filter_finish = True
if post or filter_finish:
filter_data = self.get_filter_data(post)
current_filters = survey_obj.filter_input_ids(request.cr, request.uid, survey, filter_data, filter_finish, context=request.context)
filter_display_data = survey_obj.get_filter_display_data(request.cr, request.uid, filter_data, context=request.context)
return request.website.render(result_template,
{'survey': survey,
'survey_dict': self.prepare_result_dict(survey, current_filters),
'page_range': self.page_range,
'current_filters': current_filters,
'filter_display_data': filter_display_data,
'filter_finish': filter_finish
})
# Quick retroengineering of what is injected into the template for now:
# (TODO: flatten and simplify this)
#
# survey: a browse record of the survey
# survey_dict: very messy dict containing all the info to display answers
# {'page_ids': [
#
# ...
#
# {'page': browse record of the page,
# 'question_ids': [
#
# ...
#
# {'graph_data': data to be displayed on the graph
# 'input_summary': number of answered, skipped...
# 'prepare_result': {
# answers displayed in the tables
# }
# 'question': browse record of the question_ids
# }
#
# ...
#
# ]
# }
#
# ...
#
# ]
# }
#
# page_range: pager helper function
# current_filters: a list of ids
# filter_display_data: [{'labels': ['a', 'b'], question_text} ... ]
# filter_finish: boolean => only finished surveys or not
#
def prepare_result_dict(self,survey, current_filters=None):
"""Returns dictionary having values for rendering template"""
current_filters = current_filters if current_filters else []
survey_obj = request.registry['survey.survey']
result = {'page_ids': []}
for page in survey.page_ids:
page_dict = {'page': page, 'question_ids': []}
for question in page.question_ids:
question_dict = {'question':question, 'input_summary':survey_obj.get_input_summary(request.cr, request.uid, question, current_filters, context=request.context), 'prepare_result':survey_obj.prepare_result(request.cr, request.uid, question, current_filters, context=request.context), 'graph_data': self.get_graph_data(question, current_filters)}
page_dict['question_ids'].append(question_dict)
result['page_ids'].append(page_dict)
return result
def get_filter_data(self, post):
"""Returns data used for filtering the result"""
filters = []
for ids in post:
            # if the user added some random data to the query URI, ignore it
try:
row_id, answer_id = ids.split(',')
filters.append({'row_id': int(row_id), 'answer_id': int(answer_id)})
            except ValueError:
return filters
return filters
def page_range(self, total_record, limit):
'''Returns number of pages required for pagination'''
total = ceil(total_record / float(limit))
return range(1, int(total + 1))
def get_graph_data(self, question, current_filters=None):
'''Returns formatted data required by graph library on basis of filter'''
# TODO refactor this terrible method and merge it with prepare_result_dict
current_filters = current_filters if current_filters else []
survey_obj = request.registry['survey.survey']
result = []
if question.type == 'multiple_choice':
result.append({'key': ustr(question.question),
'values': survey_obj.prepare_result(request.cr, request.uid, question, current_filters, context=request.context)['answers']
})
if question.type == 'simple_choice':
result = survey_obj.prepare_result(request.cr, request.uid, question, current_filters, context=request.context)['answers']
if question.type == 'matrix':
data = survey_obj.prepare_result(request.cr, request.uid, question, current_filters, context=request.context)
for answer in data['answers']:
values = []
for row in data['rows']:
values.append({'text': data['rows'].get(row), 'count': data['result'].get((row, answer))})
result.append({'key': data['answers'].get(answer), 'values': values})
return json.dumps(result)
def dict_soft_update(dictionary, key, value):
''' Insert the pair <key>: <value> into the <dictionary>. If <key> is
already present, this function will append <value> to the list of
existing data (instead of erasing it) '''
if key in dictionary:
dictionary[key].append(value)
else:
dictionary.update({key: [value]})
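# Behaviour sketch for dict_soft_update(): values accumulate in a list instead
# of overwriting each other:
#
#     d = {}
#     dict_soft_update(d, 'a', 1)   # d == {'a': [1]}
#     dict_soft_update(d, 'a', 2)   # d == {'a': [1, 2]}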
|
ville-k/tensorflow | refs/heads/master | tensorflow/python/summary/writer/writer_cache.py | 75 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A cache for FileWriters."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
from tensorflow.python.framework import ops
from tensorflow.python.summary.writer.writer import FileWriter
class FileWriterCache(object):
"""Cache for file writers.
This class caches file writers, one per directory.
"""
# Cache, keyed by directory.
_cache = {}
# Lock protecting _FILE_WRITERS.
_lock = threading.RLock()
@staticmethod
def clear():
"""Clear cached summary writers. Currently only used for unit tests."""
with FileWriterCache._lock:
# Make sure all the writers are closed now (otherwise open file handles
# may hang around, blocking deletions on Windows).
for item in FileWriterCache._cache.values():
item.close()
FileWriterCache._cache = {}
@staticmethod
def get(logdir):
"""Returns the FileWriter for the specified directory.
Args:
logdir: str, name of the directory.
Returns:
A `FileWriter`.
"""
with FileWriterCache._lock:
if logdir not in FileWriterCache._cache:
FileWriterCache._cache[logdir] = FileWriter(
logdir, graph=ops.get_default_graph())
return FileWriterCache._cache[logdir]
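# Usage sketch (the log directory is hypothetical): repeated lookups for the
# same directory return the same cached FileWriter instance:
#
#     writer = FileWriterCache.get('/tmp/logs/run1')
#     assert FileWriterCache.get('/tmp/logs/run1') is writer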
|
mconstantin/watchdog | refs/heads/master | tests/test_emitter.py | 7 | # -*- coding: utf-8 -*-
#
# Copyright 2014 Thomas Amland <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
import os
import time
import pytest
import logging
from tests import Queue
from functools import partial
from .shell import mkdir, touch, mv, rm, mkdtemp
from watchdog.utils import platform
from watchdog.utils.unicode_paths import str_cls
from watchdog.events import *
from watchdog.observers.api import ObservedWatch
pytestmark = pytest.mark.skipif(not platform.is_linux() and not platform.is_darwin(), reason="")
if platform.is_linux():
from watchdog.observers.inotify import InotifyEmitter as Emitter
elif platform.is_darwin():
from watchdog.observers.fsevents2 import FSEventsEmitter as Emitter
logging.basicConfig(level=logging.DEBUG)
def setup_function(function):
global p, event_queue
tmpdir = os.path.realpath(mkdtemp())
p = partial(os.path.join, tmpdir)
event_queue = Queue()
def start_watching(path=None):
path = p('') if path is None else path
global emitter
emitter = Emitter(event_queue, ObservedWatch(path, recursive=True))
if platform.is_darwin():
        # FSEvents will report old events (like the create for mkdtemp in the
        # test setup). Waiting for a considerable time seems to 'flush' the events.
time.sleep(10)
emitter.start()
def teardown_function(function):
emitter.stop()
emitter.join(5)
rm(p(''), recursive=True)
assert not emitter.is_alive()
def test_create():
start_watching()
open(p('a'), 'a').close()
event = event_queue.get(timeout=5)[0]
assert event.src_path == p('a')
assert isinstance(event, FileCreatedEvent)
event = event_queue.get(timeout=5)[0]
assert os.path.normpath(event.src_path) == os.path.normpath(p(''))
assert isinstance(event, DirModifiedEvent)
def test_delete():
touch(p('a'))
start_watching()
rm(p('a'))
event = event_queue.get(timeout=5)[0]
assert event.src_path == p('a')
assert isinstance(event, FileDeletedEvent)
event = event_queue.get(timeout=5)[0]
assert os.path.normpath(event.src_path) == os.path.normpath(p(''))
assert isinstance(event, DirModifiedEvent)
def test_modify():
touch(p('a'))
start_watching()
touch(p('a'))
event = event_queue.get(timeout=5)[0]
assert event.src_path == p('a')
assert isinstance(event, FileModifiedEvent)
def test_move():
mkdir(p('dir1'))
mkdir(p('dir2'))
touch(p('dir1', 'a'))
start_watching()
mv(p('dir1', 'a'), p('dir2', 'b'))
event = event_queue.get(timeout=5)[0]
assert event.src_path == p('dir1', 'a')
assert event.dest_path == p('dir2', 'b')
assert isinstance(event, FileMovedEvent)
event = event_queue.get(timeout=5)[0]
assert event.src_path == p('dir1')
assert isinstance(event, DirModifiedEvent)
event = event_queue.get(timeout=5)[0]
assert event.src_path == p('dir2')
assert isinstance(event, DirModifiedEvent)
def test_move_to():
mkdir(p('dir1'))
mkdir(p('dir2'))
touch(p('dir1', 'a'))
start_watching(p('dir2'))
mv(p('dir1', 'a'), p('dir2', 'b'))
event = event_queue.get(timeout=5)[0]
assert isinstance(event, FileCreatedEvent)
assert event.src_path == p('dir2', 'b')
def test_move_from():
mkdir(p('dir1'))
mkdir(p('dir2'))
touch(p('dir1', 'a'))
start_watching(p('dir1'))
mv(p('dir1', 'a'), p('dir2', 'b'))
event = event_queue.get(timeout=5)[0]
assert isinstance(event, FileDeletedEvent)
assert event.src_path == p('dir1', 'a')
def test_separate_consecutive_moves():
mkdir(p('dir1'))
touch(p('dir1', 'a'))
touch(p('b'))
start_watching(p('dir1'))
mv(p('dir1', 'a'), p('c'))
mv(p('b'), p('dir1', 'd'))
event = event_queue.get(timeout=5)[0]
assert isinstance(event, FileDeletedEvent)
assert event.src_path == p('dir1', 'a')
assert isinstance(event_queue.get(timeout=5)[0], DirModifiedEvent)
event = event_queue.get(timeout=5)[0]
assert isinstance(event, FileCreatedEvent)
assert event.src_path == p('dir1', 'd')
assert isinstance(event_queue.get(timeout=5)[0], DirModifiedEvent)
@pytest.mark.skipif(platform.is_linux(), reason="bug. inotify will deadlock")
def test_delete_self():
mkdir(p('dir1'))
start_watching(p('dir1'))
rm(p('dir1'), True)
event_queue.get(timeout=5)[0]
def test_passing_unicode_should_give_unicode():
start_watching(p(''))
touch(p('a'))
event = event_queue.get(timeout=5)[0]
assert isinstance(event.src_path, str_cls)
def test_passing_bytes_should_give_bytes():
start_watching(p('').encode())
touch(p('a'))
event = event_queue.get(timeout=5)[0]
assert isinstance(event.src_path, bytes)
|
pennlabs/penn-mobile-server | refs/heads/master | buildingRedis.py | 1 | import csv
import json
import redis
from server import app
db = redis.StrictRedis.from_url(app.config["REDIS_URL"])
with open("buildings.csv") as f:
reader = csv.DictReader(f)
for row in reader:
db.set("buildings:%s" % (row["code_courses"]), json.dumps(row))
|
rspavel/spack | refs/heads/develop | var/spack/repos/builtin/packages/minimap2/package.py | 4 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Minimap2(PythonPackage):
"""Minimap2 is a versatile sequence alignment program that aligns DNA or
mRNA sequences against a large reference database.
Mappy provides a convenient interface to minimap2."""
homepage = "https://github.com/lh3/minimap2"
url = "https://github.com/lh3/minimap2/releases/download/v2.2/minimap2-2.2.tar.bz2"
version('2.14', sha256='9088b785bb0c33488ca3a27c8994648ce21a8be54cb117f5ecee26343facd03b')
version('2.10', sha256='52b36f726ec00bfca4a2ffc23036d1a2b5f96f0aae5a92fd826be6680c481c20')
version('2.2', sha256='7e8683aa74c4454a8cfe3821f405c4439082e24c152b4b834fdb56a117ecaed9')
conflicts('target=aarch64:', when='@:2.10')
depends_on('zlib', type='link')
depends_on('py-cython', type='build')
@run_after('install')
def install_minimap2(self):
make_arg = []
if self.spec.target.family == 'aarch64':
make_arg.extend([
'arm_neon=1',
'aarch64=1'
])
make(*make_arg)
mkdirp(prefix.bin)
install('minimap2', prefix.bin)
|
eeroniemi/dd-agent | refs/heads/master | tests/checks/integration/test_tokumx.py | 46 | # 3p
from nose.plugins.attrib import attr
# project
from checks import AgentCheck
from tests.checks.common import AgentCheckTest
GAUGES = [
# FIXME: For some reason these metrics are not always available
# 'tokumx.indexCounters.btree.missRatio',
# 'tokumx.globalLock.ratio',
# 'tokumx.mem.mapped',
# 'tokumx.replSet.health',
# 'tokumx.replSet.state',
# 'tokumx.replSet.replicationLag',
# 'tokumx.metrics.repl.buffer.maxSizeBytes',
'tokumx.connections.available',
'tokumx.connections.current',
'tokumx.cursors.timedOut',
'tokumx.cursors.totalOpen',
'tokumx.ft.alerts.checkpointFailures',
'tokumx.ft.alerts.locktreeRequestsPending',
'tokumx.ft.cachetable.size.current',
'tokumx.ft.cachetable.size.limit',
'tokumx.ft.cachetable.size.writing',
'tokumx.ft.checkpoint.lastComplete.time',
'tokumx.ft.compressionRatio.leaf',
'tokumx.ft.compressionRatio.nonleaf',
'tokumx.ft.compressionRatio.overall',
'tokumx.ft.locktree.size.current',
'tokumx.ft.locktree.size.limit',
'tokumx.mem.resident',
'tokumx.mem.virtual',
'tokumx.metrics.repl.buffer.count',
'tokumx.metrics.repl.buffer.sizeBytes',
'tokumx.stats.dataSize',
'tokumx.stats.indexSize',
'tokumx.stats.indexes',
'tokumx.stats.objects',
'tokumx.stats.storageSize',
'tokumx.uptime',
]
RATES = [
# FIXME: For some reason these metrics are not available
# 'tokumx.indexCounters.btree.missRatio',
# 'tokumx.indexCounters.btree.accessesps',
# 'tokumx.indexCounters.btree.hitsps',
# 'tokumx.indexCounters.btree.missesps',
# 'tokumx.metrics.operation.fastmodps',
# 'tokumx.metrics.record.movesps',
'tokumx.asserts.msgps',
'tokumx.asserts.regularps',
'tokumx.asserts.rolloversps',
'tokumx.asserts.userps',
'tokumx.asserts.warningps',
'tokumx.ft.alerts.longWaitEvents.cachePressure.countps',
'tokumx.ft.alerts.longWaitEvents.cachePressure.timeps',
'tokumx.ft.alerts.longWaitEvents.checkpointBegin.countps',
'tokumx.ft.alerts.longWaitEvents.checkpointBegin.timeps',
'tokumx.ft.alerts.longWaitEvents.fsync.countps',
'tokumx.ft.alerts.longWaitEvents.fsync.timeps',
'tokumx.ft.alerts.longWaitEvents.locktreeWait.countps',
'tokumx.ft.alerts.longWaitEvents.locktreeWait.timeps',
'tokumx.ft.alerts.longWaitEvents.locktreeWaitEscalation.countps',
'tokumx.ft.alerts.longWaitEvents.locktreeWaitEscalation.timeps',
'tokumx.ft.alerts.longWaitEvents.logBufferWaitps',
'tokumx.ft.cachetable.evictions.full.leaf.clean.bytesps',
'tokumx.ft.cachetable.evictions.full.leaf.clean.countps',
'tokumx.ft.cachetable.evictions.full.leaf.dirty.bytesps',
'tokumx.ft.cachetable.evictions.full.leaf.dirty.countps',
'tokumx.ft.cachetable.evictions.full.leaf.dirty.timeps',
'tokumx.ft.cachetable.evictions.full.nonleaf.clean.bytesps',
'tokumx.ft.cachetable.evictions.full.nonleaf.clean.countps',
'tokumx.ft.cachetable.evictions.full.nonleaf.dirty.bytesps',
'tokumx.ft.cachetable.evictions.full.nonleaf.dirty.countps',
'tokumx.ft.cachetable.evictions.full.nonleaf.dirty.timeps',
'tokumx.ft.cachetable.evictions.partial.leaf.clean.bytesps',
'tokumx.ft.cachetable.evictions.partial.leaf.clean.countps',
'tokumx.ft.cachetable.evictions.partial.nonleaf.clean.bytesps',
'tokumx.ft.cachetable.evictions.partial.nonleaf.clean.countps',
'tokumx.ft.cachetable.miss.countps',
'tokumx.ft.cachetable.miss.full.countps',
'tokumx.ft.cachetable.miss.full.timeps',
'tokumx.ft.cachetable.miss.partial.countps',
'tokumx.ft.cachetable.miss.partial.timeps',
'tokumx.ft.cachetable.miss.timeps',
'tokumx.ft.checkpoint.begin.timeps',
'tokumx.ft.checkpoint.countps',
'tokumx.ft.checkpoint.timeps',
'tokumx.ft.checkpoint.write.leaf.bytes.compressedps',
'tokumx.ft.checkpoint.write.leaf.bytes.uncompressedps',
'tokumx.ft.checkpoint.write.leaf.countps',
'tokumx.ft.checkpoint.write.leaf.timeps',
'tokumx.ft.checkpoint.write.nonleaf.bytes.compressedps',
'tokumx.ft.checkpoint.write.nonleaf.bytes.uncompressedps',
'tokumx.ft.checkpoint.write.nonleaf.countps',
'tokumx.ft.checkpoint.write.nonleaf.timeps',
'tokumx.ft.fsync.countps',
'tokumx.ft.fsync.timeps',
'tokumx.ft.log.bytesps',
'tokumx.ft.log.countps',
'tokumx.ft.log.timeps',
'tokumx.ft.serializeTime.leaf.compressps',
'tokumx.ft.serializeTime.leaf.decompressps',
'tokumx.ft.serializeTime.leaf.deserializeps',
'tokumx.ft.serializeTime.leaf.serializeps',
'tokumx.ft.serializeTime.nonleaf.compressps',
'tokumx.ft.serializeTime.nonleaf.decompressps',
'tokumx.ft.serializeTime.nonleaf.deserializeps',
'tokumx.ft.serializeTime.nonleaf.serializeps',
'tokumx.metrics.document.deletedps',
'tokumx.metrics.document.insertedps',
'tokumx.metrics.document.returnedps',
'tokumx.metrics.document.updatedps',
'tokumx.metrics.getLastError.wtime.numps',
'tokumx.metrics.getLastError.wtime.totalMillisps',
'tokumx.metrics.getLastError.wtimeoutsps',
'tokumx.metrics.operation.idhackps',
'tokumx.metrics.operation.scanAndOrderps',
'tokumx.metrics.queryExecutor.scannedps',
'tokumx.metrics.repl.apply.batches.numps',
'tokumx.metrics.repl.apply.batches.totalMillisps',
'tokumx.metrics.repl.apply.opsps',
'tokumx.metrics.repl.network.bytesps',
'tokumx.metrics.repl.network.getmores.numps',
'tokumx.metrics.repl.network.getmores.totalMillisps',
'tokumx.metrics.repl.network.opsps',
'tokumx.metrics.repl.network.readersCreatedps',
'tokumx.metrics.repl.oplog.insert.numps',
'tokumx.metrics.repl.oplog.insert.totalMillisps',
'tokumx.metrics.repl.oplog.insertBytesps',
'tokumx.metrics.ttl.deletedDocumentsps',
'tokumx.metrics.ttl.passesps',
'tokumx.opcounters.commandps',
'tokumx.opcounters.deleteps',
'tokumx.opcounters.getmoreps',
'tokumx.opcounters.insertps',
'tokumx.opcounters.queryps',
'tokumx.opcounters.updateps',
'tokumx.opcountersRepl.commandps',
'tokumx.opcountersRepl.deleteps',
'tokumx.opcountersRepl.getmoreps',
'tokumx.opcountersRepl.insertps',
'tokumx.opcountersRepl.queryps',
'tokumx.opcountersRepl.updateps',
]
IDX_HISTS = [
'size',
'count',
'avgObjSize',
'storageSize',
]
# LocalRates are computed as rates but sent as histograms
# FIXME ['nscanned', 'nscannedObjects', 'inserts', 'deletes'] are N/A
IDX_LCL_RATES = ['queries']
COLL_HISTS = [
'totalIndexSize',
'nindexes',
'size',
'count',
'nindexesbeingbuilt',
'totalIndexStorageSize',
'storageSize',
]
DB_STATS = [
'avgObjSize',
'collections',
'dataSize',
'indexSize',
'indexStorageSize',
'indexes',
'objects',
'storageSize'
]
HIST_SUFFIXES = ['avg', 'max', 'count', '95percentile', 'median']
@attr(requires='tokumx')
class TestTokuMXTest(AgentCheckTest):
CHECK_NAME = 'tokumx'
def testTokuMXCheck(self):
mongo_server = 'mongodb://localhost:37017/test'
config = {
'instances': [{
'server': mongo_server
}]
}
server_tag = 'server:%s' % mongo_server
self.run_check_twice(config)
# TODO: assert more tags
for mname in GAUGES:
self.assertMetric(mname, count=1, tags=[server_tag])
for mname in RATES:
self.assertMetric(mname, count=1)
for msuff in IDX_HISTS:
for hsuff in HIST_SUFFIXES:
self.assertMetric('tokumx.stats.idx.%s.%s' % (msuff, hsuff), count=1)
for msuff in IDX_LCL_RATES:
for hsuff in HIST_SUFFIXES:
self.assertMetric('tokumx.statsd.idx.%s.%s' % (msuff, hsuff), count=1)
for msuff in COLL_HISTS:
for hsuff in HIST_SUFFIXES:
self.assertMetric('tokumx.stats.coll.%s.%s' % (msuff, hsuff), count=1)
for msuff in DB_STATS:
for dbname in ('admin', 'local', 'test'):
self.assertMetric('tokumx.stats.db.%s' % (msuff), count=1, tags=[server_tag, 'db:%s' % dbname])
self.assertServiceCheck('tokumx.can_connect', count=1, status=AgentCheck.OK, tags=['db:test', 'host:localhost', 'port:37017'])
self.coverage_report()
|
oliverhr/odoo | refs/heads/8.0-pos-pademobile-payment | addons/pad/__openerp__.py | 249 | # -*- coding: utf-8 -*-
{
'name': 'Collaborative Pads',
'version': '2.0',
'category': 'Project Management',
'description': """
Adds enhanced support for (Ether)Pad attachments in the web client.
===================================================================
Lets the company customize which Pad installation should be used to link to new
pads (by default, http://etherpad.com/).
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/notes',
'depends': ['web'],
'data': [
'res_company.xml',
'views/pad.xml',
],
'demo': ['pad_demo.xml'],
'installable': True,
'auto_install': False,
'web': True,
'qweb' : ['static/src/xml/*.xml'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
ahua/pythondotorg | refs/heads/master | minutes/tests/test_views.py | 8 | import datetime
from django.core.urlresolvers import reverse
from django.contrib.auth import get_user_model
from django.test import TestCase
from ..models import Minutes
User = get_user_model()
class MinutesViewsTests(TestCase):
def setUp(self):
start_date = datetime.datetime.now()
last_month = start_date - datetime.timedelta(weeks=4)
two_months = last_month - datetime.timedelta(weeks=4)
self.m1 = Minutes.objects.create(
date=start_date,
content='Testing',
is_published=False,
)
self.m2 = Minutes.objects.create(
date=last_month,
content='Testing',
is_published=True,
)
self.m3 = Minutes.objects.create(
date=two_months,
content='Testing',
is_published=True,
)
self.admin_user = User.objects.create_user('admin', '[email protected]', 'adminpass')
self.admin_user.is_staff = True
self.admin_user.save()
def test_list_view(self):
response = self.client.get(reverse('minutes_list'))
self.assertEqual(response.status_code, 200)
self.assertNotIn(self.m1, response.context['minutes_list'])
self.assertIn(self.m2, response.context['minutes_list'])
self.assertIn(self.m3, response.context['minutes_list'])
# Test that staff can see drafts
self.client.login(username='admin', password='adminpass')
response = self.client.get(reverse('minutes_list'))
self.assertEqual(response.status_code, 200)
self.assertIn(self.m1, response.context['minutes_list'])
self.assertIn(self.m2, response.context['minutes_list'])
self.assertIn(self.m3, response.context['minutes_list'])
def test_detail_view(self):
response = self.client.get(reverse('minutes_detail', kwargs={
'year': self.m2.date.strftime("%Y"),
'month': self.m2.date.strftime("%m").zfill(2),
'day': self.m2.date.strftime("%d").zfill(2),
}))
self.assertEqual(response.status_code, 200)
self.assertEqual(self.m2, response.context['minutes'])
response = self.client.get(reverse('minutes_detail', kwargs={
'year': self.m1.date.strftime("%Y"),
'month': self.m1.date.strftime("%m").zfill(2),
'day': self.m1.date.strftime("%d").zfill(2),
}))
self.assertEqual(response.status_code, 404)
# Test that staff can see drafts
self.client.login(username='admin', password='adminpass')
response = self.client.get(reverse('minutes_detail', kwargs={
'year': self.m1.date.strftime("%Y"),
'month': self.m1.date.strftime("%m").zfill(2),
'day': self.m1.date.strftime("%d").zfill(2),
}))
self.assertEqual(response.status_code, 200)
self.assertEqual(self.m1, response.context['minutes'])
|
vertigo235/Sick-Beard-XEM | refs/heads/master | lib/enzyme/__init__.py | 168 | # -*- coding: utf-8 -*-
# enzyme - Video metadata parser
# Copyright 2011-2012 Antoine Bertin <[email protected]>
# Copyright 2003-2006 Thomas Schueppel <[email protected]>
# Copyright 2003-2006 Dirk Meyer <[email protected]>
#
# This file is part of enzyme.
#
# enzyme is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# enzyme is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with enzyme. If not, see <http://www.gnu.org/licenses/>.
import mimetypes
import os
import sys
from exceptions import *
PARSERS = [('asf', ['video/asf'], ['asf', 'wmv', 'wma']),
('flv', ['video/flv'], ['flv']),
('mkv', ['video/x-matroska', 'application/mkv'], ['mkv', 'mka', 'webm']),
('mp4', ['video/quicktime', 'video/mp4'], ['mov', 'qt', 'mp4', 'mp4a', '3gp', '3gp2', '3g2', 'mk2']),
('mpeg', ['video/mpeg'], ['mpeg', 'mpg', 'mp4', 'ts']),
('ogm', ['application/ogg'], ['ogm', 'ogg', 'ogv']),
('real', ['video/real'], ['rm', 'ra', 'ram']),
('riff', ['video/avi'], ['wav', 'avi'])
]
def parse(path):
"""Parse metadata of the given video
:param string path: path to the video file to parse
:return: a parser corresponding to the video's mimetype or extension
:rtype: :class:`~enzyme.core.AVContainer`
"""
if not os.path.isfile(path):
raise ValueError('Invalid path')
extension = os.path.splitext(path)[1][1:]
mimetype = mimetypes.guess_type(path)[0]
parser_ext = None
parser_mime = None
for (parser_name, parser_mimetypes, parser_extensions) in PARSERS:
if mimetype in parser_mimetypes:
parser_mime = parser_name
if extension in parser_extensions:
parser_ext = parser_name
parser = parser_mime or parser_ext
if not parser:
raise NoParserError()
mod = __import__(parser, globals=globals(), locals=locals(), fromlist=[], level=-1)
with open(path, 'rb') as f:
p = mod.Parser(f)
return p
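# Illustrative usage sketch (not part of the original package); the path below
# is a placeholder and the printed metadata depends on the parsed container:
#
#     import enzyme
#     metadata = enzyme.parse('/path/to/video.mkv')
#     print metadata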
|
machinebrains/neat-python | refs/heads/master | tests/test_simple_run.py | 1 | from __future__ import print_function
import os
from neatsociety import nn, population, statistics, visualize
from neatsociety.config import Config
def test_run():
xor_inputs = [[0, 0], [0, 1], [1, 0], [1, 1]]
xor_outputs = [0, 1, 1, 0]
def eval_fitness(genomes):
for g in genomes:
net = nn.create_feed_forward_phenotype(g)
error = 0.0
for inputs, expected in zip(xor_inputs, xor_outputs):
# Serial activation propagates the inputs through the entire network.
output = net.serial_activate(inputs)
error += (output[0] - expected) ** 2
# When the output matches expected for all inputs, fitness will reach
# its maximum value of 1.0.
g.fitness = 1 - error
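            # Worked example (illustrative): a perfect net returning [0, 1, 1, 0]
            # for the four XOR cases gives error == 0.0, so fitness == 1 - 0.0 == 1.0.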
local_dir = os.path.dirname(__file__)
config = Config(os.path.join(local_dir, 'test_configuration'))
pop = population.Population(config)
pop.run(eval_fitness, 10)
visualize.plot_stats(pop.statistics)
visualize.plot_species(pop.statistics)
winner = pop.statistics.best_genome()
# Validate winner.
for g in pop.statistics.most_fit_genomes:
assert winner.fitness >= g.fitness
visualize.draw_net(winner, view=False, filename="xor2-all.gv")
visualize.draw_net(winner, view=False, filename="xor2-enabled.gv", show_disabled=False)
visualize.draw_net(winner, view=False, filename="xor2-enabled-pruned.gv", show_disabled=False, prune_unused=True)
statistics.save_stats(pop.statistics)
statistics.save_species_count(pop.statistics)
statistics.save_species_fitness(pop.statistics)
|
fmarier/letsencrypt-debian | refs/heads/master | letsencrypt/client.py | 1 | """Let's Encrypt client API."""
import logging
import os
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import rsa
import OpenSSL
import zope.component
from acme import client as acme_client
from acme import jose
from acme import messages
from letsencrypt import account
from letsencrypt import auth_handler
from letsencrypt import configuration
from letsencrypt import constants
from letsencrypt import continuity_auth
from letsencrypt import crypto_util
from letsencrypt import errors
from letsencrypt import error_handler
from letsencrypt import interfaces
from letsencrypt import le_util
from letsencrypt import reverter
from letsencrypt import storage
from letsencrypt.display import ops as display_ops
from letsencrypt.display import enhancements
logger = logging.getLogger(__name__)
def _acme_from_config_key(config, key):
# TODO: Allow for other alg types besides RS256
return acme_client.Client(directory=config.server, key=key,
verify_ssl=(not config.no_verify_ssl))
def register(config, account_storage, tos_cb=None):
"""Register new account with an ACME CA.
This function takes care of generating fresh private key,
registering the account, optionally accepting CA Terms of Service
and finally saving the account. It should be called prior to
initialization of `Client`, unless account has already been created.
:param .IConfig config: Client configuration.
:param .AccountStorage account_storage: Account storage where newly
registered account will be saved to. Save happens only after TOS
acceptance step, so any account private keys or
`.RegistrationResource` will not be persisted if `tos_cb`
returns ``False``.
:param tos_cb: If ACME CA requires the user to accept a Terms of
Service before registering account, client action is
        necessary. For example, a CLI tool would prompt the user for
        acceptance. `tos_cb` must be a callable that should accept
`.RegistrationResource` and return a `bool`: ``True`` iff the
Terms of Service present in the contained
`.Registration.terms_of_service` is accepted by the client, and
``False`` otherwise. ``tos_cb`` will be called only if the
        client action is necessary, i.e. when ``terms_of_service is not
        None``. This argument is optional; if not supplied, it will
        default to automatic acceptance!
:raises letsencrypt.errors.Error: In case of any client problems, in
particular registration failure, or unaccepted Terms of Service.
:raises acme.errors.Error: In case of any protocol problems.
:returns: Newly registered and saved account, as well as protocol
API handle (should be used in `Client` initialization).
:rtype: `tuple` of `.Account` and `acme.client.Client`
"""
# Log non-standard actions, potentially wrong API calls
if account_storage.find_all():
logger.info("There are already existing accounts for %s", config.server)
if config.email is None:
logger.warn("Registering without email!")
# Each new registration shall use a fresh new key
key = jose.JWKRSA(key=jose.ComparableRSAKey(
rsa.generate_private_key(
public_exponent=65537,
key_size=config.rsa_key_size,
backend=default_backend())))
acme = _acme_from_config_key(config, key)
# TODO: add phone?
regr = acme.register(messages.NewRegistration.from_data(email=config.email))
if regr.terms_of_service is not None:
if tos_cb is not None and not tos_cb(regr):
raise errors.Error(
"Registration cannot proceed without accepting "
"Terms of Service.")
regr = acme.agree_to_tos(regr)
acc = account.Account(regr, key)
account.report_new_account(acc, config)
account_storage.save(acc)
return acc, acme
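# Illustrative sketch (not part of the original module): a caller would normally
# pair register() with an account storage implementation and feed the result to
# Client; the lambda below is a placeholder for a real Terms of Service prompt.
#
#     acc, acme = register(config, account_storage,
#                          tos_cb=lambda regr: True)  # auto-accept ToS
#     client = Client(config, acc, dv_auth, installer, acme=acme)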
class Client(object):
"""ACME protocol client.
:ivar .IConfig config: Client configuration.
:ivar .Account account: Account registered with `register`.
:ivar .AuthHandler auth_handler: Authorizations handler that will
dispatch DV and Continuity challenges to appropriate
authenticators (providing `.IAuthenticator` interface).
:ivar .IInstaller installer: Installer.
:ivar acme.client.Client acme: Optional ACME client API handle.
You might already have one from `register`.
"""
def __init__(self, config, account_, dv_auth, installer, acme=None):
"""Initialize a client.
:param .IAuthenticator dv_auth: Prepared (`.IAuthenticator.prepare`)
authenticator that can solve the `.constants.DV_CHALLENGES`.
"""
self.config = config
self.account = account_
self.installer = installer
# Initialize ACME if account is provided
if acme is None and self.account is not None:
acme = _acme_from_config_key(config, self.account.key)
self.acme = acme
# TODO: Check if self.config.enroll_autorenew is None. If
# so, set it based to the default: figure out if dv_auth is
# standalone (then default is False, otherwise default is True)
if dv_auth is not None:
cont_auth = continuity_auth.ContinuityAuthenticator(config,
installer)
self.auth_handler = auth_handler.AuthHandler(
dv_auth, cont_auth, self.acme, self.account)
else:
self.auth_handler = None
def _obtain_certificate(self, domains, csr):
"""Obtain certificate.
Internal function with precondition that `domains` are
consistent with identifiers present in the `csr`.
:param list domains: Domain names.
:param .le_util.CSR csr: DER-encoded Certificate Signing
Request. The key used to generate this CSR can be different
than `authkey`.
:returns: `.CertificateResource` and certificate chain (as
returned by `.fetch_chain`).
:rtype: tuple
"""
if self.auth_handler is None:
msg = ("Unable to obtain certificate because authenticator is "
"not set.")
logger.warning(msg)
raise errors.Error(msg)
if self.account.regr is None:
raise errors.Error("Please register with the ACME server first.")
logger.debug("CSR: %s, domains: %s", csr, domains)
authzr = self.auth_handler.get_authorizations(domains)
certr = self.acme.request_issuance(
jose.ComparableX509(OpenSSL.crypto.load_certificate_request(
OpenSSL.crypto.FILETYPE_ASN1, csr.data)),
authzr)
return certr, self.acme.fetch_chain(certr)
def obtain_certificate_from_csr(self, csr):
"""Obtain certficiate from CSR.
:param .le_util.CSR csr: DER-encoded Certificate Signing
Request.
:returns: `.CertificateResource` and certificate chain (as
returned by `.fetch_chain`).
:rtype: tuple
"""
return self._obtain_certificate(
# TODO: add CN to domains?
crypto_util.get_sans_from_csr(
csr.data, OpenSSL.crypto.FILETYPE_ASN1), csr)
def obtain_certificate(self, domains):
"""Obtains a certificate from the ACME server.
`.register` must be called before `.obtain_certificate`
:param set domains: domains to get a certificate
:returns: `.CertificateResource`, certificate chain (as
returned by `.fetch_chain`), and newly generated private key
(`.le_util.Key`) and DER-encoded Certificate Signing Request
(`.le_util.CSR`).
:rtype: tuple
"""
# Create CSR from names
key = crypto_util.init_save_key(
self.config.rsa_key_size, self.config.key_dir)
csr = crypto_util.init_save_csr(key, domains, self.config.csr_dir)
return self._obtain_certificate(domains, csr) + (key, csr)
def obtain_and_enroll_certificate(
self, domains, authenticator, installer, plugins):
"""Obtain and enroll certificate.
Get a new certificate for the specified domains using the specified
authenticator and installer, and then create a new renewable lineage
containing it.
:param list domains: Domains to request.
:param authenticator: The authenticator to use.
:type authenticator: :class:`letsencrypt.interfaces.IAuthenticator`
:param installer: The installer to use.
:type installer: :class:`letsencrypt.interfaces.IInstaller`
:param plugins: A PluginsFactory object.
:returns: A new :class:`letsencrypt.storage.RenewableCert` instance
referred to the enrolled cert lineage, or False if the cert could
not be obtained.
"""
certr, chain, key, _ = self.obtain_certificate(domains)
# TODO: remove this dirty hack
self.config.namespace.authenticator = plugins.find_init(
authenticator).name
if installer is not None:
self.config.namespace.installer = plugins.find_init(installer).name
# XXX: We clearly need a more general and correct way of getting
# options into the configobj for the RenewableCert instance.
# This is a quick-and-dirty way to do it to allow integration
# testing to start. (Note that the config parameter to new_lineage
# ideally should be a ConfigObj, but in this case a dict will be
# accepted in practice.)
params = vars(self.config.namespace)
config = {}
cli_config = configuration.RenewerConfiguration(self.config.namespace)
if (cli_config.config_dir != constants.CLI_DEFAULTS["config_dir"] or
cli_config.work_dir != constants.CLI_DEFAULTS["work_dir"]):
logger.warning(
"Non-standard path(s), might not work with crontab installed "
"by your operating system package manager")
lineage = storage.RenewableCert.new_lineage(
domains[0], OpenSSL.crypto.dump_certificate(
OpenSSL.crypto.FILETYPE_PEM, certr.body),
key.pem, crypto_util.dump_pyopenssl_chain(chain),
params, config, cli_config)
self._report_renewal_status(lineage)
return lineage
def _report_renewal_status(self, cert):
# pylint: disable=no-self-use
"""Informs the user about automatic renewal and deployment.
:param .RenewableCert cert: Newly issued certificate
"""
if ("autorenew" not in cert.configuration or
cert.configuration.as_bool("autorenew")):
if ("autodeploy" not in cert.configuration or
cert.configuration.as_bool("autodeploy")):
msg = "Automatic renewal and deployment has "
else:
msg = "Automatic renewal but not automatic deployment has "
else:
if ("autodeploy" not in cert.configuration or
cert.configuration.as_bool("autodeploy")):
msg = "Automatic deployment but not automatic renewal has "
else:
msg = "Automatic renewal and deployment has not "
msg += ("been enabled for your certificate. These settings can be "
"configured in the directories under {0}.").format(
cert.cli_config.renewal_configs_dir)
reporter = zope.component.getUtility(interfaces.IReporter)
reporter.add_message(msg, reporter.LOW_PRIORITY, True)
def save_certificate(self, certr, chain_cert, cert_path, chain_path):
# pylint: disable=no-self-use
"""Saves the certificate received from the ACME server.
:param certr: ACME "certificate" resource.
:type certr: :class:`acme.messages.Certificate`
:param list chain_cert:
:param str cert_path: Candidate path to a certificate.
:param str chain_path: Candidate path to a certificate chain.
:returns: cert_path, chain_path (absolute paths to the actual files)
:rtype: `tuple` of `str`
:raises IOError: If unable to find room to write the cert files
"""
for path in cert_path, chain_path:
le_util.make_or_verify_dir(
os.path.dirname(path), 0o755, os.geteuid(),
self.config.strict_permissions)
# try finally close
cert_chain_abspath = None
cert_file, act_cert_path = le_util.unique_file(cert_path, 0o644)
# TODO: Except
cert_pem = OpenSSL.crypto.dump_certificate(
OpenSSL.crypto.FILETYPE_PEM, certr.body)
try:
cert_file.write(cert_pem)
finally:
cert_file.close()
logger.info("Server issued certificate; certificate written to %s",
act_cert_path)
if chain_cert:
chain_file, act_chain_path = le_util.unique_file(
chain_path, 0o644)
# TODO: Except
chain_pem = crypto_util.dump_pyopenssl_chain(chain_cert)
try:
chain_file.write(chain_pem)
finally:
chain_file.close()
logger.info("Cert chain written to %s", act_chain_path)
# This expects a valid chain file
cert_chain_abspath = os.path.abspath(act_chain_path)
return os.path.abspath(act_cert_path), cert_chain_abspath
def deploy_certificate(self, domains, privkey_path, cert_path, chain_path):
"""Install certificate
:param list domains: list of domains to install the certificate
:param str privkey_path: path to certificate private key
:param str cert_path: certificate file path (optional)
:param str chain_path: chain file path
"""
if self.installer is None:
logger.warning("No installer specified, client is unable to deploy"
"the certificate")
raise errors.Error("No installer available")
chain_path = None if chain_path is None else os.path.abspath(chain_path)
with error_handler.ErrorHandler(self.installer.recovery_routine):
for dom in domains:
# TODO: Provide a fullchain reference for installers like
# nginx that want it
self.installer.deploy_cert(
dom, os.path.abspath(cert_path),
os.path.abspath(privkey_path), chain_path)
self.installer.save("Deployed Let's Encrypt Certificate")
# sites may have been enabled / final cleanup
self.installer.restart()
def enhance_config(self, domains, redirect=None):
"""Enhance the configuration.
.. todo:: This needs to handle the specific enhancements offered by the
installer. We will also have to find a method to pass in the chosen
values efficiently.
:param list domains: list of domains to configure
:param redirect: If traffic should be forwarded from HTTP to HTTPS.
:type redirect: bool or None
:raises .errors.Error: if no installer is specified in the
client.
"""
if self.installer is None:
logger.warning("No installer is specified, there isn't any "
"configuration to enhance.")
raise errors.Error("No installer available")
if redirect is None:
redirect = enhancements.ask("redirect")
# When support for more enhancements are added, the call to the
# plugin's `enhance` function should be wrapped by an ErrorHandler
if redirect:
self.redirect_to_ssl(domains)
def redirect_to_ssl(self, domains):
"""Redirect all traffic from HTTP to HTTPS
        :param list domains: domains to redirect from HTTP to HTTPS
"""
with error_handler.ErrorHandler(self.installer.recovery_routine):
for dom in domains:
try:
self.installer.enhance(dom, "redirect")
except errors.PluginError:
logger.warn("Unable to perform redirect for %s", dom)
raise
self.installer.save("Add Redirects")
self.installer.restart()
def validate_key_csr(privkey, csr=None):
"""Validate Key and CSR files.
Verifies that the client key and csr arguments are valid and correspond to
one another. This does not currently check the names in the CSR due to
the inability to read SANs from CSRs in python crypto libraries.
If csr is left as None, only the key will be validated.
:param privkey: Key associated with CSR
:type privkey: :class:`letsencrypt.le_util.Key`
:param .le_util.CSR csr: CSR
:raises .errors.Error: when validation fails
"""
# TODO: Handle all of these problems appropriately
# The client can eventually do things like prompt the user
# and allow the user to take more appropriate actions
# Key must be readable and valid.
if privkey.pem and not crypto_util.valid_privkey(privkey.pem):
raise errors.Error("The provided key is not a valid key")
if csr:
if csr.form == "der":
csr_obj = OpenSSL.crypto.load_certificate_request(
OpenSSL.crypto.FILETYPE_ASN1, csr.data)
csr = le_util.CSR(csr.file, OpenSSL.crypto.dump_certificate(
OpenSSL.crypto.FILETYPE_PEM, csr_obj), "pem")
# If CSR is provided, it must be readable and valid.
if csr.data and not crypto_util.valid_csr(csr.data):
raise errors.Error("The provided CSR is not a valid CSR")
# If both CSR and key are provided, the key must be the same key used
# in the CSR.
if csr.data and privkey.pem:
if not crypto_util.csr_matches_pubkey(
csr.data, privkey.pem):
raise errors.Error("The key and CSR do not match")
def rollback(default_installer, checkpoints, config, plugins):
"""Revert configuration the specified number of checkpoints.
:param int checkpoints: Number of checkpoints to revert.
:param config: Configuration.
:type config: :class:`letsencrypt.interfaces.IConfig`
"""
    # Misconfigurations are only a slight problem... allow the user to rollback
installer = display_ops.pick_installer(
config, default_installer, plugins, question="Which installer "
"should be used for rollback?")
# No Errors occurred during init... proceed normally
# If installer is None... couldn't find an installer... there shouldn't be
# anything to rollback
if installer is not None:
installer.rollback_checkpoints(checkpoints)
installer.restart()
def view_config_changes(config):
"""View checkpoints and associated configuration changes.
.. note:: This assumes that the installation is using a Reverter object.
:param config: Configuration.
:type config: :class:`letsencrypt.interfaces.IConfig`
"""
rev = reverter.Reverter(config)
rev.recovery_routine()
rev.view_config_changes()
|
markslwong/tensorflow | refs/heads/master | tensorflow/contrib/learn/python/learn/dataframe/transforms/difference.py | 90 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A `Transform` that performs subtraction on two `Series`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.dataframe import series
from tensorflow.contrib.learn.python.learn.dataframe import transform
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import sparse_ops
def _negate_sparse(st):
return sparse_tensor.SparseTensor(indices=st.indices,
values=-st.values,
dense_shape=st.dense_shape)
@series.Series.register_binary_op("__sub__")
class Difference(transform.TensorFlowTransform):
"""Subtracts one 'Series` from another."""
def __init__(self):
super(Difference, self).__init__()
@property
def name(self):
return "difference"
@property
def input_valency(self):
return 2
@property
def _output_names(self):
return "output",
def _apply_transform(self, input_tensors, **kwargs):
pair_sparsity = (isinstance(input_tensors[0], sparse_tensor.SparseTensor),
isinstance(input_tensors[1], sparse_tensor.SparseTensor))
if pair_sparsity == (False, False):
result = input_tensors[0] - input_tensors[1]
# note tf.sparse_add accepts the mixed cases,
# so long as at least one input is sparse.
elif not pair_sparsity[1]:
result = sparse_ops.sparse_add(input_tensors[0], - input_tensors[1])
else:
result = sparse_ops.sparse_add(input_tensors[0],
_negate_sparse(input_tensors[1]))
# pylint: disable=not-callable
return self.return_type(result)
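# Illustrative usage sketch (not part of the original module): because the
# transform is registered as Series.__sub__, subtracting one Series from another
# yields a new Series backed by Difference; the names below are placeholders.
#
#     diff_series = some_series - other_series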
|
jbalogh/zamboni | refs/heads/master | apps/addons/tests/test_decorators.py | 1 | from django import http
import mock
from nose.tools import eq_
from test_utils import RequestFactory
import amo.tests
from addons.models import Addon
from addons import decorators as dec
class TestAddonView(amo.tests.TestCase):
def setUp(self):
self.addon = Addon.objects.create(slug='x', type=1)
self.func = mock.Mock()
self.func.return_value = mock.sentinel.OK
self.func.__name__ = 'mock_function'
self.view = dec.addon_view(self.func)
self.request = mock.Mock()
self.slug_path = '/addon/%s/reviews' % self.addon.slug
self.request.path = self.id_path = '/addon/%s/reviews' % self.addon.id
self.request.GET = {}
def test_301_by_id(self):
r = self.view(self.request, str(self.addon.id))
eq_(r.status_code, 301)
eq_(r['Location'], self.slug_path)
def test_301_with_querystring(self):
self.request.GET = mock.Mock()
self.request.GET.urlencode.return_value = 'q=1'
r = self.view(self.request, str(self.addon.id))
eq_(r.status_code, 301)
eq_(r['Location'], self.slug_path + '?q=1')
def test_200_by_slug(self):
r = self.view(self.request, self.addon.slug)
eq_(r, mock.sentinel.OK)
def test_404_by_id(self):
with self.assertRaises(http.Http404):
self.view(self.request, str(self.addon.id * 2))
def test_404_by_slug(self):
with self.assertRaises(http.Http404):
self.view(self.request, self.addon.slug + 'xx')
def test_alternate_qs_301_by_id(self):
qs = lambda: Addon.objects.filter(type=1)
view = dec.addon_view_factory(qs=qs)(self.func)
r = view(self.request, str(self.addon.id))
eq_(r.status_code, 301)
eq_(r['Location'], self.slug_path)
def test_alternate_qs_200_by_slug(self):
qs = lambda: Addon.objects.filter(type=1)
view = dec.addon_view_factory(qs=qs)(self.func)
r = view(self.request, self.addon.slug)
eq_(r, mock.sentinel.OK)
def test_alternate_qs_404_by_id(self):
qs = lambda: Addon.objects.filter(type=2)
view = dec.addon_view_factory(qs=qs)(self.func)
with self.assertRaises(http.Http404):
view(self.request, str(self.addon.id))
def test_alternate_qs_404_by_slug(self):
qs = lambda: Addon.objects.filter(type=2)
view = dec.addon_view_factory(qs=qs)(self.func)
with self.assertRaises(http.Http404):
view(self.request, self.addon.slug)
def test_addon_no_slug(self):
a = Addon.objects.create(type=1, name='xxxx')
r = self.view(self.request, a.slug)
eq_(r, mock.sentinel.OK)
def test_slug_isdigit(self):
a = Addon.objects.create(type=1, name='xxxx')
a.update(slug=str(a.id))
r = self.view(self.request, a.slug)
eq_(r, mock.sentinel.OK)
request, addon = self.func.call_args[0]
eq_(addon, a)
class TestPremiumDecorators(amo.tests.TestCase):
def setUp(self):
self.addon = Addon.objects.create(type=amo.ADDON_EXTENSION)
self.func = mock.Mock()
self.func.return_value = True
self.func.__name__ = 'mock_function'
def test_cant_become_premium(self):
self.addon.update(status=amo.STATUS_PUBLIC)
view = dec.can_become_premium(self.func)
res = view(RequestFactory().get('/'), self.addon.pk, self.addon)
eq_(res.status_code, 403)
def test_can_become_premium(self):
self.addon.update(status=amo.STATUS_NOMINATED)
view = dec.can_become_premium(self.func)
assert view(RequestFactory().get('/'), self.addon.pk, self.addon)
|
kenwang815/KodiPlugins | refs/heads/master | script.module.youtube.dl/lib/youtube_dl/extractor/freespeech.py | 18 | from __future__ import unicode_literals
import re
import json
from .common import InfoExtractor
class FreespeechIE(InfoExtractor):
IE_NAME = 'freespeech.org'
_VALID_URL = r'https://www\.freespeech\.org/video/(?P<title>.+)'
_TEST = {
'add_ie': ['Youtube'],
'url': 'https://www.freespeech.org/video/obama-romney-campaign-colorado-ahead-debate-0',
'info_dict': {
'id': 'poKsVCZ64uU',
'ext': 'webm',
'title': 'Obama, Romney Campaign in Colorado Ahead of Debate',
'description': 'Obama, Romney Campaign in Colorado Ahead of Debate',
'uploader': 'freespeechtv',
'uploader_id': 'freespeechtv',
'upload_date': '20121002',
},
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
title = mobj.group('title')
webpage = self._download_webpage(url, title)
info_json = self._search_regex(r'jQuery.extend\(Drupal.settings, ({.*?})\);', webpage, 'info')
info = json.loads(info_json)
return {
'_type': 'url',
'url': info['jw_player']['basic_video_node_player']['file'],
'ie_key': 'Youtube',
}
|
jeplasenciap/gespai | refs/heads/master | personal/tests.py | 24123 | from django.test import TestCase
# Create your tests here.
|
nicoboss/Floatmotion | refs/heads/master | OpenGL/raw/GL/ATI/envmap_bumpmap.py | 9 | '''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GL_ATI_envmap_bumpmap'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GL,'GL_ATI_envmap_bumpmap',error_checker=_errors._error_checker)
GL_BUMP_ENVMAP_ATI=_C('GL_BUMP_ENVMAP_ATI',0x877B)
GL_BUMP_NUM_TEX_UNITS_ATI=_C('GL_BUMP_NUM_TEX_UNITS_ATI',0x8777)
GL_BUMP_ROT_MATRIX_ATI=_C('GL_BUMP_ROT_MATRIX_ATI',0x8775)
GL_BUMP_ROT_MATRIX_SIZE_ATI=_C('GL_BUMP_ROT_MATRIX_SIZE_ATI',0x8776)
GL_BUMP_TARGET_ATI=_C('GL_BUMP_TARGET_ATI',0x877C)
GL_BUMP_TEX_UNITS_ATI=_C('GL_BUMP_TEX_UNITS_ATI',0x8778)
GL_DU8DV8_ATI=_C('GL_DU8DV8_ATI',0x877A)
GL_DUDV_ATI=_C('GL_DUDV_ATI',0x8779)
@_f
@_p.types(None,_cs.GLenum,arrays.GLfloatArray)
def glGetTexBumpParameterfvATI(pname,param):pass
@_f
@_p.types(None,_cs.GLenum,arrays.GLintArray)
def glGetTexBumpParameterivATI(pname,param):pass
@_f
@_p.types(None,_cs.GLenum,arrays.GLfloatArray)
def glTexBumpParameterfvATI(pname,param):pass
@_f
@_p.types(None,_cs.GLenum,arrays.GLintArray)
def glTexBumpParameterivATI(pname,param):pass
|
treeio/treeio | refs/heads/2.0 | treeio/projects/south_migrations/0003_auto__add_field_tasktimeslot_user.py | 6 | # encoding: utf-8
# Copyright 2011 Tree.io Limited
# This file is part of Treeio.
# License www.tree.io/license
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'TaskTimeSlot.user'
db.add_column('projects_tasktimeslot', 'user', self.gf(
'django.db.models.fields.related.ForeignKey')(default=1, to=orm['core.User']), keep_default=False)
def backwards(self, orm):
# Deleting field 'TaskTimeSlot.user'
db.delete_column('projects_tasktimeslot', 'user_id')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'core.accessentity': {
'Meta': {'object_name': 'AccessEntity'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'core.comment': {
'Meta': {'object_name': 'Comment'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.User']", 'null': 'True', 'blank': 'True'}),
'body': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'dislikes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'comments_disliked'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'likes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'comments_liked'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.User']"})
},
'core.group': {
'Meta': {'ordering': "['name']", 'object_name': 'Group'},
'accessentity_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.AccessEntity']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'child_set'", 'null': 'True', 'to': "orm['core.Group']"})
},
'core.object': {
'Meta': {'object_name': 'Object'},
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'comments'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.Comment']"}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'objects_created'", 'null': 'True', 'to': "orm['core.User']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'dislikes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'objects_disliked'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.User']"}),
'full_access': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'objects_full_access'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.AccessEntity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'likes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'objects_liked'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.User']"}),
'links': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'links_rel_+'", 'null': 'True', 'to': "orm['core.Object']"}),
'nuvius_resource': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'object_name': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'object_type': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'read_access': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'objects_read_access'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.AccessEntity']"}),
'subscribers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'subscriptions'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.User']"}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.Tag']", 'null': 'True', 'blank': 'True'}),
'trash': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'core.tag': {
'Meta': {'ordering': "['name']", 'object_name': 'Tag'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'})
},
'core.user': {
'Meta': {'ordering': "['name']", 'object_name': 'User'},
'accessentity_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.AccessEntity']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'default_group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'default_user_set'", 'null': 'True', 'to': "orm['core.AccessEntity']"}),
'disabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_access': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'other_groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.Group']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'identities.contact': {
'Meta': {'ordering': "['name']", 'object_name': 'Contact', '_ormbases': ['core.Object']},
'contact_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['identities.ContactType']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'child_set'", 'null': 'True', 'to': "orm['identities.Contact']"}),
'related_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Group']", 'null': 'True', 'blank': 'True'}),
'related_user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.AccessEntity']", 'null': 'True', 'blank': 'True'})
},
'identities.contactfield': {
'Meta': {'ordering': "['name']", 'object_name': 'ContactField', '_ormbases': ['core.Object']},
'allowed_values': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'field_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'identities.contacttype': {
'Meta': {'ordering': "['name']", 'object_name': 'ContactType', '_ormbases': ['core.Object']},
'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'fields': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['identities.ContactField']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'projects.milestone': {
'Meta': {'ordering': "['name']", 'object_name': 'Milestone', '_ormbases': ['core.Object']},
'details': ('django.db.models.fields.TextField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['projects.Project']"}),
'start_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['projects.TaskStatus']"})
},
'projects.project': {
'Meta': {'ordering': "['name']", 'object_name': 'Project', '_ormbases': ['core.Object']},
'client': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'client'", 'null': 'True', 'to': "orm['identities.Contact']"}),
'details': ('django.db.models.fields.TextField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'manager': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'manager'", 'null': 'True', 'to': "orm['identities.Contact']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'child_set'", 'null': 'True', 'to': "orm['projects.Project']"})
},
'projects.task': {
'Meta': {'ordering': "('-priority', 'name')", 'object_name': 'Task', '_ormbases': ['core.Object']},
'assigned': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.User']", 'null': 'True', 'blank': 'True'}),
'caller': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['identities.Contact']", 'null': 'True', 'blank': 'True'}),
'details': ('django.db.models.fields.TextField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'estimated_time': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'milestone': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['projects.Milestone']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'child_set'", 'null': 'True', 'to': "orm['projects.Task']"}),
'priority': ('django.db.models.fields.IntegerField', [], {'default': '3'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['projects.Project']"}),
'start_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['projects.TaskStatus']"})
},
'projects.taskrecord': {
'Meta': {'object_name': 'TaskRecord', '_ormbases': ['core.Object']},
'details': ('django.db.models.fields.TextField', [], {}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'record_type': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'task': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['projects.Task']"})
},
'projects.taskstatus': {
'Meta': {'ordering': "('hidden', '-active', 'name')", 'object_name': 'TaskStatus', '_ormbases': ['core.Object']},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'details': ('django.db.models.fields.TextField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'})
},
'projects.tasktimeslot': {
'Meta': {'ordering': "['-date_created']", 'object_name': 'TaskTimeSlot', '_ormbases': ['core.Object']},
'details': ('django.db.models.fields.TextField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'task': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['projects.Task']"}),
'time_from': ('django.db.models.fields.DateTimeField', [], {}),
'time_to': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'timezone': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.User']"})
}
}
complete_apps = ['projects']
|
rosudrag/Freemium-winner | refs/heads/master | VirtualEnvironment/Lib/site-packages/sqlalchemy/dialects/sybase/pysybase.py | 80 | # sybase/pysybase.py
# Copyright (C) 2010-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: sybase+pysybase
:name: Python-Sybase
:dbapi: Sybase
:connectstring: sybase+pysybase://<username>:<password>@<dsn>/\
[database name]
:url: http://python-sybase.sourceforge.net/
Unicode Support
---------------
The python-sybase driver does not appear to support non-ASCII strings of any
kind at this time.
"""
from sqlalchemy import types as sqltypes, processors
from sqlalchemy.dialects.sybase.base import SybaseDialect, \
SybaseExecutionContext, SybaseSQLCompiler
class _SybNumeric(sqltypes.Numeric):
def result_processor(self, dialect, type_):
if not self.asdecimal:
return processors.to_float
else:
return sqltypes.Numeric.result_processor(self, dialect, type_)
class SybaseExecutionContext_pysybase(SybaseExecutionContext):
def set_ddl_autocommit(self, dbapi_connection, value):
if value:
# call commit() on the Sybase connection directly,
# to avoid any side effects of calling a Connection
# transactional method inside of pre_exec()
dbapi_connection.commit()
def pre_exec(self):
SybaseExecutionContext.pre_exec(self)
for param in self.parameters:
for key in list(param):
param["@" + key] = param[key]
del param[key]
class SybaseSQLCompiler_pysybase(SybaseSQLCompiler):
def bindparam_string(self, name, **kw):
return "@" + name
class SybaseDialect_pysybase(SybaseDialect):
driver = 'pysybase'
execution_ctx_cls = SybaseExecutionContext_pysybase
statement_compiler = SybaseSQLCompiler_pysybase
colspecs = {
sqltypes.Numeric: _SybNumeric,
sqltypes.Float: sqltypes.Float
}
@classmethod
def dbapi(cls):
import Sybase
return Sybase
def create_connect_args(self, url):
opts = url.translate_connect_args(username='user', password='passwd')
return ([opts.pop('host')], opts)
def do_executemany(self, cursor, statement, parameters, context=None):
# calling python-sybase executemany yields:
# TypeError: string too long for buffer
for param in parameters:
cursor.execute(statement, param)
def _get_server_version_info(self, connection):
vers = connection.scalar("select @@version_number")
# i.e. 15500, 15000, 12500 == (15, 5, 0, 0), (15, 0, 0, 0),
# (12, 5, 0, 0)
return (vers / 1000, vers % 1000 / 100, vers % 100 / 10, vers % 10)
def is_disconnect(self, e, connection, cursor):
if isinstance(e, (self.dbapi.OperationalError,
self.dbapi.ProgrammingError)):
msg = str(e)
return ('Unable to complete network request to host' in msg or
'Invalid connection state' in msg or
'Invalid cursor state' in msg)
else:
return False
dialect = SybaseDialect_pysybase
|
EmanueleCannizzaro/scons | refs/heads/master | test/MSVS/vs-11.0-files.py | 1 | #!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/MSVS/vs-11.0-files.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
"""
Test that we can generate Visual Studio 11.0 project (.vcxproj) and
solution (.sln) files that look correct.
"""
import os
import TestSConsMSVS
test = TestSConsMSVS.TestSConsMSVS()
host_arch = test.get_vs_host_arch()
# Make the test infrastructure think we have this version of MSVS installed.
test._msvs_versions = ['11.0']
expected_slnfile = TestSConsMSVS.expected_slnfile_11_0
expected_vcprojfile = TestSConsMSVS.expected_vcprojfile_11_0
SConscript_contents = TestSConsMSVS.SConscript_contents_11_0
test.write('SConstruct', SConscript_contents%{'HOST_ARCH': host_arch})
test.run(arguments="Test.vcxproj")
test.must_exist(test.workpath('Test.vcxproj'))
test.must_exist(test.workpath('Test.vcxproj.filters'))
vcxproj = test.read('Test.vcxproj', 'r')
expect = test.msvs_substitute(expected_vcprojfile, '11.0', None, 'SConstruct')
# don't compare the pickled data
assert vcxproj[:len(expect)] == expect, test.diff_substr(expect, vcxproj)
test.must_exist(test.workpath('Test.sln'))
sln = test.read('Test.sln', 'r')
expect = test.msvs_substitute(expected_slnfile, '11.0', None, 'SConstruct')
# don't compare the pickled data
assert sln[:len(expect)] == expect, test.diff_substr(expect, sln)
test.run(arguments='-c .')
test.must_not_exist(test.workpath('Test.vcxproj'))
test.must_not_exist(test.workpath('Test.vcxproj.filters'))
test.must_not_exist(test.workpath('Test.sln'))
test.run(arguments='Test.vcxproj')
test.must_exist(test.workpath('Test.vcxproj'))
test.must_exist(test.workpath('Test.vcxproj.filters'))
test.must_exist(test.workpath('Test.sln'))
test.run(arguments='-c Test.sln')
test.must_not_exist(test.workpath('Test.vcxproj'))
test.must_not_exist(test.workpath('Test.vcxproj.filters'))
test.must_not_exist(test.workpath('Test.sln'))
# Test that running SCons with $PYTHON_ROOT in the environment
# changes the .vcxproj output as expected.
os.environ['PYTHON_ROOT'] = 'xyzzy'
python = os.path.join('$(PYTHON_ROOT)', os.path.split(TestSConsMSVS.python)[1])
test.run(arguments='Test.vcxproj')
test.must_exist(test.workpath('Test.vcxproj'))
vcxproj = test.read('Test.vcxproj', 'r')
expect = test.msvs_substitute(expected_vcprojfile, '11.0', None, 'SConstruct',
python=python)
# don't compare the pickled data
assert vcxproj[:len(expect)] == expect, test.diff_substr(expect, vcxproj)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
daliwangi/bitcoin | refs/heads/master | test/functional/decodescript.py | 35 | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test decoding scripts via decodescript RPC command."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.mininode import *
from io import BytesIO
class DecodeScriptTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 1
def decodescript_script_sig(self):
signature = '304502207fa7a6d1e0ee81132a269ad84e68d695483745cde8b541e3bf630749894e342a022100c1f7ab20e13e22fb95281a870f3dcf38d782e53023ee313d741ad0cfbc0c509001'
push_signature = '48' + signature
public_key = '03b0da749730dc9b4b1f4a14d6902877a92541f5368778853d9c4a0cb7802dcfb2'
push_public_key = '21' + public_key
# below are test cases for all of the standard transaction types
# 1) P2PK scriptSig
# the scriptSig of a public key scriptPubKey simply pushes a signature onto the stack
rpc_result = self.nodes[0].decodescript(push_signature)
assert_equal(signature, rpc_result['asm'])
# 2) P2PKH scriptSig
rpc_result = self.nodes[0].decodescript(push_signature + push_public_key)
assert_equal(signature + ' ' + public_key, rpc_result['asm'])
# 3) multisig scriptSig
# this also tests the leading portion of a P2SH multisig scriptSig
# OP_0 <A sig> <B sig>
rpc_result = self.nodes[0].decodescript('00' + push_signature + push_signature)
assert_equal('0 ' + signature + ' ' + signature, rpc_result['asm'])
# 4) P2SH scriptSig
# an empty P2SH redeemScript is valid and makes for a very simple test case.
# thus, such a spending scriptSig would just need to pass the outer redeemScript
# hash test and leave true on the top of the stack.
rpc_result = self.nodes[0].decodescript('5100')
assert_equal('1 0', rpc_result['asm'])
# 5) null data scriptSig - no such thing because null data scripts can not be spent.
# thus, no test case for that standard transaction type is here.
def decodescript_script_pub_key(self):
public_key = '03b0da749730dc9b4b1f4a14d6902877a92541f5368778853d9c4a0cb7802dcfb2'
push_public_key = '21' + public_key
public_key_hash = '11695b6cd891484c2d49ec5aa738ec2b2f897777'
push_public_key_hash = '14' + public_key_hash
# below are test cases for all of the standard transaction types
# 1) P2PK scriptPubKey
# <pubkey> OP_CHECKSIG
rpc_result = self.nodes[0].decodescript(push_public_key + 'ac')
assert_equal(public_key + ' OP_CHECKSIG', rpc_result['asm'])
# 2) P2PKH scriptPubKey
# OP_DUP OP_HASH160 <PubKeyHash> OP_EQUALVERIFY OP_CHECKSIG
rpc_result = self.nodes[0].decodescript('76a9' + push_public_key_hash + '88ac')
assert_equal('OP_DUP OP_HASH160 ' + public_key_hash + ' OP_EQUALVERIFY OP_CHECKSIG', rpc_result['asm'])
# 3) multisig scriptPubKey
# <m> <A pubkey> <B pubkey> <C pubkey> <n> OP_CHECKMULTISIG
# just imagine that the pub keys used below are different.
# for our purposes here it does not matter that they are the same even though it is unrealistic.
rpc_result = self.nodes[0].decodescript('52' + push_public_key + push_public_key + push_public_key + '53ae')
assert_equal('2 ' + public_key + ' ' + public_key + ' ' + public_key + ' 3 OP_CHECKMULTISIG', rpc_result['asm'])
# 4) P2SH scriptPubKey
# OP_HASH160 <Hash160(redeemScript)> OP_EQUAL.
# push_public_key_hash here should actually be the hash of a redeem script.
# but this works the same for purposes of this test.
rpc_result = self.nodes[0].decodescript('a9' + push_public_key_hash + '87')
assert_equal('OP_HASH160 ' + public_key_hash + ' OP_EQUAL', rpc_result['asm'])
# 5) null data scriptPubKey
# use a signature look-alike here to make sure that we do not decode random data as a signature.
# this matters if/when signature sighash decoding comes along.
# would want to make sure that no such decoding takes place in this case.
signature_imposter = '48304502207fa7a6d1e0ee81132a269ad84e68d695483745cde8b541e3bf630749894e342a022100c1f7ab20e13e22fb95281a870f3dcf38d782e53023ee313d741ad0cfbc0c509001'
# OP_RETURN <data>
rpc_result = self.nodes[0].decodescript('6a' + signature_imposter)
assert_equal('OP_RETURN ' + signature_imposter[2:], rpc_result['asm'])
# 6) a CLTV redeem script. redeem scripts are in-effect scriptPubKey scripts, so adding a test here.
# OP_NOP2 is also known as OP_CHECKLOCKTIMEVERIFY.
# just imagine that the pub keys used below are different.
# for our purposes here it does not matter that they are the same even though it is unrealistic.
#
# OP_IF
# <receiver-pubkey> OP_CHECKSIGVERIFY
# OP_ELSE
# <lock-until> OP_CHECKLOCKTIMEVERIFY OP_DROP
# OP_ENDIF
# <sender-pubkey> OP_CHECKSIG
#
# lock until block 500,000
rpc_result = self.nodes[0].decodescript('63' + push_public_key + 'ad670320a107b17568' + push_public_key + 'ac')
assert_equal('OP_IF ' + public_key + ' OP_CHECKSIGVERIFY OP_ELSE 500000 OP_CHECKLOCKTIMEVERIFY OP_DROP OP_ENDIF ' + public_key + ' OP_CHECKSIG', rpc_result['asm'])
def decoderawtransaction_asm_sighashtype(self):
"""Test decoding scripts via RPC command "decoderawtransaction".
        This test is grouped with the "decodescript" tests because both exercise the same "asm" script decoding.
"""
# this test case uses a random plain vanilla mainnet transaction with a single P2PKH input and output
tx = '0100000001696a20784a2c70143f634e95227dbdfdf0ecd51647052e70854512235f5986ca010000008a47304402207174775824bec6c2700023309a168231ec80b82c6069282f5133e6f11cbb04460220570edc55c7c5da2ca687ebd0372d3546ebc3f810516a002350cac72dfe192dfb014104d3f898e6487787910a690410b7a917ef198905c27fb9d3b0a42da12aceae0544fc7088d239d9a48f2828a15a09e84043001f27cc80d162cb95404e1210161536ffffffff0100e1f505000000001976a914eb6c6e0cdb2d256a32d97b8df1fc75d1920d9bca88ac00000000'
rpc_result = self.nodes[0].decoderawtransaction(tx)
assert_equal('304402207174775824bec6c2700023309a168231ec80b82c6069282f5133e6f11cbb04460220570edc55c7c5da2ca687ebd0372d3546ebc3f810516a002350cac72dfe192dfb[ALL] 04d3f898e6487787910a690410b7a917ef198905c27fb9d3b0a42da12aceae0544fc7088d239d9a48f2828a15a09e84043001f27cc80d162cb95404e1210161536', rpc_result['vin'][0]['scriptSig']['asm'])
# this test case uses a mainnet transaction that has a P2SH input and both P2PKH and P2SH outputs.
# it's from James D'Angelo's awesome introductory videos about multisig: https://www.youtube.com/watch?v=zIbUSaZBJgU and https://www.youtube.com/watch?v=OSA1pwlaypc
# verify that we have not altered scriptPubKey decoding.
tx = '01000000018d1f5635abd06e2c7e2ddf58dc85b3de111e4ad6e0ab51bb0dcf5e84126d927300000000fdfe0000483045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea01483045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75014c695221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53aeffffffff02611e0000000000001976a914dc863734a218bfe83ef770ee9d41a27f824a6e5688acee2a02000000000017a9142a5edea39971049a540474c6a99edf0aa4074c588700000000'
rpc_result = self.nodes[0].decoderawtransaction(tx)
assert_equal('8e3730608c3b0bb5df54f09076e196bc292a8e39a78e73b44b6ba08c78f5cbb0', rpc_result['txid'])
assert_equal('0 3045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea[ALL] 3045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75[ALL] 5221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53ae', rpc_result['vin'][0]['scriptSig']['asm'])
assert_equal('OP_DUP OP_HASH160 dc863734a218bfe83ef770ee9d41a27f824a6e56 OP_EQUALVERIFY OP_CHECKSIG', rpc_result['vout'][0]['scriptPubKey']['asm'])
assert_equal('OP_HASH160 2a5edea39971049a540474c6a99edf0aa4074c58 OP_EQUAL', rpc_result['vout'][1]['scriptPubKey']['asm'])
txSave = CTransaction()
txSave.deserialize(BytesIO(hex_str_to_bytes(tx)))
# make sure that a specifically crafted op_return value will not pass all the IsDERSignature checks and then get decoded as a sighash type
tx = '01000000015ded05872fdbda629c7d3d02b194763ce3b9b1535ea884e3c8e765d42e316724020000006b48304502204c10d4064885c42638cbff3585915b322de33762598321145ba033fc796971e2022100bb153ad3baa8b757e30a2175bd32852d2e1cb9080f84d7e32fcdfd667934ef1b012103163c0ff73511ea1743fb5b98384a2ff09dd06949488028fd819f4d83f56264efffffffff0200000000000000000b6a0930060201000201000180380100000000001976a9141cabd296e753837c086da7a45a6c2fe0d49d7b7b88ac00000000'
rpc_result = self.nodes[0].decoderawtransaction(tx)
assert_equal('OP_RETURN 300602010002010001', rpc_result['vout'][0]['scriptPubKey']['asm'])
# verify that we have not altered scriptPubKey processing even of a specially crafted P2PKH pubkeyhash and P2SH redeem script hash that is made to pass the der signature checks
tx = '01000000018d1f5635abd06e2c7e2ddf58dc85b3de111e4ad6e0ab51bb0dcf5e84126d927300000000fdfe0000483045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea01483045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75014c695221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53aeffffffff02611e0000000000001976a914301102070101010101010102060101010101010188acee2a02000000000017a91430110207010101010101010206010101010101018700000000'
rpc_result = self.nodes[0].decoderawtransaction(tx)
assert_equal('OP_DUP OP_HASH160 3011020701010101010101020601010101010101 OP_EQUALVERIFY OP_CHECKSIG', rpc_result['vout'][0]['scriptPubKey']['asm'])
assert_equal('OP_HASH160 3011020701010101010101020601010101010101 OP_EQUAL', rpc_result['vout'][1]['scriptPubKey']['asm'])
# some more full transaction tests of varying specific scriptSigs. used instead of
# tests in decodescript_script_sig because the decodescript RPC is specifically
# for working on scriptPubKeys (argh!).
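        # extract the first signature push from the P2SH multisig scriptSig saved above:
        # skip the leading OP_0 ('00'), keep the 0x48 push opcode plus the 72 pushed bytes
        # (71-byte DER signature followed by the 1-byte sighash type)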
push_signature = bytes_to_hex_str(txSave.vin[0].scriptSig)[2:(0x48*2+4)]
signature = push_signature[2:]
der_signature = signature[:-2]
signature_sighash_decoded = der_signature + '[ALL]'
signature_2 = der_signature + '82'
push_signature_2 = '48' + signature_2
signature_2_sighash_decoded = der_signature + '[NONE|ANYONECANPAY]'
# 1) P2PK scriptSig
txSave.vin[0].scriptSig = hex_str_to_bytes(push_signature)
rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize()))
assert_equal(signature_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])
# make sure that the sighash decodes come out correctly for a more complex / lesser used case.
txSave.vin[0].scriptSig = hex_str_to_bytes(push_signature_2)
rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize()))
assert_equal(signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])
# 2) multisig scriptSig
txSave.vin[0].scriptSig = hex_str_to_bytes('00' + push_signature + push_signature_2)
rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize()))
assert_equal('0 ' + signature_sighash_decoded + ' ' + signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])
        # 3) test a scriptSig that contains more than just push operations.
# in fact, it contains an OP_RETURN with data specially crafted to cause improper decode if the code does not catch it.
txSave.vin[0].scriptSig = hex_str_to_bytes('6a143011020701010101010101020601010101010101')
rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize()))
assert_equal('OP_RETURN 3011020701010101010101020601010101010101', rpc_result['vin'][0]['scriptSig']['asm'])
def run_test(self):
self.decodescript_script_sig()
self.decodescript_script_pub_key()
self.decoderawtransaction_asm_sighashtype()
if __name__ == '__main__':
DecodeScriptTest().main()
|
skycucumber/Messaging-Gateway | refs/heads/master | webapp/venv/lib/python2.7/site-packages/twisted/web/test/test_script.py | 60 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.web.script}.
"""
import os
from twisted.trial.unittest import TestCase
from twisted.web.http import NOT_FOUND
from twisted.web.script import ResourceScriptDirectory, PythonScript
from twisted.web.test._util import _render
from twisted.web.test.test_web import DummyRequest
class ResourceScriptDirectoryTests(TestCase):
"""
Tests for L{ResourceScriptDirectory}.
"""
def test_render(self):
"""
L{ResourceScriptDirectory.render} sets the HTTP response code to I{NOT
FOUND}.
"""
resource = ResourceScriptDirectory(self.mktemp())
request = DummyRequest([''])
d = _render(resource, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, NOT_FOUND)
d.addCallback(cbRendered)
return d
def test_notFoundChild(self):
"""
        L{ResourceScriptDirectory.getChild} returns a resource which renders a
        response with the HTTP I{NOT FOUND} status code if the indicated child
        does not exist as an entry in the directory used to initialize the
        L{ResourceScriptDirectory}.
"""
path = self.mktemp()
os.makedirs(path)
resource = ResourceScriptDirectory(path)
request = DummyRequest(['foo'])
child = resource.getChild("foo", request)
d = _render(child, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, NOT_FOUND)
d.addCallback(cbRendered)
return d
class PythonScriptTests(TestCase):
"""
Tests for L{PythonScript}.
"""
def test_notFoundRender(self):
"""
If the source file a L{PythonScript} is initialized with doesn't exist,
L{PythonScript.render} sets the HTTP response code to I{NOT FOUND}.
"""
resource = PythonScript(self.mktemp(), None)
request = DummyRequest([''])
d = _render(resource, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, NOT_FOUND)
d.addCallback(cbRendered)
return d
|
stepan-perlov/jschema | refs/heads/master | jschema/context.py | 1 | import json
from .errors import JrsNodeNotFound
from .refs_resolver import RefsResolver
class Context(object):
def __init__(self):
self.schemas = {}
self.nodes = {}
self.refsResolver = RefsResolver(self)
def addSchema(self, schema):
self.schemas[schema.id] = schema
def addNode(self, schemaId, path, node):
self.nodes["{}#{}".format(schemaId, path)] = node
def getNode(self, schemaId, path):
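        # node keys are stored as "<schemaId>#<dotted.path>", so normalize any '/' separators first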
fullPath = "{}#{}".format(schemaId, path.replace("/", "."))
if fullPath not in self.nodes:
raise JrsNodeNotFound("Not found node with schemaId: {}, path: {}".format(schemaId, path))
return self.nodes[fullPath]
def initNodes(self):
for schema in self.schemas.values():
schema.root.initNodes()
def resolveRefs(self):
self.refsResolver.resolveRefs()
def toJson(self, prettyPrint):
schemas = {}
for item in self.schemas.values():
schemas[item.id] = item.root.value
if prettyPrint:
return json.dumps(schemas, separators=(",", ": "), indent=4) + "\n"
else:
return json.dumps(schemas, separators=(",", ":"))
|
dstelter92/RESTMD | refs/heads/master | tools/st-wham_RESTMD.py | 1 | #!/usr/bin/env python2.7
import os,sys
from numpy import *
########### ST-WHAM for python...##########
# Originally written in f90 by Jaegil Kim #
# Translated to Python by David Stelter #
###########################################
#
# CITE: http://dx.doi.org/10.1063/1.3626150
# Kim, J., Keyes, T., & Straub, J. E. (2011)
#
# WARNING: ONLY for use with RESTMD/STMD!
# http://dx.doi.org/10.1021/jp300366j
# http://dx.doi.org/10.1103/PhysRevLett.97.050601
#
# Requires list enthalpies for each replica
# in separate files AND the latest Ts array
# for use as the sampling weight, oREST
# files will work fine.
#
# Script will automatically read into histograms,
# run through the ST-WHAM machinery and calculate
# Ts(H) and Entropy based on the RESTMD sampling
# weight.
#
## UNITS!!! IMPORTANT!!!
kb = 0.0019872041 #kcal/mol*K
#kb = 0.000086173324 #ev/K
#kb = 0.0083144621 #kj/mol*K
#kb = 1.0 #reduced/LJ
def Falpha(i, j):
# Linear entropy interpolation based on Ts(H)
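    # Integrates dH/Ts(H) between bins i and j, treating Ts as linear in H across each bin.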
Falpha = 0
for indx in range(i+1,j):
if (TH[indx] == TH[indx-1]):
Falpha = Falpha + binsize/TH[indx]
else:
Falpha = Falpha + binsize/(TH[indx] - TH[indx-1])*log(TH[indx]/TH[indx-1])
return Falpha
## Input parameters from inp.stwham
print "ST-WHAM for (RE)STMD\n"
ifile = open('inp.stwham', 'r')
idata = ifile.readlines()
num_lines = len(idata)
if (num_lines != 8):
    print "Err: Invalid input: incorrect parameters, should be:\n"
print " binsize"
print " Elow"
print " Ehigh"
print " T0"
print " List_of_Tlow"
print " List_of_Thigh"
print " Path_to_data"
print " checkLIMIT\n"
print "Exiting..."
sys.exit()
## Cast inputs...
binsize = double(idata[0])
Emin = double(idata[1])
Emax = double(idata[2])
T0 = double(idata[3])
T1s = array(map(double, idata[4].split())) # List of Tlo
T2s = array(map(double, idata[5].split())) # List of Thi
workdir = idata[6].strip()
checklimit = double(idata[7]) # Cutoff for contribution from neighbor replicas
## Initialize outputs...
#hout = open('histogram_stwham.dat', 'w')
tout = open('Ts_stwham.dat', 'w')
fracout = open('fract_stwham.dat', 'w')
## Calculate some constants...
bmin = round(Emin/binsize)
bmax = round(Emax/binsize)
nbin = int(bmax - bmin + 1)
nReplica = len(T1s)
shape = (nbin, nReplica)
fullshape = (nbin, nReplica+1)
## Final checks...
if (nbin < 0):
print "Err: Emin must be smaller than Emax.\n"
sys.exit()
if (nReplica < 1):
print "Err: Must supply list of Tlo and Thi for each replica.\n"
sys.exit()
if (len(T1s) != len(T2s)):
print "Err: Must have Tlo and Thi for all replicas.\n"
sys.exit()
## Prepare raw enthalpy files into histograms...
print "Reading in raw data, writting to histograms...\n"
hist = zeros(shape)
edges = zeros(nbin)
print "Replica: "
for l in range(nReplica):
sys.stdout.write("%d<->%d " % (T1s[l], T2s[l]))
sys.stdout.flush()
data = loadtxt("%sreplica-%d.dat" % (workdir, l))
# Calculate histogram
hist[:,l], edges = histogramdd(ravel(data), bins=nbin, range=[(Emin, Emax)])
#for i in range(nbin):
#hout.write("%f %f\n" % (edges[0][i], hist[i][l]))
#hout.write("%f 0.000000\n" % Emax)
#hout.write("\n")
print "\n"
## Assign some arrays...
TH = zeros(nbin) # Statistical Temperature
Ent = zeros(nbin) # Entropy
PDF2D_RESTMD = zeros(fullshape) # Enthalpy distribution for all replicas and total data
Y2 = zeros(shape) # Latest sampling weight for all replicas
NumData = zeros(nReplica+1) # Count of data points
betaH = zeros(nbin)
betaW = zeros(nbin)
hfrac = zeros(shape) # Histogram fraction, useful for debugging
## Collect data, and normalize...
for l in range(nReplica):
Y2[:,l] = genfromtxt("%soREST.%d.d" % (workdir, l), skip_footer=2, skip_header=13, delimiter=" ")
for l in range(1,nReplica+1):
count = 0
for i in range(nbin):
PDF2D_RESTMD[i][l] = hist[i][l-1] # PDF of each replica
count = count + PDF2D_RESTMD[i][l]
PDF2D_RESTMD[i][0] = PDF2D_RESTMD[i][0] + PDF2D_RESTMD[i][l] # PDF of total data set
PDF2D_RESTMD[:,l] = PDF2D_RESTMD[:,l] / count
NumData[l] = count # Number of data in each replica
NumData[0] = NumData[0] + count # Total number of data
PDF2D_RESTMD[:,0] = PDF2D_RESTMD[:,0] / NumData[0] # Normalized PDF
## Throw out edge data under checklimit...
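# Find the first and last bins whose total PDF exceeds the cutoff, then trim 3 extra bins from each edge.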
bstart = None
bstop = None
for i in range(nbin):
if (PDF2D_RESTMD[i][0] > checklimit):
bstart = i + 3
break
if (bstart == None):
print "Err: Energy range not large enough, decrease Emin.\n"
sys.exit()
for i in range(nbin-1,bstart,-1):
if (PDF2D_RESTMD[i][0] > checklimit):
bstop = i - 3
break
if (bstop == None):
print "Err: Energy range not large enough, increase Emax.\n"
sys.exit()
## Calculate Ts(H)
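# ST-WHAM estimate of the statistical temperature: 1/Ts(H) = betaH + betaW, where betaH is a
# central-difference derivative of ln(PDF) with respect to H (scaled by kb) and betaW is the
# replica-weighted effective inverse sampling temperature taken from the latest Ts arrays (oREST files).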
for i in range(bstart,bstop):
if (PDF2D_RESTMD[i+1][0] > checklimit and PDF2D_RESTMD[i-1][0] > checklimit):
betaH[i] = (log(PDF2D_RESTMD[i+1][0] / PDF2D_RESTMD[i-1][0])) / (2*binsize / kb) #NOTE: UNITS important here!
else:
betaH[i] = 0
for l in range(1,nReplica+1):
# Calc B^eff_alpha
if (PDF2D_RESTMD[i][0] > 0): # ensure positive, no empty bins
if (Y2[i][l-1] <= 0):
w = 1 / 0.001
print "WARNING: Negative Temperature detected...\n"
else:
w = 1 / (Y2[i][l-1] * T0)
betaW[i] = betaW[i] + ((NumData[l] * PDF2D_RESTMD[i][l]) / (NumData[0] * PDF2D_RESTMD[i][0]) * w)
TH[i] = 1 / (betaH[i] + betaW[i])
#tout.write("%f %f %f %f\n" % (Emin+(i*binsize), TH[i], betaH[i], betaW[i]))
## Calculate histogram fraction...
for l in range(1,nReplica+1):
for i in range(bstart,bstop):
hfrac[i][l-1] = hist[i][l-1] / (PDF2D_RESTMD[i][0] * NumData[0])
fracout.write("%f %f\n" % (Emin+(i*binsize), hfrac[i][l-1]))
fracout.write("\n")
## Calculate entropy...
for i in range(bstart,bstop):
Ent[i] = Falpha(bstart, i)
tout.write("%f %f %f %f %f\n" % (Emin+(i*binsize), TH[i], Ent[i], betaH[i], betaW[i]))
sys.exit()
|
Ms2ger/servo | refs/heads/master | tests/wpt/web-platform-tests/2dcontext/tools/specextract.py | 75 | import html5lib
import html5lib.treebuilders.dom
# Expected use:
# curl --compressed http://www.whatwg.org/specs/web-apps/current-work/ >current-work
# python specextract.py
#
# Generates current-work-canvas.xhtml, for use by gentest.py to create the annotated spec document
def extract():
parser = html5lib.html5parser.HTMLParser(tree=html5lib.treebuilders.dom.TreeBuilder)
doc = parser.parse(open('current-work', "r"), encoding='utf-8')
head = doc.getElementsByTagName('head')[0]
for n in head.childNodes:
if n.tagName == 'script':
head.removeChild(n)
header = doc.getElementsByTagName('header')[0]
#thecanvas = doc.getElementById('the-canvas') # doesn't work (?!)
thecanvas = [ n for n in doc.getElementsByTagName('h4') if n.getAttribute('id') == 'the-canvas-element' ][0]
keep = [header, thecanvas]
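    # collect every sibling node from the canvas heading up to (but not including) the next h4 section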
node = thecanvas.nextSibling
while node.nodeName != 'h4':
keep.append(node)
node = node.nextSibling
p = thecanvas.parentNode
for n in p.childNodes[:]:
if n not in keep:
p.removeChild(n)
for n in header.childNodes[3:-4]:
header.removeChild(n)
def make_absolute(uri):
if uri.startswith('data:'):
return uri
elif uri[0] == '/':
return 'http://www.whatwg.org' + uri
else:
return 'http://www.whatwg.org/specs/web-apps/current-work/' + uri
# Fix the stylesheet, icon and image references
for e in doc.getElementsByTagName('link'):
e.setAttribute('href', make_absolute(e.getAttribute('href')))
for img in doc.getElementsByTagName('img'):
img.setAttribute('src', make_absolute(img.getAttribute('src')))
# Convert to XHTML, because it's quicker to re-parse than HTML5
doc.documentElement.setAttribute('xmlns', 'http://www.w3.org/1999/xhtml')
doc.documentElement.setAttribute('xml:lang', doc.documentElement.getAttribute('lang'))
doc.removeChild(doc.firstChild) # remove the DOCTYPE
open('current-work-canvas.xhtml', 'w').write(doc.toxml(encoding = 'UTF-8'))
extract()
|
DataDog/uwsgi | refs/heads/master | plugins/transformation_chunked/uwsgiplugin.py | 13 | NAME='transformation_chunked'
CFLAGS = []
LDFLAGS = []
LIBS = []
GCC_LIST = ['chunked']
|
andresriancho/PyGithub | refs/heads/master | github/GithubException.py | 8 | # -*- coding: utf-8 -*-
# Copyright 2012 Vincent Jacques
# [email protected]
# This file is part of PyGithub. http://vincent-jacques.net/PyGithub
# PyGithub is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License along with PyGithub. If not, see <http://www.gnu.org/licenses/>.
class GithubException(Exception):
def __init__(self, status, data):
Exception.__init__(self)
self.status = status
self.data = data
def __str__(self):
return str(self.status) + " " + str(self.data)
|
rsdevgun16e/energi | refs/heads/energi_v0 | qa/rpc-tests/rest.py | 37 | #!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test REST interface
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from struct import *
from io import BytesIO
from codecs import encode
import binascii
try:
import http.client as httplib
except ImportError:
import httplib
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
def deser_uint256(f):
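    # read a 256-bit integer serialized as eight little-endian 32-bit words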
r = 0
for i in range(8):
t = unpack(b"<I", f.read(4))[0]
r += t << (i * 32)
return r
#allows simple http get calls
def http_get_call(host, port, path, response_object = 0):
conn = httplib.HTTPConnection(host, port)
conn.request('GET', path)
if response_object:
return conn.getresponse()
return conn.getresponse().read().decode('utf-8')
#allows simple http post calls with a request body
def http_post_call(host, port, path, requestdata = '', response_object = 0):
conn = httplib.HTTPConnection(host, port)
conn.request('POST', path, requestdata)
if response_object:
return conn.getresponse()
return conn.getresponse().read()
class RESTTest (BitcoinTestFramework):
FORMAT_SEPARATOR = "."
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 3)
def setup_network(self, split=False):
self.nodes = start_nodes(3, self.options.tmpdir)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
self.is_network_split=False
self.sync_all()
def run_test(self):
url = urlparse.urlparse(self.nodes[0].url)
print "Mining blocks..."
self.nodes[0].generate(1)
self.sync_all()
self.nodes[2].generate(100)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), 500)
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
self.sync_all()
self.nodes[2].generate(1)
self.sync_all()
bb_hash = self.nodes[0].getbestblockhash()
assert_equal(self.nodes[1].getbalance(), Decimal("0.1")) #balance now should be 0.1 on node 1
# load the latest 0.1 tx over the REST API
json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+txid+self.FORMAT_SEPARATOR+"json")
json_obj = json.loads(json_string)
vintx = json_obj['vin'][0]['txid'] # get the vin to later check for utxo (should be spent by then)
# get n of 0.1 outpoint
n = 0
for vout in json_obj['vout']:
if vout['value'] == 0.1:
n = vout['n']
        #######################################
        # GETUTXOS: query an unspent outpoint #
        #######################################
json_request = '/checkmempool/'+txid+'-'+str(n)
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
#check chainTip response
assert_equal(json_obj['chaintipHash'], bb_hash)
#make sure there is one utxo
assert_equal(len(json_obj['utxos']), 1)
assert_equal(json_obj['utxos'][0]['value'], 0.1)
        #################################################
        # GETUTXOS: now query an already spent outpoint #
        #################################################
json_request = '/checkmempool/'+vintx+'-0'
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
#check chainTip response
assert_equal(json_obj['chaintipHash'], bb_hash)
        #make sure there is no utxo in the response because this outpoint has been spent
assert_equal(len(json_obj['utxos']), 0)
#check bitmap
assert_equal(json_obj['bitmap'], "0")
##################################################
# GETUTXOS: now check both with the same request #
##################################################
json_request = '/checkmempool/'+txid+'-'+str(n)+'/'+vintx+'-0'
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
assert_equal(len(json_obj['utxos']), 1)
assert_equal(json_obj['bitmap'], "10")
#test binary response
bb_hash = self.nodes[0].getbestblockhash()
binaryRequest = b'\x01\x02'
binaryRequest += hex_str_to_bytes(txid)
binaryRequest += pack("i", n)
binaryRequest += hex_str_to_bytes(vintx)
binaryRequest += pack("i", 0)
bin_response = http_post_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'bin', binaryRequest)
output = BytesIO()
output.write(bin_response)
output.seek(0)
chainHeight = unpack("i", output.read(4))[0]
hashFromBinResponse = hex(deser_uint256(output))[2:].zfill(65).rstrip("L")
assert_equal(bb_hash, hashFromBinResponse) #check if getutxo's chaintip during calculation was fine
assert_equal(chainHeight, 102) #chain height must be 102
############################
# GETUTXOS: mempool checks #
############################
# do a tx and don't sync
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+txid+self.FORMAT_SEPARATOR+"json")
json_obj = json.loads(json_string)
vintx = json_obj['vin'][0]['txid'] # get the vin to later check for utxo (should be spent by then)
# get n of 0.1 outpoint
n = 0
for vout in json_obj['vout']:
if vout['value'] == 0.1:
n = vout['n']
json_request = '/'+txid+'-'+str(n)
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
        assert_equal(len(json_obj['utxos']), 0) #no utxo is returned because the tx is still unconfirmed and /checkmempool was not requested
json_request = '/checkmempool/'+txid+'-'+str(n)
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
        assert_equal(len(json_obj['utxos']), 1) #there should be an outpoint because it has just been added to the mempool
#do some invalid requests
json_request = '{"checkmempool'
response = http_post_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'json', json_request, True)
assert_equal(response.status, 500) #must be a 500 because we send a invalid json request
json_request = '{"checkmempool'
response = http_post_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'bin', json_request, True)
assert_equal(response.status, 500) #must be a 500 because we send a invalid bin request
response = http_post_call(url.hostname, url.port, '/rest/getutxos/checkmempool'+self.FORMAT_SEPARATOR+'bin', '', True)
assert_equal(response.status, 500) #must be a 500 because we send a invalid bin request
#test limits
json_request = '/checkmempool/'
for x in range(0, 20):
json_request += txid+'-'+str(n)+'/'
json_request = json_request.rstrip("/")
response = http_post_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json', '', True)
        assert_equal(response.status, 500) #must be a 500 because we are exceeding the limit
json_request = '/checkmempool/'
for x in range(0, 15):
json_request += txid+'-'+str(n)+'/'
json_request = json_request.rstrip("/")
response = http_post_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json', '', True)
        assert_equal(response.status, 200) #must be a 200 because we are within the limit
self.nodes[0].generate(1) #generate block to not affect upcoming tests
self.sync_all()
################
# /rest/block/ #
################
# check binary format
response = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+"bin", True)
assert_equal(response.status, 200)
assert_greater_than(int(response.getheader('content-length')), 80)
response_str = response.read()
# compare with block header
response_header = http_get_call(url.hostname, url.port, '/rest/headers/1/'+bb_hash+self.FORMAT_SEPARATOR+"bin", True)
assert_equal(response_header.status, 200)
assert_equal(int(response_header.getheader('content-length')), 80)
response_header_str = response_header.read()
assert_equal(response_str[0:80], response_header_str)
# check block hex format
response_hex = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+"hex", True)
assert_equal(response_hex.status, 200)
assert_greater_than(int(response_hex.getheader('content-length')), 160)
response_hex_str = response_hex.read()
assert_equal(encode(response_str, "hex_codec")[0:160], response_hex_str[0:160])
# compare with hex block header
response_header_hex = http_get_call(url.hostname, url.port, '/rest/headers/1/'+bb_hash+self.FORMAT_SEPARATOR+"hex", True)
assert_equal(response_header_hex.status, 200)
assert_greater_than(int(response_header_hex.getheader('content-length')), 160)
response_header_hex_str = response_header_hex.read()
assert_equal(response_hex_str[0:160], response_header_hex_str[0:160])
assert_equal(encode(response_header_str, "hex_codec")[0:160], response_header_hex_str[0:160])
# check json format
block_json_string = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+'json')
block_json_obj = json.loads(block_json_string)
assert_equal(block_json_obj['hash'], bb_hash)
# compare with json block header
response_header_json = http_get_call(url.hostname, url.port, '/rest/headers/1/'+bb_hash+self.FORMAT_SEPARATOR+"json", True)
assert_equal(response_header_json.status, 200)
response_header_json_str = response_header_json.read().decode('utf-8')
json_obj = json.loads(response_header_json_str, parse_float=Decimal)
assert_equal(len(json_obj), 1) #ensure that there is one header in the json response
assert_equal(json_obj[0]['hash'], bb_hash) #request/response hash should be the same
#compare with normal RPC block response
rpc_block_json = self.nodes[0].getblock(bb_hash)
assert_equal(json_obj[0]['hash'], rpc_block_json['hash'])
assert_equal(json_obj[0]['confirmations'], rpc_block_json['confirmations'])
assert_equal(json_obj[0]['height'], rpc_block_json['height'])
assert_equal(json_obj[0]['version'], rpc_block_json['version'])
assert_equal(json_obj[0]['merkleroot'], rpc_block_json['merkleroot'])
assert_equal(json_obj[0]['time'], rpc_block_json['time'])
assert_equal(json_obj[0]['nonce'], rpc_block_json['nonce'])
assert_equal(json_obj[0]['bits'], rpc_block_json['bits'])
assert_equal(json_obj[0]['difficulty'], rpc_block_json['difficulty'])
assert_equal(json_obj[0]['chainwork'], rpc_block_json['chainwork'])
assert_equal(json_obj[0]['previousblockhash'], rpc_block_json['previousblockhash'])
#see if we can get 5 headers in one response
self.nodes[1].generate(5)
self.sync_all()
response_header_json = http_get_call(url.hostname, url.port, '/rest/headers/5/'+bb_hash+self.FORMAT_SEPARATOR+"json", True)
assert_equal(response_header_json.status, 200)
response_header_json_str = response_header_json.read().decode('utf-8')
json_obj = json.loads(response_header_json_str)
assert_equal(len(json_obj), 5) #now we should have 5 header objects
# do tx test
tx_hash = block_json_obj['tx'][0]['txid']
json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+tx_hash+self.FORMAT_SEPARATOR+"json")
json_obj = json.loads(json_string)
assert_equal(json_obj['txid'], tx_hash)
# check hex format response
hex_string = http_get_call(url.hostname, url.port, '/rest/tx/'+tx_hash+self.FORMAT_SEPARATOR+"hex", True)
assert_equal(hex_string.status, 200)
        assert_greater_than(int(hex_string.getheader('content-length')), 10)
# check block tx details
# let's make 3 tx and mine them on node 1
txs = []
txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11))
txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11))
txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11))
self.sync_all()
# check that there are exactly 3 transactions in the TX memory pool before generating the block
json_string = http_get_call(url.hostname, url.port, '/rest/mempool/info'+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
assert_equal(json_obj['size'], 3)
# the size of the memory pool should be greater than 3x ~100 bytes
assert_greater_than(json_obj['bytes'], 300)
# check that there are our submitted transactions in the TX memory pool
json_string = http_get_call(url.hostname, url.port, '/rest/mempool/contents'+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
for tx in txs:
assert_equal(tx in json_obj, True)
# now mine the transactions
newblockhash = self.nodes[1].generate(1)
self.sync_all()
#check if the 3 tx show up in the new block
json_string = http_get_call(url.hostname, url.port, '/rest/block/'+newblockhash[0]+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
for tx in json_obj['tx']:
if not 'coinbase' in tx['vin'][0]: #exclude coinbase
assert_equal(tx['txid'] in txs, True)
#check the same but without tx details
json_string = http_get_call(url.hostname, url.port, '/rest/block/notxdetails/'+newblockhash[0]+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
for tx in txs:
assert_equal(tx in json_obj['tx'], True)
#test rest bestblock
bb_hash = self.nodes[0].getbestblockhash()
json_string = http_get_call(url.hostname, url.port, '/rest/chaininfo.json')
json_obj = json.loads(json_string)
assert_equal(json_obj['bestblockhash'], bb_hash)
if __name__ == '__main__':
RESTTest ().main ()
|
itsjeyd/edx-platform | refs/heads/master | openedx/core/djangoapps/oauth_dispatch/models.py | 10 | """
Specialized models for oauth_dispatch djangoapp
"""
from datetime import datetime
from django.db import models
from pytz import utc
from oauth2_provider.settings import oauth2_settings
class RestrictedApplication(models.Model):
"""
This model lists which django-oauth-toolkit Applications are considered 'restricted'
and thus have a limited ability to use various APIs.
A restricted Application will only get expired token/JWT payloads
so that they cannot be used to call into APIs.
"""
application = models.ForeignKey(oauth2_settings.APPLICATION_MODEL, null=False)
def __unicode__(self):
"""
Return a unicode representation of this object
"""
return u"<RestrictedApplication '{name}'>".format(
name=self.application.name
)
@classmethod
def set_access_token_as_expired(cls, access_token):
"""
For access_tokens for RestrictedApplications, put the expire timestamp into the beginning of the epoch
which is Jan. 1, 1970
"""
access_token.expires = datetime(1970, 1, 1, tzinfo=utc)
@classmethod
def verify_access_token_as_expired(cls, access_token):
"""
For access_tokens for RestrictedApplications, make sure that the expiry date
is set at the beginning of the epoch which is Jan. 1, 1970
"""
return access_token.expires == datetime(1970, 1, 1, tzinfo=utc)
|
lcharleux/abapy | refs/heads/master | doc/example_code/postproc/HistoryOutput-average.py | 1 | from abapy.postproc import HistoryOutput
from math import sin, pi
N = 100
hist = HistoryOutput()
time = [pi / 2 * float(i)/N for i in xrange(N+1)]
data = [sin(t) for t in time]
hist.add_step(time_step = time, data_step = data)
time2 = [10., 11.]
data2 = [1., 1.]
hist.add_step(time_step = time2, data_step = data2)
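# Analytic reference value, assuming average() adds up the per-step time averages:
# the mean of sin over [0, pi/2] is 2/pi and the constant second step contributes 1.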
sol = 2. / pi + 1.
print 'Computed value:', hist.average()
print 'Analytic solution:', sol
print 'Relative error: {0:.4}%'.format( (hist.average() - sol)/sol * 100.)
|
android-ia/platform_tools_idea | refs/heads/master | plugins/hg4idea/testData/bin/mercurial/hook.py | 93 | # hook.py - hook support for mercurial
#
# Copyright 2007 Matt Mackall <[email protected]>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from i18n import _
import os, sys, time, types
import extensions, util, demandimport
def _pythonhook(ui, repo, name, hname, funcname, args, throw):
'''call python hook. hook is callable object, looked up as
name in python module. if callable returns "true", hook
fails, else passes. if hook raises exception, treated as
hook failure. exception propagates if throw is "true".
reason for "true" meaning "hook failed" is so that
unmodified commands (e.g. mercurial.commands.update) can
be run as hooks without wrappers to convert return values.'''
ui.note(_("calling hook %s: %s\n") % (hname, funcname))
starttime = time.time()
obj = funcname
if not util.safehasattr(obj, '__call__'):
d = funcname.rfind('.')
if d == -1:
raise util.Abort(_('%s hook is invalid ("%s" not in '
'a module)') % (hname, funcname))
modname = funcname[:d]
oldpaths = sys.path
if util.mainfrozen():
# binary installs require sys.path manipulation
modpath, modfile = os.path.split(modname)
if modpath and modfile:
sys.path = sys.path[:] + [modpath]
modname = modfile
try:
demandimport.disable()
obj = __import__(modname)
demandimport.enable()
except ImportError:
e1 = sys.exc_type, sys.exc_value, sys.exc_traceback
try:
# extensions are loaded with hgext_ prefix
obj = __import__("hgext_%s" % modname)
demandimport.enable()
except ImportError:
demandimport.enable()
e2 = sys.exc_type, sys.exc_value, sys.exc_traceback
if ui.tracebackflag:
ui.warn(_('exception from first failed import attempt:\n'))
ui.traceback(e1)
if ui.tracebackflag:
ui.warn(_('exception from second failed import attempt:\n'))
ui.traceback(e2)
raise util.Abort(_('%s hook is invalid '
'(import of "%s" failed)') %
(hname, modname))
sys.path = oldpaths
try:
for p in funcname.split('.')[1:]:
obj = getattr(obj, p)
except AttributeError:
raise util.Abort(_('%s hook is invalid '
'("%s" is not defined)') %
(hname, funcname))
if not util.safehasattr(obj, '__call__'):
raise util.Abort(_('%s hook is invalid '
'("%s" is not callable)') %
(hname, funcname))
try:
try:
# redirect IO descriptors to the ui descriptors so hooks
# that write directly to these don't mess up the command
# protocol when running through the command server
old = sys.stdout, sys.stderr, sys.stdin
sys.stdout, sys.stderr, sys.stdin = ui.fout, ui.ferr, ui.fin
r = obj(ui=ui, repo=repo, hooktype=name, **args)
except KeyboardInterrupt:
raise
except Exception, exc:
if isinstance(exc, util.Abort):
ui.warn(_('error: %s hook failed: %s\n') %
(hname, exc.args[0]))
else:
ui.warn(_('error: %s hook raised an exception: '
'%s\n') % (hname, exc))
if throw:
raise
ui.traceback()
return True
finally:
sys.stdout, sys.stderr, sys.stdin = old
duration = time.time() - starttime
readablefunc = funcname
if isinstance(funcname, types.FunctionType):
readablefunc = funcname.__module__ + "." + funcname.__name__
ui.log('pythonhook', 'pythonhook-%s: %s finished in %0.2f seconds\n',
name, readablefunc, duration)
if r:
if throw:
raise util.Abort(_('%s hook failed') % hname)
ui.warn(_('warning: %s hook failed\n') % hname)
return r
def _exthook(ui, repo, name, cmd, args, throw):
ui.note(_("running hook %s: %s\n") % (name, cmd))
starttime = time.time()
env = {}
for k, v in args.iteritems():
if util.safehasattr(v, '__call__'):
v = v()
if isinstance(v, dict):
# make the dictionary element order stable across Python
# implementations
v = ('{' +
', '.join('%r: %r' % i for i in sorted(v.iteritems())) +
'}')
env['HG_' + k.upper()] = v
if repo:
cwd = repo.root
else:
cwd = os.getcwd()
if 'HG_URL' in env and env['HG_URL'].startswith('remote:http'):
r = util.system(cmd, environ=env, cwd=cwd, out=ui)
else:
r = util.system(cmd, environ=env, cwd=cwd, out=ui.fout)
duration = time.time() - starttime
ui.log('exthook', 'exthook-%s: %s finished in %0.2f seconds\n',
name, cmd, duration)
if r:
desc, r = util.explainexit(r)
if throw:
raise util.Abort(_('%s hook %s') % (name, desc))
ui.warn(_('warning: %s hook %s\n') % (name, desc))
return r
def _allhooks(ui):
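    # collect configured hooks ordered by priority (highest first); insertion order breaks ties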
hooks = []
for name, cmd in ui.configitems('hooks'):
if not name.startswith('priority'):
priority = ui.configint('hooks', 'priority.%s' % name, 0)
hooks.append((-priority, len(hooks), name, cmd))
return [(k, v) for p, o, k, v in sorted(hooks)]
_redirect = False
def redirect(state):
global _redirect
_redirect = state
def hook(ui, repo, name, throw=False, **args):
if not ui.callhooks:
return False
r = False
oldstdout = -1
try:
for hname, cmd in _allhooks(ui):
if hname.split('.')[0] != name or not cmd:
continue
if oldstdout == -1 and _redirect:
try:
stdoutno = sys.__stdout__.fileno()
stderrno = sys.__stderr__.fileno()
# temporarily redirect stdout to stderr, if possible
if stdoutno >= 0 and stderrno >= 0:
sys.__stdout__.flush()
oldstdout = os.dup(stdoutno)
os.dup2(stderrno, stdoutno)
except (OSError, AttributeError):
# files seem to be bogus, give up on redirecting (WSGI, etc)
pass
if util.safehasattr(cmd, '__call__'):
r = _pythonhook(ui, repo, name, hname, cmd, args, throw) or r
elif cmd.startswith('python:'):
if cmd.count(':') >= 2:
path, cmd = cmd[7:].rsplit(':', 1)
path = util.expandpath(path)
if repo:
path = os.path.join(repo.root, path)
try:
mod = extensions.loadpath(path, 'hghook.%s' % hname)
except Exception:
ui.write(_("loading %s hook failed:\n") % hname)
raise
hookfn = getattr(mod, cmd)
else:
hookfn = cmd[7:].strip()
r = _pythonhook(ui, repo, name, hname, hookfn, args, throw) or r
else:
r = _exthook(ui, repo, hname, cmd, args, throw) or r
finally:
if _redirect and oldstdout >= 0:
os.dup2(oldstdout, stdoutno)
os.close(oldstdout)
return r
|
pupboss/xndian | refs/heads/master | deploy/site-packages/jinja2/testsuite/loader.py | 411 | # -*- coding: utf-8 -*-
"""
jinja2.testsuite.loader
~~~~~~~~~~~~~~~~~~~~~~~
Test the loaders.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
import tempfile
import shutil
import unittest
from jinja2.testsuite import JinjaTestCase, dict_loader, \
package_loader, filesystem_loader, function_loader, \
choice_loader, prefix_loader
from jinja2 import Environment, loaders
from jinja2._compat import PYPY, PY2
from jinja2.loaders import split_template_path
from jinja2.exceptions import TemplateNotFound
class LoaderTestCase(JinjaTestCase):
def test_dict_loader(self):
env = Environment(loader=dict_loader)
tmpl = env.get_template('justdict.html')
assert tmpl.render().strip() == 'FOO'
self.assert_raises(TemplateNotFound, env.get_template, 'missing.html')
def test_package_loader(self):
env = Environment(loader=package_loader)
tmpl = env.get_template('test.html')
assert tmpl.render().strip() == 'BAR'
self.assert_raises(TemplateNotFound, env.get_template, 'missing.html')
def test_filesystem_loader(self):
env = Environment(loader=filesystem_loader)
tmpl = env.get_template('test.html')
assert tmpl.render().strip() == 'BAR'
tmpl = env.get_template('foo/test.html')
assert tmpl.render().strip() == 'FOO'
self.assert_raises(TemplateNotFound, env.get_template, 'missing.html')
def test_choice_loader(self):
env = Environment(loader=choice_loader)
tmpl = env.get_template('justdict.html')
assert tmpl.render().strip() == 'FOO'
tmpl = env.get_template('test.html')
assert tmpl.render().strip() == 'BAR'
self.assert_raises(TemplateNotFound, env.get_template, 'missing.html')
def test_function_loader(self):
env = Environment(loader=function_loader)
tmpl = env.get_template('justfunction.html')
assert tmpl.render().strip() == 'FOO'
self.assert_raises(TemplateNotFound, env.get_template, 'missing.html')
def test_prefix_loader(self):
env = Environment(loader=prefix_loader)
tmpl = env.get_template('a/test.html')
assert tmpl.render().strip() == 'BAR'
tmpl = env.get_template('b/justdict.html')
assert tmpl.render().strip() == 'FOO'
self.assert_raises(TemplateNotFound, env.get_template, 'missing')
def test_caching(self):
changed = False
class TestLoader(loaders.BaseLoader):
def get_source(self, environment, template):
return u'foo', None, lambda: not changed
env = Environment(loader=TestLoader(), cache_size=-1)
tmpl = env.get_template('template')
assert tmpl is env.get_template('template')
changed = True
assert tmpl is not env.get_template('template')
changed = False
env = Environment(loader=TestLoader(), cache_size=0)
assert env.get_template('template') \
is not env.get_template('template')
env = Environment(loader=TestLoader(), cache_size=2)
t1 = env.get_template('one')
t2 = env.get_template('two')
assert t2 is env.get_template('two')
assert t1 is env.get_template('one')
t3 = env.get_template('three')
assert 'one' in env.cache
assert 'two' not in env.cache
assert 'three' in env.cache
def test_dict_loader_cache_invalidates(self):
mapping = {'foo': "one"}
env = Environment(loader=loaders.DictLoader(mapping))
assert env.get_template('foo').render() == "one"
mapping['foo'] = "two"
assert env.get_template('foo').render() == "two"
def test_split_template_path(self):
assert split_template_path('foo/bar') == ['foo', 'bar']
assert split_template_path('./foo/bar') == ['foo', 'bar']
self.assert_raises(TemplateNotFound, split_template_path, '../foo')
class ModuleLoaderTestCase(JinjaTestCase):
archive = None
def compile_down(self, zip='deflated', py_compile=False):
super(ModuleLoaderTestCase, self).setup()
log = []
self.reg_env = Environment(loader=prefix_loader)
if zip is not None:
self.archive = tempfile.mkstemp(suffix='.zip')[1]
else:
self.archive = tempfile.mkdtemp()
self.reg_env.compile_templates(self.archive, zip=zip,
log_function=log.append,
py_compile=py_compile)
self.mod_env = Environment(loader=loaders.ModuleLoader(self.archive))
return ''.join(log)
def teardown(self):
super(ModuleLoaderTestCase, self).teardown()
if hasattr(self, 'mod_env'):
if os.path.isfile(self.archive):
os.remove(self.archive)
else:
shutil.rmtree(self.archive)
self.archive = None
def test_log(self):
log = self.compile_down()
assert 'Compiled "a/foo/test.html" as ' \
'tmpl_a790caf9d669e39ea4d280d597ec891c4ef0404a' in log
assert 'Finished compiling templates' in log
assert 'Could not compile "a/syntaxerror.html": ' \
'Encountered unknown tag \'endif\'' in log
def _test_common(self):
tmpl1 = self.reg_env.get_template('a/test.html')
tmpl2 = self.mod_env.get_template('a/test.html')
assert tmpl1.render() == tmpl2.render()
tmpl1 = self.reg_env.get_template('b/justdict.html')
tmpl2 = self.mod_env.get_template('b/justdict.html')
assert tmpl1.render() == tmpl2.render()
def test_deflated_zip_compile(self):
self.compile_down(zip='deflated')
self._test_common()
def test_stored_zip_compile(self):
self.compile_down(zip='stored')
self._test_common()
def test_filesystem_compile(self):
self.compile_down(zip=None)
self._test_common()
def test_weak_references(self):
self.compile_down()
tmpl = self.mod_env.get_template('a/test.html')
key = loaders.ModuleLoader.get_template_key('a/test.html')
name = self.mod_env.loader.module.__name__
assert hasattr(self.mod_env.loader.module, key)
assert name in sys.modules
# unset all, ensure the module is gone from sys.modules
self.mod_env = tmpl = None
try:
import gc
gc.collect()
except:
pass
assert name not in sys.modules
# This test only makes sense on non-pypy python 2
if PY2 and not PYPY:
def test_byte_compilation(self):
log = self.compile_down(py_compile=True)
assert 'Byte-compiled "a/test.html"' in log
tmpl1 = self.mod_env.get_template('a/test.html')
mod = self.mod_env.loader.module. \
tmpl_3c4ddf650c1a73df961a6d3d2ce2752f1b8fd490
assert mod.__file__.endswith('.pyc')
def test_choice_loader(self):
log = self.compile_down()
self.mod_env.loader = loaders.ChoiceLoader([
self.mod_env.loader,
loaders.DictLoader({'DICT_SOURCE': 'DICT_TEMPLATE'})
])
tmpl1 = self.mod_env.get_template('a/test.html')
self.assert_equal(tmpl1.render(), 'BAR')
tmpl2 = self.mod_env.get_template('DICT_SOURCE')
self.assert_equal(tmpl2.render(), 'DICT_TEMPLATE')
def test_prefix_loader(self):
log = self.compile_down()
self.mod_env.loader = loaders.PrefixLoader({
'MOD': self.mod_env.loader,
'DICT': loaders.DictLoader({'test.html': 'DICT_TEMPLATE'})
})
tmpl1 = self.mod_env.get_template('MOD/a/test.html')
self.assert_equal(tmpl1.render(), 'BAR')
tmpl2 = self.mod_env.get_template('DICT/test.html')
self.assert_equal(tmpl2.render(), 'DICT_TEMPLATE')
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(LoaderTestCase))
suite.addTest(unittest.makeSuite(ModuleLoaderTestCase))
return suite
|
sam-m888/gramps | refs/heads/master | gramps/gen/user.py | 9 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2010 Brian G. Matherly
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
The User class provides basic interaction with the user.
"""
import sys
from abc import ABCMeta, abstractmethod
from contextlib import contextmanager
class UserBase(metaclass=ABCMeta):
"""
This class provides a means to interact with the user in an abstract way.
This class should be overridden by each respective user interface to
    provide the appropriate interaction (e.g. dialogs for GTK, prompts for CLI).
"""
def __init__(self, callback=None, error=None, uistate=None, dbstate=None):
self.callback_function = callback
self.error_function = error
self._fileout = sys.stderr # redirected to mocks by unit tests
self.uistate = uistate
self.dbstate = dbstate
@abstractmethod
def begin_progress(self, title, message, steps):
"""
Start showing a progress indicator to the user.
Don't use this method directly, use progress instead.
:param title: the title of the progress meter
:type title: str
:param message: the message associated with the progress meter
:type message: str
:param steps: the total number of steps for the progress meter.
a value of 0 indicates that the ending is unknown and the
meter should just show activity.
:type steps: int
:returns: none
"""
@abstractmethod
def step_progress(self):
"""
Advance the progress meter.
Don't use this method directly, use progress instead.
"""
def callback(self, percentage, text=None):
"""
        Display the percentage.
"""
if self.callback_function:
if text:
self.callback_function(percentage, text)
else:
self.callback_function(percentage)
else:
self._default_callback(percentage, text)
def _default_callback(self, percentage, text):
if text is None:
self._fileout.write("\r%02d%%" % percentage)
else:
self._fileout.write("\r%02d%% %s" % (percentage, text))
@abstractmethod
def end_progress(self):
"""
Stop showing the progress indicator to the user.
Don't use this method directly, use progress instead.
"""
# Context-manager wrapper of the begin/step/end_progress above
@contextmanager
def progress(self, *args, **kwargs):
"""
Preferred form of progress reporting.
Parameters: same as for begin_progress.
Usage example (see gramps/cli/test/user_test.py)::
with self.user.progress("Foo", "Bar", 0) as step:
for i in range(10):
step()
Ensures end_progress will be called even if an exception was thrown.
"""
self.begin_progress(*args, **kwargs)
try:
yield self.step_progress
except:
raise
finally:
self.end_progress()
@abstractmethod
def prompt(self, title, message, accept_label, reject_label, parent=None,
default_label=None):
"""
Prompt the user with a message to select an alternative.
:param title: the title of the question, e.g.: "Undo history warning"
:type title: str
:param message: the message, e.g.: "Proceeding with the tool will
erase the undo history. If you think you may want to revert
running this tool, please stop here and make a backup of the DB."
:type question: str
:param accept_label: what to call the positive choice, e.g.: "Proceed"
:type accept_label: str
:param reject_label: what to call the negative choice, e.g.: "Stop"
:type reject_label: str
:param default_label: the label of the default
:type default_label: str or None
:returns: the user's answer to the question
:rtype: bool
"""
@abstractmethod
def warn(self, title, warning=""):
"""
Warn the user.
:param title: the title of the warning
:type title: str
:param warning: the warning
:type warning: str
:returns: none
"""
@abstractmethod
def notify_error(self, title, error=""):
"""
Notify the user of an error.
:param title: the title of the error
:type title: str
:param error: the error message
:type error: str
:returns: none
"""
@abstractmethod
def notify_db_error(self, error):
"""
Notify the user of a DB error.
:param error: the error message
:type error: str
:returns: none
"""
@abstractmethod
def notify_db_repair(self, error):
"""
Notify the user their DB might need repair.
:param error: the error message
:type error: str
:returns: none
"""
@abstractmethod
def info(self, msg1, infotext, parent=None, monospaced=False):
"""
Displays information to the user
"""
class User(UserBase):
"""
An implementation of the :class:`.gen.user.UserBase` class which supresses
output and accepts prompts. This is useful for unit tests.
"""
def __init__(self, callback=None, error=None, uistate=None, dbstate=None):
UserBase.__init__(self, callback=self.__cb)
def __cb(self, percent, text=None):
return
def begin_progress(self, title, message, steps):
pass
def step_progress(self):
pass
def end_progress(self):
pass
def prompt(self, title, message, accept_label, reject_label, parent=None,
default_label=None):
return True
def warn(self, title, warning=""):
pass
def notify_error(self, title, error=""):
pass
def notify_db_error(self, error):
pass
def notify_db_repair(self, error):
pass
def info(self, msg1, infotext, parent=None, monospaced=False):
pass
|
8l/beri | refs/heads/master | cheritest/trunk/tests/alu/test_raw_srl.py | 2 | #-
# Copyright (c) 2014 Michael Roe
# All rights reserved.
#
# This software was developed by SRI International and the University of
# Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
# ("CTSRD"), as part of the DARPA CRASH research programme.
#
# @BERI_LICENSE_HEADER_START@
#
# Licensed to BERI Open Systems C.I.C. (BERI) under one or more contributor
# license agreements. See the NOTICE file distributed with this work for
# additional information regarding copyright ownership. BERI licenses this
# file to you under the BERI Hardware-Software License, Version 1.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at:
#
# http://www.beri-open-systems.org/legal/license-1-0.txt
#
# Unless required by applicable law or agreed to in writing, Work distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# @BERI_LICENSE_HEADER_END@
#
from beritest_tools import BaseBERITestCase
from nose.plugins.attrib import attr
class test_raw_srl(BaseBERITestCase):
def test_srl_0(self):
'''Test SRL by 0 bits'''
self.assertRegisterEqual(self.MIPS.a0, 0x76543210, "SRL by 0 bits failed")
def test_srl_1(self):
'''Test SRL by 1 bit'''
self.assertRegisterEqual(self.MIPS.a1, 0x3b2a1908, "SRL by 1 bit failed")
def test_srl_16(self):
'''Test SRL by 16 bits'''
self.assertRegisterEqual(self.MIPS.a2, 0x7654, "SRL by 16 bits failed")
def test_srl_31(self):
'''Test SRL by 31 bits'''
self.assertRegisterEqual(self.MIPS.a3, 0x0, "SRL by 31 bits failed")
def test_srl_0_neg(self):
'''Test SRL by 0 bits of a negative value'''
self.assertRegisterEqual(self.MIPS.a4, 0xfffffffffedcba98, "SRL by 0 bits of a negative value failed")
def test_srl_1_neg(self):
'''Test SRL by 1 bits of a negative value'''
self.assertRegisterEqual(self.MIPS.a5, 0x7f6e5d4c, "SRL by 1 bit of a negative value failed")
def test_srl_16_neg(self):
'''Test SRL by 16 bits of a negative value'''
self.assertRegisterEqual(self.MIPS.a6, 0xfedc, "SRL by 16 bits of a negative value failed")
def test_srl_31_neg(self):
'''Test SRL by 31 bits of a negative value'''
self.assertRegisterEqual(self.MIPS.a7, 0x1, "SRL by 31 bits of a negative value failed")
|
sussexstudent/falmer | refs/heads/master | falmer/matte/tests.py | 24123 | from django.test import TestCase
# Create your tests here.
|
ariakerstein/twitterFlaskClone | refs/heads/master | project/lib/python2.7/site-packages/werkzeug/contrib/limiter.py | 365 | # -*- coding: utf-8 -*-
"""
werkzeug.contrib.limiter
~~~~~~~~~~~~~~~~~~~~~~~~
A middleware that limits incoming data. This works around problems with
Trac_ or Django_ because those directly stream into the memory.
.. _Trac: http://trac.edgewall.org/
.. _Django: http://www.djangoproject.com/
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from warnings import warn
from werkzeug.wsgi import LimitedStream
class StreamLimitMiddleware(object):
"""Limits the input stream to a given number of bytes. This is useful if
you have a WSGI application that reads form data into memory (django for
example) and you don't want users to harm the server by uploading tons of
data.
Default is 10MB
.. versionchanged:: 0.9
Deprecated middleware.
"""
def __init__(self, app, maximum_size=1024 * 1024 * 10):
warn(DeprecationWarning('This middleware is deprecated'))
self.app = app
self.maximum_size = maximum_size
def __call__(self, environ, start_response):
limit = min(self.maximum_size, int(environ.get('CONTENT_LENGTH') or 0))
environ['wsgi.input'] = LimitedStream(environ['wsgi.input'], limit)
return self.app(environ, start_response)
|
h2oai/h2o | refs/heads/master | py/testdir_single_jvm/test_NN2_mnist.py | 9 | import unittest, time, sys, random, string
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_gbm, h2o_cmd, h2o_import as h2i, h2o_browse as h2b
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
h2o.init(java_heap_GB=2)
@classmethod
def tearDownClass(cls):
###h2o.sleep(3600)
h2o.tear_down_cloud()
def test_NN_mnist(self):
#h2b.browseTheCloud()
csvPathname_train = 'mnist/train.csv.gz'
csvPathname_test = 'mnist/test.csv.gz'
hex_key = 'mnist_train.hex'
validation_key = 'mnist_test.hex'
timeoutSecs = 30
parseResult = h2i.import_parse(bucket='smalldata', path=csvPathname_train, schema='put', hex_key=hex_key, timeoutSecs=timeoutSecs)
parseResultV = h2i.import_parse(bucket='smalldata', path=csvPathname_test, schema='put', hex_key=validation_key, timeoutSecs=timeoutSecs)
inspect = h2o_cmd.runInspect(None, hex_key)
print "\n" + csvPathname_train, \
" numRows:", "{:,}".format(inspect['numRows']), \
" numCols:", "{:,}".format(inspect['numCols'])
response = inspect['numCols'] - 1
#Making random id
identifier = ''.join(random.sample(string.ascii_lowercase + string.digits, 10))
model_key = 'nn_' + identifier + '.hex'
kwargs = {
'ignored_cols' : None,
'response' : response,
'classification' : 1,
'activation' : 'RectifierWithDropout',
'input_dropout_ratio' : 0.2,
'hidden' : '117,131,129',
'adaptive_rate' : 0,
'rate' : 0.005,
'rate_annealing' : 1e-6,
'momentum_start' : 0.5,
'momentum_ramp' : 100000,
'momentum_stable' : 0.9,
'l1' : 0.00001,
'l2' : 0.0000001,
'seed' : 98037452452,
'loss' : 'CrossEntropy',
'max_w2' : 15,
'initial_weight_distribution' : 'UniformAdaptive',
#'initial_weight_scale' : 0.01,
'epochs' : 2.0,
'destination_key' : model_key,
'validation' : validation_key,
'score_interval' : 10000
}
expectedErr = 0.057 ## expected validation error for the above model
relTol = 0.20 ## 20% rel. error tolerance due to Hogwild!
timeoutSecs = 600
start = time.time()
nn = h2o_cmd.runDeepLearning(parseResult=parseResult, timeoutSecs=timeoutSecs, **kwargs)
print "neural net end on ", csvPathname_train, " and ", csvPathname_test, 'took', time.time() - start, 'seconds'
predict_key = 'score_' + identifier + '.hex'
kwargs = {
'data_key': validation_key,
'destination_key': predict_key,
'model_key': model_key
}
predictResult = h2o_cmd.runPredict(timeoutSecs=timeoutSecs, **kwargs)
h2o_cmd.runInspect(key=predict_key, verbose=True)
kwargs = {
}
predictCMResult = h2o.nodes[0].predict_confusion_matrix(
actual=validation_key,
vactual=response,
predict=predict_key,
vpredict='predict',
timeoutSecs=timeoutSecs, **kwargs)
cm = predictCMResult['cm']
print h2o_gbm.pp_cm(cm)
actualErr = h2o_gbm.pp_cm_summary(cm)/100.;
print "actual classification error:" + format(actualErr)
print "expected classification error:" + format(expectedErr)
if actualErr != expectedErr and abs((expectedErr - actualErr)/expectedErr) > relTol:
raise Exception("Scored classification error of %s is not within %s %% relative error of %s" %
(actualErr, float(relTol)*100, expectedErr))
if __name__ == '__main__':
h2o.unit_main()
|
Bulochkin/tensorflow_pack | refs/heads/master | tensorflow/python/kernel_tests/sparse_cross_op_test.py | 72 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for sparse_cross_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
class SparseCrossOpTest(test.TestCase):
def test_simple(self):
"""Tests a simple scenario."""
op = sparse_ops._sparse_cross([
self._sparse_tensor([['batch1-FC1-F1'],
['batch2-FC1-F1', 'batch2-FC1-F2']]),
self._sparse_tensor([['batch1-FC2-F1'],
['batch2-FC2-F1', 'batch2-FC2-F2']])
])
expected_out = self._sparse_tensor([['batch1-FC1-F1_X_batch1-FC2-F1'], [
'batch2-FC1-F1_X_batch2-FC2-F1', 'batch2-FC1-F1_X_batch2-FC2-F2',
'batch2-FC1-F2_X_batch2-FC2-F1', 'batch2-FC1-F2_X_batch2-FC2-F2'
]])
with self.test_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_dense(self):
"""Tests only dense inputs."""
op = sparse_ops._sparse_cross([
constant_op.constant([['batch1-FC1-F1', 'batch1-FC1-F2'],
['batch2-FC1-F1', 'batch2-FC1-F2']],
dtypes.string),
constant_op.constant([['batch1-FC2-F1', 'batch1-FC2-F2'],
['batch2-FC2-F1', 'batch2-FC2-F2']],
dtypes.string),
])
expected_out = self._sparse_tensor([[
'batch1-FC1-F1_X_batch1-FC2-F1', 'batch1-FC1-F1_X_batch1-FC2-F2',
'batch1-FC1-F2_X_batch1-FC2-F1', 'batch1-FC1-F2_X_batch1-FC2-F2'
], [
'batch2-FC1-F1_X_batch2-FC2-F1', 'batch2-FC1-F1_X_batch2-FC2-F2',
'batch2-FC1-F2_X_batch2-FC2-F1', 'batch2-FC1-F2_X_batch2-FC2-F2'
]])
with self.test_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_integer_mixed_string_sparse(self):
"""Tests mixed type."""
op = sparse_ops._sparse_cross([
self._sparse_tensor([[11], [333, 55555]]),
self._sparse_tensor([['batch1-FC2-F1'],
['batch2-FC2-F1', 'batch2-FC2-F2']])
])
expected_out = self._sparse_tensor([['11_X_batch1-FC2-F1'], [
'333_X_batch2-FC2-F1', '333_X_batch2-FC2-F2', '55555_X_batch2-FC2-F1',
'55555_X_batch2-FC2-F2'
]])
with self.test_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_integer_mixed_string_dense(self):
"""Tests mixed dense inputs."""
op = sparse_ops._sparse_cross([
constant_op.constant([[11, 333], [55555, 999999]], dtypes.int64),
constant_op.constant([['batch1-FC2-F1', 'batch1-FC2-F2'],
['batch2-FC2-F1', 'batch2-FC2-F2']],
dtypes.string),
])
expected_out = self._sparse_tensor([[
'11_X_batch1-FC2-F1', '11_X_batch1-FC2-F2', '333_X_batch1-FC2-F1',
'333_X_batch1-FC2-F2'
], [
'55555_X_batch2-FC2-F1', '55555_X_batch2-FC2-F2',
'999999_X_batch2-FC2-F1', '999999_X_batch2-FC2-F2'
]])
with self.test_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_sparse_cross_dense(self):
"""Tests sparse and dense inputs."""
op = sparse_ops._sparse_cross([
self._sparse_tensor([['batch1-FC1-F1'],
['batch2-FC1-F1', 'batch2-FC1-F2']]),
constant_op.constant([['batch1-FC2-F1', 'batch1-FC2-F2'],
['batch2-FC2-F1', 'batch2-FC2-F2']],
dtypes.string),
])
expected_out = self._sparse_tensor(
[['batch1-FC1-F1_X_batch1-FC2-F1', 'batch1-FC1-F1_X_batch1-FC2-F2'], [
'batch2-FC1-F1_X_batch2-FC2-F1', 'batch2-FC1-F1_X_batch2-FC2-F2',
'batch2-FC1-F2_X_batch2-FC2-F1', 'batch2-FC1-F2_X_batch2-FC2-F2'
]])
with self.test_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_integer_sparse_input(self):
"""Tests mixed type sparse and dense inputs."""
op = sparse_ops._sparse_cross([
self._sparse_tensor([[11], [333, 5555]]),
constant_op.constant([['batch1-FC2-F1', 'batch1-FC2-F2'],
['batch2-FC2-F1', 'batch2-FC2-F2']],
dtypes.string),
])
expected_out = self._sparse_tensor(
[['11_X_batch1-FC2-F1', '11_X_batch1-FC2-F2'], [
'333_X_batch2-FC2-F1', '333_X_batch2-FC2-F2',
'5555_X_batch2-FC2-F1', '5555_X_batch2-FC2-F2'
]])
with self.test_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_permutation_3x3x3(self):
"""Tests 3x3x3 permutation."""
op = sparse_ops._sparse_cross([
self._sparse_tensor(
[['batch1-FC1-F1', 'batch1-FC1-F2', 'batch1-FC1-F3']]),
self._sparse_tensor(
[['batch1-FC2-F1', 'batch1-FC2-F2', 'batch1-FC2-F3']]),
self._sparse_tensor(
[['batch1-FC3-F1', 'batch1-FC3-F2', 'batch1-FC3-F3']])
])
expected_out = self._sparse_tensor([[
'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F1',
'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F2',
'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F3',
'batch1-FC1-F1_X_batch1-FC2-F2_X_batch1-FC3-F1',
'batch1-FC1-F1_X_batch1-FC2-F2_X_batch1-FC3-F2',
'batch1-FC1-F1_X_batch1-FC2-F2_X_batch1-FC3-F3',
'batch1-FC1-F1_X_batch1-FC2-F3_X_batch1-FC3-F1',
'batch1-FC1-F1_X_batch1-FC2-F3_X_batch1-FC3-F2',
'batch1-FC1-F1_X_batch1-FC2-F3_X_batch1-FC3-F3',
'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F1',
'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F2',
'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F3',
'batch1-FC1-F2_X_batch1-FC2-F2_X_batch1-FC3-F1',
'batch1-FC1-F2_X_batch1-FC2-F2_X_batch1-FC3-F2',
'batch1-FC1-F2_X_batch1-FC2-F2_X_batch1-FC3-F3',
'batch1-FC1-F2_X_batch1-FC2-F3_X_batch1-FC3-F1',
'batch1-FC1-F2_X_batch1-FC2-F3_X_batch1-FC3-F2',
'batch1-FC1-F2_X_batch1-FC2-F3_X_batch1-FC3-F3',
'batch1-FC1-F3_X_batch1-FC2-F1_X_batch1-FC3-F1',
'batch1-FC1-F3_X_batch1-FC2-F1_X_batch1-FC3-F2',
'batch1-FC1-F3_X_batch1-FC2-F1_X_batch1-FC3-F3',
'batch1-FC1-F3_X_batch1-FC2-F2_X_batch1-FC3-F1',
'batch1-FC1-F3_X_batch1-FC2-F2_X_batch1-FC3-F2',
'batch1-FC1-F3_X_batch1-FC2-F2_X_batch1-FC3-F3',
'batch1-FC1-F3_X_batch1-FC2-F3_X_batch1-FC3-F1',
'batch1-FC1-F3_X_batch1-FC2-F3_X_batch1-FC3-F2',
'batch1-FC1-F3_X_batch1-FC2-F3_X_batch1-FC3-F3'
]])
with self.test_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_permutation_3x1x2(self):
"""Tests 3x1x2 permutation."""
op = sparse_ops._sparse_cross([
self._sparse_tensor(
[['batch1-FC1-F1', 'batch1-FC1-F2', 'batch1-FC1-F3']]),
self._sparse_tensor([['batch1-FC2-F1']]),
self._sparse_tensor([['batch1-FC3-F1', 'batch1-FC3-F2']])
])
expected_out = self._sparse_tensor([[
'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F1',
'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F2',
'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F1',
'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F2',
'batch1-FC1-F3_X_batch1-FC2-F1_X_batch1-FC3-F1',
'batch1-FC1-F3_X_batch1-FC2-F1_X_batch1-FC3-F2'
]])
with self.test_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_large_batch(self):
"""Tests with large batch size to force multithreading."""
batch_size = 5000
col1 = []
col2 = []
col3 = []
for b in range(batch_size):
col1.append(
['batch%d-FC1-F1' % b, 'batch%d-FC1-F2' % b, 'batch%d-FC1-F3' % b])
col2.append(['batch%d-FC2-F1' % b])
col3.append(['batch%d-FC3-F1' % b, 'batch%d-FC3-F2' % b])
op = sparse_ops._sparse_cross([
self._sparse_tensor(col1), self._sparse_tensor(col2),
self._sparse_tensor(col3)
])
col_out = []
for b in range(batch_size):
col_out.append([
'batch%d-FC1-F1_X_batch%d-FC2-F1_X_batch%d-FC3-F1' % (b, b, b),
'batch%d-FC1-F1_X_batch%d-FC2-F1_X_batch%d-FC3-F2' % (b, b, b),
'batch%d-FC1-F2_X_batch%d-FC2-F1_X_batch%d-FC3-F1' % (b, b, b),
'batch%d-FC1-F2_X_batch%d-FC2-F1_X_batch%d-FC3-F2' % (b, b, b),
'batch%d-FC1-F3_X_batch%d-FC2-F1_X_batch%d-FC3-F1' % (b, b, b),
'batch%d-FC1-F3_X_batch%d-FC2-F1_X_batch%d-FC3-F2' % (b, b, b)
])
expected_out = self._sparse_tensor(col_out)
with self.test_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_one_column_empty(self):
"""Tests when one column is empty.
The crossed tensor should be empty.
"""
op = sparse_ops._sparse_cross([
self._sparse_tensor([['batch1-FC1-F1', 'batch1-FC1-F2']]),
self._sparse_tensor([], 1),
self._sparse_tensor([['batch1-FC3-F1', 'batch1-FC3-F2']])
])
with self.test_session() as sess:
self._assert_sparse_tensor_empty(sess.run(op))
def test_some_columns_empty(self):
"""Tests when more than one columns are empty.
Cross for the corresponding batch should be empty.
"""
op = sparse_ops._sparse_cross([
self._sparse_tensor([['batch1-FC1-F1', 'batch1-FC1-F2']], 2),
self._sparse_tensor([['batch1-FC2-F1'], ['batch2-FC2-F1']], 2),
self._sparse_tensor([['batch1-FC3-F1', 'batch1-FC3-F2']], 2)
])
expected_out = self._sparse_tensor([[
'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F1',
'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F2',
'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F1',
'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F2'
]], 2)
with self.test_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_all_columns_empty(self):
"""Tests when all columns are empty.
The crossed tensor should be empty.
"""
op = sparse_ops._sparse_cross([
self._sparse_tensor([]), self._sparse_tensor([]),
self._sparse_tensor([])
])
with self.test_session() as sess:
self._assert_sparse_tensor_empty(sess.run(op))
def test_hashed_zero_bucket_no_hash_key(self):
op = sparse_ops._sparse_cross_hashed(
[
self._sparse_tensor([['batch1-FC1-F1']]),
self._sparse_tensor([['batch1-FC2-F1']]),
self._sparse_tensor([['batch1-FC3-F1']])
])
# Check actual hashed output to prevent unintentional hashing changes.
expected_out = self._sparse_tensor([[1971693436396284976]])
with self.test_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_hashed_zero_bucket(self):
op = sparse_ops._sparse_cross_hashed(
[
self._sparse_tensor([['batch1-FC1-F1']]),
self._sparse_tensor([['batch1-FC2-F1']]),
self._sparse_tensor([['batch1-FC3-F1']])
],
hash_key=sparse_ops._DEFAULT_HASH_KEY + 1)
# Check actual hashed output to prevent unintentional hashing changes.
expected_out = self._sparse_tensor([[4847552627144134031]])
with self.test_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
# TODO(sibyl-Aix6ihai): Add benchmark to compare Hashed vs Non-hashed.
def test_hashed_no_hash_key(self):
op = sparse_ops._sparse_cross_hashed(
[
self._sparse_tensor([['batch1-FC1-F1']]),
self._sparse_tensor([['batch1-FC2-F1']]),
self._sparse_tensor([['batch1-FC3-F1']])
],
num_buckets=100)
# Check actual hashed output to prevent unintentional hashing changes.
expected_out = self._sparse_tensor([[83]])
with self.test_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_hashed_output(self):
op = sparse_ops._sparse_cross_hashed(
[
self._sparse_tensor([['batch1-FC1-F1']]),
self._sparse_tensor([['batch1-FC2-F1']]),
self._sparse_tensor([['batch1-FC3-F1']])
],
num_buckets=100,
hash_key=sparse_ops._DEFAULT_HASH_KEY + 1)
# Check actual hashed output to prevent unintentional hashing changes.
expected_out = self._sparse_tensor([[31]])
with self.test_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_hashed__has_no_collision(self):
"""Tests that fingerprint concatenation has no collisions."""
# Although the last 10 bits of 359 and 1024+359 are identical.
# As a result, all the crosses shouldn't collide.
t1 = constant_op.constant([[359], [359 + 1024]])
t2 = constant_op.constant([list(range(10)), list(range(10))])
cross = sparse_ops._sparse_cross_hashed(
[t2, t1],
num_buckets=1024,
hash_key=sparse_ops._DEFAULT_HASH_KEY + 1)
cross_dense = sparse_ops.sparse_tensor_to_dense(cross)
with session.Session():
values = cross_dense.eval()
self.assertTrue(numpy.not_equal(values[0], values[1]).all())
def test_hashed_3x1x2(self):
"""Tests 3x1x2 permutation with hashed output."""
op = sparse_ops._sparse_cross_hashed(
[
self._sparse_tensor(
[['batch1-FC1-F1', 'batch1-FC1-F2', 'batch1-FC1-F3']]),
self._sparse_tensor([['batch1-FC2-F1']]),
self._sparse_tensor([['batch1-FC3-F1', 'batch1-FC3-F2']])
],
num_buckets=1000)
with self.test_session() as sess:
out = sess.run(op)
self.assertEqual(6, len(out.values))
self.assertAllEqual([[0, i] for i in range(6)], out.indices)
self.assertTrue(all(x < 1000 and x >= 0 for x in out.values))
all_values_are_different = len(out.values) == len(set(out.values))
self.assertTrue(all_values_are_different)
def _assert_sparse_tensor_empty(self, sp):
self.assertEquals(0, sp.indices.size)
self.assertEquals(0, sp.values.size)
# TODO(zakaria): check if we can ignore the first dim of the shape.
self.assertEquals(0, sp.dense_shape[1])
def _assert_sparse_tensor_equals(self, sp1, sp2):
self.assertAllEqual(sp1.indices.eval(), sp2.indices)
self.assertAllEqual(sp1.values.eval(), sp2.values)
self.assertAllEqual(sp1.dense_shape.eval(), sp2.dense_shape)
def _sparse_tensor(self, data, batch_size=-1):
"""Generates a SparseTensor.
Args:
data: Should be a list of list of strings or int64. Each item of the outer
list represents a batch. Each item of the batch is a feature of a
specific feature column.
batch_size: optional batch size, especially for cases when data has no
entry for some batches.
Returns:
A SparseTensor.
"""
indices = []
values = []
max_col_count = 0
for batch, batch_ix in zip(data, range(len(data))):
for column, column_ix in zip(batch, range(len(batch))):
indices.append([batch_ix, column_ix])
values.append(column)
max_col_count = max(max_col_count, column_ix + 1)
shape = [batch_size if batch_size != -1 else len(data), max_col_count]
value_type = (dtypes.string if not values or isinstance(values[0], str) else
dtypes.int64)
return sparse_tensor.SparseTensor(
constant_op.constant(indices, dtypes.int64, [len(indices), 2]),
constant_op.constant(values, value_type, [len(indices)]),
constant_op.constant(shape, dtypes.int64))
if __name__ == '__main__':
test.main()
|
QijunPan/ansible | refs/heads/devel | lib/ansible/utils/module_docs_fragments/netapp.py | 2 | #
# (c) 2016, Sumit Kumar <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
DOCUMENTATION = """
options:
- See respective platform section for more details
requirements:
- See respective platform section for more details
notes:
- Ansible modules are available for the following NetApp Storage Platforms: E-Series, ONTAP, SolidFire
"""
# Documentation fragment for ONTAP
ONTAP = """
options:
hostname:
required: true
description:
- The hostname or IP address of the ONTAP instance.
username:
required: true
description:
- This can be a Cluster-scoped or SVM-scoped account, depending on whether a Cluster-level or SVM-level API is required. For more information, please read the documentation U(https://goo.gl/BRu78Z).
password:
required: true
description:
- Password for the specified user.
requirements:
- A physical or virtual clustered Data ONTAP system. The modules were developed with Clustered Data ONTAP 8.3
- Ansible 2.2
- netapp-lib (2015.9.25). Install using 'pip install netapp-lib'
notes:
- The modules prefixed with C(netapp\_cdot) are built to support the ONTAP storage platform.
"""
# Documentation fragment for SolidFire
SOLIDFIRE = """
options:
hostname:
required: true
description:
- The hostname or IP address of the SolidFire cluster.
username:
required: true
description:
- Please ensure that the user has the adequate permissions. For more information, please read the official documentation U(https://goo.gl/ddJa4Q).
password:
required: true
description:
- Password for the specified user.
requirements:
- solidfire-sdk-python (1.1.0.92)
notes:
- The modules prefixed with C(sf\_) are built to support the SolidFire storage platform.
"""
|
shsingh/ansible | refs/heads/devel | lib/ansible/module_utils/network/exos/argspec/facts/facts.py | 20 | #
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The arg spec for the exos facts module.
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
class FactsArgs(object): # pylint: disable=R0903
""" The arg spec for the exos facts module
"""
def __init__(self, **kwargs):
pass
argument_spec = {
'gather_subset': dict(default=['!config'], type='list'),
'gather_network_resources': dict(type='list'),
}
|
markflorisson/blaze-core | refs/heads/master | blaze/datadescriptor/remote_data_descriptor.py | 7 | from __future__ import absolute_import, division, print_function
import datashape
from ..catalog.blaze_url import add_indexers_to_url
from .data_descriptor import IDataDescriptor, Capabilities
from dynd import nd, ndt
class RemoteDataDescriptor(IDataDescriptor):
"""
A Blaze data descriptor which exposes an array on another
server.
"""
def __init__(self, url, dshape=None):
from ..io.client import requests
self.url = url
if dshape is None:
self._dshape = datashape.dshape(requests.get_remote_datashape(url))
else:
self._dshape = datashape.dshape(dshape)
@property
def dshape(self):
return self._dshape
@property
def capabilities(self):
"""The capabilities for the remote data descriptor."""
return Capabilities(
# treat remote arrays as immutable (maybe not?)
immutable = True,
# TODO: not sure what to say here
deferred = False,
# persistent on the remote server
persistent = True,
appendable = False,
remote = True,
)
def __repr__(self):
return 'RemoteDataDescriptor(%r, dshape=%r)' % (self.url, self.dshape)
def dynd_arr(self):
from ..io.client import requests
"""Downloads the data and returns a local in-memory nd.array"""
# TODO: Need binary serialization
j = requests.get_remote_json(self.url)
tp = ndt.type(str(self.dshape))
return nd.parse_json(tp, j)
def __len__(self):
ds = self.dshape
if isinstance(ds, datashape.DataShape):
ds = ds[-1]
if isinstance(ds, datashape.Fixed):
return int(ds)
raise AttributeError('the datashape (%s) of this data descriptor has no length' % ds)
def __getitem__(self, key):
return RemoteDataDescriptor(add_indexers_to_url(self.url, (key,)))
def getattr(self, name):
ds = self.dshape
if isinstance(ds, datashape.DataShape):
ds = ds[-1]
if isinstance(ds, datashape.Record) and name in ds.names:
return RemoteDataDescriptor(self.url + '.' + name)
else:
raise AttributeError(('Blaze remote array does not ' +
'have attribute "%s"') % name)
def __iter__(self):
raise NotImplementedError('remote data descriptor iterator unimplemented')
|
nburn42/tensorflow | refs/heads/master | tensorflow/examples/speech_commands/models_test.py | 48 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for speech commands models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.examples.speech_commands import models
from tensorflow.python.platform import test
class ModelsTest(test.TestCase):
def testPrepareModelSettings(self):
self.assertIsNotNone(
models.prepare_model_settings(10, 16000, 1000, 20, 10, 40))
def testCreateModelConvTraining(self):
model_settings = models.prepare_model_settings(10, 16000, 1000, 20, 10, 40)
with self.test_session() as sess:
fingerprint_input = tf.zeros([1, model_settings["fingerprint_size"]])
logits, dropout_prob = models.create_model(fingerprint_input,
model_settings, "conv", True)
self.assertIsNotNone(logits)
self.assertIsNotNone(dropout_prob)
self.assertIsNotNone(sess.graph.get_tensor_by_name(logits.name))
self.assertIsNotNone(sess.graph.get_tensor_by_name(dropout_prob.name))
def testCreateModelConvInference(self):
model_settings = models.prepare_model_settings(10, 16000, 1000, 20, 10, 40)
with self.test_session() as sess:
fingerprint_input = tf.zeros([1, model_settings["fingerprint_size"]])
logits = models.create_model(fingerprint_input, model_settings, "conv",
False)
self.assertIsNotNone(logits)
self.assertIsNotNone(sess.graph.get_tensor_by_name(logits.name))
def testCreateModelLowLatencyConvTraining(self):
model_settings = models.prepare_model_settings(10, 16000, 1000, 20, 10, 40)
with self.test_session() as sess:
fingerprint_input = tf.zeros([1, model_settings["fingerprint_size"]])
logits, dropout_prob = models.create_model(
fingerprint_input, model_settings, "low_latency_conv", True)
self.assertIsNotNone(logits)
self.assertIsNotNone(dropout_prob)
self.assertIsNotNone(sess.graph.get_tensor_by_name(logits.name))
self.assertIsNotNone(sess.graph.get_tensor_by_name(dropout_prob.name))
def testCreateModelFullyConnectedTraining(self):
model_settings = models.prepare_model_settings(10, 16000, 1000, 20, 10, 40)
with self.test_session() as sess:
fingerprint_input = tf.zeros([1, model_settings["fingerprint_size"]])
logits, dropout_prob = models.create_model(
fingerprint_input, model_settings, "single_fc", True)
self.assertIsNotNone(logits)
self.assertIsNotNone(dropout_prob)
self.assertIsNotNone(sess.graph.get_tensor_by_name(logits.name))
self.assertIsNotNone(sess.graph.get_tensor_by_name(dropout_prob.name))
def testCreateModelBadArchitecture(self):
model_settings = models.prepare_model_settings(10, 16000, 1000, 20, 10, 40)
with self.test_session():
fingerprint_input = tf.zeros([1, model_settings["fingerprint_size"]])
with self.assertRaises(Exception) as e:
models.create_model(fingerprint_input, model_settings,
"bad_architecture", True)
self.assertTrue("not recognized" in str(e.exception))
if __name__ == "__main__":
test.main()
|
arummler/eudaq | refs/heads/master | legacy/producers/palpidess/scripts/config_set_generation/conf_generator.py | 11 | #!/usr/bin/env python
import os
####################################################################################################
### Settings class #################################################################################
####################################################################################################
class settings:
def __init__( self ):
# file index number
self.i_file=0
## settings
# list: values to be set
# prio: sequence of the different settings
##
# Vbb
self.vbb_list=[]
self.vbb_prio=[]
# Vrst
self.vrst_list=[]
self.vrst_prio=[]
# Vcasn
self.vcasn_list=[[]]
self.vcasn_prio=[]
# Vcasn
self.vcasn_list=[]
self.vcasn_prio=[]
# Ithr
self.ithr_list=[]
self.ithr_prio=[]
# Vlight
self.vlight_list=[]
self.vlight_prio=[]
# Acq_time
self.acq_time_list=[]
self.acq_time_prio=[]
# Trig_delay
self.trig_delay_list=[]
self.trig_delay_prio=[]
def generate_files( self ):
for i_vbb in range(len(self.vbb_prio)):
vbb_index=self.vbb_prio[i_vbb]
vbb=self.vbb_list[vbb_index]
for i_vrst in range(len(self.vrst_prio)):
vrst=self.vrst_list[self.vrst_prio[i_vrst]]
for i_vcasn in range(len(self.vcasn_prio)):
vcasn=self.vcasn_list[vbb_index][self.vcasn_prio[i_vcasn]]
for i_vcasp in range(len(self.vcasp_prio)):
vcasp=self.vcasp_list[self.vcasp_prio[i_vcasp]]
for i_ithr in range(len(self.ithr_prio)):
ithr=self.ithr_list[self.ithr_prio[i_ithr]]
for i_vlight in range(len(self.vlight_prio)):
vlight=self.vlight_list[self.vlight_prio[i_vlight]]
for i_acq_time in range(len(self.acq_time_list)):
acq_time=self.acq_time_list[self.acq_time_prio[i_acq_time]]
for i_trig_delay in range(len(self.trig_delay_list)):
trig_delay=self.trig_delay_list[self.trig_delay_prio[i_trig_delay]]
args =(self.i_file, vbb, vrst, vcasn, vcasp, ithr, vlight, acq_time, trig_delay)
cmd ="/bin/bash conf_gen_helper.sh %d %f %f %f %f %f %f %f %f"%args
os.system(cmd)
self.i_file+=1
####################################################################################################
####################################################################################################
s=settings()
#### load the standard settings
# Vbb (V)
s.vbb_list=[ 0, 0.5, 1, 2, 3, 4, 5, 6 ]
s.vbb_prio=[ 0, 3, 2, 4, 1, 5, 6 ]
# Vrst (V)
s.vrst_list=[ 1.6 ]
s.vrst_prio=[ 0 ]
# back-bias dependent Vcasn (V) # Vbb:
s.vcasn_list=[[ 0.40, 0.50, 0.60 ], # 0.0 V
[ 0.60, 0.70, 0.75 ], # 0.5 V
[ 0.70, 0.80, 0.90 ], # 1.0 V
[ 0.90, 1.05, 1.10 ], # 2.0 V
[ 1.00, 1.10, 1.20 ], # 3.0 V
[ 1.20, 1.30, 1.40 ], # 4.0 V
[ 1.35, 1.40, 1.45 ], # 5.0 V
[ 1.40, 1.45, 1.50 ]] # 6.0 V
s.vcasn_prio=[ 0, 1, 2 ]
# Vcasp (V)
s.vcasp_list=[ 0.6 ]
s.vcasp_prio=[ 0 ]
# Ithr (uA)
s.ithr_list=[ 1.02, 1.54, 2.05, 2.87 ]
s.ithr_prio=[ 2, 0, 1, 3 ]
# Vlight (V)
s.vlight_list=[ 0., 10.25 ]
s.vlight_prio=[ 0, 1 ]
# Acq_time
s.acq_time_list=[ 1.54 ]
s.acq_time_prio=[ 0 ]
# Trig_delay
s.trig_delay_list=[ 0. ]
s.trig_delay_prio=[ 0 ]
### apply modifications
# no light standard
s.vlight_prio=[ 0 ]
s.acq_time_list=[ 3. ]
s.generate_files()
# scientific light
s.vlight_prio=[ 1 ]
s.generate_files()
# delay scan
s.vlight_prio=[ 0 ]
s.vbb_prio=[ 0, 3 ]
s.ithr_prio=[ 2, 0 ]
s.trig_delay_list=[ 0. ]
s.trig_delay_prio=[ 0 ]
s.ithr_prio=[ 2, 0 ]
s.vcasn_prio=[ 0 ]
s.trig_delay_list=[ 1., 2., 3., 4., 5., 6., 7., 8., 9. ]
s.trig_delay_prio=[ 0, 1, 2, 3, 4, 5, 6, 7, 8 ]
s.generate_files()
# delay scan with light
s.trig_delay_list=[ 1., 2., 3., 4., 5., 6., 7. ]
s.trig_delay_prio=[ 0, 1, 2, 3, 4, 5, 6 ]
s.vlight_prio=[ 1 ]
s.generate_files()
# fine delay scan
s.acq_time_list=[ 1.54 ]
s.trig_delay_list=[ 0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 6. ]
s.trig_delay_prio=[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 ]
s.generate_files()
# fine delay scan without light
s.vlight_prio=[ 0 ]
s.generate_files()
print "%d config files produced" % s.i_file
print "Estimated measurement time %d min (%0.1f h) assuming 7.5 min per config file" % (s.i_file*7.5, s.i_file*7.5/60.)
|
tumbl3w33d/ansible | refs/heads/devel | test/units/modules/net_tools/nios/test_nios_a_record.py | 59 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.modules.net_tools.nios import nios_a_record
from ansible.module_utils.net_tools.nios import api
from units.compat.mock import patch, MagicMock, Mock
from .test_nios_module import TestNiosModule, load_fixture
class TestNiosARecordModule(TestNiosModule):
module = nios_a_record
def setUp(self):
super(TestNiosARecordModule, self).setUp()
self.module = MagicMock(name='ansible.modules.net_tools.nios.nios_a_record.WapiModule')
self.module.check_mode = False
self.module.params = {'provider': None}
self.mock_wapi = patch('ansible.modules.net_tools.nios.nios_a_record.WapiModule')
self.exec_command = self.mock_wapi.start()
self.mock_wapi_run = patch('ansible.modules.net_tools.nios.nios_a_record.WapiModule.run')
self.mock_wapi_run.start()
self.load_config = self.mock_wapi_run.start()
def tearDown(self):
super(TestNiosARecordModule, self).tearDown()
self.mock_wapi.stop()
self.mock_wapi_run.stop()
def _get_wapi(self, test_object):
wapi = api.WapiModule(self.module)
wapi.get_object = Mock(name='get_object', return_value=test_object)
wapi.create_object = Mock(name='create_object')
wapi.update_object = Mock(name='update_object')
wapi.delete_object = Mock(name='delete_object')
return wapi
def load_fixtures(self, commands=None):
self.exec_command.return_value = (0, load_fixture('nios_result.txt').strip(), None)
self.load_config.return_value = dict(diff=None, session='session')
def test_nios_a_record_create(self):
self.module.params = {'provider': None, 'state': 'present', 'name': 'a.ansible.com',
'ipv4': '192.168.10.1', 'comment': None, 'extattrs': None}
test_object = None
test_spec = {
"name": {"ib_req": True},
"ipv4": {"ib_req": True},
"comment": {},
"extattrs": {}
}
wapi = self._get_wapi(test_object)
print("WAPI: ", wapi.__dict__)
res = wapi.run('testobject', test_spec)
self.assertTrue(res['changed'])
wapi.create_object.assert_called_once_with('testobject', {'name': self.module._check_type_dict().__getitem__(),
'ipv4': '192.168.10.1'})
def test_nios_a_record_update_comment(self):
self.module.params = {'provider': None, 'state': 'present', 'name': 'a.ansible.com', 'ipv4': '192.168.10.1',
'comment': 'updated comment', 'extattrs': None}
test_object = [
{
"comment": "test comment",
"_ref": "arecord/ZG5zLm5ldHdvcmtfdmlldyQw:default/true",
"name": "a.ansible.com",
"ipv4": "192.168.10.1",
"extattrs": {}
}
]
test_spec = {
"name": {"ib_req": True},
"ipv4": {"ib_req": True},
"comment": {},
"extattrs": {}
}
wapi = self._get_wapi(test_object)
res = wapi.run('testobject', test_spec)
self.assertTrue(res['changed'])
def test_nios_a_record_remove(self):
self.module.params = {'provider': None, 'state': 'absent', 'name': 'a.ansible.com', 'ipv4': '192.168.10.1',
'comment': None, 'extattrs': None}
ref = "arecord/ZG5zLm5ldHdvcmtfdmlldyQw:default/false"
test_object = [{
"comment": "test comment",
"_ref": ref,
"name": "a.ansible.com",
"ipv4": "192.168.10.1",
"extattrs": {'Site': {'value': 'test'}}
}]
test_spec = {
"name": {"ib_req": True},
"ipv4": {"ib_req": True},
"comment": {},
"extattrs": {}
}
wapi = self._get_wapi(test_object)
res = wapi.run('testobject', test_spec)
self.assertTrue(res['changed'])
wapi.delete_object.assert_called_once_with(ref)
def test_nios_a_record_update_record_name(self):
self.module.params = {'provider': None, 'state': 'present', 'name': {'new_name': 'a_new.ansible.com', 'old_name': 'a.ansible.com'},
'comment': 'comment', 'extattrs': None}
test_object = [
{
"comment": "test comment",
"_ref": "arecord/ZG5zLm5ldHdvcmtfdmlldyQw:default/true",
"name": "a_new.ansible.com",
"old_name": "a.ansible.com",
"extattrs": {}
}
]
test_spec = {
"name": {"ib_req": True},
"comment": {},
"extattrs": {}
}
wapi = self._get_wapi(test_object)
res = wapi.run('testobject', test_spec)
self.assertTrue(res['changed'])
wapi.update_object.called_once_with(test_object)
|
ptisserand/ansible | refs/heads/devel | lib/ansible/modules/cloud/rackspace/rax_mon_notification.py | 45 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rax_mon_notification
short_description: Create or delete a Rackspace Cloud Monitoring notification.
description:
- Create or delete a Rackspace Cloud Monitoring notification that specifies a
channel that can be used to communicate alarms, such as email, webhooks, or
PagerDuty. Rackspace monitoring module flow | rax_mon_entity -> rax_mon_check ->
*rax_mon_notification* -> rax_mon_notification_plan -> rax_mon_alarm
version_added: "2.0"
options:
state:
description:
- Ensure that the notification with this C(label) exists or does not exist.
choices: ['present', 'absent']
label:
description:
- Defines a friendly name for this notification. String between 1 and 255
characters long.
required: true
notification_type:
description:
- A supported notification type.
choices: ["webhook", "email", "pagerduty"]
required: true
details:
description:
- Dictionary of key-value pairs used to initialize the notification.
Required keys and meanings vary with notification type. See
http://docs.rackspace.com/cm/api/v1.0/cm-devguide/content/
service-notification-types-crud.html for details.
required: true
author: Ash Wilson
extends_documentation_fragment: rackspace.openstack
'''
EXAMPLES = '''
- name: Monitoring notification example
gather_facts: False
hosts: local
connection: local
tasks:
- name: Email me when something goes wrong.
rax_mon_entity:
credentials: ~/.rax_pub
label: omg
type: email
details:
address: [email protected]
register: the_notification
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
def notification(module, state, label, notification_type, details):
if len(label) < 1 or len(label) > 255:
module.fail_json(msg='label must be between 1 and 255 characters long')
changed = False
notification = None
cm = pyrax.cloud_monitoring
if not cm:
module.fail_json(msg='Failed to instantiate client. This typically '
'indicates an invalid region or an incorrectly '
'capitalized region name.')
existing = []
for n in cm.list_notifications():
if n.label == label:
existing.append(n)
if existing:
notification = existing[0]
if state == 'present':
should_update = False
should_delete = False
should_create = False
if len(existing) > 1:
module.fail_json(msg='%s existing notifications are labelled %s.' %
(len(existing), label))
if notification:
should_delete = (notification_type != notification.type)
should_update = (details != notification.details)
if should_update and not should_delete:
notification.update(details=notification.details)
changed = True
if should_delete:
notification.delete()
else:
should_create = True
if should_create:
notification = cm.create_notification(notification_type,
label=label, details=details)
changed = True
else:
for n in existing:
n.delete()
changed = True
if notification:
notification_dict = {
"id": notification.id,
"type": notification.type,
"label": notification.label,
"details": notification.details
}
module.exit_json(changed=changed, notification=notification_dict)
else:
module.exit_json(changed=changed)
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
state=dict(default='present', choices=['present', 'absent']),
label=dict(required=True),
notification_type=dict(required=True, choices=['webhook', 'email', 'pagerduty']),
details=dict(required=True, type='dict')
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together()
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
state = module.params.get('state')
label = module.params.get('label')
notification_type = module.params.get('notification_type')
details = module.params.get('details')
setup_rax_module(module, pyrax)
notification(module, state, label, notification_type, details)
if __name__ == '__main__':
main()
|
MyAOSP/external_chromium_org | refs/heads/kk-4.4 | tools/telemetry/telemetry/value/list_of_scalar_values.py | 36 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import numbers
from telemetry import value as value_module
def _Mean(values):
return float(sum(values)) / len(values) if len(values) > 0 else 0.0
class ListOfScalarValues(value_module.Value):
def __init__(self, page, name, units, values,
important=True, same_page_merge_policy=value_module.CONCATENATE):
super(ListOfScalarValues, self).__init__(page, name, units, important)
assert len(values) > 0
assert isinstance(values, list)
for v in values:
assert isinstance(v, numbers.Number)
self.values = values
self.same_page_merge_policy = same_page_merge_policy
def __repr__(self):
if self.page:
page_name = self.page.url
else:
page_name = None
if self.same_page_merge_policy == value_module.CONCATENATE:
merge_policy = 'CONCATENATE'
else:
merge_policy = 'PICK_FIRST'
return ('ListOfScalarValues(%s, %s, %s, %s, ' +
'important=%s, same_page_merge_policy=%s)') % (
page_name,
self.name, self.units,
repr(self.values),
self.important,
merge_policy)
def GetBuildbotDataType(self, output_context):
if self._IsImportantGivenOutputIntent(output_context):
return 'default'
return 'unimportant'
def GetBuildbotValue(self):
return self.values
def GetRepresentativeNumber(self):
return _Mean(self.values)
def GetRepresentativeString(self):
return repr(self.values)
def IsMergableWith(self, that):
return (super(ListOfScalarValues, self).IsMergableWith(that) and
self.same_page_merge_policy == that.same_page_merge_policy)
@classmethod
def MergeLikeValuesFromSamePage(cls, values):
assert len(values) > 0
v0 = values[0]
if v0.same_page_merge_policy == value_module.PICK_FIRST:
return ListOfScalarValues(
v0.page, v0.name, v0.units,
values[0].values,
important=v0.important,
same_page_merge_policy=v0.same_page_merge_policy)
assert v0.same_page_merge_policy == value_module.CONCATENATE
all_values = []
for v in values:
all_values.extend(v.values)
return ListOfScalarValues(
v0.page, v0.name, v0.units,
all_values,
important=v0.important,
same_page_merge_policy=v0.same_page_merge_policy)
@classmethod
def MergeLikeValuesFromDifferentPages(cls, values,
group_by_name_suffix=False):
assert len(values) > 0
v0 = values[0]
all_values = []
for v in values:
all_values.extend(v.values)
if not group_by_name_suffix:
name = v0.name
else:
name = v0.name_suffix
return ListOfScalarValues(
None, name, v0.units,
all_values,
important=v0.important,
same_page_merge_policy=v0.same_page_merge_policy)
|
danieldmm/minerva | refs/heads/master | evaluation/keyword_annotation.py | 1 | # Base class and functions to annotate the best keywords from each context according to their retrieval scores
#
# Copyright: (c) Daniel Duma 2016
# Author: Daniel Duma <[email protected]>
# For license information, see LICENSE.TXT
from __future__ import print_function
from __future__ import absolute_import
import math
import re
TERM_POSITION_IN_TUPLE = 6
def termScoresInFormula(part):
"""
Returns list of all term matching elements in formula
:param part: tuple, list or dict
:returns: list of all term matching elements in formula
"""
if isinstance(part, tuple) or isinstance(part, list):
return part
elif isinstance(part, dict):
if "type" not in part:
return None
if part["type"] in ["*", "+", "max"]:
scores = [termScoresInFormula(sub_part) for sub_part in part["parts"]]
result = []
for score in scores:
if isinstance(score, list):
result.extend(score)
else:
result.append(score)
return result
elif part["type"] in ["const", "coord"]:
pass
return []
def getDictOfTermScores(formula, op="max"):
"""
Returns the score of each term in the formula as a dict
:param formula: formula dict
:rtype: dict
"""
term_scores = termScoresInFormula(formula)
res = {}
if not term_scores:
return res
for score in term_scores:
# score=(field,qw,fw,tf,docFreq,maxDocs,term)
old_score = res.get(score[TERM_POSITION_IN_TUPLE], 0)
new_score = (score[1] * score[2])
if op == "add":
res[score[TERM_POSITION_IN_TUPLE]] = old_score + new_score
elif op == "max":
res[score[TERM_POSITION_IN_TUPLE]] = max(old_score, new_score)
## print(res)
return res
def getDictOfDocFreq(formulas):
"""
Returns a dict where the key is the term and the value its docFreq over the collection, and also an integer which
is the maxDocs
:param formulas:
:return:
"""
res = {}
maxDocs = 0
for formula in formulas:
term_scores = termScoresInFormula(formula.formula)
if not term_scores:
continue
for score in term_scores:
term = score[TERM_POSITION_IN_TUPLE]
if term not in res:
res[term] = score[4]
if not maxDocs:
maxDocs = score[5]
assert maxDocs == score[5]
## print(res)
return res, maxDocs
# def getFormulaTermWeights(unique_result):
# """
# Computes a score for each matching keyword in the formula for the
# matching files in the index
#
# unique_result is a dict
# {"match_guid":"<guid>","formulas":[{"guid":"<guid>","formula":<formula>}]
#
# :rtype: dict
# """
# idf_scores = defaultdict(lambda: 0)
# max_scores = defaultdict(lambda: 0)
#
# formula_term_scores = []
# match_result = None
#
# for formula in unique_result["formulas"]:
# term_scores = getDictOfTermScores(formula["formula"], "max")
#
# formula_term_scores.append((formula, term_scores))
# if formula["guid"] == unique_result["match_guid"]:
# match_result = term_scores
#
# for term in term_scores:
# idf_scores[term] = idf_scores[term] + term_scores[term]
# if term_scores[term] > max_scores[term]:
# max_scores[term] = term_scores[term]
#
# if not match_result:
# return None
#
# for term in idf_scores:
# idf_scores[term] = log((max_scores[term] * len(unique_result["formulas"])) / (1 + idf_scores[term]), 2)
#
# for term in match_result:
# match_result[term] = match_result[term] * idf_scores[term]
#
# return match_result
# def makeStructuredQueryFromKeywords(keywords):
# """
# This is just to get around my former use of this query storage format
# """
# query = StructuredQuery()
# for kw in keywords:
# query.addToken(kw[0], 1, boost=kw[1])
# return query
# def evaluateKeywordSelection(precomputed_queries, extracted_keywords, exp, use_keywords=True, metric="mrr",
# index_field="text"):
# """
# Get the batch scores of a set of queries
#
# :param precomputed_queries: ditto
# :param extracted_keywords: a list of lists of tuples, one list for each query
# """
# from proc.results_logging import measureScores
# from retrieval.elastic_retrieval import ElasticRetrieval
#
# retrieval_model = ElasticRetrieval(exp["features_index_name"], "", es_instance=cp.Corpus.es)
#
# scores_list = []
#
# for index, precomputed_query in precomputed_queries:
# scores = {}
# if use_keywords:
# query = makeStructuredQueryFromKeywords(extracted_keywords[index])
# else:
# query = precomputed_query
#
# retrieved = retrieval_model.runQuery(query, max_results=exp.get("max_results_recall", 200))
# measureScores(retrieved, precomputed_query["match_guid"], scores)
# scores_list.append(scores[metric])
#
# return sum(scores_list) / float(len(scores_list))
def listOfTermValuesInFormulas(formulas):
"""
Returns a dict where {term: [list of values]} in all formulas
"""
term_stats = {}
for formula in formulas:
term_scores = getDictOfTermScores(formula.formula)
for term in term_scores:
if term not in term_stats:
term_stats[term] = []
term_stats[term].append(term_scores[term])
return term_stats
def getIDFfromFormulas(formulas):
"""
Returns a dict where {term: [list of values]} in all formulas
"""
doc_counts = {}
for formula in formulas:
term_scores = getDictOfTermScores(formula.formula)
for term in term_scores:
if term not in doc_counts:
doc_counts[term] = 0
doc_counts[term] += 1
return doc_counts
def getNormalisedTermScores(precomputed_query, doc_list, retrieval_model):
"""
Returns the NORMALISED term scores from the explain query for each match_guid document
:param precomputed_query: dict with the query keywords
:param doc_list: top N retrieved documents (200 by default)
:param retrieval_model:
:return: dict of dicts of term scores {match_guid: {term: score}}
"""
formulas = [retrieval_model.formulaFromExplanation(precomputed_query, doc_id) for doc_id in doc_list]
raw_term_scores = listOfTermValuesInFormulas(formulas)
formula_docfreq = getIDFfromFormulas(formulas)
for term in raw_term_scores:
raw_term_scores[term] = sum(raw_term_scores[term])
sum_raw_term_scores = sum(raw_term_scores.values())
normalised_term_scores = {}
match_formulas = []
for match_guid in precomputed_query["match_guids"]:
match_formula = retrieval_model.formulaFromExplanation(precomputed_query, match_guid)
match_formulas.append(match_formula)
match_term_scores = getDictOfTermScores(match_formula.formula, "max")
sum_match_term_scores = sum(match_term_scores.values())
for term in match_term_scores:
match_term_scores[term] = match_term_scores[term] / sum_match_term_scores
# divisor = 1 + (raw_term_scores.get(term, 0) / sum_raw_term_scores)
divisor = 1 + (raw_term_scores.get(term, 0))
# squaring the divisor to decrease more
match_term_scores[term] = match_term_scores[term] / float(pow(divisor, 2))
# match_term_scores[term] = match_term_scores[term] / float(pow(idf, 2))
# idf = 1 + math.log(len(formulas) / 1 + formula_docfreq.get(term, 0))
# match_term_scores[term] = match_term_scores[term] / float(pow(divisor, pow(idf, 3)))
normalised_sum = sum(match_term_scores.values())
for term in match_term_scores:
match_term_scores[term] /= normalised_sum
normalised_term_scores[match_guid] = match_term_scores
return normalised_term_scores, formulas, match_formulas
def filterTermScores(term_scores, docFreq, min_docs_to_match, max_docs_to_match, min_term_len=0, stopword_list=None):
"""
filter terms that don't appear in a minimum of documents across the corpus
"""
removed = {}
filter_term_scores = {}
for guid in term_scores:
filter_term_scores[guid] = {}
for term in term_scores[guid]:
to_remove = False
if stopword_list and term in stopword_list:
to_remove = True
elif max_docs_to_match and docFreq.get(term, 0) > max_docs_to_match:
to_remove = True
elif min_docs_to_match and docFreq.get(term, 0) < min_docs_to_match:
to_remove = True
elif len(term) < min_term_len:
to_remove = True
if to_remove:
removed[term] = removed.get(term, 0) + 1
else:
filter_term_scores[guid][term] = term_scores[guid][term]
print("Removed", removed)
return filter_term_scores
class BaseKeywordSelector(object):
"""
"""
def __init__(self, name):
"""
"""
self.name = name
def selectKeywords(self, precomputed_query, doc_list, retrieval_model, parameters, cit, weights,
norm_term_scores=None,
docFreq=None, maxDocs=None, rawScores=None):
"""
"""
pass
def saveResults(self, path):
pass
def main():
pass
if __name__ == '__main__':
main()
def addUpAllTermScores(term_scores, options={}):
"""
Combines of the term scores over serveral guids
:param term_scores:
:return:
"""
all_term_scores = {}
num_docs_with_term = {}
mode = options.get("terms_weight_mode", "add")
multiplier = options.get("multiplier", 1)
power = options.get("power", 1)
min_val = options.get("min_val", None)
add_val = options.get("add_val", 0)
for guid in term_scores:
for term in term_scores[guid]:
all_term_scores[term] = all_term_scores.get(term, [])
all_term_scores[term].append(term_scores[guid][term])
num_docs_with_term[term] = num_docs_with_term.get(term, 0) + 1
for term in all_term_scores:
if mode == "add":
all_term_scores[term] = sum(all_term_scores[term])
elif mode == "max":
all_term_scores[term] = max(all_term_scores[term])
elif mode == "avg":
all_term_scores[term] = sum(all_term_scores[term]) / len(all_term_scores[term])
elif mode == "mul":
res = 1
for number in all_term_scores[term]:
res *= number
all_term_scores[term] = res
all_term_scores[term] *= multiplier
all_term_scores[term] = pow(all_term_scores[term], power)
if min_val:
all_term_scores[term] = max(min_val, all_term_scores[term])
all_term_scores[term] += add_val
return all_term_scores
def getCountsInQueryForMatchingTerms(precomputed_query):
# x[1] is count, x[2] is weight in the structured query
res = {x[0]: x[1] for x in precomputed_query["structured_query"]}
for kp_tuple in precomputed_query.get("keyphrases", []):
kp = " ".join(kp_tuple)
pattern = kp.replace(" ", r"\W+")
try:
all_matches = re.findall(pattern, precomputed_query["query_text"])
except Exception as e:
print(e)
continue
count = len(all_matches)
res[kp] = count
return res |
Zhongqilong/kbengine | refs/heads/master | kbe/res/scripts/common/Lib/site-packages/pip/_vendor/requests/packages/chardet/universaldetector.py | 744 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
import sys
import codecs
from .latin1prober import Latin1Prober # windows-1252
from .mbcsgroupprober import MBCSGroupProber # multi-byte character sets
from .sbcsgroupprober import SBCSGroupProber # single-byte character sets
from .escprober import EscCharSetProber  # ISO-2022, etc.
import re
MINIMUM_THRESHOLD = 0.20
ePureAscii = 0
eEscAscii = 1
eHighbyte = 2
class UniversalDetector:
def __init__(self):
self._highBitDetector = re.compile(b'[\x80-\xFF]')
self._escDetector = re.compile(b'(\033|~{)')
self._mEscCharSetProber = None
self._mCharSetProbers = []
self.reset()
def reset(self):
self.result = {'encoding': None, 'confidence': 0.0}
self.done = False
self._mStart = True
self._mGotData = False
self._mInputState = ePureAscii
self._mLastChar = b''
if self._mEscCharSetProber:
self._mEscCharSetProber.reset()
for prober in self._mCharSetProbers:
prober.reset()
def feed(self, aBuf):
if self.done:
return
aLen = len(aBuf)
if not aLen:
return
if not self._mGotData:
# If the data starts with BOM, we know it is UTF
            if aBuf[:3] == codecs.BOM_UTF8:
# EF BB BF UTF-8 with BOM
self.result = {'encoding': "UTF-8", 'confidence': 1.0}
elif aBuf[:4] == codecs.BOM_UTF32_LE:
# FF FE 00 00 UTF-32, little-endian BOM
self.result = {'encoding': "UTF-32LE", 'confidence': 1.0}
elif aBuf[:4] == codecs.BOM_UTF32_BE:
# 00 00 FE FF UTF-32, big-endian BOM
self.result = {'encoding': "UTF-32BE", 'confidence': 1.0}
elif aBuf[:4] == b'\xFE\xFF\x00\x00':
# FE FF 00 00 UCS-4, unusual octet order BOM (3412)
self.result = {
'encoding': "X-ISO-10646-UCS-4-3412",
'confidence': 1.0
}
elif aBuf[:4] == b'\x00\x00\xFF\xFE':
# 00 00 FF FE UCS-4, unusual octet order BOM (2143)
self.result = {
'encoding': "X-ISO-10646-UCS-4-2143",
'confidence': 1.0
}
elif aBuf[:2] == codecs.BOM_LE:
# FF FE UTF-16, little endian BOM
self.result = {'encoding': "UTF-16LE", 'confidence': 1.0}
elif aBuf[:2] == codecs.BOM_BE:
# FE FF UTF-16, big endian BOM
self.result = {'encoding': "UTF-16BE", 'confidence': 1.0}
self._mGotData = True
if self.result['encoding'] and (self.result['confidence'] > 0.0):
self.done = True
return
if self._mInputState == ePureAscii:
if self._highBitDetector.search(aBuf):
self._mInputState = eHighbyte
elif ((self._mInputState == ePureAscii) and
self._escDetector.search(self._mLastChar + aBuf)):
self._mInputState = eEscAscii
self._mLastChar = aBuf[-1:]
if self._mInputState == eEscAscii:
if not self._mEscCharSetProber:
self._mEscCharSetProber = EscCharSetProber()
if self._mEscCharSetProber.feed(aBuf) == constants.eFoundIt:
self.result = {'encoding': self._mEscCharSetProber.get_charset_name(),
'confidence': self._mEscCharSetProber.get_confidence()}
self.done = True
elif self._mInputState == eHighbyte:
if not self._mCharSetProbers:
self._mCharSetProbers = [MBCSGroupProber(), SBCSGroupProber(),
Latin1Prober()]
for prober in self._mCharSetProbers:
if prober.feed(aBuf) == constants.eFoundIt:
self.result = {'encoding': prober.get_charset_name(),
'confidence': prober.get_confidence()}
self.done = True
break
def close(self):
if self.done:
return
if not self._mGotData:
if constants._debug:
sys.stderr.write('no data received!\n')
return
self.done = True
if self._mInputState == ePureAscii:
self.result = {'encoding': 'ascii', 'confidence': 1.0}
return self.result
if self._mInputState == eHighbyte:
proberConfidence = None
maxProberConfidence = 0.0
maxProber = None
for prober in self._mCharSetProbers:
if not prober:
continue
proberConfidence = prober.get_confidence()
if proberConfidence > maxProberConfidence:
maxProberConfidence = proberConfidence
maxProber = prober
if maxProber and (maxProberConfidence > MINIMUM_THRESHOLD):
self.result = {'encoding': maxProber.get_charset_name(),
'confidence': maxProber.get_confidence()}
return self.result
if constants._debug:
            sys.stderr.write('no probers hit minimum threshold\n')
for prober in self._mCharSetProbers[0].mProbers:
if not prober:
continue
sys.stderr.write('%s confidence = %s\n' %
(prober.get_charset_name(),
prober.get_confidence()))
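# Usage sketch (not part of the vendored module): the detector is fed chunks of
# bytes and close() returns the final guess. The file path below is hypothetical.
#
#   detector = UniversalDetector()
#   with open("some_file.txt", "rb") as fh:
#       for chunk in iter(lambda: fh.read(4096), b""):
#           detector.feed(chunk)
#           if detector.done:
#               break
#   detector.close()
#   print(detector.result)   # e.g. {'encoding': 'UTF-8', 'confidence': 1.0}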
|
bufferapp/buffer-django-nonrel | refs/heads/master | tests/regressiontests/utils/test_module/bad_module.py | 255 | import a_package_name_that_does_not_exist
content = 'Bad Module' |
crmccreary/openerp_server | refs/heads/master | openerp/addons/event/report/report_event_registration.py | 9 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import fields, osv
import tools
class report_event_registration(osv.osv):
_name = "report.event.registration"
_description = "Events Analysis"
_auto = False
_rec_name = 'date'
_columns = {
'date': fields.date('Event Start Date', readonly=True),
'year': fields.char('Year', size=4, readonly=True),
'month': fields.selection([('01','January'), ('02','February'), ('03','March'), ('04','April'),
('05','May'), ('06','June'), ('07','July'), ('08','August'), ('09','September'),
('10','October'), ('11','November'), ('12','December')], 'Month',readonly=True),
'event_id': fields.many2one('event.event', 'Event', required=True),
'draft_state': fields.integer(' # No of Draft Registrations', size=20),
'confirm_state': fields.integer(' # No of Confirmed Registrations', size=20),
'register_max': fields.integer('Maximum Registrations'),
'nbevent': fields.integer('Number Of Events'),
'type': fields.many2one('event.type', 'Event Type'),
'state': fields.selection([('draft', 'Draft'), ('confirm', 'Confirmed'), ('done', 'Done'), ('cancel', 'Cancelled')], 'State', readonly=True, required=True),
'user_id': fields.many2one('res.users', 'Responsible', readonly=True),
'speaker_id': fields.many2one('res.partner', 'Speaker', readonly=True),
'company_id': fields.many2one('res.company', 'Company', readonly=True),
'product_id': fields.many2one('product.product', 'Product', readonly=True),
'total': fields.float('Total'),
'section_id': fields.related('event_id', 'section_id', type='many2one', relation='crm.case.section', string='Sale Team', store=True, readonly=True),
}
_order = 'date desc'
def init(self, cr):
"""
initialize the sql view for the event registration
cr -- the cursor
"""
tools.drop_view_if_exists(cr, 'report_event_registration')
cr.execute("""
CREATE OR REPLACE view report_event_registration AS (
SELECT
id,
event_id,
date,
user_id,
section_id,
company_id,
product_id,
speaker_id,
year,
month,
nbevent,
type,
SUM(draft_state) AS draft_state,
SUM(confirm_state) AS confirm_state,
SUM(total) AS total,
register_max,
state
FROM(
SELECT
MIN(e.id) AS id,
e.id AS event_id,
e.date_begin AS date,
e.user_id AS user_id,
e.section_id AS section_id,
e.company_id AS company_id,
e.product_id AS product_id,
e.main_speaker_id AS speaker_id,
to_char(e.date_begin, 'YYYY') AS year,
to_char(e.date_begin, 'MM') AS month,
count(e.id) AS nbevent,
t.id AS type,
CASE WHEN c.state IN ('draft') THEN c.nb_register ELSE 0 END AS draft_state,
CASE WHEN c.state IN ('open','done') THEN c.nb_register ELSE 0 END AS confirm_state,
CASE WHEN c.state IN ('done') THEN c.price_subtotal ELSE 0 END AS total,
e.register_max AS register_max,
e.state AS state
FROM
event_event e
LEFT JOIN
event_registration c ON (e.id=c.event_id)
LEFT JOIN
event_type t ON (e.type=t.id)
WHERE c.active = 'true'
GROUP BY
to_char(e.date_begin, 'YYYY'),
to_char(e.date_begin, 'MM'),
c.state,
c.nb_register,
t.id, e.id, e.date_begin, e.main_speaker_id,
e.register_max, e.type, e.state, c.event_id, e.user_id,e.company_id,e.product_id,e.section_id,
to_char(e.date_begin, 'YYYY-MM-DD'), c.id, c.price_subtotal )AS foo
GROUP BY
id,
event_id,
date,
user_id,
section_id,
company_id,
product_id,
speaker_id,
year,
month,
nbevent,
type,
register_max,
state
)
""")
report_event_registration()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: |
thomastu/django-wiki | refs/heads/master | wiki/views/article.py | 7 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import absolute_import
import difflib
import logging
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.db.models import Q
from django.db import transaction
from django.shortcuts import render_to_response, redirect, get_object_or_404
from django.template.context import RequestContext
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext as _
from django.views.generic.base import TemplateView, View, RedirectView
from django.views.generic.edit import FormView
from django.views.generic.list import ListView
from wiki.views.mixins import ArticleMixin
from wiki import editors, forms, models
from wiki.conf import settings
from wiki.core.plugins import registry as plugin_registry
from wiki.core.diff import simple_merge
from wiki.decorators import get_article, json_view
from django.core.urlresolvers import reverse
from wiki.core.exceptions import NoRootURL
from wiki.core import permissions
from django.http import Http404
from six.moves import range
log = logging.getLogger(__name__)
class ArticleView(ArticleMixin, TemplateView):
template_name = "wiki/view.html"
@method_decorator(get_article(can_read=True))
def dispatch(self, request, article, *args, **kwargs):
return super(
ArticleView,
self).dispatch(
request,
article,
*args,
**kwargs)
def get_context_data(self, **kwargs):
kwargs['selected_tab'] = 'view'
return ArticleMixin.get_context_data(self, **kwargs)
class Create(FormView, ArticleMixin):
form_class = forms.CreateForm
template_name = "wiki/create.html"
@method_decorator(get_article(can_write=True, can_create=True))
def dispatch(self, request, article, *args, **kwargs):
return super(Create, self).dispatch(request, article, *args, **kwargs)
def get_form(self, form_class):
"""
Returns an instance of the form to be used in this view.
"""
kwargs = self.get_form_kwargs()
initial = kwargs.get('initial', {})
initial['slug'] = self.request.GET.get('slug', None)
kwargs['initial'] = initial
form = form_class(self.request, self.urlpath, **kwargs)
form.fields['slug'].widget = forms.TextInputPrepend(
prepend='/' + self.urlpath.path,
attrs={
# Make patterns force lowercase if we are case insensitive to bless the user with a
# bit of strictness, anyways
'pattern': '[a-z0-9_-]+' if not settings.URL_CASE_SENSITIVE else '[a-zA-Z0-9_-]+',
'title': 'Lowercase letters, numbers, hyphens and underscores' if not settings.URL_CASE_SENSITIVE else 'Letters, numbers, hyphens and underscores',
}
)
return form
def form_valid(self, form):
user = None
ip_address = None
if not self.request.user.is_anonymous():
user = self.request.user
if settings.LOG_IPS_USERS:
ip_address = self.request.META.get('REMOTE_ADDR', None)
elif settings.LOG_IPS_ANONYMOUS:
ip_address = self.request.META.get('REMOTE_ADDR', None)
try:
self.newpath = models.URLPath.create_article(
self.urlpath,
form.cleaned_data['slug'],
title=form.cleaned_data['title'],
content=form.cleaned_data['content'],
user_message=form.cleaned_data['summary'],
user=user,
ip_address=ip_address,
article_kwargs={'owner': user,
'group': self.article.group,
'group_read': self.article.group_read,
'group_write': self.article.group_write,
'other_read': self.article.other_read,
'other_write': self.article.other_write,
})
messages.success(
self.request,
_("New article '%s' created.") %
self.newpath.article.current_revision.title)
# TODO: Handle individual exceptions better and give good feedback.
except Exception as e:
log.exception("Exception creating article.")
if self.request.user.is_superuser:
messages.error(
self.request,
_("There was an error creating this article: %s") %
str(e))
else:
messages.error(
self.request,
_("There was an error creating this article."))
return redirect('wiki:get', '')
url = self.get_success_url()
return url
def get_success_url(self):
return redirect('wiki:get', self.newpath.path)
def get_context_data(self, **kwargs):
c = ArticleMixin.get_context_data(self, **kwargs)
c['parent_urlpath'] = self.urlpath
c['parent_article'] = self.article
c['create_form'] = kwargs.pop('form', None)
c['editor'] = editors.getEditor()
return c
class Delete(FormView, ArticleMixin):
form_class = forms.DeleteForm
template_name = "wiki/delete.html"
@method_decorator(
get_article(
can_write=True,
not_locked=True,
can_delete=True))
def dispatch(self, request, article, *args, **kwargs):
return self.dispatch1(request, article, *args, **kwargs)
def dispatch1(self, request, article, *args, **kwargs):
"""Deleted view needs to access this method without a decorator,
therefore it is separate."""
urlpath = kwargs.get('urlpath', None)
# Where to go after deletion...
self.next = ""
self.cannot_delete_root = False
if urlpath and urlpath.parent:
self.next = reverse(
'wiki:get',
kwargs={
'path': urlpath.parent.path})
elif urlpath:
# We are a urlpath with no parent. This is the root
self.cannot_delete_root = True
else:
# We have no urlpath. Get it if a urlpath exists
for art_obj in article.articleforobject_set.filter(is_mptt=True):
if art_obj.content_object.parent:
self.next = reverse(
'wiki:get', kwargs={
'article_id': art_obj.content_object.parent.article.id})
else:
self.cannot_delete_root = True
return super(Delete, self).dispatch(request, article, *args, **kwargs)
def get_initial(self):
return {'revision': self.article.current_revision}
def get_form(self, form_class):
form = super(Delete, self).get_form(form_class)
if self.article.can_moderate(self.request.user):
form.fields['purge'].widget = forms.forms.CheckboxInput()
return form
def get_form_kwargs(self):
kwargs = FormView.get_form_kwargs(self)
kwargs['article'] = self.article
kwargs['has_children'] = bool(self.children_slice)
return kwargs
def form_valid(self, form):
cd = form.cleaned_data
purge = cd['purge']
# If we are purging, only moderators can delete articles with children
cannot_delete_children = False
can_moderate = self.article.can_moderate(self.request.user)
if purge and self.children_slice and not can_moderate:
cannot_delete_children = True
if self.cannot_delete_root or cannot_delete_children:
messages.error(
self.request,
_('This article cannot be deleted because it has children or is a root article.'))
return redirect('wiki:get', article_id=self.article.id)
if can_moderate and purge:
# First, remove children
if self.urlpath:
self.urlpath.delete_subtree()
self.article.delete()
messages.success(
self.request,
_('This article together with all its contents are now completely gone! Thanks!'))
else:
revision = models.ArticleRevision()
revision.inherit_predecessor(self.article)
revision.set_from_request(self.request)
revision.deleted = True
self.article.add_revision(revision)
messages.success(
self.request,
_('The article "%s" is now marked as deleted! Thanks for keeping the site free from unwanted material!') %
revision.title)
return self.get_success_url()
def get_success_url(self):
return redirect(self.next)
def get_context_data(self, **kwargs):
cannot_delete_children = False
if self.children_slice and not self.article.can_moderate(
self.request.user):
cannot_delete_children = True
kwargs['delete_form'] = kwargs.pop('form', None)
kwargs['cannot_delete_root'] = self.cannot_delete_root
kwargs['delete_children'] = self.children_slice[:20]
kwargs['delete_children_more'] = len(self.children_slice) > 20
kwargs['cannot_delete_children'] = cannot_delete_children
return super(Delete, self).get_context_data(**kwargs)
class Edit(FormView, ArticleMixin):
"""Edit an article and process sidebar plugins."""
form_class = forms.EditForm
template_name = "wiki/edit.html"
@method_decorator(get_article(can_write=True, not_locked=True))
def dispatch(self, request, article, *args, **kwargs):
self.sidebar_plugins = plugin_registry.get_sidebar()
self.sidebar = []
return super(Edit, self).dispatch(request, article, *args, **kwargs)
def get_initial(self):
initial = FormView.get_initial(self)
for field_name in ['title', 'content']:
session_key = 'unsaved_article_%s_%d' % (
field_name, self.article.id)
if session_key in list(self.request.session.keys()):
content = self.request.session[session_key]
initial[field_name] = content
del self.request.session[session_key]
return initial
def get_form(self, form_class):
"""
Checks from querystring data that the edit form is actually being saved,
otherwise removes the 'data' and 'files' kwargs from form initialisation.
"""
kwargs = self.get_form_kwargs()
if self.request.POST.get(
'save',
'') != '1' and self.request.POST.get('preview') != '1':
kwargs['data'] = None
kwargs['files'] = None
kwargs['no_clean'] = True
return form_class(
self.request,
self.article.current_revision,
**kwargs)
def get_sidebar_form_classes(self):
"""Returns dictionary of form classes for the sidebar. If no form class is
specified, puts None in dictionary. Keys in the dictionary are used
to identify which form is being saved."""
form_classes = {}
for cnt, plugin in enumerate(self.sidebar_plugins):
form_classes[
'form%d' %
cnt] = (
plugin,
plugin.sidebar.get(
'form_class',
None))
return form_classes
def get(self, request, *args, **kwargs):
# Generate sidebar forms
self.sidebar_forms = []
for form_id, (plugin, Form) in list(
self.get_sidebar_form_classes().items()):
if Form:
form = Form(self.article, self.request.user)
setattr(form, 'form_id', form_id)
else:
form = None
self.sidebar.append((plugin, form))
return super(Edit, self).get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
# Generate sidebar forms
self.sidebar_forms = []
for form_id, (plugin, Form) in list(
self.get_sidebar_form_classes().items()):
if Form:
if form_id == self.request.GET.get('f', None):
form = Form(
self.article,
self.request,
data=self.request.POST,
files=self.request.FILES)
if form.is_valid():
form.save()
usermessage = form.get_usermessage()
if usermessage:
messages.success(self.request, usermessage)
else:
messages.success(
self.request,
_('Your changes were saved.'))
title = form.cleaned_data['unsaved_article_title']
content = form.cleaned_data['unsaved_article_content']
if title != self.article.current_revision.title or content != self.article.current_revision.content:
request.session[
'unsaved_article_title_%d' %
self.article.id] = title
request.session[
'unsaved_article_content_%d' %
self.article.id] = content
messages.warning(
request,
_('Please note that your article text has not yet been saved!'))
if self.urlpath:
return redirect(
'wiki:edit',
path=self.urlpath.path)
return redirect(
'wiki:edit',
article_id=self.article.id)
else:
form = Form(self.article, self.request)
setattr(form, 'form_id', form_id)
else:
form = None
self.sidebar.append((plugin, form))
return super(Edit, self).post(request, *args, **kwargs)
def form_valid(self, form):
"""Create a new article revision when the edit form is valid
(does not concern any sidebar forms!)."""
revision = models.ArticleRevision()
revision.inherit_predecessor(self.article)
revision.title = form.cleaned_data['title']
revision.content = form.cleaned_data['content']
revision.user_message = form.cleaned_data['summary']
revision.deleted = False
revision.set_from_request(self.request)
self.article.add_revision(revision)
messages.success(
self.request,
            _('A new revision of the article was successfully added.'))
return self.get_success_url()
def get_success_url(self):
"""Go to the article view page when the article has been saved"""
if self.urlpath:
return redirect("wiki:get", path=self.urlpath.path)
return redirect('wiki:get', article_id=self.article.id)
def get_context_data(self, **kwargs):
kwargs['edit_form'] = kwargs.pop('form', None)
kwargs['editor'] = editors.getEditor()
kwargs['selected_tab'] = 'edit'
kwargs['sidebar'] = self.sidebar
return super(Edit, self).get_context_data(**kwargs)
class Deleted(Delete):
"""Tell a user that an article has been deleted. If user has permissions,
let user restore and possibly purge the deleted article and children."""
template_name = "wiki/deleted.html"
form_class = forms.DeleteForm
@method_decorator(get_article(can_read=True, deleted_contents=True))
def dispatch(self, request, article, *args, **kwargs):
self.urlpath = kwargs.get('urlpath', None)
self.article = article
if self.urlpath:
deleted_ancestor = self.urlpath.first_deleted_ancestor()
if deleted_ancestor is None:
# No one is deleted!
return redirect('wiki:get', path=self.urlpath.path)
elif deleted_ancestor != self.urlpath:
# An ancestor was deleted, so redirect to that deleted page
return redirect('wiki:deleted', path=deleted_ancestor.path)
else:
if not article.current_revision.deleted:
return redirect('wiki:get', article_id=article.id)
# Restore
if request.GET.get('restore', False):
can_restore = not article.current_revision.locked and article.can_delete(
request.user)
can_restore = can_restore or article.can_moderate(request.user)
if can_restore:
revision = models.ArticleRevision()
revision.inherit_predecessor(self.article)
revision.set_from_request(request)
revision.deleted = False
revision.automatic_log = _('Restoring article')
self.article.add_revision(revision)
messages.success(
request,
_('The article "%s" and its children are now restored.') %
revision.title)
if self.urlpath:
return redirect('wiki:get', path=self.urlpath.path)
else:
return redirect('wiki:get', article_id=article.id)
return super(
Deleted,
self).dispatch1(
request,
article,
*args,
**kwargs)
def get_initial(self):
return {'revision': self.article.current_revision,
'purge': True}
def get_form(self, form_class):
form = super(Delete, self).get_form(form_class)
return form
def get_context_data(self, **kwargs):
kwargs['purge_form'] = kwargs.pop('form', None)
return super(Delete, self).get_context_data(**kwargs)
class Source(ArticleMixin, TemplateView):
template_name = "wiki/source.html"
@method_decorator(get_article(can_read=True))
def dispatch(self, request, article, *args, **kwargs):
return super(Source, self).dispatch(request, article, *args, **kwargs)
def get_context_data(self, **kwargs):
kwargs['selected_tab'] = 'source'
return ArticleMixin.get_context_data(self, **kwargs)
class History(ListView, ArticleMixin):
template_name = "wiki/history.html"
allow_empty = True
context_object_name = 'revisions'
paginate_by = 10
def get_queryset(self):
return models.ArticleRevision.objects.filter(
article=self.article).order_by('-created')
def get_context_data(self, **kwargs):
# Is this a bit of a hack? Use better inheritance?
kwargs_article = ArticleMixin.get_context_data(self, **kwargs)
kwargs_listview = ListView.get_context_data(self, **kwargs)
kwargs.update(kwargs_article)
kwargs.update(kwargs_listview)
kwargs['selected_tab'] = 'history'
return kwargs
@method_decorator(get_article(can_read=True))
def dispatch(self, request, article, *args, **kwargs):
return super(History, self).dispatch(request, article, *args, **kwargs)
class Dir(ListView, ArticleMixin):
template_name = "wiki/dir.html"
allow_empty = True
context_object_name = 'directory'
model = models.URLPath
paginate_by = 30
@method_decorator(get_article(can_read=True))
def dispatch(self, request, article, *args, **kwargs):
self.filter_form = forms.DirFilterForm(request.GET)
if self.filter_form.is_valid():
self.query = self.filter_form.cleaned_data['query']
else:
self.query = None
return super(Dir, self).dispatch(request, article, *args, **kwargs)
def get_queryset(self):
children = self.urlpath.get_children().can_read(self.request.user)
if self.query:
children = children.filter(
Q(article__current_revision__title__contains=self.query) |
Q(slug__contains=self.query))
if not self.article.can_moderate(self.request.user):
children = children.active()
children = children.select_related_common().order_by(
'article__current_revision__title')
return children
def get_context_data(self, **kwargs):
kwargs_article = ArticleMixin.get_context_data(self, **kwargs)
kwargs_listview = ListView.get_context_data(self, **kwargs)
kwargs.update(kwargs_article)
kwargs.update(kwargs_listview)
kwargs['filter_query'] = self.query
kwargs['filter_form'] = self.filter_form
# Update each child's ancestor cache so the lookups don't have
# to be repeated.
updated_children = kwargs[self.context_object_name]
for child in updated_children:
child.set_cached_ancestors_from_parent(self.urlpath)
kwargs[self.context_object_name] = updated_children
return kwargs
class SearchView(ListView):
template_name = "wiki/search.html"
paginate_by = 25
context_object_name = "articles"
def dispatch(self, request, *args, **kwargs):
# Do not allow anonymous users to search if they cannot read content
if request.user.is_anonymous() and not settings.ANONYMOUS:
return redirect(settings.LOGIN_URL)
self.search_form = forms.SearchForm(request.GET)
if self.search_form.is_valid():
self.query = self.search_form.cleaned_data['q']
else:
self.query = None
return super(SearchView, self).dispatch(request, *args, **kwargs)
def get_queryset(self):
if not self.query:
return models.Article.objects.none()
articles = models.Article.objects.filter(
Q(current_revision__title__icontains=self.query) |
Q(current_revision__content__icontains=self.query))
if not permissions.can_moderate(
models.URLPath.root().article,
self.request.user):
articles = articles.active().can_read(self.request.user)
return articles
def get_context_data(self, **kwargs):
kwargs = ListView.get_context_data(self, **kwargs)
kwargs['search_form'] = self.search_form
kwargs['search_query'] = self.query
return kwargs
class Plugin(View):
def dispatch(self, request, path=None, slug=None, **kwargs):
kwargs['path'] = path
for plugin in list(plugin_registry.get_plugins().values()):
if getattr(plugin, 'slug', None) == slug:
return plugin.article_view(request, **kwargs)
raise Http404()
class Settings(ArticleMixin, TemplateView):
permission_form_class = forms.PermissionsForm
template_name = "wiki/settings.html"
@method_decorator(login_required)
@method_decorator(get_article(can_read=True))
def dispatch(self, request, article, *args, **kwargs):
return super(
Settings,
self).dispatch(
request,
article,
*args,
**kwargs)
def get_form_classes(self,):
"""
Return all settings forms that can be filled in
"""
settings_forms = []
if permissions.can_change_permissions(self.article, self.request.user):
settings_forms.append(self.permission_form_class)
plugin_forms = [F for F in plugin_registry.get_settings_forms()]
plugin_forms.sort(key=lambda form: form.settings_order)
settings_forms += plugin_forms
for i in range(len(settings_forms)):
# TODO: Do not set an attribute on a form class - this
# could be mixed up with a different instance
# Use strategy from Edit view...
setattr(settings_forms[i], 'action', 'form%d' % i)
return settings_forms
def post(self, *args, **kwargs):
self.forms = []
for Form in self.get_form_classes():
if Form.action == self.request.GET.get('f', None):
form = Form(self.article, self.request, self.request.POST)
if form.is_valid():
form.save()
usermessage = form.get_usermessage()
if usermessage:
messages.success(self.request, usermessage)
if self.urlpath:
return redirect(
'wiki:settings',
path=self.urlpath.path)
return redirect(
'wiki:settings',
article_id=self.article.id)
else:
form = Form(self.article, self.request)
self.forms.append(form)
return super(Settings, self).get(*args, **kwargs)
def get(self, *args, **kwargs):
self.forms = []
# There is a bug where articles fetched with select_related have bad boolean field https://code.djangoproject.com/ticket/15040
# We fetch a fresh new article for this reason
new_article = models.Article.objects.get(id=self.article.id)
for Form in self.get_form_classes():
self.forms.append(Form(new_article, self.request))
return super(Settings, self).get(*args, **kwargs)
def get_success_url(self):
if self.urlpath:
return redirect('wiki:settings', path=self.urlpath.path)
return redirect('wiki:settings', article_id=self.article.id)
def get_context_data(self, **kwargs):
kwargs['selected_tab'] = 'settings'
kwargs['forms'] = self.forms
return super(Settings, self).get_context_data(**kwargs)
class ChangeRevisionView(RedirectView):
permanent = False
@method_decorator(get_article(can_write=True, not_locked=True))
def dispatch(self, request, article, *args, **kwargs):
self.article = article
        self.urlpath = kwargs.pop('urlpath', False)
self.change_revision()
return super(
ChangeRevisionView,
self).dispatch(
request,
*args,
**kwargs)
def get_redirect_url(self, **kwargs):
if self.urlpath:
return reverse("wiki:history", kwargs={'path': self.urlpath.path})
else:
return reverse(
'wiki:history',
kwargs={
'article_id': self.article.id})
def change_revision(self):
revision = get_object_or_404(
models.ArticleRevision,
article=self.article,
id=self.kwargs['revision_id'])
self.article.current_revision = revision
self.article.save()
messages.success(
self.request,
_("The article %(title)s is now set to display revision #%(revision_number)d") % {
'title': revision.title,
'revision_number': revision.revision_number,
})
class Preview(ArticleMixin, TemplateView):
template_name = "wiki/preview_inline.html"
@method_decorator(get_article(can_read=True, deleted_contents=True))
def dispatch(self, request, article, *args, **kwargs):
revision_id = request.GET.get('r', None)
self.title = None
self.content = None
self.preview = False
if revision_id:
self.revision = get_object_or_404(
models.ArticleRevision,
article=article,
id=revision_id)
else:
self.revision = None
return super(Preview, self).dispatch(request, article, *args, **kwargs)
def post(self, request, *args, **kwargs):
edit_form = forms.EditForm(
request,
self.article.current_revision,
request.POST,
preview=True)
if edit_form.is_valid():
self.title = edit_form.cleaned_data['title']
self.content = edit_form.cleaned_data['content']
self.preview = True
return super(Preview, self).get(request, *args, **kwargs)
def get(self, request, *args, **kwargs):
if self.revision and not self.title:
self.title = self.revision.title
if self.revision and not self.content:
self.content = self.revision.content
return super(Preview, self).get(request, *args, **kwargs)
def get_context_data(self, **kwargs):
kwargs['title'] = self.title
kwargs['revision'] = self.revision
kwargs['content'] = self.content
kwargs['preview'] = self.preview
return ArticleMixin.get_context_data(self, **kwargs)
@json_view
def diff(request, revision_id, other_revision_id=None):
revision = get_object_or_404(models.ArticleRevision, id=revision_id)
if not other_revision_id:
other_revision = revision.previous_revision
baseText = other_revision.content if other_revision else ""
newText = revision.content
differ = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK)
diff = differ.compare(baseText.splitlines(1), newText.splitlines(1))
other_changes = []
if not other_revision or other_revision.title != revision.title:
other_changes.append((_('New title'), revision.title))
return dict(diff=list(diff), other_changes=other_changes)
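# Illustrative sketch (not part of the original views): the diff view above
# returns difflib.Differ output line by line; for two one-line revisions it
# yields lines prefixed with '- ', '+ ', '  ' or '? ' describing the change.
#
#   differ = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK)
#   list(differ.compare(["old text\n"], ["new text\n"]))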
# TODO: Throw in a class-based view
@get_article(can_write=True)
def merge(
request,
article,
revision_id,
urlpath=None,
template_file="wiki/preview_inline.html",
preview=False):
revision = get_object_or_404(
models.ArticleRevision,
article=article,
id=revision_id)
current_text = article.current_revision.content if article.current_revision else ""
new_text = revision.content
content = simple_merge(current_text, new_text)
# Save new revision
if not preview:
old_revision = article.current_revision
if revision.deleted:
c = RequestContext(
request,
{'error_msg': _('You cannot merge with a deleted revision'),
'article': article, 'urlpath': urlpath})
return render_to_response("wiki/error.html", context_instance=c)
new_revision = models.ArticleRevision()
new_revision.inherit_predecessor(article)
new_revision.deleted = False
new_revision.locked = False
new_revision.title = article.current_revision.title
new_revision.content = content
new_revision.automatic_log = (
_('Merge between revision #%(r1)d and revision #%(r2)d') % {
'r1': revision.revision_number,
'r2': old_revision.revision_number})
article.add_revision(new_revision, save=True)
old_revision.simpleplugin_set.all().update(
article_revision=new_revision)
revision.simpleplugin_set.all().update(article_revision=new_revision)
messages.success(
request,
_('A new revision was created: Merge between revision #%(r1)d and revision #%(r2)d') % {
'r1': revision.revision_number,
'r2': old_revision.revision_number})
if urlpath:
return redirect('wiki:edit', path=urlpath.path)
else:
return redirect('wiki:edit', article_id=article.id)
c = RequestContext(request, {'article': article,
'title': article.current_revision.title,
'revision': None,
'merge1': revision,
'merge2': article.current_revision,
'merge': True,
'content': content})
return render_to_response(template_file, context_instance=c)
class CreateRootView(FormView):
form_class = forms.CreateRootForm
template_name = 'wiki/create_root.html'
def dispatch(self, request, *args, **kwargs):
if not request.user.is_superuser:
return redirect("wiki:root_missing")
try:
root = models.URLPath.root()
except NoRootURL:
pass
else:
if root.article:
return redirect('wiki:get', path=root.path)
# TODO: This is too dangerous... let's say there is no root.article and we end up here,
# then it might cascade to delete a lot of things on an existing
# installation.... / benjaoming
root.delete()
return super(CreateRootView, self).dispatch(request, *args, **kwargs)
def form_valid(self, form):
models.URLPath.create_root(
title=form.cleaned_data["title"],
content=form.cleaned_data["content"],
request=self.request
)
return redirect("wiki:root")
def get_context_data(self, **kwargs):
data = super(CreateRootView, self).get_context_data(**kwargs)
data['editor'] = editors.getEditor()
return data
class MissingRootView(TemplateView):
template_name = 'wiki/root_missing.html'
|
dgarros/ansible | refs/heads/devel | lib/ansible/inventory/manager.py | 11 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#############################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import fnmatch
import os
import re
import itertools
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleOptionsError, AnsibleParserError
from ansible.inventory.data import InventoryData
from ansible.module_utils.six import string_types
from ansible.module_utils._text import to_bytes, to_text
from ansible.parsing.utils.addresses import parse_address
from ansible.plugins import PluginLoader
from ansible.utils.path import unfrackpath
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
IGNORED_ALWAYS = [b"^\.", b"^host_vars$", b"^group_vars$", b"^vars_plugins$"]
IGNORED_PATTERNS = [to_bytes(x) for x in C.INVENTORY_IGNORE_PATTERNS]
IGNORED_EXTS = [b'%s$' % to_bytes(re.escape(x)) for x in C.INVENTORY_IGNORE_EXTS]
IGNORED = re.compile(b'|'.join(IGNORED_ALWAYS + IGNORED_PATTERNS + IGNORED_EXTS))
def order_patterns(patterns):
''' takes a list of patterns and reorders them by modifier to apply them consistently '''
# FIXME: this goes away if we apply patterns incrementally or by groups
pattern_regular = []
pattern_intersection = []
pattern_exclude = []
for p in patterns:
if p.startswith("!"):
pattern_exclude.append(p)
elif p.startswith("&"):
pattern_intersection.append(p)
elif p:
pattern_regular.append(p)
# if no regular pattern was given, hence only exclude and/or intersection
# make that magically work
if pattern_regular == []:
pattern_regular = ['all']
# when applying the host selectors, run those without the "&" or "!"
# first, then the &s, then the !s.
return pattern_regular + pattern_intersection + pattern_exclude
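# Illustrative sketch (not part of the original module): order_patterns keeps
# plain patterns first, then intersections (&), then exclusions (!), and falls
# back to ['all'] when only modifiers were given.
#
#   order_patterns(['!db', 'web', '&staging'])  # -> ['web', '&staging', '!db']
#   order_patterns(['!db'])                     # -> ['all', '!db']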
def split_host_pattern(pattern):
"""
Takes a string containing host patterns separated by commas (or a list
thereof) and returns a list of single patterns (which may not contain
commas). Whitespace is ignored.
Also accepts ':' as a separator for backwards compatibility, but it is
not recommended due to the conflict with IPv6 addresses and host ranges.
Example: 'a,b[1], c[2:3] , d' -> ['a', 'b[1]', 'c[2:3]', 'd']
"""
if isinstance(pattern, list):
return list(itertools.chain(*map(split_host_pattern, pattern)))
# If it's got commas in it, we'll treat it as a straightforward
# comma-separated list of patterns.
elif ',' in pattern:
patterns = re.split('\s*,\s*', pattern)
# If it doesn't, it could still be a single pattern. This accounts for
# non-separator uses of colons: IPv6 addresses and [x:y] host ranges.
else:
try:
(base, port) = parse_address(pattern, allow_ranges=True)
patterns = [pattern]
except:
# The only other case we accept is a ':'-separated list of patterns.
# This mishandles IPv6 addresses, and is retained only for backwards
# compatibility.
patterns = re.findall(
r'''(?: # We want to match something comprising:
[^\s:\[\]] # (anything other than whitespace or ':[]'
| # ...or...
\[[^\]]*\] # a single complete bracketed expression)
)+ # occurring once or more
''', pattern, re.X
)
return [p.strip() for p in patterns]
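# Illustrative sketch (not part of the original module): a few inputs and the
# lists split_host_pattern produces. Host names are invented.
#
#   split_host_pattern('a,b[1], c[2:3] , d')  # -> ['a', 'b[1]', 'c[2:3]', 'd']
#   split_host_pattern('web:db')              # -> ['web', 'db']  (legacy ':' separator)
#   split_host_pattern('fe80::1')             # -> ['fe80::1']    (IPv6 kept whole when
#                                             #     parse_address recognises it)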
class InventoryManager(object):
''' Creates and manages inventory '''
def __init__(self, loader, sources=None):
# base objects
self._loader = loader
self._inventory = InventoryData()
# a list of host(names) to contain current inquiries to
self._restriction = None
self._subset = None
# caches
self._hosts_patterns_cache = {} # resolved full patterns
self._pattern_cache = {} # resolved individual patterns
self._inventory_plugins = [] # for generating inventory
# the inventory dirs, files, script paths or lists of hosts
if sources is None:
self._sources = []
elif isinstance(sources, string_types):
self._sources = [sources]
else:
self._sources = sources
# get to work!
self.parse_sources()
@property
def localhost(self):
return self._inventory.localhost
@property
def groups(self):
return self._inventory.groups
@property
def hosts(self):
return self._inventory.hosts
def get_vars(self, *args, **kwargs):
return self._inventory.get_vars(args, kwargs)
def add_host(self, host, group=None, port=None):
return self._inventory.add_host(host, group, port)
def add_group(self, group):
return self._inventory.add_group(group)
def get_groups_dict(self):
return self._inventory.get_groups_dict()
def reconcile_inventory(self):
return self._inventory.reconcile_inventory()
def get_host(self, hostname):
return self._inventory.get_host(hostname)
def _setup_inventory_plugins(self):
''' sets up loaded inventory plugins for usage '''
inventory_loader = PluginLoader('InventoryModule', 'ansible.plugins.inventory', 'inventory_plugins', 'inventory_plugins')
display.vvvv('setting up inventory plugins')
for name in C.INVENTORY_ENABLED:
plugin = inventory_loader.get(name)
if plugin:
self._inventory_plugins.append(plugin)
else:
display.warning('Failed to load inventory plugin, skipping %s' % name)
if not self._inventory_plugins:
raise AnsibleError("No inventory plugins available to generate inventory, make sure you have at least one whitelisted.")
def parse_sources(self, cache=True):
''' iterate over inventory sources and parse each one to populate it'''
self._setup_inventory_plugins()
parsed = False
# allow for multiple inventory parsing
for source in self._sources:
if source:
if ',' not in source:
source = unfrackpath(source, follow=False)
parse = self.parse_source(source, cache=cache)
if parse and not parsed:
parsed = True
if parsed:
# do post processing
self._inventory.reconcile_inventory()
else:
display.warning("No inventory was parsed, only implicit localhost is available")
self._inventory_plugins = []
def parse_source(self, source, cache=True):
''' Generate or update inventory for the source provided '''
parsed = False
display.debug(u'Examining possible inventory source: %s' % source)
b_source = to_bytes(source)
# process directories as a collection of inventories
if os.path.isdir(b_source):
display.debug(u'Searching for inventory files in directory: %s' % source)
for i in sorted(os.listdir(b_source)):
display.debug(u'Considering %s' % i)
# Skip hidden files and stuff we explicitly ignore
if IGNORED.search(i):
continue
# recursively deal with directory entries
fullpath = os.path.join(b_source, i)
parsed_this_one = self.parse_source(to_text(fullpath))
display.debug(u'parsed %s as %s' % (fullpath, parsed_this_one))
if not parsed:
parsed = parsed_this_one
else:
# left with strings or files, let plugins figure it out
            # set so new hosts can use for inventory_file/dir vars
self._inventory.current_source = source
# get inventory plugins if needed, there should always be at least one generator
if not self._inventory_plugins:
self._setup_inventory_plugins()
# try source with each plugin
failures = []
for plugin in self._inventory_plugins:
plugin_name = to_text(getattr(plugin, '_load_name', getattr(plugin, '_original_path', '')))
display.debug(u'Attempting to use plugin %s' % plugin_name)
# initialize
if plugin.verify_file(source):
try:
plugin.parse(self._inventory, self._loader, source, cache=cache)
parsed = True
display.vvv(u'Parsed %s inventory source with %s plugin' % (to_text(source), plugin_name))
break
except AnsibleParserError as e:
failures.append(u'\n* Failed to parse %s with %s inventory plugin: %s\n' % (to_text(source), plugin_name, to_text(e)))
else:
display.debug(u'%s did not meet %s requirements' % (to_text(source), plugin_name))
else:
if failures:
# only if no plugin processed files should we show errors.
for fail in failures:
display.warning(fail)
if not parsed:
display.warning(u"Unable to parse %s as an inventory source" % to_text(source))
# clear up, jic
self._inventory.current_source = None
return parsed
def clear_caches(self):
''' clear all caches '''
self._hosts_patterns_cache = {}
self._pattern_cache = {}
# FIXME: flush inventory cache
def refresh_inventory(self):
''' recalculate inventory '''
self.clear_caches()
self._inventory = InventoryData()
self.parse_sources(cache=False)
def _match(self, string, pattern_str):
try:
if pattern_str.startswith('~'):
return re.search(pattern_str[1:], string)
else:
return fnmatch.fnmatch(string, pattern_str)
except Exception as e:
raise AnsibleError('invalid host pattern (%s): %s' % (pattern_str, str(e)))
def _match_list(self, items, item_attr, pattern_str):
results = []
try:
if not pattern_str.startswith('~'):
pattern = re.compile(fnmatch.translate(pattern_str))
else:
pattern = re.compile(pattern_str[1:])
except Exception:
raise AnsibleError('invalid host list pattern: %s' % pattern_str)
for item in items:
if pattern.match(getattr(item, item_attr)):
results.append(item)
return results
def get_hosts(self, pattern="all", ignore_limits=False, ignore_restrictions=False, order=None):
"""
Takes a pattern or list of patterns and returns a list of matching
inventory host names, taking into account any active restrictions
or applied subsets
"""
# Check if pattern already computed
if isinstance(pattern, list):
pattern_hash = u":".join(pattern)
else:
pattern_hash = pattern
if not ignore_limits and self._subset:
pattern_hash += u":%s" % to_text(self._subset)
if not ignore_restrictions and self._restriction:
pattern_hash += u":%s" % to_text(self._restriction)
if pattern_hash not in self._hosts_patterns_cache:
patterns = split_host_pattern(pattern)
hosts = self._evaluate_patterns(patterns)
# mainly useful for hostvars[host] access
if not ignore_limits and self._subset:
# exclude hosts not in a subset, if defined
subset = self._evaluate_patterns(self._subset)
hosts = [h for h in hosts if h in subset]
if not ignore_restrictions and self._restriction:
# exclude hosts mentioned in any restriction (ex: failed hosts)
hosts = [h for h in hosts if h.name in self._restriction]
seen = set()
self._hosts_patterns_cache[pattern_hash] = [x for x in hosts if x not in seen and not seen.add(x)]
# sort hosts list if needed (should only happen when called from strategy)
if order in ['sorted', 'reverse_sorted']:
from operator import attrgetter
hosts = sorted(self._hosts_patterns_cache[pattern_hash][:], key=attrgetter('name'), reverse=(order == 'reverse_sorted'))
elif order == 'reverse_inventory':
hosts = sorted(self._hosts_patterns_cache[pattern_hash][:], reverse=True)
else:
hosts = self._hosts_patterns_cache[pattern_hash][:]
if order == 'shuffle':
from random import shuffle
shuffle(hosts)
elif order not in [None, 'inventory']:
            raise AnsibleOptionsError("Invalid 'order' specified for inventory hosts: %s" % order)
return hosts
def _evaluate_patterns(self, patterns):
"""
Takes a list of patterns and returns a list of matching host names,
taking into account any negative and intersection patterns.
"""
patterns = order_patterns(patterns)
hosts = []
for p in patterns:
# avoid resolving a pattern that is a plain host
if p in self._inventory.hosts:
hosts.append(self._inventory.get_host(p))
else:
that = self._match_one_pattern(p)
if p.startswith("!"):
hosts = [h for h in hosts if h not in that]
elif p.startswith("&"):
hosts = [h for h in hosts if h in that]
else:
to_append = [h for h in that if h.name not in [y.name for y in hosts]]
hosts.extend(to_append)
return hosts
def _match_one_pattern(self, pattern):
"""
Takes a single pattern and returns a list of matching host names.
Ignores intersection (&) and exclusion (!) specifiers.
The pattern may be:
1. A regex starting with ~, e.g. '~[abc]*'
2. A shell glob pattern with ?/*/[chars]/[!chars], e.g. 'foo*'
3. An ordinary word that matches itself only, e.g. 'foo'
The pattern is matched using the following rules:
1. If it's 'all', it matches all hosts in all groups.
2. Otherwise, for each known group name:
(a) if it matches the group name, the results include all hosts
in the group or any of its children.
(b) otherwise, if it matches any hosts in the group, the results
include the matching hosts.
This means that 'foo*' may match one or more groups (thus including all
hosts therein) but also hosts in other groups.
The built-in groups 'all' and 'ungrouped' are special. No pattern can
match these group names (though 'all' behaves as though it matches, as
described above). The word 'ungrouped' can match a host of that name,
and patterns like 'ungr*' and 'al*' can match either hosts or groups
other than all and ungrouped.
If the pattern matches one or more group names according to these rules,
it may have an optional range suffix to select a subset of the results.
This is allowed only if the pattern is not a regex, i.e. '~foo[1]' does
not work (the [1] is interpreted as part of the regex), but 'foo*[1]'
would work if 'foo*' matched the name of one or more groups.
Duplicate matches are always eliminated from the results.
"""
if pattern.startswith("&") or pattern.startswith("!"):
pattern = pattern[1:]
if pattern not in self._pattern_cache:
(expr, slice) = self._split_subscript(pattern)
hosts = self._enumerate_matches(expr)
try:
hosts = self._apply_subscript(hosts, slice)
except IndexError:
raise AnsibleError("No hosts matched the subscripted pattern '%s'" % pattern)
self._pattern_cache[pattern] = hosts
return self._pattern_cache[pattern]
def _split_subscript(self, pattern):
"""
Takes a pattern, checks if it has a subscript, and returns the pattern
without the subscript and a (start,end) tuple representing the given
subscript (or None if there is no subscript).
Validates that the subscript is in the right syntax, but doesn't make
sure the actual indices make sense in context.
"""
# Do not parse regexes for enumeration info
if pattern.startswith('~'):
return (pattern, None)
# We want a pattern followed by an integer or range subscript.
# (We can't be more restrictive about the expression because the
# fnmatch semantics permit [\[:\]] to occur.)
pattern_with_subscript = re.compile(
r'''^
(.+) # A pattern expression ending with...
\[(?: # A [subscript] expression comprising:
(-?[0-9]+)| # A single positive or negative number
([0-9]+)([:-]) # Or an x:y or x: range.
([0-9]*)
)\]
$
''', re.X
)
subscript = None
m = pattern_with_subscript.match(pattern)
if m:
(pattern, idx, start, sep, end) = m.groups()
if idx:
subscript = (int(idx), None)
else:
if not end:
end = -1
subscript = (int(start), int(end))
if sep == '-':
display.warning("Use [x:y] inclusive subscripts instead of [x-y] which has been removed")
return (pattern, subscript)
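    # Illustrative sketch (not part of the original module): what
    # _split_subscript returns for a few patterns.
    #
    #   self._split_subscript('web[0]')    # -> ('web', (0, None))
    #   self._split_subscript('web[1:3]')  # -> ('web', (1, 3))
    #   self._split_subscript('web[2:]')   # -> ('web', (2, -1))
    #   self._split_subscript('~web.*')    # -> ('~web.*', None)  (regexes are skipped)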
def _apply_subscript(self, hosts, subscript):
"""
Takes a list of hosts and a (start,end) tuple and returns the subset of
hosts based on the subscript (which may be None to return all hosts).
"""
if not hosts or not subscript:
return hosts
(start, end) = subscript
if end:
if end == -1:
end = len(hosts) - 1
return hosts[start:end + 1]
else:
return [hosts[start]]
def _enumerate_matches(self, pattern):
"""
Returns a list of host names matching the given pattern according to the
rules explained above in _match_one_pattern.
"""
results = []
def __append_host_to_results(host):
if host.name not in results:
if not host.implicit:
results.append(host)
matched = False
for group in self._inventory.groups.values():
if self._match(to_text(group.name), pattern):
matched = True
for host in group.get_hosts():
__append_host_to_results(host)
else:
matching_hosts = self._match_list(group.get_hosts(), 'name', pattern)
if matching_hosts:
matched = True
for host in matching_hosts:
__append_host_to_results(host)
if not results and pattern in C.LOCALHOST:
# get_host autocreates implicit when needed
implicit = self._inventory.get_host(pattern)
if implicit:
results.append(implicit)
matched = True
if not matched:
display.warning("Could not match supplied host pattern, ignoring: %s" % pattern)
return results
def list_hosts(self, pattern="all"):
""" return a list of hostnames for a pattern """
# FIXME: cache?
result = [h for h in self.get_hosts(pattern)]
# allow implicit localhost if pattern matches and no other results
if len(result) == 0 and pattern in C.LOCALHOST:
result = [pattern]
return result
def list_groups(self):
# FIXME: cache?
return sorted(self._inventory.groups.keys(), key=lambda x: x)
def restrict_to_hosts(self, restriction):
"""
Restrict list operations to the hosts given in restriction. This is used
to batch serial operations in main playbook code, don't use this for other
reasons.
"""
if restriction is None:
return
elif not isinstance(restriction, list):
restriction = [restriction]
self._restriction = [h.name for h in restriction]
def subset(self, subset_pattern):
"""
Limits inventory results to a subset of inventory that matches a given
        pattern, such as to select a given geographic or numeric slice amongst
        a previous 'hosts' selection that only selects roles, or vice versa.
Corresponds to --limit parameter to ansible-playbook
"""
if subset_pattern is None:
self._subset = None
else:
subset_patterns = split_host_pattern(subset_pattern)
results = []
# allow Unix style @filename data
for x in subset_patterns:
if x.startswith("@"):
fd = open(x[1:])
results.extend(fd.read().split("\n"))
fd.close()
else:
results.append(x)
self._subset = results
def remove_restriction(self):
""" Do not restrict list operations """
self._restriction = None
def clear_pattern_cache(self):
self._pattern_cache = {}
|
agrista/odoo-saas | refs/heads/master | addons/l10n_fr_rib/bank.py | 335 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 Numérigraphe SARL.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class res_partner_bank(osv.osv):
"""Add fields and behavior for French RIB"""
_inherit = "res.partner.bank"
def _check_key(self, cr, uid, ids):
"""Check the RIB key"""
for bank_acc in self.browse(cr, uid, ids):
# Ignore the accounts of type other than rib
if bank_acc.state != 'rib':
continue
            # Fail if the needed values are empty or too short
if (not bank_acc.bank_code
or len(bank_acc.bank_code) != 5
or not bank_acc.office or len(bank_acc.office) != 5
or not bank_acc.rib_acc_number or len(bank_acc.rib_acc_number) != 11
or not bank_acc.key or len(bank_acc.key) != 2):
return False
# Get the rib data (without the key)
rib = "%s%s%s" % (bank_acc.bank_code, bank_acc.office, bank_acc.rib_acc_number)
# Translate letters into numbers according to a specific table
# (notice how s -> 2)
table = dict((ord(a), b) for a, b in zip(
u'abcdefghijklmnopqrstuvwxyz', u'12345678912345678923456789'))
rib = rib.lower().translate(table)
# compute the key
key = 97 - (100 * int(rib)) % 97
if int(bank_acc.key) != key:
raise osv.except_osv(_('Error!'),
_("The RIB key %s does not correspond to the other codes: %s %s %s.") % \
(bank_acc.key, bank_acc.bank_code, bank_acc.office, bank_acc.rib_acc_number) )
if bank_acc.acc_number:
if not self.is_iban_valid(cr, uid, bank_acc.acc_number):
raise osv.except_osv(_('Error!'), _("The IBAN %s is not valid.") % bank_acc.acc_number)
return True
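    # Worked example (illustrative, with made-up codes): for bank_code "30001",
    # office "00794" and rib_acc_number "12345678901" the RIB string is
    # "300010079412345678901"; any letters are first mapped to digits with the
    # table above, and the stored key must satisfy:
    #
    #   rib = "300010079412345678901"
    #   key = 97 - (100 * int(rib)) % 97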
def onchange_bank_id(self, cr, uid, ids, bank_id, context=None):
"""Change the bank code"""
result = super(res_partner_bank, self).onchange_bank_id(cr, uid, ids, bank_id,
context=context)
if bank_id:
value = result.setdefault('value', {})
bank = self.pool.get('res.bank').browse(cr, uid, bank_id,
context=context)
value['bank_code'] = bank.rib_code
return result
_columns = {
'acc_number': fields.char('Account Number', size=64, required=False),
'rib_acc_number': fields.char('RIB account number', size=11, readonly=True,),
'bank_code': fields.char('Bank Code', size=64, readonly=True,),
'office': fields.char('Office Code', size=5, readonly=True,),
'key': fields.char('Key', size=2, readonly=True,
help="The key is a number allowing to check the "
"correctness of the other codes."),
}
_constraints = [(_check_key, 'The RIB and/or IBAN is not valid', ['rib_acc_number', 'bank_code', 'office', 'key'])]
class res_bank(osv.osv):
"""Add the bank code to make it easier to enter RIB data"""
_inherit = 'res.bank'
def name_search(self, cr, user, name, args=None, operator='ilike',
context=None, limit=80):
"""Search by bank code in addition to the standard search"""
# Get the standard results
results = super(res_bank, self).name_search(cr, user,
name, args=args ,operator=operator, context=context, limit=limit)
# Get additional results using the RIB code
ids = self.search(cr, user, [('rib_code', operator, name)],
limit=limit, context=context)
# Merge the results
results = list(set(results + self.name_get(cr, user, ids, context)))
return results
_columns = {
'rib_code': fields.char('RIB Bank Code'),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
nolram/news_crawler | refs/heads/master | classificador/__init__.py | 6 | __author__ = 'nolram'
|
douggeiger/gnuradio | refs/heads/master | gr-filter/python/filter/qa_pm_remez.py | 57 | #!/usr/bin/env python
#
# Copyright 2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest, filter
import sys, math
# ----------------------------------------------------------------
# See optfir for an explanation of these.
def stopband_atten_to_dev (atten_db):
"""Convert a stopband attenuation in dB to an absolute value"""
return 10**(-atten_db/20)
def passband_ripple_to_dev (ripple_db):
"""Convert passband ripple spec expressed in dB to an absolute value"""
return (10**(ripple_db/20)-1)/(10**(ripple_db/20)+1)
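# Rough worked example (illustrative values): a 60 dB stopband attenuation maps to
# stopband_atten_to_dev(60) == 10**(-3) == 0.001, and a 0.01 dB passband ripple maps
# to passband_ripple_to_dev(0.01) ~= 5.76e-4; these absolute deviations are what gets
# fed into remezord below.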
# ----------------------------------------------------------------
def remezord (fcuts, mags, devs, fsamp = 2):
'''
FIR order estimator (lowpass, highpass, bandpass, multiband).
'''
# get local copies
fcuts = fcuts[:]
mags = mags[:]
devs = devs[:]
for i in range (len (fcuts)):
fcuts[i] = float (fcuts[i]) / fsamp
nf = len (fcuts)
nm = len (mags)
nd = len (devs)
nbands = nm
if nm != nd:
raise ValueError("Length of mags and devs must be equal")
if nf != 2 * (nbands - 1):
raise ValueError("Length of f must be 2 * len (mags) - 2")
for i in range (len (mags)):
if mags[i] != 0: # if not stopband, get relative deviation
devs[i] = devs[i] / mags[i]
# separate the passband and stopband edges
f1 = fcuts[0::2]
f2 = fcuts[1::2]
n = 0
min_delta = 2
for i in range (len (f1)):
if f2[i] - f1[i] < min_delta:
n = i
min_delta = f2[i] - f1[i]
if nbands == 2:
# lowpass or highpass case (use formula)
l = lporder (f1[n], f2[n], devs[0], devs[1])
else:
# bandpass or multiband case
# try different lowpasses and take the worst one that
# goes through the BP specs
l = 0
for i in range (1, nbands-1):
l1 = lporder (f1[i-1], f2[i-1], devs[i], devs[i-1])
l2 = lporder (f1[i], f2[i], devs[i], devs[i+1])
l = max (l, l1, l2)
n = int (math.ceil (l)) - 1 # need order, not length for remez
# cook up remez compatible result
ff = [0] + fcuts + [1]
for i in range (1, len (ff) - 1):
ff[i] *= 2
aa = []
for a in mags:
aa = aa + [a, a]
max_dev = max (devs)
wts = [1] * len(devs)
for i in range (len (wts)):
wts[i] = max_dev / devs[i]
return (n, ff, aa, wts)
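# Usage sketch (illustrative, mirroring the lowpass test further below): the returned
# tuple is (order, band_edges, desired_amplitudes, weights), ready to hand to
# filter.pm_remez, e.g.
# (n, fo, ao, w) = remezord([0.1, 0.2], (1, 0), [pass_dev, stop_dev], 1)
# taps = filter.pm_remez(n + 2, fo, ao, w, "bandpass")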
def lporder (freq1, freq2, delta_p, delta_s):
'''
FIR lowpass filter length estimator.
'''
df = abs (freq2 - freq1)
ddp = math.log10 (delta_p)
dds = math.log10 (delta_s)
a1 = 5.309e-3
a2 = 7.114e-2
a3 = -4.761e-1
a4 = -2.66e-3
a5 = -5.941e-1
a6 = -4.278e-1
b1 = 11.01217
b2 = 0.5124401
t1 = a1 * ddp * ddp
t2 = a2 * ddp
t3 = a4 * ddp * ddp
t4 = a5 * ddp
dinf=((t1 + t2 + a3) * dds) + (t3 + t4 + a6)
ff = b1 + b2 * (ddp - dds)
n = dinf / df - ff * df + 1
return n
# ----------------------------------------------------------------
class test_pm_remez(gr_unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_low_pass(self):
gain = 1
Fs = 1
freq1 = 0.1
freq2 = 0.2
passband_ripple_db = 0.01
stopband_atten_db = 60
passband_dev = passband_ripple_to_dev(passband_ripple_db)
stopband_dev = stopband_atten_to_dev(stopband_atten_db)
desired_ampls = (gain, 0)
(n, fo, ao, w) = remezord([freq1, freq2], desired_ampls,
[passband_dev, stopband_dev], Fs)
new_taps = filter.pm_remez(n + 2, fo, ao, w, "bandpass")
known_taps = (-0.0008370135734511828, -0.0006622211673134374,
0.0008501079576365787, 0.003059609130249229,
0.003202235537205373, -0.001000899296974219,
-0.007589728680590891, -0.009790921118281865,
-0.001524210202628562, 0.014373535837200111,
0.02392881326993834, 0.011798133085019008,
-0.021954446348997188, -0.05293436740264934,
-0.04375787096766848, 0.028038890498420392,
0.14612655590172896, 0.25738578419108626,
0.302967004188747, 0.25738578419108626,
0.14612655590172896, 0.028038890498420392,
-0.04375787096766848, -0.05293436740264934,
-0.021954446348997188, 0.011798133085019008,
0.02392881326993834, 0.014373535837200111,
-0.001524210202628562, -0.009790921118281865,
-0.007589728680590891, -0.001000899296974219,
0.003202235537205373, 0.003059609130249229,
0.0008501079576365787, -0.0006622211673134374,
-0.0008370135734511828)
self.assertFloatTuplesAlmostEqual(known_taps, new_taps, 5)
if __name__ == '__main__':
gr_unittest.run(test_pm_remez, "test_pm_remez.xml")
|
sxjscience/mxnet | refs/heads/master | python/mxnet/_ffi/base.py | 9 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=invalid-name
"""Base library for MXNet FFI.
Acknowledgement: This file originates from incubator-tvm
"""
import sys
import ctypes
import numpy as np
string_types = (str,)
integer_types = (int, np.int32)
numeric_types = integer_types + (float, np.float32)
# this function is needed for python3
# to convert ctypes.c_char_p.value back to python str
if sys.platform == "win32":
encoding = 'cp' + str(ctypes.cdll.kernel32.GetACP())
py_str = lambda x: x.decode(encoding)
else:
py_str = lambda x: x.decode('utf-8')
#----------------------------
# helper function in ctypes.
#----------------------------
def c_str(string):
"""Create ctypes char * from a python string
Parameters
----------
string : string type
python string
Returns
-------
str : c_char_p
A char pointer that can be passed to C API
"""
return ctypes.c_char_p(string.encode('utf-8'))
def c_array(ctype, values):
"""Create ctypes array from a python array
Parameters
----------
ctype : ctypes data type
data type of the array we want to convert to
values : tuple or list
data content
Returns
-------
out : ctypes array
Created ctypes array
"""
return (ctype * len(values))(*values)
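# Minimal usage sketch (illustrative only):
# buf = c_str("hello")                     # ctypes.c_char_p wrapping b"hello"
# arr = c_array(ctypes.c_int, [1, 2, 3])   # ctypes array holding three c_int values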
|
ldjebran/robottelo | refs/heads/master | tests/foreman/api/test_reporttemplates.py | 2 | # -*- encoding: utf-8 -*-
"""Unit tests for the ``report_templates`` paths.
:Requirement: Report templates
:CaseAutomation: Automated
:CaseLevel: Component
:CaseComponent: Reporting
:TestType: Functional
:CaseImportance: High
:Upstream: No
"""
from robottelo.decorators import tier1, tier2, stubbed
from robottelo.datafactory import valid_data_list
from robottelo.helpers import is_open
from robottelo.test import APITestCase
from fauxfactory import gen_string
from nailgun import entities
from requests import HTTPError
class ReportTemplateTestCase(APITestCase):
"""Tests for ``katello/api/v2/report_templates``."""
@tier1
def test_positive_CRUDL(self):
"""Create, Read, Update, Delete, List
:id: a2a577db-144e-4761-a42e-e83885464786
:setup: User with reporting access rights
:steps:
1. Create Report Template
2. List Report Templates, verify it's there
3. Read Report Template
4. Update Report Template, read again
5. Delete Report template, check it's not present
:expectedresults: All operations succeed, no template present in the end
:CaseImportance: Critical
"""
# Create
rt = None
name = None
template1 = gen_string('alpha')
for name in valid_data_list():
rt = entities.ReportTemplate(name=name, template=template1).create()
# List
res = entities.ReportTemplate().search(query={'search': u'name="{}"'.format(name)})
self.assertIn(name, list(map(lambda x: x.name, res)))
# Read
rt = entities.ReportTemplate(id=rt.id).read()
self.assertEqual(name, rt.name)
self.assertEqual(template1, rt.template)
# Update
template2 = gen_string('alpha')
entities.ReportTemplate(id=rt.id, template=template2).update(['template'])
rt = entities.ReportTemplate(id=rt.id).read()
self.assertEqual(template2, rt.template)
# Delete
entities.ReportTemplate(id=rt.id).delete()
with self.assertRaises(HTTPError):
rt = entities.ReportTemplate(id=rt.id).read()
@tier1
def test_positive_generate_report_nofilter(self):
"""Generate Host Status report
:id: a4b687db-144e-4761-a42e-e93887464986
:setup: User with reporting access rights, some report template, at least two hosts
:steps:
1. POST /api/report_templates/:id/generate
:expectedresults: Report is generated for all hosts visible to user
:CaseImportance: Critical
"""
host_name = gen_string('alpha').lower()
entities.Host(name=host_name).create()
rt = entities.ReportTemplate().search(query={'search': u'name="Host statuses"'})[0].read()
res = rt.generate()
self.assertIn("Service Level", res)
self.assertIn(host_name, res)
@tier2
def test_positive_generate_report_filter(self):
"""Generate Host Status report
:id: a4b677cb-144e-4761-a42e-e93887464986
:setup: User with reporting access rights, some report template, at least two hosts
:steps:
1. POST /api/report_templates/:id/generate ... # define input_values
:expectedresults: Report is generated (only) for the host specified by the filter
:CaseImportance: High
"""
host1_name = gen_string('alpha').lower()
host2_name = gen_string('alpha').lower()
entities.Host(name=host1_name).create()
entities.Host(name=host2_name).create()
rt = entities.ReportTemplate().search(query={'search': u'name="Host statuses"'})[0].read()
res = rt.generate(data={"input_values": {"hosts": host2_name}})
self.assertIn("Service Level", res)
self.assertNotIn(host1_name, res)
self.assertIn(host2_name, res)
@tier2
def test_positive_report_add_userinput(self):
"""Add user input to template, use it in template, generate template
:id: a4a577db-144e-4761-a42e-e86887464986
:setup: User with reporting access rights
:steps:
1. PUT /api/templates/:template_id/template_inputs/:id ... # add user input
:expectedresults: User input is assigned to the report template and used in template
:CaseImportance: High
"""
host_name = gen_string('alpha').lower()
input_name = gen_string('alpha').lower()
input_value = gen_string('alpha').lower()
template_name = gen_string('alpha').lower()
template = '<%= "value=\\"" %><%= input(\'{0}\') %><%= "\\"" %>'.format(input_name)
entities.Host(name=host_name).create()
rt = entities.ReportTemplate(name=template_name, template=template).create()
entities.TemplateInput(name=input_name,
input_type="user",
template=rt.id,
).create()
ti = entities.TemplateInput(template=rt.id).search()[0].read()
self.assertEquals(input_name, ti.name)
res = rt.generate(data={"input_values": {input_name: input_value}})
self.assertEquals('value="{}"'.format(input_value), res)
@tier2
def test_positive_lock_clone_nodelete_unlock_report(self):
"""Lock report template. Check it can be cloned and can't be deleted or edited.
Unlock. Check it can be deleted and edited.
:id: a4c577db-144e-4761-a42e-e83887464986
:setup: User with reporting access rights, some report template that is not locked
:steps:
1. Create template
2. Lock template
3. Clone template, check cloned data
4. Try to delete template
5. Try to edit template
6. Unlock template
7. Edit template
8. Delete template
:expectedresults: Report is locked
:CaseImportance: High
:BZ: 1680458
"""
# 1. Create template
template_name = gen_string('alpha').lower()
template_clone_name = gen_string('alpha').lower()
template1 = gen_string('alpha')
template2 = gen_string('alpha')
rt = entities.ReportTemplate(name=template_name, template=template1).create()
# 2. Lock template
entities.ReportTemplate(id=rt.id, locked=True).update(["locked"])
rt = rt.read()
self.assertTrue(rt.locked)
# 3. Clone template, check cloned data
rt.clone(data={'name': template_clone_name})
cloned_rt = entities.ReportTemplate().search(
query={'search': u'name="{}"'.format(template_clone_name)})[0].read()
self.assertEquals(template_clone_name, cloned_rt.name)
self.assertEquals(template1, cloned_rt.template)
# 4. Try to delete template
if not is_open('BZ:1680458'):
with self.assertRaises(HTTPError):
rt.delete()
# In BZ1680458, exception is thrown but template is deleted anyway
self.assertNotEquals(0, len(entities.ReportTemplate().search(
query={'search': u'name="{}"'.format(template_name)})))
# 5. Try to edit template
with self.assertRaises(HTTPError):
entities.ReportTemplate(id=rt.id, template=template2).update(["template"])
rt = rt.read()
self.assertEquals(template1, rt.template)
# 6. Unlock template
entities.ReportTemplate(id=rt.id, locked=False).update(["locked"])
rt = rt.read()
self.assertFalse(rt.locked)
# 7. Edit template
entities.ReportTemplate(id=rt.id, template=template2).update(["template"])
rt = rt.read()
self.assertEquals(template2, rt.template)
# 8. Delete template
rt.delete()
self.assertEquals(0, len(entities.ReportTemplate().search(
query={'search': u'name="{}"'.format(template_name)})))
@tier2
@stubbed()
def test_positive_export_report(self):
"""Export report template
:id: a4b577db-144e-4761-a42e-a83887464986
:setup: User with reporting access rights, some report template
:steps:
1. /api/report_templates/:id/export
:expectedresults: Report script is shown
:CaseImportance: High
"""
@tier2
@stubbed()
def test_positive_generate_report_sanitized(self):
"""Generate report template where there are values in comma outputted which might brake CSV format
:id: a4b577db-144e-4961-a42e-e93887464986
:setup: User with reporting access rights, Host Statuses report,
a host with OS that has comma in its name
:steps:
1. POST /api/report_templates/:id/generate
:expectedresults: Report is generated in proper CSV format (value with comma is quoted)
:CaseImportance: Medium
"""
@tier2
@stubbed()
def test_negative_create_report_without_name(self):
"""Try to create a report template with empty name
:id: a4b577db-144e-4771-a42e-e93887464986
:setup: User with reporting access rights
:steps:
1. POST /api/report_templates
:expectedresults: Report is not created
:CaseImportance: Medium
"""
@tier2
@stubbed()
def test_positive_applied_errata(self):
"""Generate an Applied Errata report
:id: a4b577db-141e-4871-a42e-e93887464986
:setup: User with reporting access rights, some host with applied errata
:steps:
1. POST /api/report_templates/:id/generate
:expectedresults: A report is generated with all applied errata listed
:CaseImportance: Medium
"""
@tier2
@stubbed()
def test_positive_generate_nonblocking(self):
"""Generate an Applied Errata report
:id: a4b577db-142e-4871-a42e-e93887464986
:setup: User with reporting access rights, some host with applied errata
:steps:
1. POST /api/report_templates/:id/schedule_report
2. GET /api/report_templates/:id/report_data/:job_id
:expectedresults: A report is generated asynchronously
:CaseImportance: Medium
"""
@tier2
@stubbed()
def test_positive_generate_email_compressed(self):
"""Generate an Applied Errata report, get it by e-mail, compressed
:id: a4b577db-143e-4871-a42e-e93887464986
:setup: User with reporting access rights, some host with applied errata
:steps:
1. POST /api/report_templates/:id/schedule_report
:expectedresults: A report is generated asynchronously, the result
is compressed and mailed to the specified address
:CaseImportance: Medium
"""
@tier2
@stubbed()
def test_positive_generate_email_uncompressed(self):
"""Generate an Applied Errata report, get it by e-mail, uncompressed
:id: a4b577db-143f-4871-a42e-e93887464986
:setup: User with reporting access rights, some host with applied errata
:steps:
1. POST /api/report_templates/:id/schedule_report
:expectedresults: A report is generated asynchronously, the result
is not compressed and is mailed
to the specified address
:CaseImportance: Medium
"""
@tier2
@stubbed()
def test_negative_bad_email(self):
""" Report can't be generated when incorrectly formed mail specified
:id: a4b577db-164e-4871-a42e-e93887464986
:setup: User with reporting access rights, some host with applied errata
:steps:
1. POST /api/report_templates/:id/schedule_report
:expectedresults: Error message about wrong e-mail address, no task is triggered
:CaseImportance: Medium
"""
@tier2
@stubbed()
def test_positive_cleanup_task_running(self):
""" Report can't be generated when incorrectly formed mail specified
:id: a4b577db-145e-4871-a42e-e93887464986
:setup: Installed Satellite, user that can list running tasks
:steps:
1. List running tasks
:expectedresults: Report cleanup task is running
:CaseImportance: Medium
"""
@tier2
@stubbed()
def test_negative_nonauthor_of_report_cant_download_it(self):
"""The resulting report should only be downloadable by
the user that generated it or by an admin.
:id: a4b577db-146e-4871-a42e-e93887464986
:setup: Installed Satellite, user that can list running tasks
:steps:
1. POST /api/report_templates/:id/schedule_report
2. GET /api/report_templates/:id/report_data/:job_id (as a different non-admin user)
:expectedresults: Report can't be downloaded. Error.
:CaseImportance: Medium
"""
|
patrickstocklin/chattR | refs/heads/master | lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/langthaimodel.py | 2929 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually do not exist in any text
# 254: Carriage/Return
# 253: symbols (punctuation) that do not belong to a word
# 252: 0 - 9
# The following result for Thai was collected from a limited sample (1M).
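# For example (added note, derived from the table below): byte 0x30 ('0') maps to
# order 252 (digit), 0x20 (space) maps to 253 (symbol), CR/LF map to 254 and most
# other control bytes map to 255, so letters rather than punctuation drive the
# sequence statistics.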
# Character Mapping Table:
TIS620CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,182,106,107,100,183,184,185,101, 94,186,187,108,109,110,111, # 40
188,189,190, 89, 95,112,113,191,192,193,194,253,253,253,253,253, # 50
253, 64, 72, 73,114, 74,115,116,102, 81,201,117, 90,103, 78, 82, # 60
96,202, 91, 79, 84,104,105, 97, 98, 92,203,253,253,253,253,253, # 70
209,210,211,212,213, 88,214,215,216,217,218,219,220,118,221,222,
223,224, 99, 85, 83,225,226,227,228,229,230,231,232,233,234,235,
236, 5, 30,237, 24,238, 75, 8, 26, 52, 34, 51,119, 47, 58, 57,
49, 53, 55, 43, 20, 19, 44, 14, 48, 3, 17, 25, 39, 62, 31, 54,
45, 9, 16, 2, 61, 15,239, 12, 42, 46, 18, 21, 76, 4, 66, 63,
22, 10, 1, 36, 23, 13, 40, 27, 32, 35, 86,240,241,242,243,244,
11, 28, 41, 29, 33,245, 50, 37, 6, 7, 67, 77, 38, 93,246,247,
68, 56, 59, 65, 69, 60, 70, 80, 71, 87,248,249,250,251,252,253,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 92.6386%
# first 1024 sequences: 7.3177%
# rest sequences: 1.0230%
# negative sequences: 0.0436%
ThaiLangModel = (
0,1,3,3,3,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,0,0,3,3,3,0,3,3,3,3,
0,3,3,0,0,0,1,3,0,3,3,2,3,3,0,1,2,3,3,3,3,0,2,0,2,0,0,3,2,1,2,2,
3,0,3,3,2,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,0,3,2,3,0,2,2,2,3,
0,2,3,0,0,0,0,1,0,1,2,3,1,1,3,2,2,0,1,1,0,0,1,0,0,0,0,0,0,0,1,1,
3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,3,3,2,3,2,3,3,2,2,2,
3,1,2,3,0,3,3,2,2,1,2,3,3,1,2,0,1,3,0,1,0,0,1,0,0,0,0,0,0,0,1,1,
3,3,2,2,3,3,3,3,1,2,3,3,3,3,3,2,2,2,2,3,3,2,2,3,3,2,2,3,2,3,2,2,
3,3,1,2,3,1,2,2,3,3,1,0,2,1,0,0,3,1,2,1,0,0,1,0,0,0,0,0,0,1,0,1,
3,3,3,3,3,3,2,2,3,3,3,3,2,3,2,2,3,3,2,2,3,2,2,2,2,1,1,3,1,2,1,1,
3,2,1,0,2,1,0,1,0,1,1,0,1,1,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,3,2,3,2,3,3,2,2,3,2,3,3,2,3,1,1,2,3,2,2,2,3,2,2,2,2,2,1,2,1,
2,2,1,1,3,3,2,1,0,1,2,2,0,1,3,0,0,0,1,1,0,0,0,0,0,2,3,0,0,2,1,1,
3,3,2,3,3,2,0,0,3,3,0,3,3,0,2,2,3,1,2,2,1,1,1,0,2,2,2,0,2,2,1,1,
0,2,1,0,2,0,0,2,0,1,0,0,1,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,2,3,3,2,0,0,3,3,0,2,3,0,2,1,2,2,2,2,1,2,0,0,2,2,2,0,2,2,1,1,
0,2,1,0,2,0,0,2,0,1,1,0,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,2,3,2,3,2,0,2,2,1,3,2,1,3,2,1,2,3,2,2,3,0,2,3,2,2,1,2,2,2,2,
1,2,2,0,0,0,0,2,0,1,2,0,1,1,1,0,1,0,3,1,1,0,0,0,0,0,0,0,0,0,1,0,
3,3,2,3,3,2,3,2,2,2,3,2,2,3,2,2,1,2,3,2,2,3,1,3,2,2,2,3,2,2,2,3,
3,2,1,3,0,1,1,1,0,2,1,1,1,1,1,0,1,0,1,1,0,0,0,0,0,0,0,0,0,2,0,0,
1,0,0,3,0,3,3,3,3,3,0,0,3,0,2,2,3,3,3,3,3,0,0,0,1,1,3,0,0,0,0,2,
0,0,1,0,0,0,0,0,0,0,2,3,0,0,0,3,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
2,0,3,3,3,3,0,0,2,3,0,0,3,0,3,3,2,3,3,3,3,3,0,0,3,3,3,0,0,0,3,3,
0,0,3,0,0,0,0,2,0,0,2,1,1,3,0,0,1,0,0,2,3,0,1,0,0,0,0,0,0,0,1,0,
3,3,3,3,2,3,3,3,3,3,3,3,1,2,1,3,3,2,2,1,2,2,2,3,1,1,2,0,2,1,2,1,
2,2,1,0,0,0,1,1,0,1,0,1,1,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,
3,0,2,1,2,3,3,3,0,2,0,2,2,0,2,1,3,2,2,1,2,1,0,0,2,2,1,0,2,1,2,2,
0,1,1,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,3,3,1,1,3,0,2,3,1,1,3,2,1,1,2,0,2,2,3,2,1,1,1,1,1,2,
3,0,0,1,3,1,2,1,2,0,3,0,0,0,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,
3,3,1,1,3,2,3,3,3,1,3,2,1,3,2,1,3,2,2,2,2,1,3,3,1,2,1,3,1,2,3,0,
2,1,1,3,2,2,2,1,2,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,
3,3,2,3,2,3,3,2,3,2,3,2,3,3,2,1,0,3,2,2,2,1,2,2,2,1,2,2,1,2,1,1,
2,2,2,3,0,1,3,1,1,1,1,0,1,1,0,2,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,3,2,2,1,1,3,2,3,2,3,2,0,3,2,2,1,2,0,2,2,2,1,2,2,2,2,1,
3,2,1,2,2,1,0,2,0,1,0,0,1,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,2,3,1,2,3,3,2,2,3,0,1,1,2,0,3,3,2,2,3,0,1,1,3,0,0,0,0,
3,1,0,3,3,0,2,0,2,1,0,0,3,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,2,3,2,3,3,0,1,3,1,1,2,1,2,1,1,3,1,1,0,2,3,1,1,1,1,1,1,1,1,
3,1,1,2,2,2,2,1,1,1,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,2,2,1,1,2,1,3,3,2,3,2,2,3,2,2,3,1,2,2,1,2,0,3,2,1,2,2,2,2,2,1,
3,2,1,2,2,2,1,1,1,1,0,0,1,1,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,1,3,3,0,2,1,0,3,2,0,0,3,1,0,1,1,0,1,0,0,0,0,0,1,
1,0,0,1,0,3,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,2,2,2,3,0,0,1,3,0,3,2,0,3,2,2,3,3,3,3,3,1,0,2,2,2,0,2,2,1,2,
0,2,3,0,0,0,0,1,0,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,0,2,3,1,3,3,2,3,3,0,3,3,0,3,2,2,3,2,3,3,3,0,0,2,2,3,0,1,1,1,3,
0,0,3,0,0,0,2,2,0,1,3,0,1,2,2,2,3,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,
3,2,3,3,2,0,3,3,2,2,3,1,3,2,1,3,2,0,1,2,2,0,2,3,2,1,0,3,0,0,0,0,
3,0,0,2,3,1,3,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,3,2,2,2,1,2,0,1,3,1,1,3,1,3,0,0,2,1,1,1,1,2,1,1,1,0,2,1,0,1,
1,2,0,0,0,3,1,1,0,0,0,0,1,0,1,0,0,1,0,1,0,0,0,0,0,3,1,0,0,0,1,0,
3,3,3,3,2,2,2,2,2,1,3,1,1,1,2,0,1,1,2,1,2,1,3,2,0,0,3,1,1,1,1,1,
3,1,0,2,3,0,0,0,3,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,2,3,0,3,3,0,2,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,3,1,3,0,0,1,2,0,0,2,0,3,3,2,3,3,3,2,3,0,0,2,2,2,0,0,0,2,2,
0,0,1,0,0,0,0,3,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
0,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,1,2,3,1,3,3,0,0,1,0,3,0,0,0,0,0,
0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,1,2,3,1,2,3,1,0,3,0,2,2,1,0,2,1,1,2,0,1,0,0,1,1,1,1,0,1,0,0,
1,0,0,0,0,1,1,0,3,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,0,1,1,1,3,1,2,2,2,2,2,2,1,1,1,1,0,3,1,0,1,3,1,1,1,1,
1,1,0,2,0,1,3,1,1,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,1,
3,0,2,2,1,3,3,2,3,3,0,1,1,0,2,2,1,2,1,3,3,1,0,0,3,2,0,0,0,0,2,1,
0,1,0,0,0,0,1,2,0,1,1,3,1,1,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,3,0,0,1,0,0,0,3,0,0,3,0,3,1,0,1,1,1,3,2,0,0,0,3,0,0,0,0,2,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,1,3,2,1,3,3,1,2,2,0,1,2,1,0,1,2,0,0,0,0,0,3,0,0,0,3,0,0,0,0,
3,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,2,0,3,3,3,2,2,0,1,1,0,1,3,0,0,0,2,2,0,0,0,0,3,1,0,1,0,0,0,
0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,2,3,1,2,0,0,2,1,0,3,1,0,1,2,0,1,1,1,1,3,0,0,3,1,1,0,2,2,1,1,
0,2,0,0,0,0,0,1,0,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,3,1,2,0,0,2,2,0,1,2,0,1,0,1,3,1,2,1,0,0,0,2,0,3,0,0,0,1,0,
0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,1,2,2,0,0,0,2,0,2,1,0,1,1,0,1,1,1,2,1,0,0,1,1,1,0,2,1,1,1,
0,1,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,1,
0,0,0,2,0,1,3,1,1,1,1,0,0,0,0,3,2,0,1,0,0,0,1,2,0,0,0,1,0,0,0,0,
0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,3,3,3,3,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,2,3,2,2,0,0,0,1,0,0,0,0,2,3,2,1,2,2,3,0,0,0,2,3,1,0,0,0,1,1,
0,0,1,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0,
3,3,2,2,0,1,0,0,0,0,2,0,2,0,1,0,0,0,1,1,0,0,0,2,1,0,1,0,1,1,0,0,
0,1,0,2,0,0,1,0,3,0,1,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,1,0,0,1,0,0,0,0,0,1,1,2,0,0,0,0,1,0,0,1,3,1,0,0,0,0,1,1,0,0,
0,1,0,0,0,0,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,
3,3,1,1,1,1,2,3,0,0,2,1,1,1,1,1,0,2,1,1,0,0,0,2,1,0,1,2,1,1,0,1,
2,1,0,3,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,3,1,0,0,0,0,0,0,0,3,0,0,0,3,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,
0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,2,0,0,0,0,0,0,1,2,1,0,1,1,0,2,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,2,0,0,0,1,3,0,1,0,0,0,2,0,0,0,0,0,0,0,1,2,0,0,0,0,0,
3,3,0,0,1,1,2,0,0,1,2,1,0,1,1,1,0,1,1,0,0,2,1,1,0,1,0,0,1,1,1,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,1,0,0,0,0,1,0,0,0,0,3,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,0,0,1,1,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,0,1,2,0,1,2,0,0,1,1,0,2,0,1,0,0,1,0,0,0,0,1,0,0,0,2,0,0,0,0,
1,0,0,1,0,1,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,0,0,0,0,0,0,0,1,1,0,1,1,0,2,1,3,0,0,0,0,1,1,0,0,0,0,0,0,0,3,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,1,0,0,2,0,0,2,0,0,1,1,2,0,0,1,1,0,0,0,1,0,0,0,1,1,0,0,0,
1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,1,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,3,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,
1,0,0,0,0,0,0,0,0,1,0,0,0,0,2,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,1,0,0,2,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
)
TIS620ThaiModel = {
'charToOrderMap': TIS620CharToOrderMap,
'precedenceMatrix': ThaiLangModel,
'mTypicalPositiveRatio': 0.926386,
'keepEnglishLetter': False,
'charsetName': "TIS-620"
}
# flake8: noqa
|
minhphung171093/GreenERP_V9 | refs/heads/master | openerp/addons/website_event/tests/test_ui.py | 50 | import openerp.tests
@openerp.tests.common.at_install(False)
@openerp.tests.common.post_install(True)
class TestUi(openerp.tests.HttpCase):
def test_admin(self):
self.phantom_js("/", "odoo.__DEBUG__.services['web.Tour'].run('event', 'test')", "odoo.__DEBUG__.services['web.Tour'].tours.event", login='admin')
|
openstack/murano | refs/heads/master | murano/policy/__init__.py | 12133432 | |
asdofindia/kitsune | refs/heads/master | kitsune/__init__.py | 12133432 | |
richbrowne/f5-openstack-agent | refs/heads/master | f5_openstack_agent/utils/__init__.py | 12133432 | |
mantidproject/mantid | refs/heads/master | scripts/Diffraction/isis_powder/pearl.py | 2 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
from contextlib import contextmanager
import mantid.simpleapi as mantid
from mantid.kernel import logger
from isis_powder.routines import common, instrument_settings
from isis_powder.abstract_inst import AbstractInst
from isis_powder.pearl_routines import pearl_advanced_config, pearl_algs, pearl_calibration_algs, pearl_output, \
pearl_param_mapping
import copy
class Pearl(AbstractInst):
def __init__(self, **kwargs):
self._inst_settings = instrument_settings.InstrumentSettings(
param_map=pearl_param_mapping.attr_mapping,
adv_conf_dict=pearl_advanced_config.get_all_adv_variables(),
kwargs=kwargs)
self._default_inst_settings = copy.deepcopy(self._inst_settings)
super(Pearl, self).__init__(user_name=self._inst_settings.user_name,
calibration_dir=self._inst_settings.calibration_dir,
output_dir=self._inst_settings.output_dir,
inst_prefix="PEARL")
self._cached_run_details = {}
def focus(self, **kwargs):
with self._apply_temporary_inst_settings(kwargs, kwargs.get("run_number")):
if self._inst_settings.perform_atten:
if not hasattr(self._inst_settings, 'attenuation_file'):
raise RuntimeError("Attenuation cannot be applied because attenuation_file not specified")
return self._focus(run_number_string=self._inst_settings.run_number,
do_absorb_corrections=self._inst_settings.absorb_corrections,
do_van_normalisation=self._inst_settings.van_norm)
def create_vanadium(self, **kwargs):
kwargs["perform_attenuation"] = None  # Hard code this off as we do not need an attenuation file
with self._apply_temporary_inst_settings(kwargs, kwargs.get("run_in_cycle")):
if str(self._inst_settings.tt_mode).lower() == "all":
for new_tt_mode in ["tt35", "tt70", "tt88"]:
self._inst_settings.tt_mode = new_tt_mode
self._run_create_vanadium()
else:
self._run_create_vanadium()
def create_cal(self, **kwargs):
with self._apply_temporary_inst_settings(kwargs, kwargs.get("run_number")):
run_details = self._get_run_details(self._inst_settings.run_number)
cross_correlate_params = {
"ReferenceSpectra": self._inst_settings.reference_spectra,
"WorkspaceIndexMin": self._inst_settings.cross_corr_ws_min,
"WorkspaceIndexMax": self._inst_settings.cross_corr_ws_max,
"XMin": self._inst_settings.cross_corr_x_min,
"XMax": self._inst_settings.cross_corr_x_max
}
get_detector_offsets_params = {
"DReference": self._inst_settings.d_reference,
"Step": self._inst_settings.get_det_offsets_step,
"XMin": self._inst_settings.get_det_offsets_x_min,
"XMax": self._inst_settings.get_det_offsets_x_max
}
output_file_paths = self._generate_out_file_paths(run_details)
return pearl_calibration_algs.create_calibration(
calibration_runs=self._inst_settings.run_number,
instrument=self,
offset_file_name=run_details.offset_file_path,
grouping_file_name=run_details.grouping_file_path,
calibration_dir=self._inst_settings.calibration_dir,
rebin_1_params=self._inst_settings.cal_rebin_1,
rebin_2_params=self._inst_settings.cal_rebin_2,
cross_correlate_params=cross_correlate_params,
get_det_offset_params=get_detector_offsets_params,
output_name=output_file_paths["output_name"] + "_grouped")
def should_subtract_empty_inst(self):
return self._inst_settings.subtract_empty_inst
@contextmanager
def _apply_temporary_inst_settings(self, kwargs, run):
# Set temporary settings. The check has to occur before updating attributes,
# otherwise it would be assumed that the long_mode vars are cached.
if not self._inst_settings.long_mode == bool(kwargs.get("long_mode")):
self._inst_settings.update_attributes(kwargs=kwargs)
self._switch_long_mode_inst_settings(kwargs.get("long_mode"))
else:
self._inst_settings.update_attributes(kwargs=kwargs)
# check that cache exists
run_number_string_key = self._generate_run_details_fingerprint(
run, self._inst_settings.file_extension, self._inst_settings.tt_mode)
if run_number_string_key in self._cached_run_details:
# update spline path of cache
add_spline = [self._inst_settings.tt_mode, "long"] if self._inst_settings.long_mode else \
[self._inst_settings.tt_mode]
self._cached_run_details[run_number_string_key].update_file_paths(
self._inst_settings, add_spline)
yield
# reset instrument settings
self._inst_settings = copy.deepcopy(self._default_inst_settings)
# reset spline path
add_spline = [self._inst_settings.tt_mode, "long"] if self._inst_settings.long_mode else \
[self._inst_settings.tt_mode]
self._cached_run_details[run_number_string_key].update_file_paths(self._inst_settings, add_spline)
def _run_create_vanadium(self):
# Provides a minimal wrapper so if we have tt_mode 'all' we can loop round
return self._create_vanadium(run_number_string=self._inst_settings.run_in_range,
do_absorb_corrections=self._inst_settings.absorb_corrections)
def _get_run_details(self, run_number_string):
run_number_string_key = self._generate_run_details_fingerprint(
run_number_string, self._inst_settings.file_extension, self._inst_settings.tt_mode)
if run_number_string_key in self._cached_run_details:
return self._cached_run_details[run_number_string_key]
self._cached_run_details[run_number_string_key] = pearl_algs.get_run_details(
run_number_string=run_number_string,
inst_settings=self._inst_settings,
is_vanadium_run=self._is_vanadium)
return self._cached_run_details[run_number_string_key]
def _add_formatting_options(self, format_options):
"""
Add any instrument-specific format options to the given
list
:param format_options: A dictionary of string format keys mapped to their expansions
:return: format_options as it is passed in
"""
inst = self._inst_settings
format_options.update({'tt_mode': str(inst.tt_mode), '_long_mode': '_long' if inst.long_mode else ''})
return format_options
def _normalise_ws_current(self, ws_to_correct):
monitor_spectra = self._inst_settings.monitor_spec_no
monitor_ws = common.extract_single_spectrum(ws_to_process=ws_to_correct,
spectrum_number_to_extract=monitor_spectra)
normalised_ws = pearl_algs.normalise_ws_current(
ws_to_correct=ws_to_correct,
monitor_ws=monitor_ws,
spline_coeff=self._inst_settings.monitor_spline,
integration_range=self._inst_settings.monitor_integration_range,
lambda_values=self._inst_settings.monitor_lambda,
ex_regions=self._inst_settings.monitor_mask_regions)
common.remove_intermediate_workspace(monitor_ws)
return normalised_ws
def _get_current_tt_mode(self):
return self._inst_settings.tt_mode
def _spline_vanadium_ws(self, focused_vanadium_spectra):
focused_vanadium_spectra = pearl_algs.strip_bragg_peaks(focused_vanadium_spectra)
splined_list = common.spline_workspaces(focused_vanadium_spectra=focused_vanadium_spectra,
num_splines=self._inst_settings.spline_coefficient)
# Ensure the name is unique if we are in tt_mode all
new_workspace_names = []
for ws in splined_list:
new_name = ws.name() + '_' + self._inst_settings.tt_mode
new_workspace_names.append(
mantid.RenameWorkspace(InputWorkspace=ws, OutputWorkspace=new_name))
return new_workspace_names
def _get_instrument_bin_widths(self):
if self._inst_settings.tt_mode == "custom":
return self._inst_settings.custom_focused_bin_widths
else:
return self._inst_settings.focused_bin_widths
def _output_focused_ws(self, processed_spectra, run_details, output_mode=None):
if not output_mode:
output_mode = self._inst_settings.focus_mode
attenuation_path = None
if self._inst_settings.perform_atten:
name_key = 'name'
path_key = 'path'
if isinstance(self._inst_settings.attenuation_files, str):
self._inst_settings.attenuation_files = eval(self._inst_settings.attenuation_files)
atten_file_found = False
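# Illustrative shape of the expected setting (hypothetical values):
# attenuation_files = [{'name': 'ZTA', 'path': '/calibration/pearl/atten.out'}]
# i.e. a list of dicts, each carrying a 'name' and a 'path' entry.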
for atten_file in self._inst_settings.attenuation_files:
if any(required_key not in atten_file for required_key in [name_key, path_key]):
logger.warning("A dictionary in attenuation_files has been ignored because "
f"it doesn't contain both {name_key} and {path_key} entries")
elif atten_file[name_key] == self._inst_settings.attenuation_file:
if atten_file_found:
raise RuntimeError(
f"Duplicate name {self._inst_settings.attenuation_file} found in attenuation_files")
attenuation_path = atten_file[path_key]
atten_file_found = True
if attenuation_path is None:
raise RuntimeError(
f"Unknown attenuation_file {self._inst_settings.attenuation_file} specified for attenuation")
output_spectra = \
pearl_output.generate_and_save_focus_output(self, processed_spectra=processed_spectra,
run_details=run_details, focus_mode=output_mode,
attenuation_filepath=attenuation_path)
group_name = "PEARL{0!s}_{1}{2}-Results-D-Grp"
mode = "_long" if self._inst_settings.long_mode else ""
group_name = group_name.format(run_details.output_run_string, self._inst_settings.tt_mode,
mode)
grouped_d_spacing = mantid.GroupWorkspaces(InputWorkspaces=output_spectra,
OutputWorkspace=group_name)
return grouped_d_spacing, None
def _crop_banks_to_user_tof(self, focused_banks):
if self._inst_settings.tt_mode == "custom":
return common.crop_banks_using_crop_list(focused_banks,
self._inst_settings.custom_tof_cropping_values)
else:
return common.crop_banks_using_crop_list(focused_banks,
self._inst_settings.tof_cropping_values)
def _crop_raw_to_expected_tof_range(self, ws_to_crop):
out_ws = common.crop_in_tof(ws_to_crop=ws_to_crop,
x_min=self._inst_settings.raw_data_crop_vals[0],
x_max=self._inst_settings.raw_data_crop_vals[-1])
return out_ws
def _crop_van_to_expected_tof_range(self, van_ws_to_crop):
cropped_ws = common.crop_in_tof(ws_to_crop=van_ws_to_crop,
x_min=self._inst_settings.van_tof_cropping[0],
x_max=self._inst_settings.van_tof_cropping[-1])
return cropped_ws
def _apply_absorb_corrections(self, run_details, ws_to_correct):
if self._inst_settings.gen_absorb:
absorb_file_name = self._inst_settings.absorb_out_file
if not absorb_file_name:
raise RuntimeError(
"\"absorb_corrections_out_filename\" must be supplied when generating absorption "
"corrections")
absorb_corrections = pearl_algs.generate_vanadium_absorb_corrections(
van_ws=ws_to_correct, output_filename=absorb_file_name)
else:
absorb_corrections = None
return pearl_algs.apply_vanadium_absorb_corrections(van_ws=ws_to_correct,
run_details=run_details,
absorb_ws=absorb_corrections)
def _switch_long_mode_inst_settings(self, long_mode_on):
self._inst_settings.update_attributes(
advanced_config=pearl_advanced_config.get_long_mode_dict(long_mode_on))
if long_mode_on:
setattr(self._inst_settings, "perform_atten", False)
|
Jgarcia-IAS/SAT | refs/heads/master | openerp/addons-extra/odoo-pruebas/odoo-server/addons/l10n_fr/__init__.py | 424 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2008 JAILLET Simon - CrysaLEAD - www.crysalead.fr
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
import l10n_fr
import report
import wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
gminds/rapidnewsng | refs/heads/master | django/conf/locale/zh_TW/formats.py | 1293 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# DATE_FORMAT =
# TIME_FORMAT =
# DATETIME_FORMAT =
# YEAR_MONTH_FORMAT =
# MONTH_DAY_FORMAT =
# SHORT_DATE_FORMAT =
# SHORT_DATETIME_FORMAT =
# FIRST_DAY_OF_WEEK =
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =
# DECIMAL_SEPARATOR =
# THOUSAND_SEPARATOR =
# NUMBER_GROUPING =
|
linglung/ytdl | refs/heads/master | youtube_dl/options.py | 1 | from __future__ import unicode_literals
import os.path
import optparse
import re
import sys
from .downloader.external import list_external_downloaders
from .compat import (
compat_expanduser,
compat_get_terminal_size,
compat_getenv,
compat_kwargs,
compat_shlex_split,
)
from .utils import (
preferredencoding,
write_string,
)
from .version import __version__
def parseOpts(overrideArguments=None):
def _readOptions(filename_bytes, default=[]):
try:
optionf = open(filename_bytes)
except IOError:
return default # silently skip if file is not present
try:
# FIXME: https://github.com/rg3/youtube-dl/commit/dfe5fa49aed02cf36ba9f743b11b0903554b5e56
contents = optionf.read()
if sys.version_info < (3,):
contents = contents.decode(preferredencoding())
res = compat_shlex_split(contents, comments=True)
finally:
optionf.close()
return res
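# For reference (added note): a configuration file read here is just
# whitespace-separated command line options, with '#' comments allowed, e.g. a
# hypothetical ~/.config/youtube-dl/config containing:
# # always extract audio to ~/Music
# -x
# -o ~/Music/%(title)s.%(ext)s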
def _readUserConf():
xdg_config_home = compat_getenv('XDG_CONFIG_HOME')
if xdg_config_home:
userConfFile = os.path.join(xdg_config_home, 'youtube-dl', 'config')
if not os.path.isfile(userConfFile):
userConfFile = os.path.join(xdg_config_home, 'youtube-dl.conf')
else:
userConfFile = os.path.join(compat_expanduser('~'), '.config', 'youtube-dl', 'config')
if not os.path.isfile(userConfFile):
userConfFile = os.path.join(compat_expanduser('~'), '.config', 'youtube-dl.conf')
userConf = _readOptions(userConfFile, None)
if userConf is None:
appdata_dir = compat_getenv('appdata')
if appdata_dir:
userConf = _readOptions(
os.path.join(appdata_dir, 'youtube-dl', 'config'),
default=None)
if userConf is None:
userConf = _readOptions(
os.path.join(appdata_dir, 'youtube-dl', 'config.txt'),
default=None)
if userConf is None:
userConf = _readOptions(
os.path.join(compat_expanduser('~'), 'youtube-dl.conf'),
default=None)
if userConf is None:
userConf = _readOptions(
os.path.join(compat_expanduser('~'), 'youtube-dl.conf.txt'),
default=None)
if userConf is None:
userConf = []
return userConf
def _format_option_string(option):
''' ('-o', '--option') -> -o, --option METAVAR'''
opts = []
if option._short_opts:
opts.append(option._short_opts[0])
if option._long_opts:
opts.append(option._long_opts[0])
if len(opts) > 1:
opts.insert(1, ', ')
if option.takes_value():
opts.append(' %s' % option.metavar)
return ''.join(opts)
def _comma_separated_values_options_callback(option, opt_str, value, parser):
setattr(parser.values, option.dest, value.split(','))
def _hide_login_info(opts):
PRIVATE_OPTS = ['-p', '--password', '-u', '--username', '--video-password', '--ap-password', '--ap-username']
eqre = re.compile('^(?P<key>' + ('|'.join(re.escape(po) for po in PRIVATE_OPTS)) + ')=.+$')
def _scrub_eq(o):
m = eqre.match(o)
if m:
return m.group('key') + '=PRIVATE'
else:
return o
opts = list(map(_scrub_eq, opts))
for private_opt in PRIVATE_OPTS:
try:
i = opts.index(private_opt)
opts[i + 1] = 'PRIVATE'
except ValueError:
pass
return opts
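# Illustrative behaviour (hypothetical values):
# _hide_login_info(['-u', 'me@example.com', '--password=hunter2', 'URL'])
# -> ['-u', 'PRIVATE', '--password=PRIVATE', 'URL']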
# No need to wrap help messages if we're on a wide console
columns = compat_get_terminal_size().columns
max_width = columns if columns else 80
max_help_position = 80
fmt = optparse.IndentedHelpFormatter(width=max_width, max_help_position=max_help_position)
fmt.format_option_strings = _format_option_string
kw = {
'version': __version__,
'formatter': fmt,
'usage': '%prog [OPTIONS] URL [URL...]',
'conflict_handler': 'resolve',
}
parser = optparse.OptionParser(**compat_kwargs(kw))
general = optparse.OptionGroup(parser, 'General Options')
general.add_option(
'-h', '--help',
action='help',
help='Print this help text and exit')
general.add_option(
'-v', '--version',
action='version',
help='Print program version and exit')
general.add_option(
'-U', '--update',
action='store_true', dest='update_self',
help='Update this program to latest version. Make sure that you have sufficient permissions (run with sudo if needed)')
general.add_option(
'-i', '--ignore-errors',
action='store_true', dest='ignoreerrors', default=False,
help='Continue on download errors, for example to skip unavailable videos in a playlist')
general.add_option(
'--abort-on-error',
action='store_false', dest='ignoreerrors',
help='Abort downloading of further videos (in the playlist or the command line) if an error occurs')
general.add_option(
'--dump-user-agent',
action='store_true', dest='dump_user_agent', default=False,
help='Display the current browser identification')
general.add_option(
'--list-extractors',
action='store_true', dest='list_extractors', default=False,
help='List all supported extractors')
general.add_option(
'--extractor-descriptions',
action='store_true', dest='list_extractor_descriptions', default=False,
help='Output descriptions of all supported extractors')
general.add_option(
'--force-generic-extractor',
action='store_true', dest='force_generic_extractor', default=False,
help='Force extraction to use the generic extractor')
general.add_option(
'--default-search',
dest='default_search', metavar='PREFIX',
help='Use this prefix for unqualified URLs. For example "gvsearch2:" downloads two videos from google videos for youtube-dl "large apple". Use the value "auto" to let youtube-dl guess ("auto_warning" to emit a warning when guessing). "error" just throws an error. The default value "fixup_error" repairs broken URLs, but emits an error if this is not possible instead of searching.')
general.add_option(
'--ignore-config',
action='store_true',
help='Do not read configuration files. '
'When given in the global configuration file /etc/youtube-dl.conf: '
'Do not read the user configuration in ~/.config/youtube-dl/config '
'(%APPDATA%/youtube-dl/config.txt on Windows)')
general.add_option(
'--config-location',
dest='config_location', metavar='PATH',
help='Location of the configuration file; either the path to the config or its containing directory.')
general.add_option(
'--flat-playlist',
action='store_const', dest='extract_flat', const='in_playlist',
default=False,
help='Do not extract the videos of a playlist, only list them.')
general.add_option(
'--mark-watched',
action='store_true', dest='mark_watched', default=False,
help='Mark videos watched (YouTube only)')
general.add_option(
'--no-mark-watched',
action='store_false', dest='mark_watched', default=False,
help='Do not mark videos watched (YouTube only)')
general.add_option(
'--no-color', '--no-colors',
action='store_true', dest='no_color',
default=False,
help='Do not emit color codes in output')
network = optparse.OptionGroup(parser, 'Network Options')
network.add_option(
'--proxy', dest='proxy',
default=None, metavar='URL',
help='Use the specified HTTP/HTTPS/SOCKS proxy. To enable experimental '
'SOCKS proxy, specify a proper scheme. For example '
'socks5://127.0.0.1:1080/. Pass in an empty string (--proxy "") '
'for direct connection')
network.add_option(
'--socket-timeout',
dest='socket_timeout', type=float, default=None, metavar='SECONDS',
help='Time to wait before giving up, in seconds')
network.add_option(
'--source-address',
metavar='IP', dest='source_address', default=None,
help='Client-side IP address to bind to (experimental)',
)
network.add_option(
'-4', '--force-ipv4',
action='store_const', const='0.0.0.0', dest='source_address',
help='Make all connections via IPv4 (experimental)',
)
network.add_option(
'-6', '--force-ipv6',
action='store_const', const='::', dest='source_address',
help='Make all connections via IPv6 (experimental)',
)
network.add_option(
'--geo-verification-proxy',
dest='geo_verification_proxy', default=None, metavar='URL',
help='Use this proxy to verify the IP address for some geo-restricted sites. '
'The default proxy specified by --proxy (or none, if the options is not present) is used for the actual downloading. (experimental)'
)
network.add_option(
'--cn-verification-proxy',
dest='cn_verification_proxy', default=None, metavar='URL',
help=optparse.SUPPRESS_HELP,
)
selection = optparse.OptionGroup(parser, 'Video Selection')
selection.add_option(
'--playlist-start',
dest='playliststart', metavar='NUMBER', default=1, type=int,
help='Playlist video to start at (default is %default)')
selection.add_option(
'--playlist-end',
dest='playlistend', metavar='NUMBER', default=None, type=int,
help='Playlist video to end at (default is last)')
selection.add_option(
'--playlist-items',
dest='playlist_items', metavar='ITEM_SPEC', default=None,
help='Playlist video items to download. Specify indices of the videos in the playlist separated by commas like: "--playlist-items 1,2,5,8" if you want to download videos indexed 1, 2, 5, 8 in the playlist. You can specify range: "--playlist-items 1-3,7,10-13", it will download the videos at index 1, 2, 3, 7, 10, 11, 12 and 13.')
selection.add_option(
'--match-title',
dest='matchtitle', metavar='REGEX',
help='Download only matching titles (regex or caseless sub-string)')
selection.add_option(
'--reject-title',
dest='rejecttitle', metavar='REGEX',
help='Skip download for matching titles (regex or caseless sub-string)')
selection.add_option(
'--max-downloads',
dest='max_downloads', metavar='NUMBER', type=int, default=None,
help='Abort after downloading NUMBER files')
selection.add_option(
'--min-filesize',
metavar='SIZE', dest='min_filesize', default=None,
help='Do not download any videos smaller than SIZE (e.g. 50k or 44.6m)')
selection.add_option(
'--max-filesize',
metavar='SIZE', dest='max_filesize', default=None,
help='Do not download any videos larger than SIZE (e.g. 50k or 44.6m)')
selection.add_option(
'--date',
metavar='DATE', dest='date', default=None,
help='Download only videos uploaded in this date')
selection.add_option(
'--datebefore',
metavar='DATE', dest='datebefore', default=None,
help='Download only videos uploaded on or before this date (i.e. inclusive)')
selection.add_option(
'--dateafter',
metavar='DATE', dest='dateafter', default=None,
help='Download only videos uploaded on or after this date (i.e. inclusive)')
selection.add_option(
'--min-views',
metavar='COUNT', dest='min_views', default=None, type=int,
help='Do not download any videos with less than COUNT views')
selection.add_option(
'--max-views',
metavar='COUNT', dest='max_views', default=None, type=int,
help='Do not download any videos with more than COUNT views')
selection.add_option(
'--match-filter',
metavar='FILTER', dest='match_filter', default=None,
help=(
'Generic video filter (experimental). '
'Specify any key (see help for -o for a list of available keys) to'
' match if the key is present, '
'!key to check if the key is not present, '
'key > NUMBER (like "comment_count > 12", also works with '
'>=, <, <=, !=, =) to compare against a number, and '
'& to require multiple matches. '
'Values which are not known are excluded unless you'
' put a question mark (?) after the operator.'
'For example, to only match videos that have been liked more than '
'100 times and disliked less than 50 times (or the dislike '
'functionality is not available at the given service), but who '
'also have a description, use --match-filter '
'"like_count > 100 & dislike_count <? 50 & description" .'
))
selection.add_option(
'--no-playlist',
action='store_true', dest='noplaylist', default=False,
help='Download only the video, if the URL refers to a video and a playlist.')
selection.add_option(
'--yes-playlist',
action='store_false', dest='noplaylist', default=False,
help='Download the playlist, if the URL refers to a video and a playlist.')
selection.add_option(
'--age-limit',
metavar='YEARS', dest='age_limit', default=None, type=int,
help='Download only videos suitable for the given age')
selection.add_option(
'--download-archive', metavar='FILE',
dest='download_archive',
help='Download only videos not listed in the archive file. Record the IDs of all downloaded videos in it.')
selection.add_option(
'--include-ads',
dest='include_ads', action='store_true',
help='Download advertisements as well (experimental)')
authentication = optparse.OptionGroup(parser, 'Authentication Options')
authentication.add_option(
'-u', '--username',
dest='username', metavar='USERNAME',
help='Login with this account ID')
authentication.add_option(
'-p', '--password',
dest='password', metavar='PASSWORD',
help='Account password. If this option is left out, youtube-dl will ask interactively.')
authentication.add_option(
'-2', '--twofactor',
dest='twofactor', metavar='TWOFACTOR',
help='Two-factor authentication code')
authentication.add_option(
'-n', '--netrc',
action='store_true', dest='usenetrc', default=False,
help='Use .netrc authentication data')
authentication.add_option(
'--video-password',
dest='videopassword', metavar='PASSWORD',
help='Video password (vimeo, smotri, youku)')
adobe_pass = optparse.OptionGroup(parser, 'Adobe Pass Options')
adobe_pass.add_option(
'--ap-mso',
dest='ap_mso', metavar='MSO',
help='Adobe Pass multiple-system operator (TV provider) identifier, use --ap-list-mso for a list of available MSOs')
adobe_pass.add_option(
'--ap-username',
dest='ap_username', metavar='USERNAME',
help='Multiple-system operator account login')
adobe_pass.add_option(
'--ap-password',
dest='ap_password', metavar='PASSWORD',
help='Multiple-system operator account password. If this option is left out, youtube-dl will ask interactively.')
adobe_pass.add_option(
'--ap-list-mso',
action='store_true', dest='ap_list_mso', default=False,
help='List all supported multiple-system operators')
video_format = optparse.OptionGroup(parser, 'Video Format Options')
video_format.add_option(
'-f', '--format',
action='store', dest='format', metavar='FORMAT', default=None,
help='Video format code, see the "FORMAT SELECTION" for all the info')
video_format.add_option(
'--all-formats',
action='store_const', dest='format', const='all',
help='Download all available video formats')
video_format.add_option(
'--prefer-free-formats',
action='store_true', dest='prefer_free_formats', default=False,
help='Prefer free video formats unless a specific one is requested')
video_format.add_option(
'-F', '--list-formats',
action='store_true', dest='listformats',
help='List all available formats of requested videos')
video_format.add_option(
'--youtube-include-dash-manifest',
action='store_true', dest='youtube_include_dash_manifest', default=True,
help=optparse.SUPPRESS_HELP)
video_format.add_option(
'--youtube-skip-dash-manifest',
action='store_false', dest='youtube_include_dash_manifest',
help='Do not download the DASH manifests and related data on YouTube videos')
video_format.add_option(
'--merge-output-format',
action='store', dest='merge_output_format', metavar='FORMAT', default=None,
help=(
'If a merge is required (e.g. bestvideo+bestaudio), '
'output to given container format. One of mkv, mp4, ogg, webm, flv. '
'Ignored if no merge is required'))
subtitles = optparse.OptionGroup(parser, 'Subtitle Options')
subtitles.add_option(
'--write-sub', '--write-srt',
action='store_true', dest='writesubtitles', default=False,
help='Write subtitle file')
subtitles.add_option(
'--write-auto-sub', '--write-automatic-sub',
action='store_true', dest='writeautomaticsub', default=False,
help='Write automatically generated subtitle file (YouTube only)')
subtitles.add_option(
'--all-subs',
action='store_true', dest='allsubtitles', default=False,
help='Download all the available subtitles of the video')
subtitles.add_option(
'--list-subs',
action='store_true', dest='listsubtitles', default=False,
help='List all available subtitles for the video')
subtitles.add_option(
'--sub-format',
action='store', dest='subtitlesformat', metavar='FORMAT', default='best',
help='Subtitle format, accepts formats preference, for example: "srt" or "ass/srt/best"')
subtitles.add_option(
'--sub-lang', '--sub-langs', '--srt-lang',
action='callback', dest='subtitleslangs', metavar='LANGS', type='str',
default=[], callback=_comma_separated_values_options_callback,
help='Languages of the subtitles to download (optional) separated by commas, use --list-subs for available language tags')
downloader = optparse.OptionGroup(parser, 'Download Options')
downloader.add_option(
'-r', '--limit-rate', '--rate-limit',
dest='ratelimit', metavar='RATE',
help='Maximum download rate in bytes per second (e.g. 50K or 4.2M)')
downloader.add_option(
'-R', '--retries',
dest='retries', metavar='RETRIES', default=10,
help='Number of retries (default is %default), or "infinite".')
downloader.add_option(
'--fragment-retries',
dest='fragment_retries', metavar='RETRIES', default=10,
help='Number of retries for a fragment (default is %default), or "infinite" (DASH and hlsnative only)')
downloader.add_option(
'--skip-unavailable-fragments',
action='store_true', dest='skip_unavailable_fragments', default=True,
help='Skip unavailable fragments (DASH and hlsnative only)')
    downloader.add_option(
'--abort-on-unavailable-fragment',
action='store_false', dest='skip_unavailable_fragments',
help='Abort downloading when some fragment is not available')
downloader.add_option(
'--buffer-size',
dest='buffersize', metavar='SIZE', default='1024',
help='Size of download buffer (e.g. 1024 or 16K) (default is %default)')
downloader.add_option(
'--no-resize-buffer',
action='store_true', dest='noresizebuffer', default=False,
help='Do not automatically adjust the buffer size. By default, the buffer size is automatically resized from an initial value of SIZE.')
downloader.add_option(
'--test',
action='store_true', dest='test', default=False,
help=optparse.SUPPRESS_HELP)
downloader.add_option(
'--playlist-reverse',
action='store_true',
help='Download playlist videos in reverse order')
downloader.add_option(
'--xattr-set-filesize',
dest='xattr_set_filesize', action='store_true',
help='Set file xattribute ytdl.filesize with expected file size (experimental)')
downloader.add_option(
'--hls-prefer-native',
dest='hls_prefer_native', action='store_true', default=None,
help='Use the native HLS downloader instead of ffmpeg')
downloader.add_option(
'--hls-prefer-ffmpeg',
dest='hls_prefer_native', action='store_false', default=None,
help='Use ffmpeg instead of the native HLS downloader')
downloader.add_option(
'--hls-use-mpegts',
dest='hls_use_mpegts', action='store_true',
help='Use the mpegts container for HLS videos, allowing to play the '
'video while downloading (some players may not be able to play it)')
downloader.add_option(
'--external-downloader',
dest='external_downloader', metavar='COMMAND',
help='Use the specified external downloader. '
'Currently supports %s' % ','.join(list_external_downloaders()))
downloader.add_option(
'--external-downloader-args',
dest='external_downloader_args', metavar='ARGS',
help='Give these arguments to the external downloader')
workarounds = optparse.OptionGroup(parser, 'Workarounds')
workarounds.add_option(
'--encoding',
dest='encoding', metavar='ENCODING',
help='Force the specified encoding (experimental)')
workarounds.add_option(
'--no-check-certificate',
action='store_true', dest='no_check_certificate', default=False,
help='Suppress HTTPS certificate validation')
workarounds.add_option(
'--prefer-insecure',
'--prefer-unsecure', action='store_true', dest='prefer_insecure',
help='Use an unencrypted connection to retrieve information about the video. (Currently supported only for YouTube)')
workarounds.add_option(
'--user-agent',
metavar='UA', dest='user_agent',
help='Specify a custom user agent')
workarounds.add_option(
'--referer',
metavar='URL', dest='referer', default=None,
help='Specify a custom referer, use if the video access is restricted to one domain',
)
workarounds.add_option(
'--add-header',
metavar='FIELD:VALUE', dest='headers', action='append',
help='Specify a custom HTTP header and its value, separated by a colon \':\'. You can use this option multiple times',
)
workarounds.add_option(
'--bidi-workaround',
dest='bidi_workaround', action='store_true',
help='Work around terminals that lack bidirectional text support. Requires bidiv or fribidi executable in PATH')
workarounds.add_option(
'--sleep-interval', '--min-sleep-interval', metavar='SECONDS',
dest='sleep_interval', type=float,
help=(
'Number of seconds to sleep before each download when used alone '
'or a lower bound of a range for randomized sleep before each download '
'(minimum possible number of seconds to sleep) when used along with '
'--max-sleep-interval.'))
workarounds.add_option(
'--max-sleep-interval', metavar='SECONDS',
dest='max_sleep_interval', type=float,
help=(
'Upper bound of a range for randomized sleep before each download '
'(maximum possible number of seconds to sleep). Must only be used '
'along with --min-sleep-interval.'))
verbosity = optparse.OptionGroup(parser, 'Verbosity / Simulation Options')
verbosity.add_option(
'-q', '--quiet',
action='store_true', dest='quiet', default=False,
help='Activate quiet mode')
verbosity.add_option(
'--no-warnings',
dest='no_warnings', action='store_true', default=False,
help='Ignore warnings')
verbosity.add_option(
'-s', '--simulate',
action='store_true', dest='simulate', default=False,
help='Do not download the video and do not write anything to disk')
verbosity.add_option(
'--skip-download',
action='store_true', dest='skip_download', default=False,
help='Do not download the video')
verbosity.add_option(
'-g', '--get-url',
action='store_true', dest='geturl', default=False,
help='Simulate, quiet but print URL')
verbosity.add_option(
'-e', '--get-title',
action='store_true', dest='gettitle', default=False,
help='Simulate, quiet but print title')
verbosity.add_option(
'--get-id',
action='store_true', dest='getid', default=False,
help='Simulate, quiet but print id')
verbosity.add_option(
'--get-thumbnail',
action='store_true', dest='getthumbnail', default=False,
help='Simulate, quiet but print thumbnail URL')
verbosity.add_option(
'--get-description',
action='store_true', dest='getdescription', default=False,
help='Simulate, quiet but print video description')
verbosity.add_option(
'--get-duration',
action='store_true', dest='getduration', default=False,
help='Simulate, quiet but print video length')
verbosity.add_option(
'--get-filename',
action='store_true', dest='getfilename', default=False,
help='Simulate, quiet but print output filename')
verbosity.add_option(
'--get-format',
action='store_true', dest='getformat', default=False,
help='Simulate, quiet but print output format')
verbosity.add_option(
'-j', '--dump-json',
action='store_true', dest='dumpjson', default=False,
help='Simulate, quiet but print JSON information. See --output for a description of available keys.')
verbosity.add_option(
'-J', '--dump-single-json',
action='store_true', dest='dump_single_json', default=False,
help='Simulate, quiet but print JSON information for each command-line argument. If the URL refers to a playlist, dump the whole playlist information in a single line.')
verbosity.add_option(
'--print-json',
action='store_true', dest='print_json', default=False,
help='Be quiet and print the video information as JSON (video is still being downloaded).',
)
verbosity.add_option(
'--newline',
action='store_true', dest='progress_with_newline', default=False,
help='Output progress bar as new lines')
verbosity.add_option(
'--no-progress',
action='store_true', dest='noprogress', default=False,
help='Do not print progress bar')
verbosity.add_option(
'--console-title',
action='store_true', dest='consoletitle', default=False,
help='Display progress in console titlebar')
verbosity.add_option(
'-v', '--verbose',
action='store_true', dest='verbose', default=False,
help='Print various debugging information')
verbosity.add_option(
'--dump-pages', '--dump-intermediate-pages',
action='store_true', dest='dump_intermediate_pages', default=False,
help='Print downloaded pages encoded using base64 to debug problems (very verbose)')
verbosity.add_option(
'--write-pages',
action='store_true', dest='write_pages', default=False,
help='Write downloaded intermediary pages to files in the current directory to debug problems')
verbosity.add_option(
'--youtube-print-sig-code',
action='store_true', dest='youtube_print_sig_code', default=False,
help=optparse.SUPPRESS_HELP)
verbosity.add_option(
'--print-traffic', '--dump-headers',
dest='debug_printtraffic', action='store_true', default=False,
help='Display sent and read HTTP traffic')
verbosity.add_option(
'-C', '--call-home',
dest='call_home', action='store_true', default=False,
help='Contact the youtube-dl server for debugging')
verbosity.add_option(
'--no-call-home',
dest='call_home', action='store_false', default=False,
help='Do NOT contact the youtube-dl server for debugging')
filesystem = optparse.OptionGroup(parser, 'Filesystem Options')
filesystem.add_option(
'-a', '--batch-file',
dest='batchfile', metavar='FILE',
help='File containing URLs to download (\'-\' for stdin)')
filesystem.add_option(
'--id', default=False,
action='store_true', dest='useid', help='Use only video ID in file name')
filesystem.add_option(
'-o', '--output',
dest='outtmpl', metavar='TEMPLATE',
help=('Output filename template, see the "OUTPUT TEMPLATE" for all the info'))
filesystem.add_option(
'--autonumber-size',
dest='autonumber_size', metavar='NUMBER',
help='Specify the number of digits in %(autonumber)s when it is present in output filename template or --auto-number option is given')
filesystem.add_option(
'--restrict-filenames',
action='store_true', dest='restrictfilenames', default=False,
help='Restrict filenames to only ASCII characters, and avoid "&" and spaces in filenames')
filesystem.add_option(
'-A', '--auto-number',
action='store_true', dest='autonumber', default=False,
help='[deprecated; use -o "%(autonumber)s-%(title)s.%(ext)s" ] Number downloaded files starting from 00000')
filesystem.add_option(
'-t', '--title',
action='store_true', dest='usetitle', default=False,
help='[deprecated] Use title in file name (default)')
filesystem.add_option(
'-l', '--literal', default=False,
action='store_true', dest='usetitle',
help='[deprecated] Alias of --title')
filesystem.add_option(
'-w', '--no-overwrites',
action='store_true', dest='nooverwrites', default=False,
help='Do not overwrite files')
filesystem.add_option(
'-c', '--continue',
action='store_true', dest='continue_dl', default=True,
help='Force resume of partially downloaded files. By default, youtube-dl will resume downloads if possible.')
filesystem.add_option(
'--no-continue',
action='store_false', dest='continue_dl',
help='Do not resume partially downloaded files (restart from beginning)')
filesystem.add_option(
'--no-part',
action='store_true', dest='nopart', default=False,
help='Do not use .part files - write directly into output file')
filesystem.add_option(
'--no-mtime',
action='store_false', dest='updatetime', default=True,
help='Do not use the Last-modified header to set the file modification time')
filesystem.add_option(
'--write-description',
action='store_true', dest='writedescription', default=False,
help='Write video description to a .description file')
filesystem.add_option(
'--write-info-json',
action='store_true', dest='writeinfojson', default=False,
help='Write video metadata to a .info.json file')
filesystem.add_option(
'--write-annotations',
action='store_true', dest='writeannotations', default=False,
help='Write video annotations to a .annotations.xml file')
filesystem.add_option(
'--load-info-json', '--load-info',
dest='load_info_filename', metavar='FILE',
help='JSON file containing the video information (created with the "--write-info-json" option)')
filesystem.add_option(
'--cookies',
dest='cookiefile', metavar='FILE',
help='File to read cookies from and dump cookie jar in')
filesystem.add_option(
'--cache-dir', dest='cachedir', default=None, metavar='DIR',
help='Location in the filesystem where youtube-dl can store some downloaded information permanently. By default $XDG_CACHE_HOME/youtube-dl or ~/.cache/youtube-dl . At the moment, only YouTube player files (for videos with obfuscated signatures) are cached, but that may change.')
filesystem.add_option(
'--no-cache-dir', action='store_const', const=False, dest='cachedir',
help='Disable filesystem caching')
filesystem.add_option(
'--rm-cache-dir',
action='store_true', dest='rm_cachedir',
help='Delete all filesystem cache files')
thumbnail = optparse.OptionGroup(parser, 'Thumbnail images')
thumbnail.add_option(
'--write-thumbnail',
action='store_true', dest='writethumbnail', default=False,
help='Write thumbnail image to disk')
thumbnail.add_option(
'--write-all-thumbnails',
action='store_true', dest='write_all_thumbnails', default=False,
help='Write all thumbnail image formats to disk')
thumbnail.add_option(
'--list-thumbnails',
action='store_true', dest='list_thumbnails', default=False,
help='Simulate and list all available thumbnail formats')
postproc = optparse.OptionGroup(parser, 'Post-processing Options')
postproc.add_option(
'-x', '--extract-audio',
action='store_true', dest='extractaudio', default=False,
help='Convert video files to audio-only files (requires ffmpeg or avconv and ffprobe or avprobe)')
postproc.add_option(
'--audio-format', metavar='FORMAT', dest='audioformat', default='best',
help='Specify audio format: "best", "aac", "vorbis", "mp3", "m4a", "opus", or "wav"; "%default" by default')
postproc.add_option(
'--audio-quality', metavar='QUALITY',
dest='audioquality', default='5',
help='Specify ffmpeg/avconv audio quality, insert a value between 0 (better) and 9 (worse) for VBR or a specific bitrate like 128K (default %default)')
postproc.add_option(
'--recode-video',
metavar='FORMAT', dest='recodevideo', default=None,
help='Encode the video to another format if necessary (currently supported: mp4|flv|ogg|webm|mkv|avi)')
postproc.add_option(
'--postprocessor-args',
dest='postprocessor_args', metavar='ARGS',
help='Give these arguments to the postprocessor')
postproc.add_option(
'-k', '--keep-video',
action='store_true', dest='keepvideo', default=False,
help='Keep the video file on disk after the post-processing; the video is erased by default')
postproc.add_option(
'--no-post-overwrites',
action='store_true', dest='nopostoverwrites', default=False,
help='Do not overwrite post-processed files; the post-processed files are overwritten by default')
postproc.add_option(
'--embed-subs',
action='store_true', dest='embedsubtitles', default=False,
help='Embed subtitles in the video (only for mp4, webm and mkv videos)')
postproc.add_option(
'--embed-thumbnail',
action='store_true', dest='embedthumbnail', default=False,
help='Embed thumbnail in the audio as cover art')
postproc.add_option(
'--add-metadata',
action='store_true', dest='addmetadata', default=False,
help='Write metadata to the video file')
postproc.add_option(
'--metadata-from-title',
metavar='FORMAT', dest='metafromtitle',
help='Parse additional metadata like song title / artist from the video title. '
'The format syntax is the same as --output, '
'the parsed parameters replace existing values. '
'Additional templates: %(album)s, %(artist)s. '
'Example: --metadata-from-title "%(artist)s - %(title)s" matches a title like '
'"Coldplay - Paradise"')
postproc.add_option(
'--xattrs',
action='store_true', dest='xattrs', default=False,
help='Write metadata to the video file\'s xattrs (using dublin core and xdg standards)')
postproc.add_option(
'--fixup',
metavar='POLICY', dest='fixup', default='detect_or_warn',
help='Automatically correct known faults of the file. '
'One of never (do nothing), warn (only emit a warning), '
'detect_or_warn (the default; fix file if we can, warn otherwise)')
postproc.add_option(
'--prefer-avconv',
action='store_false', dest='prefer_ffmpeg',
help='Prefer avconv over ffmpeg for running the postprocessors (default)')
postproc.add_option(
'--prefer-ffmpeg',
action='store_true', dest='prefer_ffmpeg',
help='Prefer ffmpeg over avconv for running the postprocessors')
postproc.add_option(
'--ffmpeg-location', '--avconv-location', metavar='PATH',
dest='ffmpeg_location',
help='Location of the ffmpeg/avconv binary; either the path to the binary or its containing directory.')
postproc.add_option(
'--exec',
metavar='CMD', dest='exec_cmd',
help='Execute a command on the file after downloading, similar to find\'s -exec syntax. Example: --exec \'adb push {} /sdcard/Music/ && rm {}\'')
postproc.add_option(
'--convert-subs', '--convert-subtitles',
metavar='FORMAT', dest='convertsubtitles', default=None,
help='Convert the subtitles to other format (currently supported: srt|ass|vtt)')
parser.add_option_group(general)
parser.add_option_group(network)
parser.add_option_group(selection)
parser.add_option_group(downloader)
parser.add_option_group(filesystem)
parser.add_option_group(thumbnail)
parser.add_option_group(verbosity)
parser.add_option_group(workarounds)
parser.add_option_group(video_format)
parser.add_option_group(subtitles)
parser.add_option_group(authentication)
parser.add_option_group(adobe_pass)
parser.add_option_group(postproc)
if overrideArguments is not None:
opts, args = parser.parse_args(overrideArguments)
if opts.verbose:
write_string('[debug] Override config: ' + repr(overrideArguments) + '\n')
else:
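        # compat_conf: on Python 2, sys.argv holds byte strings, so decode
        # them with the locale's preferred encoding before option parsing.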
def compat_conf(conf):
if sys.version_info < (3,):
return [a.decode(preferredencoding(), 'replace') for a in conf]
return conf
command_line_conf = compat_conf(sys.argv[1:])
opts, args = parser.parse_args(command_line_conf)
system_conf = user_conf = custom_conf = []
if '--config-location' in command_line_conf:
location = compat_expanduser(opts.config_location)
if os.path.isdir(location):
location = os.path.join(location, 'youtube-dl.conf')
if not os.path.exists(location):
parser.error('config-location %s does not exist.' % location)
custom_conf = _readOptions(location)
elif '--ignore-config' in command_line_conf:
pass
else:
system_conf = _readOptions('/etc/youtube-dl.conf')
if '--ignore-config' not in system_conf:
user_conf = _readUserConf()
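        # Later entries take precedence, so command-line arguments override
        # the --config-location file, which overrides the user config, which
        # in turn overrides the system config.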
argv = system_conf + user_conf + custom_conf + command_line_conf
opts, args = parser.parse_args(argv)
if opts.verbose:
for conf_label, conf in (
('System config', system_conf),
('User config', user_conf),
('Custom config', custom_conf),
('Command-line args', command_line_conf)):
write_string('[debug] %s: %s\n' % (conf_label, repr(_hide_login_info(conf))))
return parser, opts, args
|
googleapis/googleapis-gen | refs/heads/master | google/ads/googleads/v8/googleads-py/google/ads/googleads/v8/services/types/ad_group_ad_label_service.py | 1 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v8.resources.types import ad_group_ad_label
from google.rpc import status_pb2 # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v8.services',
marshal='google.ads.googleads.v8',
manifest={
'GetAdGroupAdLabelRequest',
'MutateAdGroupAdLabelsRequest',
'AdGroupAdLabelOperation',
'MutateAdGroupAdLabelsResponse',
'MutateAdGroupAdLabelResult',
},
)
class GetAdGroupAdLabelRequest(proto.Message):
r"""Request message for
[AdGroupAdLabelService.GetAdGroupAdLabel][google.ads.googleads.v8.services.AdGroupAdLabelService.GetAdGroupAdLabel].
Attributes:
resource_name (str):
Required. The resource name of the ad group
ad label to fetch.
"""
resource_name = proto.Field(
proto.STRING,
number=1,
)
class MutateAdGroupAdLabelsRequest(proto.Message):
r"""Request message for
[AdGroupAdLabelService.MutateAdGroupAdLabels][google.ads.googleads.v8.services.AdGroupAdLabelService.MutateAdGroupAdLabels].
Attributes:
customer_id (str):
Required. ID of the customer whose ad group
ad labels are being modified.
operations (Sequence[google.ads.googleads.v8.services.types.AdGroupAdLabelOperation]):
Required. The list of operations to perform
on ad group ad labels.
partial_failure (bool):
If true, successful operations will be
carried out and invalid operations will return
errors. If false, all operations will be carried
out in one transaction if and only if they are
all valid. Default is false.
validate_only (bool):
If true, the request is validated but not
executed. Only errors are returned, not results.
"""
customer_id = proto.Field(
proto.STRING,
number=1,
)
operations = proto.RepeatedField(
proto.MESSAGE,
number=2,
message='AdGroupAdLabelOperation',
)
partial_failure = proto.Field(
proto.BOOL,
number=3,
)
validate_only = proto.Field(
proto.BOOL,
number=4,
)
class AdGroupAdLabelOperation(proto.Message):
r"""A single operation (create, remove) on an ad group ad label.
Attributes:
create (google.ads.googleads.v8.resources.types.AdGroupAdLabel):
Create operation: No resource name is
expected for the new ad group ad label.
remove (str):
Remove operation: A resource name for the ad group ad label
being removed, in this format:
            ``customers/{customer_id}/adGroupAdLabels/{ad_group_id}~{ad_id}~{label_id}``
"""
create = proto.Field(
proto.MESSAGE,
number=1,
oneof='operation',
message=ad_group_ad_label.AdGroupAdLabel,
)
remove = proto.Field(
proto.STRING,
number=2,
oneof='operation',
)
class MutateAdGroupAdLabelsResponse(proto.Message):
r"""Response message for an ad group ad labels mutate.
Attributes:
partial_failure_error (google.rpc.status_pb2.Status):
Errors that pertain to operation failures in the partial
failure mode. Returned only when partial_failure = true and
all errors occur inside the operations. If any errors occur
outside the operations (e.g. auth errors), we return an RPC
level error.
results (Sequence[google.ads.googleads.v8.services.types.MutateAdGroupAdLabelResult]):
All results for the mutate.
"""
partial_failure_error = proto.Field(
proto.MESSAGE,
number=3,
message=status_pb2.Status,
)
results = proto.RepeatedField(
proto.MESSAGE,
number=2,
message='MutateAdGroupAdLabelResult',
)
class MutateAdGroupAdLabelResult(proto.Message):
r"""The result for an ad group ad label mutate.
Attributes:
resource_name (str):
Returned for successful operations.
"""
resource_name = proto.Field(
proto.STRING,
number=1,
)
__all__ = tuple(sorted(__protobuf__.manifest))
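# Usage sketch (not part of the generated module): building a remove
# operation inside a partial-failure mutate request. The customer ID and
# resource name below are made-up example values.
#
#   request = MutateAdGroupAdLabelsRequest(
#       customer_id='1234567890',
#       operations=[AdGroupAdLabelOperation(
#           remove='customers/1234567890/adGroupAdLabels/111~222~333')],
#       partial_failure=True,
#   )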
|
ProjectSWGCore/NGECore2 | refs/heads/master | scripts/mobiles/corellia/corsec_deserter.py | 2 | import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector
def addTemplate(core):
mobileTemplate = MobileTemplate()
mobileTemplate.setCreatureName('corsec_deserter')
mobileTemplate.setLevel(33)
mobileTemplate.setDifficulty(Difficulty.NORMAL)
mobileTemplate.setMinSpawnDistance(4)
mobileTemplate.setMaxSpawnDistance(8)
mobileTemplate.setDeathblow(False)
mobileTemplate.setScale(1)
mobileTemplate.setSocialGroup("corsec rogue")
mobileTemplate.setAssistRange(6)
mobileTemplate.setStalker(True)
mobileTemplate.setOptionsBitmask(Options.AGGRESSIVE | Options.ATTACKABLE)
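    # Candidate appearance (.iff) models for this NPC; presumably one is
    # chosen per spawn.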
templates = Vector()
templates.add('object/mobile/shared_dressed_corsec_captain_human_female_01.iff')
templates.add('object/mobile/shared_dressed_corsec_captain_human_male_01.iff')
templates.add('object/mobile/shared_dressed_corsec_captain_human_male_02.iff')
templates.add('object/mobile/shared_dressed_corsec_captain_human_male_03.iff')
templates.add('object/mobile/shared_dressed_corsec_officer_human_female_01.iff')
templates.add('object/mobile/shared_dressed_corsec_officer_human_male_01.iff')
mobileTemplate.setTemplates(templates)
weaponTemplates = Vector()
weapontemplate = WeaponTemplate('object/weapon/ranged/carbine/shared_carbine_cdef.iff', WeaponType.CARBINE, 1.0, 15, 'energy')
weaponTemplates.add(weapontemplate)
mobileTemplate.setWeaponTemplateVector(weaponTemplates)
attacks = Vector()
mobileTemplate.setDefaultAttack('rangedShot')
mobileTemplate.setAttacks(attacks)
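    # Loot setup: the loot group is rolled 100% of the time and contains a
    # single 'Junk' pool, itself weighted at 100%.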
lootPoolNames_1 = ['Junk']
lootPoolChances_1 = [100]
lootGroupChance_1 = 100
mobileTemplate.addToLootGroups(lootPoolNames_1,lootPoolChances_1,lootGroupChance_1)
core.spawnService.addMobileTemplate('corsec_deserter', mobileTemplate)
return |
schrodinstein/my_fisrt_blog | refs/heads/master | blog/__init__.py | 12133432 |