Dataset rows below use the column order:
repo_name (string, 5-100 chars) | path (string, 4-299 chars) | copies (990 distinct values) | size (string, 4-7 chars) | content (string, 666-1.03M chars) | license (15 distinct values) | hash (int64) | line_mean (float64, 3.17-100) | line_max (int64, 7-1k) | alpha_frac (float64, 0.25-0.98) | autogenerated (bool)
pbrazdil/phantomjs | src/qt/qtwebkit/Tools/QueueStatusServer/handlers/gc.py | copies: 146 | size: 2038
# Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from google.appengine.ext import webapp
from model.queuestatus import QueueStatus
class GC(webapp.RequestHandler):
def get(self):
statuses = QueueStatus.all().order("-date")
seen_queues = set()
for status in statuses:
if status.active_patch_id or status.active_bug_id:
continue
if status.queue_name in seen_queues:
status.delete()
seen_queues.add(status.queue_name)
self.response.out.write("Done!")
license: bsd-3-clause | hash: 1,754,592,146,930,344,200 | line_mean: 45.318182 | line_max: 72 | alpha_frac: 0.740432 | autogenerated: false
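The GC handler in the file above only does work once it is routed into an App Engine application; the QueueStatusServer's actual main module is not part of this row, so the wiring below is a hypothetical sketch (the URL path, import path, and cron trigger are assumptions, not taken from the repository).

```python
# Hypothetical wiring for the GC handler above (module path and URL are assumptions).
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app

from handlers.gc import GC

# Map a URL to the handler; a cron.yaml entry would typically hit /gc periodically.
application = webapp.WSGIApplication([("/gc", GC)], debug=True)


def main():
    run_wsgi_app(application)


if __name__ == "__main__":
    main()
```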
benlangmuir/swift | utils/type-layout-fuzzer.py | copies: 32 | size: 4228
#!/usr/bin/env python
# This script outputs a Swift source with randomly-generated type definitions,
# which can be used for ABI or layout algorithm fuzzing.
# TODO: generate types with generics, existentials, compositions
from __future__ import print_function
import random
import sys
maxDepth = 5
maxMembers = 5
typesDefined = []
classesDefined = []
nextToDefine = 0
objcInterop = False
if len(sys.argv) >= 2:
if sys.argv[1] == "--objc":
objcInterop = True
if sys.argv[1] == "--help":
print("Usage: " + sys.argv[0] + " [--objc]", file=sys.stderr)
print("", file=sys.stderr)
print(" --objc Include ObjC-interop types", file=sys.stderr)
sys.exit(2)
random.seed()
if objcInterop:
print("import Foundation")
print()
def randomTypeList(depth):
count = random.randint(0, maxMembers)
result = "("
for i in xrange(count):
if i > 0:
result += ", "
result += randomTypeReference(depth + 1)
result += ")"
return result
def randomTypeReference(depth):
def nominal():
global typesDefined
allowNew = depth < maxDepth
bound = len(classesDefined) if allowNew else len(classesDefined) - 1
which = random.randint(0, bound)
if which < len(classesDefined):
return classesDefined[which]
newName = "T" + str(len(typesDefined))
def defineRandomRelatedType(name):
defineRandomNominalType(name, depth)
typesDefined.append((newName, defineRandomRelatedType))
return newName
def tuple():
return randomTypeList(depth + 1)
def metatype():
return "(" + randomTypeReference(depth + 1) + ").Type"
def leaf():
leaves = ["Int", "String", "Int8", "Int16", "Int32", "Int64",
"(() -> ())", "(@convention(c) () -> ())", "AnyObject"]
if objcInterop:
leaves += ["NSObject", "(@convention(block) () -> ())"]
return random.choice(leaves)
if depth < maxDepth:
kinds = [nominal, tuple, metatype, leaf, leaf, leaf, leaf, leaf]
else:
kinds = [leaf]
return random.choice(kinds)()
def defineRandomFields(depth, basename):
numMembers = random.randint(0, maxMembers)
for i in xrange(numMembers):
print(" var " + basename + str(i) + ": " +
randomTypeReference(depth + 1))
def defineRandomClass(name, depth):
global classesDefined
classesDefined.append(name)
print("class " + name, end="")
def inheritNSObject():
print(": NSObject", end="")
def inheritsOtherClass():
print(": ", end="")
name = "T" + str(len(typesDefined))
def defineRandomBaseClass(name):
defineRandomClass(name, depth)
typesDefined.append((name, defineRandomBaseClass))
print(name, end="")
def inheritsNothing():
pass
inheritances = [inheritsNothing]
if depth == 0:
# The contents of classes are interesting only for top-level type
inheritances += [inheritsOtherClass]
if objcInterop:
inheritances += [inheritNSObject]
random.choice(inheritances)()
print(" {")
# Prevent errors about lack of initializers
print(" init(" + name + ": ()) { fatalError() }")
# The contents of classes are interesting only for top-level type
if depth == 0:
defineRandomFields(depth, "x" + name)
print("}")
print()
def defineRandomNominalType(name, depth=0):
def struct():
print("struct " + name + " {")
defineRandomFields(depth, "x")
print("}")
print()
def clas():
defineRandomClass(name, depth)
def enum():
# TODO: indirect cases
print("enum " + name + " {")
numCases = random.randint(0, maxMembers)
for i in xrange(numCases):
print(" case x" + str(i) + randomTypeList(depth + 1))
print("}")
print()
kinds = [struct, clas, enum]
return random.choice(kinds)()
typesDefined.append(("Generated", defineRandomNominalType))
while nextToDefine < len(typesDefined):
name, definer = typesDefined[nextToDefine]
definer(name)
nextToDefine += 1
license: apache-2.0 | hash: -6,604,167,780,531,561,000 | line_mean: 25.591195 | line_max: 78 | alpha_frac: 0.598628 | autogenerated: false
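A typical way to exercise the generator above is to emit one case and feed it straight to the compiler, treating a crash or verifier error as a finding. The driver below is only a sketch under the assumption that the script and `swiftc` are on `PATH`; the file name and compiler flags are placeholders.

```python
# Hypothetical driver loop for the fuzzer above (paths and flags are assumptions).
import subprocess

source = subprocess.check_output(["python", "type-layout-fuzzer.py", "--objc"])
with open("fuzz_case.swift", "wb") as f:
    f.write(source)
# Compile the generated case; a compiler crash here is what the fuzzer hunts for.
subprocess.check_call(["swiftc", "-emit-ir", "fuzz_case.swift", "-o", "/dev/null"])
```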
Bismarrck/tensorflow | tensorflow/python/ops/linalg/linear_operator.py | copies: 3 | size: 33838
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base class for linear operators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import contextlib
import numpy as np
import six
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linalg_impl as linalg
from tensorflow.python.ops.linalg import linear_operator_algebra
from tensorflow.python.ops.linalg import linear_operator_util
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import tf_export
__all__ = ["LinearOperator"]
# TODO(langmore) Use matrix_solve_ls for singular or non-square matrices.
@tf_export("linalg.LinearOperator")
@six.add_metaclass(abc.ABCMeta)
class LinearOperator(object):
"""Base class defining a [batch of] linear operator[s].
Subclasses of `LinearOperator` provide access to common methods on a
(batch) matrix, without the need to materialize the matrix. This allows:
* Matrix free computations
* Operators that take advantage of special structure, while providing a
consistent API to users.
#### Subclassing
To enable a public method, subclasses should implement the leading-underscore
version of the method. The argument signature should be identical except for
the omission of `name="..."`. For example, to enable
`matmul(x, adjoint=False, name="matmul")` a subclass should implement
`_matmul(x, adjoint=False)`.
#### Performance contract
Subclasses should only implement the assert methods
(e.g. `assert_non_singular`) if they can be done in less than `O(N^3)`
time.
Class docstrings should contain an explanation of computational complexity.
Since this is a high-performance library, attention should be paid to detail,
and explanations can include constants as well as Big-O notation.
#### Shape compatibility
`LinearOperator` subclasses should operate on a [batch] matrix with
compatible shape. Class docstrings should define what is meant by compatible
shape. Some subclasses may not support batching.
Examples:
`x` is a batch matrix with compatible shape for `matmul` if
```
operator.shape = [B1,...,Bb] + [M, N], b >= 0,
x.shape = [B1,...,Bb] + [N, R]
```
`rhs` is a batch matrix with compatible shape for `solve` if
```
operator.shape = [B1,...,Bb] + [M, N], b >= 0,
rhs.shape = [B1,...,Bb] + [M, R]
```
#### Example docstring for subclasses.
This operator acts like a (batch) matrix `A` with shape
`[B1,...,Bb, M, N]` for some `b >= 0`. The first `b` indices index a
batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is
an `m x n` matrix. Again, this matrix `A` may not be materialized, but for
purposes of identifying and working with compatible arguments the shape is
relevant.
Examples:
```python
some_tensor = ... shape = ????
operator = MyLinOp(some_tensor)
operator.shape()
==> [2, 4, 4]
operator.log_abs_determinant()
==> Shape [2] Tensor
x = ... Shape [2, 4, 5] Tensor
operator.matmul(x)
==> Shape [2, 4, 5] Tensor
```
#### Shape compatibility
This operator acts on batch matrices with compatible shape.
FILL IN WHAT IS MEANT BY COMPATIBLE SHAPE
#### Performance
FILL THIS IN
#### Matrix property hints
This `LinearOperator` is initialized with boolean flags of the form `is_X`,
for `X = non_singular, self_adjoint, positive_definite, square`.
These have the following meaning:
* If `is_X == True`, callers should expect the operator to have the
property `X`. This is a promise that should be fulfilled, but is *not* a
runtime assert. For example, finite floating point precision may result
in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`.
* If `is_X == None` (the default), callers should have no expectation either
way.
"""
def __init__(self,
dtype,
graph_parents=None,
is_non_singular=None,
is_self_adjoint=None,
is_positive_definite=None,
is_square=None,
name=None):
r"""Initialize the `LinearOperator`.
**This is a private method for subclass use.**
**Subclasses should copy-paste this `__init__` documentation.**
Args:
dtype: The type of the this `LinearOperator`. Arguments to `matmul` and
`solve` will have to be this type.
graph_parents: Python list of graph prerequisites of this `LinearOperator`
Typically tensors that are passed during initialization.
is_non_singular: Expect that this operator is non-singular.
is_self_adjoint: Expect that this operator is equal to its hermitian
transpose. If `dtype` is real, this is equivalent to being symmetric.
is_positive_definite: Expect that this operator is positive definite,
meaning the quadratic form `x^H A x` has positive real part for all
nonzero `x`. Note that we do not require the operator to be
self-adjoint to be positive-definite. See:
https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices
is_square: Expect that this operator acts like square [batch] matrices.
name: A name for this `LinearOperator`.
Raises:
ValueError: If any member of graph_parents is `None` or not a `Tensor`.
ValueError: If hints are set incorrectly.
"""
# Check and auto-set flags.
if is_positive_definite:
if is_non_singular is False:
raise ValueError("A positive definite matrix is always non-singular.")
is_non_singular = True
if is_non_singular:
if is_square is False:
raise ValueError("A non-singular matrix is always square.")
is_square = True
if is_self_adjoint:
if is_square is False:
raise ValueError("A self-adjoint matrix is always square.")
is_square = True
self._is_square_set_or_implied_by_hints = is_square
graph_parents = [] if graph_parents is None else graph_parents
for i, t in enumerate(graph_parents):
if t is None or not tensor_util.is_tensor(t):
raise ValueError("Graph parent item %d is not a Tensor; %s." % (i, t))
self._dtype = dtype
self._graph_parents = graph_parents
self._is_non_singular = is_non_singular
self._is_self_adjoint = is_self_adjoint
self._is_positive_definite = is_positive_definite
self._name = name or type(self).__name__
@contextlib.contextmanager
def _name_scope(self, name=None, values=None):
"""Helper function to standardize op scope."""
with ops.name_scope(self.name):
with ops.name_scope(
name, values=((values or []) + self._graph_parents)) as scope:
yield scope
@property
def dtype(self):
"""The `DType` of `Tensor`s handled by this `LinearOperator`."""
return self._dtype
@property
def name(self):
"""Name prepended to all ops created by this `LinearOperator`."""
return self._name
@property
def graph_parents(self):
"""List of graph dependencies of this `LinearOperator`."""
return self._graph_parents
@property
def is_non_singular(self):
return self._is_non_singular
@property
def is_self_adjoint(self):
return self._is_self_adjoint
@property
def is_positive_definite(self):
return self._is_positive_definite
@property
def is_square(self):
"""Return `True/False` depending on if this operator is square."""
# Static checks done after __init__. Why? Because domain/range dimension
# sometimes requires lots of work done in the derived class after init.
auto_square_check = self.domain_dimension == self.range_dimension
if self._is_square_set_or_implied_by_hints is False and auto_square_check:
raise ValueError(
"User set is_square hint to False, but the operator was square.")
if self._is_square_set_or_implied_by_hints is None:
return auto_square_check
return self._is_square_set_or_implied_by_hints
@abc.abstractmethod
def _shape(self):
# Write this in derived class to enable all static shape methods.
raise NotImplementedError("_shape is not implemented.")
@property
def shape(self):
"""`TensorShape` of this `LinearOperator`.
If this operator acts like the batch matrix `A` with
`A.shape = [B1,...,Bb, M, N]`, then this returns
`TensorShape([B1,...,Bb, M, N])`, equivalent to `A.get_shape()`.
Returns:
`TensorShape`, statically determined, may be undefined.
"""
return self._shape()
@abc.abstractmethod
def _shape_tensor(self):
raise NotImplementedError("_shape_tensor is not implemented.")
def shape_tensor(self, name="shape_tensor"):
"""Shape of this `LinearOperator`, determined at runtime.
If this operator acts like the batch matrix `A` with
`A.shape = [B1,...,Bb, M, N]`, then this returns a `Tensor` holding
`[B1,...,Bb, M, N]`, equivalent to `tf.shape(A)`.
Args:
name: A name for this `Op`.
Returns:
`int32` `Tensor`
"""
with self._name_scope(name):
# Prefer to use statically defined shape if available.
if self.shape.is_fully_defined():
return linear_operator_util.shape_tensor(self.shape.as_list())
else:
return self._shape_tensor()
@property
def batch_shape(self):
"""`TensorShape` of batch dimensions of this `LinearOperator`.
If this operator acts like the batch matrix `A` with
`A.shape = [B1,...,Bb, M, N]`, then this returns
`TensorShape([B1,...,Bb])`, equivalent to `A.get_shape()[:-2]`
Returns:
`TensorShape`, statically determined, may be undefined.
"""
# Derived classes get this "for free" once .shape is implemented.
return self.shape[:-2]
def batch_shape_tensor(self, name="batch_shape_tensor"):
"""Shape of batch dimensions of this operator, determined at runtime.
If this operator acts like the batch matrix `A` with
`A.shape = [B1,...,Bb, M, N]`, then this returns a `Tensor` holding
`[B1,...,Bb]`.
Args:
name: A name for this `Op`.
Returns:
`int32` `Tensor`
"""
# Derived classes get this "for free" once .shape() is implemented.
with self._name_scope(name):
# Prefer to use statically defined shape if available.
if self.batch_shape.is_fully_defined():
return linear_operator_util.shape_tensor(
self.batch_shape.as_list(), name="batch_shape")
else:
return self.shape_tensor()[:-2]
@property
def tensor_rank(self, name="tensor_rank"):
"""Rank (in the sense of tensors) of matrix corresponding to this operator.
If this operator acts like the batch matrix `A` with
`A.shape = [B1,...,Bb, M, N]`, then this returns `b + 2`.
Args:
name: A name for this `Op`.
Returns:
Python integer, or None if the tensor rank is undefined.
"""
# Derived classes get this "for free" once .shape() is implemented.
with self._name_scope(name):
return self.shape.ndims
def tensor_rank_tensor(self, name="tensor_rank_tensor"):
"""Rank (in the sense of tensors) of matrix corresponding to this operator.
If this operator acts like the batch matrix `A` with
`A.shape = [B1,...,Bb, M, N]`, then this returns `b + 2`.
Args:
name: A name for this `Op`.
Returns:
`int32` `Tensor`, determined at runtime.
"""
# Derived classes get this "for free" once .shape() is implemented.
with self._name_scope(name):
# Prefer to use statically defined shape if available.
if self.tensor_rank is not None:
return ops.convert_to_tensor(self.tensor_rank)
else:
return array_ops.size(self.shape_tensor())
@property
def domain_dimension(self):
"""Dimension (in the sense of vector spaces) of the domain of this operator.
If this operator acts like the batch matrix `A` with
`A.shape = [B1,...,Bb, M, N]`, then this returns `N`.
Returns:
`Dimension` object.
"""
# Derived classes get this "for free" once .shape is implemented.
if self.shape.rank is None:
return tensor_shape.Dimension(None)
else:
return self.shape.dims[-1]
def domain_dimension_tensor(self, name="domain_dimension_tensor"):
"""Dimension (in the sense of vector spaces) of the domain of this operator.
Determined at runtime.
If this operator acts like the batch matrix `A` with
`A.shape = [B1,...,Bb, M, N]`, then this returns `N`.
Args:
name: A name for this `Op`.
Returns:
`int32` `Tensor`
"""
# Derived classes get this "for free" once .shape() is implemented.
with self._name_scope(name):
# Prefer to use statically defined shape if available.
dim_value = tensor_shape.dimension_value(self.domain_dimension)
if dim_value is not None:
return ops.convert_to_tensor(dim_value)
else:
return self.shape_tensor()[-1]
@property
def range_dimension(self):
"""Dimension (in the sense of vector spaces) of the range of this operator.
If this operator acts like the batch matrix `A` with
`A.shape = [B1,...,Bb, M, N]`, then this returns `M`.
Returns:
`Dimension` object.
"""
# Derived classes get this "for free" once .shape is implemented.
if self.shape.dims:
return self.shape.dims[-2]
else:
return tensor_shape.Dimension(None)
def range_dimension_tensor(self, name="range_dimension_tensor"):
"""Dimension (in the sense of vector spaces) of the range of this operator.
Determined at runtime.
If this operator acts like the batch matrix `A` with
`A.shape = [B1,...,Bb, M, N]`, then this returns `M`.
Args:
name: A name for this `Op`.
Returns:
`int32` `Tensor`
"""
# Derived classes get this "for free" once .shape() is implemented.
with self._name_scope(name):
# Prefer to use statically defined shape if available.
dim_value = tensor_shape.dimension_value(self.range_dimension)
if dim_value is not None:
return ops.convert_to_tensor(dim_value)
else:
return self.shape_tensor()[-2]
def _assert_non_singular(self):
"""Private default implementation of _assert_non_singular."""
logging.warn(
"Using (possibly slow) default implementation of assert_non_singular."
" Requires conversion to a dense matrix and O(N^3) operations.")
if self._can_use_cholesky():
return self.assert_positive_definite()
else:
singular_values = linalg_ops.svd(self.to_dense(), compute_uv=False)
# TODO(langmore) Add .eig and .cond as methods.
cond = (math_ops.reduce_max(singular_values, axis=-1) /
math_ops.reduce_min(singular_values, axis=-1))
return check_ops.assert_less(
cond,
self._max_condition_number_to_be_non_singular(),
message="Singular matrix up to precision epsilon.")
def _max_condition_number_to_be_non_singular(self):
"""Return the maximum condition number that we consider nonsingular."""
with ops.name_scope("max_nonsingular_condition_number"):
dtype_eps = np.finfo(self.dtype.as_numpy_dtype).eps
eps = math_ops.cast(
math_ops.reduce_max([
100.,
math_ops.cast(self.range_dimension_tensor(), self.dtype),
math_ops.cast(self.domain_dimension_tensor(), self.dtype)
]), self.dtype) * dtype_eps
return 1. / eps
def assert_non_singular(self, name="assert_non_singular"):
"""Returns an `Op` that asserts this operator is non singular.
This operator is considered non-singular if
```
ConditionNumber < max{100, range_dimension, domain_dimension} * eps,
eps := np.finfo(self.dtype.as_numpy_dtype).eps
```
Args:
name: A string name to prepend to created ops.
Returns:
An `Assert` `Op`, that, when run, will raise an `InvalidArgumentError` if
the operator is singular.
"""
with self._name_scope(name):
return self._assert_non_singular()
def _assert_positive_definite(self):
"""Default implementation of _assert_positive_definite."""
logging.warn(
"Using (possibly slow) default implementation of "
"assert_positive_definite."
" Requires conversion to a dense matrix and O(N^3) operations.")
# If the operator is self-adjoint, then checking that
# Cholesky decomposition succeeds + results in positive diag is necessary
# and sufficient.
if self.is_self_adjoint:
return check_ops.assert_positive(
array_ops.matrix_diag_part(linalg_ops.cholesky(self.to_dense())),
message="Matrix was not positive definite.")
# We have no generic check for positive definite.
raise NotImplementedError("assert_positive_definite is not implemented.")
def assert_positive_definite(self, name="assert_positive_definite"):
"""Returns an `Op` that asserts this operator is positive definite.
Here, positive definite means that the quadratic form `x^H A x` has positive
real part for all nonzero `x`. Note that we do not require the operator to
be self-adjoint to be positive definite.
Args:
name: A name to give this `Op`.
Returns:
An `Assert` `Op`, that, when run, will raise an `InvalidArgumentError` if
the operator is not positive definite.
"""
with self._name_scope(name):
return self._assert_positive_definite()
def _assert_self_adjoint(self):
dense = self.to_dense()
logging.warn(
"Using (possibly slow) default implementation of assert_self_adjoint."
" Requires conversion to a dense matrix.")
return check_ops.assert_equal(
dense,
linalg.adjoint(dense),
message="Matrix was not equal to its adjoint.")
def assert_self_adjoint(self, name="assert_self_adjoint"):
"""Returns an `Op` that asserts this operator is self-adjoint.
Here we check that this operator is *exactly* equal to its hermitian
transpose.
Args:
name: A string name to prepend to created ops.
Returns:
An `Assert` `Op`, that, when run, will raise an `InvalidArgumentError` if
the operator is not self-adjoint.
"""
with self._name_scope(name):
return self._assert_self_adjoint()
def _check_input_dtype(self, arg):
"""Check that arg.dtype == self.dtype."""
if arg.dtype != self.dtype:
raise TypeError(
"Expected argument to have dtype %s. Found: %s in tensor %s" %
(self.dtype, arg.dtype, arg))
@abc.abstractmethod
def _matmul(self, x, adjoint=False, adjoint_arg=False):
raise NotImplementedError("_matmul is not implemented.")
def matmul(self, x, adjoint=False, adjoint_arg=False, name="matmul"):
"""Transform [batch] matrix `x` with left multiplication: `x --> Ax`.
```python
# Make an operator acting like batch matrix A. Assume A.shape = [..., M, N]
operator = LinearOperator(...)
operator.shape = [..., M, N]
X = ... # shape [..., N, R], batch matrix, R > 0.
Y = operator.matmul(X)
Y.shape
==> [..., M, R]
Y[..., :, r] = sum_j A[..., :, j] X[j, r]
```
Args:
x: `LinearOperator` or `Tensor` with compatible shape and same `dtype` as
`self`. See class docstring for definition of compatibility.
adjoint: Python `bool`. If `True`, left multiply by the adjoint: `A^H x`.
adjoint_arg: Python `bool`. If `True`, compute `A x^H` where `x^H` is
the hermitian transpose (transposition and complex conjugation).
name: A name for this `Op`.
Returns:
A `LinearOperator` or `Tensor` with shape `[..., M, R]` and same `dtype`
as `self`.
"""
if isinstance(x, LinearOperator):
if adjoint or adjoint_arg:
raise ValueError(".matmul not supported with adjoints.")
if (x.range_dimension is not None and
self.domain_dimension is not None and
x.range_dimension != self.domain_dimension):
raise ValueError(
"Operators are incompatible. Expected `x` to have dimension"
" {} but got {}.".format(self.domain_dimension, x.range_dimension))
with self._name_scope(name):
return linear_operator_algebra.matmul(self, x)
with self._name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
self._check_input_dtype(x)
self_dim = -2 if adjoint else -1
arg_dim = -1 if adjoint_arg else -2
tensor_shape.dimension_at_index(
self.shape, self_dim).assert_is_compatible_with(
x.get_shape()[arg_dim])
return self._matmul(x, adjoint=adjoint, adjoint_arg=adjoint_arg)
def _matvec(self, x, adjoint=False):
x_mat = array_ops.expand_dims(x, axis=-1)
y_mat = self.matmul(x_mat, adjoint=adjoint)
return array_ops.squeeze(y_mat, axis=-1)
def matvec(self, x, adjoint=False, name="matvec"):
"""Transform [batch] vector `x` with left multiplication: `x --> Ax`.
```python
# Make an operator acting like batch matrix A. Assume A.shape = [..., M, N]
operator = LinearOperator(...)
X = ... # shape [..., N], batch vector
Y = operator.matvec(X)
Y.shape
==> [..., M]
Y[..., :] = sum_j A[..., :, j] X[..., j]
```
Args:
x: `Tensor` with compatible shape and same `dtype` as `self`.
`x` is treated as a [batch] vector meaning for every set of leading
dimensions, the last dimension defines a vector.
See class docstring for definition of compatibility.
adjoint: Python `bool`. If `True`, left multiply by the adjoint: `A^H x`.
name: A name for this `Op`.
Returns:
A `Tensor` with shape `[..., M]` and same `dtype` as `self`.
"""
with self._name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
self._check_input_dtype(x)
self_dim = -2 if adjoint else -1
tensor_shape.dimension_at_index(
self.shape, self_dim).assert_is_compatible_with(x.get_shape()[-1])
return self._matvec(x, adjoint=adjoint)
def _determinant(self):
logging.warn(
"Using (possibly slow) default implementation of determinant."
" Requires conversion to a dense matrix and O(N^3) operations.")
if self._can_use_cholesky():
return math_ops.exp(self.log_abs_determinant())
return linalg_ops.matrix_determinant(self.to_dense())
def determinant(self, name="det"):
"""Determinant for every batch member.
Args:
name: A name for this `Op`.
Returns:
`Tensor` with shape `self.batch_shape` and same `dtype` as `self`.
Raises:
NotImplementedError: If `self.is_square` is `False`.
"""
if self.is_square is False:
raise NotImplementedError(
"Determinant not implemented for an operator that is expected to "
"not be square.")
with self._name_scope(name):
return self._determinant()
def _log_abs_determinant(self):
logging.warn(
"Using (possibly slow) default implementation of determinant."
" Requires conversion to a dense matrix and O(N^3) operations.")
if self._can_use_cholesky():
diag = array_ops.matrix_diag_part(linalg_ops.cholesky(self.to_dense()))
return 2 * math_ops.reduce_sum(math_ops.log(diag), axis=[-1])
_, log_abs_det = linalg.slogdet(self.to_dense())
return log_abs_det
def log_abs_determinant(self, name="log_abs_det"):
"""Log absolute value of determinant for every batch member.
Args:
name: A name for this `Op`.
Returns:
`Tensor` with shape `self.batch_shape` and same `dtype` as `self`.
Raises:
NotImplementedError: If `self.is_square` is `False`.
"""
if self.is_square is False:
raise NotImplementedError(
"Determinant not implemented for an operator that is expected to "
"not be square.")
with self._name_scope(name):
return self._log_abs_determinant()
def _solve(self, rhs, adjoint=False, adjoint_arg=False):
"""Default implementation of _solve."""
if self.is_square is False:
raise NotImplementedError(
"Solve is not yet implemented for non-square operators.")
logging.warn(
"Using (possibly slow) default implementation of solve."
" Requires conversion to a dense matrix and O(N^3) operations.")
rhs = linalg.adjoint(rhs) if adjoint_arg else rhs
if self._can_use_cholesky():
return linear_operator_util.cholesky_solve_with_broadcast(
linalg_ops.cholesky(self.to_dense()), rhs)
return linear_operator_util.matrix_solve_with_broadcast(
self.to_dense(), rhs, adjoint=adjoint)
def solve(self, rhs, adjoint=False, adjoint_arg=False, name="solve"):
"""Solve (exact or approx) `R` (batch) systems of equations: `A X = rhs`.
The returned `Tensor` will be close to an exact solution if `A` is well
conditioned. Otherwise closeness will vary. See class docstring for details.
Examples:
```python
# Make an operator acting like batch matrix A. Assume A.shape = [..., M, N]
operator = LinearOperator(...)
operator.shape = [..., M, N]
# Solve R > 0 linear systems for every member of the batch.
RHS = ... # shape [..., M, R]
X = operator.solve(RHS)
# X[..., :, r] is the solution to the r'th linear system
# sum_j A[..., :, j] X[..., j, r] = RHS[..., :, r]
operator.matmul(X)
==> RHS
```
Args:
rhs: `Tensor` with same `dtype` as this operator and compatible shape.
`rhs` is treated like a [batch] matrix meaning for every set of leading
dimensions, the last two dimensions defines a matrix.
See class docstring for definition of compatibility.
adjoint: Python `bool`. If `True`, solve the system involving the adjoint
of this `LinearOperator`: `A^H X = rhs`.
adjoint_arg: Python `bool`. If `True`, solve `A X = rhs^H` where `rhs^H`
is the hermitian transpose (transposition and complex conjugation).
name: A name scope to use for ops added by this method.
Returns:
`Tensor` with shape `[...,N, R]` and same `dtype` as `rhs`.
Raises:
NotImplementedError: If `self.is_non_singular` or `is_square` is False.
"""
if self.is_non_singular is False:
raise NotImplementedError(
"Exact solve not implemented for an operator that is expected to "
"be singular.")
if self.is_square is False:
raise NotImplementedError(
"Exact solve not implemented for an operator that is expected to "
"not be square.")
with self._name_scope(name, values=[rhs]):
rhs = ops.convert_to_tensor(rhs, name="rhs")
self._check_input_dtype(rhs)
self_dim = -1 if adjoint else -2
arg_dim = -1 if adjoint_arg else -2
tensor_shape.dimension_at_index(
self.shape, self_dim).assert_is_compatible_with(
rhs.get_shape()[arg_dim])
return self._solve(rhs, adjoint=adjoint, adjoint_arg=adjoint_arg)
def _solvevec(self, rhs, adjoint=False):
"""Default implementation of _solvevec."""
rhs_mat = array_ops.expand_dims(rhs, axis=-1)
solution_mat = self.solve(rhs_mat, adjoint=adjoint)
return array_ops.squeeze(solution_mat, axis=-1)
def solvevec(self, rhs, adjoint=False, name="solve"):
"""Solve single equation with best effort: `A X = rhs`.
The returned `Tensor` will be close to an exact solution if `A` is well
conditioned. Otherwise closeness will vary. See class docstring for details.
Examples:
```python
# Make an operator acting like batch matrix A. Assume A.shape = [..., M, N]
operator = LinearOperator(...)
operator.shape = [..., M, N]
# Solve one linear system for every member of the batch.
RHS = ... # shape [..., M]
X = operator.solvevec(RHS)
# X is the solution to the linear system
# sum_j A[..., :, j] X[..., j] = RHS[..., :]
operator.matvec(X)
==> RHS
```
Args:
rhs: `Tensor` with same `dtype` as this operator.
`rhs` is treated like a [batch] vector meaning for every set of leading
dimensions, the last dimension defines a vector. See class docstring
for definition of compatibility regarding batch dimensions.
adjoint: Python `bool`. If `True`, solve the system involving the adjoint
of this `LinearOperator`: `A^H X = rhs`.
name: A name scope to use for ops added by this method.
Returns:
`Tensor` with shape `[...,N]` and same `dtype` as `rhs`.
Raises:
NotImplementedError: If `self.is_non_singular` or `is_square` is False.
"""
with self._name_scope(name, values=[rhs]):
rhs = ops.convert_to_tensor(rhs, name="rhs")
self._check_input_dtype(rhs)
self_dim = -1 if adjoint else -2
tensor_shape.dimension_at_index(
self.shape, self_dim).assert_is_compatible_with(
rhs.get_shape()[-1])
return self._solvevec(rhs, adjoint=adjoint)
def cholesky(self, name="cholesky"):
"""Returns a Cholesky factor as a `LinearOperator`.
Given `A` representing this `LinearOperator`, if `A` is positive definite
self-adjoint, return `L`, where `A = L L^T`, i.e. the cholesky
decomposition.
Args:
name: A name for this `Op`.
Returns:
`LinearOperator` which represents the lower triangular matrix
in the Cholesky decomposition.
Raises:
ValueError: When the `LinearOperator` is not hinted to be positive
definite and self adjoint.
"""
if not self._can_use_cholesky():
raise ValueError("Cannot take the Cholesky decomposition: "
"Not a positive definite self adjoint matrix.")
with self._name_scope(name):
return linear_operator_algebra.cholesky(self)
def _to_dense(self):
"""Generic and often inefficient implementation. Override often."""
logging.warn("Using (possibly slow) default implementation of to_dense."
" Converts by self.matmul(identity).")
if self.batch_shape.is_fully_defined():
batch_shape = self.batch_shape
else:
batch_shape = self.batch_shape_tensor()
dim_value = tensor_shape.dimension_value(self.domain_dimension)
if dim_value is not None:
n = dim_value
else:
n = self.domain_dimension_tensor()
eye = linalg_ops.eye(num_rows=n, batch_shape=batch_shape, dtype=self.dtype)
return self.matmul(eye)
def to_dense(self, name="to_dense"):
"""Return a dense (batch) matrix representing this operator."""
with self._name_scope(name):
return self._to_dense()
def _diag_part(self):
"""Generic and often inefficient implementation. Override often."""
return array_ops.matrix_diag_part(self.to_dense())
def diag_part(self, name="diag_part"):
"""Efficiently get the [batch] diagonal part of this operator.
If this operator has shape `[B1,...,Bb, M, N]`, this returns a
`Tensor` `diagonal`, of shape `[B1,...,Bb, min(M, N)]`, where
`diagonal[b1,...,bb, i] = self.to_dense()[b1,...,bb, i, i]`.
```
my_operator = LinearOperatorDiag([1., 2.])
# Efficiently get the diagonal
my_operator.diag_part()
==> [1., 2.]
# Equivalent, but inefficient method
tf.matrix_diag_part(my_operator.to_dense())
==> [1., 2.]
```
Args:
name: A name for this `Op`.
Returns:
diag_part: A `Tensor` of same `dtype` as self.
"""
with self._name_scope(name):
return self._diag_part()
def _trace(self):
return math_ops.reduce_sum(self.diag_part(), axis=-1)
def trace(self, name="trace"):
"""Trace of the linear operator, equal to sum of `self.diag_part()`.
If the operator is square, this is also the sum of the eigenvalues.
Args:
name: A name for this `Op`.
Returns:
Shape `[B1,...,Bb]` `Tensor` of same `dtype` as `self`.
"""
with self._name_scope(name):
return self._trace()
def _add_to_tensor(self, x):
# Override if a more efficient implementation is available.
return self.to_dense() + x
def add_to_tensor(self, x, name="add_to_tensor"):
"""Add matrix represented by this operator to `x`. Equivalent to `A + x`.
Args:
x: `Tensor` with same `dtype` and shape broadcastable to `self.shape`.
name: A name to give this `Op`.
Returns:
A `Tensor` with broadcast shape and same `dtype` as `self`.
"""
with self._name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
self._check_input_dtype(x)
return self._add_to_tensor(x)
def _can_use_cholesky(self):
return self.is_self_adjoint and self.is_positive_definite
license: apache-2.0 | hash: -4,438,973,360,618,543,600 | line_mean: 33.992761 | line_max: 99 | alpha_frac: 0.646788 | autogenerated: false
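Following the "Subclassing" notes in the class docstring above, a concrete operator only needs `_shape`, `_shape_tensor`, and `_matmul` to support `matmul`. The scaled-identity sketch below is illustrative only, assuming the TF 1.x-era API shown in this file; TensorFlow ships its own `LinearOperatorScaledIdentity`, so the hand-rolled `MyScaledIdentity` name here is just a demonstration of the hooks.

```python
# Minimal subclass sketch of tf.linalg.LinearOperator (illustrative only).
import tensorflow as tf


class MyScaledIdentity(tf.linalg.LinearOperator):
  """Acts like `multiplier * I` on [num_rows, num_rows] matrices."""

  def __init__(self, num_rows, multiplier, name="MyScaledIdentity"):
    self._num_rows = num_rows
    self._multiplier = tf.convert_to_tensor(multiplier, name="multiplier")
    super(MyScaledIdentity, self).__init__(
        dtype=self._multiplier.dtype,
        graph_parents=[self._multiplier],
        is_self_adjoint=True,
        is_square=True,
        name=name)

  def _shape(self):
    return tf.TensorShape([self._num_rows, self._num_rows])

  def _shape_tensor(self):
    return tf.constant([self._num_rows, self._num_rows], dtype=tf.int32)

  def _matmul(self, x, adjoint=False, adjoint_arg=False):
    x = tf.linalg.adjoint(x) if adjoint_arg else x
    multiplier = tf.math.conj(self._multiplier) if adjoint else self._multiplier
    return multiplier * x


operator = MyScaledIdentity(num_rows=3, multiplier=2.0)
y = operator.matmul(tf.ones([3, 2]))  # same values as 2.0 * tf.ones([3, 2])
```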
thesuperzapper/tensorflow | tensorflow/python/kernel_tests/attention_ops_test.py | copies: 85 | size: 7316
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for image.extract_glimpse()."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.platform import test
class ExtractGlimpseTest(test.TestCase):
def _VerifyValues(self, tensor_in_sizes, glimpse_sizes, offsets,
expected_rows, expected_cols):
"""Verifies the output values of the glimpse extraction kernel.
Args:
tensor_in_sizes: Input tensor dimensions in [input_rows, input_cols].
glimpse_sizes: Dimensions of the glimpse in [glimpse_rows, glimpse_cols].
offsets: Relative location of the center of the glimpse in the input
image expressed as [row_offset, col_offset].
expected_rows: A list containing the expected row numbers (None for
out of bound entries that are expected to be replaced by uniform
random entries in [0,1) ).
expected_cols: Same as expected_rows, but for column numbers.
"""
rows = tensor_in_sizes[0]
cols = tensor_in_sizes[1]
# Row Tensor with entries by row.
# [[ 1 1 1 ... ]
# [ 2 2 2 ... ]
# [ 3 3 3 ... ]
# [ ...
# ]
t_rows = array_ops.tile(
[[1.0 * r] for r in range(1, rows + 1)], [1, cols], name='tile_rows')
# Shuffle to switch to a convention of (batch_size, height, width, depth).
t_rows_4d = array_ops.transpose(
array_ops.expand_dims(array_ops.expand_dims(t_rows, 0), 3),
[0, 2, 1, 3])
# Column Tensor with entries by column.
# [[ 1 2 3 4 ... ]
# [ 1 2 3 4 ... ]
# [ 1 2 3 4 ... ]
# [ ... ]
# ]
t_cols = array_ops.tile(
[[1.0 * r for r in range(1, cols + 1)]], [rows, 1], name='tile_cols')
# Shuffle to switch to a convention of (batch_size, height, width, depth).
t_cols_4d = array_ops.transpose(
array_ops.expand_dims(array_ops.expand_dims(t_cols, 0), 3),
[0, 2, 1, 3])
# extract_glimpses from Row and Column Tensor, respectively.
# Switch order for glimpse_sizes and offsets to switch from (row, col)
# convention to tensorflows (height, width) convention.
t1 = constant_op.constant([glimpse_sizes[1], glimpse_sizes[0]], shape=[2])
t2 = constant_op.constant([offsets[1], offsets[0]], shape=[1, 2])
glimpse_rows = (array_ops.transpose(
image_ops.extract_glimpse(t_rows_4d, t1, t2), [0, 2, 1, 3]))
glimpse_cols = (array_ops.transpose(
image_ops.extract_glimpse(t_cols_4d, t1, t2), [0, 2, 1, 3]))
# Evaluate the TensorFlow Graph.
with self.test_session() as sess:
value_rows, value_cols = sess.run([glimpse_rows, glimpse_cols])
# Check dimensions of returned glimpse.
self.assertEqual(value_rows.shape[1], glimpse_sizes[0])
self.assertEqual(value_rows.shape[2], glimpse_sizes[1])
self.assertEqual(value_cols.shape[1], glimpse_sizes[0])
self.assertEqual(value_cols.shape[2], glimpse_sizes[1])
# Check entries.
min_random_val = 0
max_random_val = max(rows, cols)
for i in range(glimpse_sizes[0]):
for j in range(glimpse_sizes[1]):
if expected_rows[i] is None or expected_cols[j] is None:
self.assertGreaterEqual(value_rows[0][i][j][0], min_random_val)
self.assertLessEqual(value_rows[0][i][j][0], max_random_val)
self.assertGreaterEqual(value_cols[0][i][j][0], min_random_val)
self.assertLessEqual(value_cols[0][i][j][0], max_random_val)
else:
self.assertEqual(value_rows[0][i][j][0], expected_rows[i])
self.assertEqual(value_cols[0][i][j][0], expected_cols[j])
def testCenterGlimpse(self):
self._VerifyValues(
tensor_in_sizes=[41, 61],
glimpse_sizes=[3, 5],
offsets=[0.0, 0.0],
expected_rows=[20, 21, 22],
expected_cols=[29, 30, 31, 32, 33])
def testEmptyTensor(self):
empty_image = np.zeros((0, 4, 3, 0))
offsets = np.zeros((0, 2))
with self.test_session():
result = image_ops.extract_glimpse(empty_image, [1, 1], offsets)
self.assertAllEqual(
np.zeros(
(0, 1, 1, 0), dtype=np.float32), result.eval())
def testLargeCenterGlimpse(self):
self._VerifyValues(
tensor_in_sizes=[41, 61],
glimpse_sizes=[41, 61],
offsets=[0.0, 0.0],
expected_rows=list(range(1, 42)),
expected_cols=list(range(1, 62)))
def testTooLargeCenterGlimpse(self):
self._VerifyValues(
tensor_in_sizes=[41, 61],
glimpse_sizes=[43, 63],
offsets=[0.0, 0.0],
expected_rows=[None] + list(range(1, 42)) + [None],
expected_cols=[None] + list(range(1, 62)) + [None])
def testGlimpseFullOverlap(self):
self._VerifyValues(
tensor_in_sizes=[41, 61],
glimpse_sizes=[3, 5],
offsets=[0.1, 0.3],
expected_rows=[22, 23, 24],
expected_cols=[38, 39, 40, 41, 42])
def testGlimpseFullOverlap2(self):
self._VerifyValues(
tensor_in_sizes=[41, 61],
glimpse_sizes=[11, 3],
offsets=[-0.7, -0.7],
expected_rows=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
expected_cols=[8, 9, 10])
def testGlimpseBeforeLeftMargin(self):
self._VerifyValues(
tensor_in_sizes=[41, 61],
glimpse_sizes=[11, 5],
offsets=[-0.7, -0.9],
expected_rows=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
expected_cols=[1, 2, 3, 4, 5])
def testGlimpseLowerRightCorner(self):
self._VerifyValues(
tensor_in_sizes=[41, 61],
glimpse_sizes=[7, 5],
offsets=[1.0, 1.0],
expected_rows=[38, 39, 40, 41, None, None, None],
expected_cols=[59, 60, 61, None, None])
def testGlimpseNoOverlap(self):
self._VerifyValues(
tensor_in_sizes=[20, 30],
glimpse_sizes=[3, 3],
offsets=[-2.0, 2.0],
expected_rows=[None, None, None],
expected_cols=[None, None, None])
def testGlimpseOnLeftMargin(self):
self._VerifyValues(
tensor_in_sizes=[41, 61],
glimpse_sizes=[11, 7],
offsets=[-0.7, -1.0],
expected_rows=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
expected_cols=[None, None, None, 1, 2, 3, 4])
def testGlimpseUpperMargin(self):
self._VerifyValues(
tensor_in_sizes=[41, 61],
glimpse_sizes=[7, 5],
offsets=[-1, 0.9],
expected_rows=[None, None, None, 1, 2, 3, 4],
expected_cols=[56, 57, 58, 59, 60])
if __name__ == '__main__':
test.main()
license: apache-2.0 | hash: -4,091,728,654,811,364,000 | line_mean: 35.39801 | line_max: 80 | alpha_frac: 0.602242 | autogenerated: false
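For readers unfamiliar with the op under test, the snippet below is a bare-bones call to `tf.image.extract_glimpse` in TF 1.x session style; the symmetric 3x3 size sidesteps the row/column ordering the test has to juggle, and the image values are illustrative.

```python
# Minimal usage sketch for tf.image.extract_glimpse (TF 1.x style, illustrative).
import numpy as np
import tensorflow as tf

# A single 5x5 single-channel image with values 1..25.
image = tf.constant(np.arange(1.0, 26.0, dtype=np.float32).reshape(1, 5, 5, 1))
# With the default centered, normalized offsets, (0.0, 0.0) is the image center.
glimpse = tf.image.extract_glimpse(image, size=[3, 3], offsets=[[0.0, 0.0]])

with tf.Session() as sess:
  print(sess.run(glimpse)[0, :, :, 0])  # the central 3x3 block of the image
```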
staslev/incubator-beam | sdks/python/apache_beam/runners/dataflow/internal/clients/dataflow/dataflow_v1b3_messages.py | copies: 5 | size: 195799
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Generated message classes for dataflow version v1b3.
Develops and executes data processing patterns like ETL, batch computation,
and continuous computation.
"""
# NOTE: This file is autogenerated and should not be edited by hand.
from apitools.base.protorpclite import messages as _messages
from apitools.base.py import encoding
from apitools.base.py import extra_types
package = 'dataflow'
class ApproximateProgress(_messages.Message):
"""Obsolete in favor of ApproximateReportedProgress and
ApproximateSplitRequest.
Fields:
percentComplete: Obsolete.
position: Obsolete.
remainingTime: Obsolete.
"""
percentComplete = _messages.FloatField(1, variant=_messages.Variant.FLOAT)
position = _messages.MessageField('Position', 2)
remainingTime = _messages.StringField(3)
class ApproximateReportedProgress(_messages.Message):
"""A progress measurement of a WorkItem by a worker.
Fields:
consumedParallelism: Total amount of parallelism in the portion of input
of this task that has already been consumed and is no longer active. In
the first two examples above (see remaining_parallelism), the value
should be 29 or 2 respectively. The sum of remaining_parallelism and
consumed_parallelism should equal the total amount of parallelism in
this work item. If specified, must be finite.
fractionConsumed: Completion as fraction of the input consumed, from 0.0
(beginning, nothing consumed), to 1.0 (end of the input, entire input
consumed).
position: A Position within the work to represent a progress.
remainingParallelism: Total amount of parallelism in the input of this
task that remains, (i.e. can be delegated to this task and any new tasks
via dynamic splitting). Always at least 1 for non-finished work items
and 0 for finished. "Amount of parallelism" refers to how many non-
empty parts of the input can be read in parallel. This does not
necessarily equal number of records. An input that can be read in
parallel down to the individual records is called "perfectly
splittable". An example of non-perfectly parallelizable input is a
block-compressed file format where a block of records has to be read as
a whole, but different blocks can be read in parallel. Examples: * If
we are processing record #30 (starting at 1) out of 50 in a perfectly
splittable 50-record input, this value should be 21 (20 remaining + 1
current). * If we are reading through block 3 in a block-compressed file
consisting of 5 blocks, this value should be 3 (since blocks 4 and 5
can be processed in parallel by new tasks via dynamic splitting and
the current task remains processing block 3). * If we are reading
through the last block in a block-compressed file, or reading or
processing the last record in a perfectly splittable input, this value
should be 1, because apart from the current task, no additional
remainder can be split off.
"""
consumedParallelism = _messages.MessageField('ReportedParallelism', 1)
fractionConsumed = _messages.FloatField(2)
position = _messages.MessageField('Position', 3)
remainingParallelism = _messages.MessageField('ReportedParallelism', 4)
class ApproximateSplitRequest(_messages.Message):
"""A suggestion by the service to the worker to dynamically split the
WorkItem.
Fields:
fractionConsumed: A fraction at which to split the work item, from 0.0
(beginning of the input) to 1.0 (end of the input).
position: A Position at which to split the work item.
"""
fractionConsumed = _messages.FloatField(1)
position = _messages.MessageField('Position', 2)
class AutoscalingEvent(_messages.Message):
"""A structured message reporting an autoscaling decision made by the
Dataflow service.
Enums:
EventTypeValueValuesEnum: The type of autoscaling event to report.
Fields:
currentNumWorkers: The current number of workers the job has.
description: A message describing why the system decided to adjust the
current number of workers, why it failed, or why the system decided to
not make any changes to the number of workers.
eventType: The type of autoscaling event to report.
targetNumWorkers: The target number of workers the worker pool wants to
resize to use.
time: The time this event was emitted to indicate a new target or current
num_workers value.
"""
class EventTypeValueValuesEnum(_messages.Enum):
"""The type of autoscaling event to report.
Values:
TYPE_UNKNOWN: Default type for the enum. Value should never be
returned.
TARGET_NUM_WORKERS_CHANGED: The TARGET_NUM_WORKERS_CHANGED type should
be used when the target worker pool size has changed at the start of
an actuation. An event should always be specified as
TARGET_NUM_WORKERS_CHANGED if it reflects a change in the
target_num_workers.
CURRENT_NUM_WORKERS_CHANGED: The CURRENT_NUM_WORKERS_CHANGED type should
be used when actual worker pool size has been changed, but the
target_num_workers has not changed.
ACTUATION_FAILURE: The ACTUATION_FAILURE type should be used when we
want to report an error to the user indicating why the current number
of workers in the pool could not be changed. Displayed in the current
status and history widgets.
NO_CHANGE: Used when we want to report to the user a reason why we are
not currently adjusting the number of workers. Should specify both
target_num_workers, current_num_workers and a decision_message.
"""
TYPE_UNKNOWN = 0
TARGET_NUM_WORKERS_CHANGED = 1
CURRENT_NUM_WORKERS_CHANGED = 2
ACTUATION_FAILURE = 3
NO_CHANGE = 4
currentNumWorkers = _messages.IntegerField(1)
description = _messages.MessageField('StructuredMessage', 2)
eventType = _messages.EnumField('EventTypeValueValuesEnum', 3)
targetNumWorkers = _messages.IntegerField(4)
time = _messages.StringField(5)
class AutoscalingSettings(_messages.Message):
"""Settings for WorkerPool autoscaling.
Enums:
AlgorithmValueValuesEnum: The algorithm to use for autoscaling.
Fields:
algorithm: The algorithm to use for autoscaling.
maxNumWorkers: The maximum number of workers to cap scaling at.
"""
class AlgorithmValueValuesEnum(_messages.Enum):
"""The algorithm to use for autoscaling.
Values:
AUTOSCALING_ALGORITHM_UNKNOWN: The algorithm is unknown, or unspecified.
AUTOSCALING_ALGORITHM_NONE: Disable autoscaling.
AUTOSCALING_ALGORITHM_BASIC: Increase worker count over time to reduce
job execution time.
"""
AUTOSCALING_ALGORITHM_UNKNOWN = 0
AUTOSCALING_ALGORITHM_NONE = 1
AUTOSCALING_ALGORITHM_BASIC = 2
algorithm = _messages.EnumField('AlgorithmValueValuesEnum', 1)
maxNumWorkers = _messages.IntegerField(2, variant=_messages.Variant.INT32)
class CPUTime(_messages.Message):
"""Modeled after information exposed by /proc/stat.
Fields:
rate: Average CPU utilization rate (% non-idle cpu / second) since
previous sample.
timestamp: Timestamp of the measurement.
totalMs: Total active CPU time across all cores (ie., non-idle) in
milliseconds since start-up.
"""
rate = _messages.FloatField(1)
timestamp = _messages.StringField(2)
totalMs = _messages.IntegerField(3, variant=_messages.Variant.UINT64)
class ComponentSource(_messages.Message):
"""Description of an interstitial value between transforms in an execution
stage.
Fields:
name: Dataflow service generated name for this source.
originalTransformOrCollection: User name for the original user transform
or collection with which this source is most closely associated.
userName: Human-readable name for this transform; may be user or system
generated.
"""
name = _messages.StringField(1)
originalTransformOrCollection = _messages.StringField(2)
userName = _messages.StringField(3)
class ComponentTransform(_messages.Message):
"""Description of a transform executed as part of an execution stage.
Fields:
name: Dataflow service generated name for this source.
originalTransform: User name for the original user transform with which
this transform is most closely associated.
userName: Human-readable name for this transform; may be user or system
generated.
"""
name = _messages.StringField(1)
originalTransform = _messages.StringField(2)
userName = _messages.StringField(3)
class ComputationTopology(_messages.Message):
"""All configuration data for a particular Computation.
Fields:
computationId: The ID of the computation.
inputs: The inputs to the computation.
keyRanges: The key ranges processed by the computation.
outputs: The outputs from the computation.
stateFamilies: The state family values.
systemStageName: The system stage name.
"""
computationId = _messages.StringField(1)
inputs = _messages.MessageField('StreamLocation', 2, repeated=True)
keyRanges = _messages.MessageField('KeyRangeLocation', 3, repeated=True)
outputs = _messages.MessageField('StreamLocation', 4, repeated=True)
stateFamilies = _messages.MessageField('StateFamilyConfig', 5, repeated=True)
systemStageName = _messages.StringField(6)
class ConcatPosition(_messages.Message):
"""A position that encapsulates an inner position and an index for the inner
position. A ConcatPosition can be used by a reader of a source that
encapsulates a set of other sources.
Fields:
index: Index of the inner source.
position: Position within the inner source.
"""
index = _messages.IntegerField(1, variant=_messages.Variant.INT32)
position = _messages.MessageField('Position', 2)
class CounterMetadata(_messages.Message):
"""CounterMetadata includes all static non-name non-value counter
attributes.
Enums:
KindValueValuesEnum: Counter aggregation kind.
StandardUnitsValueValuesEnum: System defined Units, see above enum.
Fields:
description: Human-readable description of the counter semantics.
kind: Counter aggregation kind.
otherUnits: A string referring to the unit type.
standardUnits: System defined Units, see above enum.
"""
class KindValueValuesEnum(_messages.Enum):
"""Counter aggregation kind.
Values:
INVALID: Counter aggregation kind was not set.
SUM: Aggregated value is the sum of all contributed values.
MAX: Aggregated value is the max of all contributed values.
MIN: Aggregated value is the min of all contributed values.
MEAN: Aggregated value is the mean of all contributed values.
OR: Aggregated value represents the logical 'or' of all contributed
values.
AND: Aggregated value represents the logical 'and' of all contributed
values.
SET: Aggregated value is a set of unique contributed values.
DISTRIBUTION: Aggregated value captures statistics about a distribution.
"""
INVALID = 0
SUM = 1
MAX = 2
MIN = 3
MEAN = 4
OR = 5
AND = 6
SET = 7
DISTRIBUTION = 8
class StandardUnitsValueValuesEnum(_messages.Enum):
"""System defined Units, see above enum.
Values:
BYTES: Counter returns a value in bytes.
BYTES_PER_SEC: Counter returns a value in bytes per second.
MILLISECONDS: Counter returns a value in milliseconds.
MICROSECONDS: Counter returns a value in microseconds.
NANOSECONDS: Counter returns a value in nanoseconds.
TIMESTAMP_MSEC: Counter returns a timestamp in milliseconds.
TIMESTAMP_USEC: Counter returns a timestamp in microseconds.
TIMESTAMP_NSEC: Counter returns a timestamp in nanoseconds.
"""
BYTES = 0
BYTES_PER_SEC = 1
MILLISECONDS = 2
MICROSECONDS = 3
NANOSECONDS = 4
TIMESTAMP_MSEC = 5
TIMESTAMP_USEC = 6
TIMESTAMP_NSEC = 7
description = _messages.StringField(1)
kind = _messages.EnumField('KindValueValuesEnum', 2)
otherUnits = _messages.StringField(3)
standardUnits = _messages.EnumField('StandardUnitsValueValuesEnum', 4)
class CounterStructuredName(_messages.Message):
"""Identifies a counter within a per-job namespace. Counters whose
structured names are the same get merged into a single value for the job.
Enums:
OriginValueValuesEnum: One of the standard Origins defined above.
PortionValueValuesEnum: Portion of this counter, either key or value.
Fields:
componentStepName: Name of the optimized step being executed by the
workers.
executionStepName: Name of the stage. An execution step contains multiple
component steps.
name: Counter name. Not necessarily globally-unique, but unique within the
context of the other fields. Required.
origin: One of the standard Origins defined above.
originNamespace: A string containing a more specific namespace of the
counter's origin.
originalStepName: System generated name of the original step in the user's
graph, before optimization.
portion: Portion of this counter, either key or value.
workerId: ID of a particular worker.
"""
class OriginValueValuesEnum(_messages.Enum):
"""One of the standard Origins defined above.
Values:
SYSTEM: Counter was created by the Dataflow system.
USER: Counter was created by the user.
"""
SYSTEM = 0
USER = 1
class PortionValueValuesEnum(_messages.Enum):
"""Portion of this counter, either key or value.
Values:
ALL: Counter portion has not been set.
KEY: Counter reports a key.
VALUE: Counter reports a value.
"""
ALL = 0
KEY = 1
VALUE = 2
componentStepName = _messages.StringField(1)
executionStepName = _messages.StringField(2)
name = _messages.StringField(3)
origin = _messages.EnumField('OriginValueValuesEnum', 4)
originNamespace = _messages.StringField(5)
originalStepName = _messages.StringField(6)
portion = _messages.EnumField('PortionValueValuesEnum', 7)
workerId = _messages.StringField(8)
class CounterStructuredNameAndMetadata(_messages.Message):
"""A single message which encapsulates structured name and metadata for a
given counter.
Fields:
metadata: Metadata associated with a counter
name: Structured name of the counter.
"""
metadata = _messages.MessageField('CounterMetadata', 1)
name = _messages.MessageField('CounterStructuredName', 2)
class CounterUpdate(_messages.Message):
"""An update to a Counter sent from a worker.
Fields:
boolean: Boolean value for And, Or.
cumulative: True if this counter is reported as the total cumulative
aggregate value accumulated since the worker started working on this
WorkItem. By default this is false, indicating that this counter is
reported as a delta.
distribution: Distribution data
floatingPoint: Floating point value for Sum, Max, Min.
floatingPointList: List of floating point numbers, for Set.
floatingPointMean: Floating point mean aggregation value for Mean.
integer: Integer value for Sum, Max, Min.
integerList: List of integers, for Set.
integerMean: Integer mean aggregation value for Mean.
internal: Value for internally-defined counters used by the Dataflow
service.
nameAndKind: Counter name and aggregation type.
shortId: The service-generated short identifier for this counter. The
short_id -> (name, metadata) mapping is constant for the lifetime of a
job.
stringList: List of strings, for Set.
structuredNameAndMetadata: Counter structured name and metadata.
"""
boolean = _messages.BooleanField(1)
cumulative = _messages.BooleanField(2)
distribution = _messages.MessageField('DistributionUpdate', 3)
floatingPoint = _messages.FloatField(4)
floatingPointList = _messages.MessageField('FloatingPointList', 5)
floatingPointMean = _messages.MessageField('FloatingPointMean', 6)
integer = _messages.MessageField('SplitInt64', 7)
integerList = _messages.MessageField('IntegerList', 8)
integerMean = _messages.MessageField('IntegerMean', 9)
internal = _messages.MessageField('extra_types.JsonValue', 10)
nameAndKind = _messages.MessageField('NameAndKind', 11)
shortId = _messages.IntegerField(12)
stringList = _messages.MessageField('StringList', 13)
structuredNameAndMetadata = _messages.MessageField('CounterStructuredNameAndMetadata', 14)
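# Illustrative sketch (not part of the generated API surface): one plausible way
# to assemble a CounterUpdate reporting a cumulative floating point sum. Field
# and enum names come from the message definitions above; the counter name,
# namespace, and value are hypothetical, and the work-item reporting plumbing
# that would actually send this update is assumed to exist elsewhere.
def _example_counter_update():
  name = CounterStructuredName(
      name='user_processing_time',
      origin=CounterStructuredName.OriginValueValuesEnum.USER,
      originNamespace='com.example.MyDoFn')
  metadata = CounterMetadata(
      kind=CounterMetadata.KindValueValuesEnum.SUM,
      standardUnits=CounterMetadata.StandardUnitsValueValuesEnum.MILLISECONDS)
  return CounterUpdate(
      cumulative=True,
      floatingPoint=1234.5,
      structuredNameAndMetadata=CounterStructuredNameAndMetadata(
          name=name, metadata=metadata))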
class CreateJobFromTemplateRequest(_messages.Message):
"""A request to create a Cloud Dataflow job from a template.
Messages:
ParametersValue: The runtime parameters to pass to the job.
Fields:
environment: The runtime environment for the job.
gcsPath: Required. A Cloud Storage path to the template from which to
create the job. Must be a valid Cloud Storage URL, beginning with
`gs://`.
jobName: Required. The job name to use for the created job.
location: The location to which to direct the request.
parameters: The runtime parameters to pass to the job.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class ParametersValue(_messages.Message):
"""The runtime parameters to pass to the job.
Messages:
AdditionalProperty: An additional property for a ParametersValue object.
Fields:
additionalProperties: Additional properties of type ParametersValue
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a ParametersValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
environment = _messages.MessageField('RuntimeEnvironment', 1)
gcsPath = _messages.StringField(2)
jobName = _messages.StringField(3)
location = _messages.StringField(4)
parameters = _messages.MessageField('ParametersValue', 5)
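# Illustrative sketch (not part of the generated API surface): building a
# CreateJobFromTemplateRequest whose free-form template parameters are carried
# in the ParametersValue map via AdditionalProperty entries. The gcsPath,
# jobName, location, and parameter key/value shown here are hypothetical.
def _example_create_job_from_template_request():
  parameters = CreateJobFromTemplateRequest.ParametersValue(additionalProperties=[
      CreateJobFromTemplateRequest.ParametersValue.AdditionalProperty(
          key='inputFile', value='gs://example-bucket/input.txt'),
  ])
  return CreateJobFromTemplateRequest(
      gcsPath='gs://example-bucket/templates/wordcount',
      jobName='wordcount-from-template',
      location='us-central1',
      parameters=parameters)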
class CustomSourceLocation(_messages.Message):
"""Identifies the location of a custom souce.
Fields:
stateful: Whether this source is stateful.
"""
stateful = _messages.BooleanField(1)
class DataDiskAssignment(_messages.Message):
"""Data disk assignment for a given VM instance.
Fields:
    dataDisks: Mounted data disks. The order is important: a data disk's
0-based index in this list defines which persistent directory the disk
is mounted to, for example the list of {
"myproject-1014-104817-4c2-harness-0-disk-0" }, {
"myproject-1014-104817-4c2-harness-0-disk-1" }.
vmInstance: VM instance name the data disks mounted to, for example
"myproject-1014-104817-4c2-harness-0".
"""
dataDisks = _messages.StringField(1, repeated=True)
vmInstance = _messages.StringField(2)
class DataflowProjectsJobsAggregatedRequest(_messages.Message):
"""A DataflowProjectsJobsAggregatedRequest object.
Enums:
FilterValueValuesEnum: The kind of filter to use.
ViewValueValuesEnum: Level of information requested in response. Default
is `JOB_VIEW_SUMMARY`.
Fields:
filter: The kind of filter to use.
location: The location that contains this job.
pageSize: If there are many jobs, limit response to at most this many. The
actual number of jobs returned will be the lesser of max_responses and
an unspecified server-defined limit.
pageToken: Set this to the 'next_page_token' field of a previous response
to request additional results in a long list.
projectId: The project which owns the jobs.
view: Level of information requested in response. Default is
`JOB_VIEW_SUMMARY`.
"""
class FilterValueValuesEnum(_messages.Enum):
"""The kind of filter to use.
Values:
UNKNOWN: <no description>
ALL: <no description>
TERMINATED: <no description>
ACTIVE: <no description>
"""
UNKNOWN = 0
ALL = 1
TERMINATED = 2
ACTIVE = 3
class ViewValueValuesEnum(_messages.Enum):
"""Level of information requested in response. Default is
`JOB_VIEW_SUMMARY`.
Values:
JOB_VIEW_UNKNOWN: <no description>
JOB_VIEW_SUMMARY: <no description>
JOB_VIEW_ALL: <no description>
JOB_VIEW_DESCRIPTION: <no description>
"""
JOB_VIEW_UNKNOWN = 0
JOB_VIEW_SUMMARY = 1
JOB_VIEW_ALL = 2
JOB_VIEW_DESCRIPTION = 3
filter = _messages.EnumField('FilterValueValuesEnum', 1)
location = _messages.StringField(2)
pageSize = _messages.IntegerField(3, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(4)
projectId = _messages.StringField(5, required=True)
view = _messages.EnumField('ViewValueValuesEnum', 6)
class DataflowProjectsJobsCreateRequest(_messages.Message):
"""A DataflowProjectsJobsCreateRequest object.
Enums:
ViewValueValuesEnum: The level of information requested in response.
Fields:
job: A Job resource to be passed as the request body.
location: The location that contains this job.
projectId: The ID of the Cloud Platform project that the job belongs to.
replaceJobId: Deprecated. This field is now in the Job message.
view: The level of information requested in response.
"""
class ViewValueValuesEnum(_messages.Enum):
"""The level of information requested in response.
Values:
JOB_VIEW_UNKNOWN: <no description>
JOB_VIEW_SUMMARY: <no description>
JOB_VIEW_ALL: <no description>
JOB_VIEW_DESCRIPTION: <no description>
"""
JOB_VIEW_UNKNOWN = 0
JOB_VIEW_SUMMARY = 1
JOB_VIEW_ALL = 2
JOB_VIEW_DESCRIPTION = 3
job = _messages.MessageField('Job', 1)
location = _messages.StringField(2)
projectId = _messages.StringField(3, required=True)
replaceJobId = _messages.StringField(4)
view = _messages.EnumField('ViewValueValuesEnum', 5)
class DataflowProjectsJobsDebugGetConfigRequest(_messages.Message):
"""A DataflowProjectsJobsDebugGetConfigRequest object.
Fields:
getDebugConfigRequest: A GetDebugConfigRequest resource to be passed as
the request body.
jobId: The job id.
projectId: The project id.
"""
getDebugConfigRequest = _messages.MessageField('GetDebugConfigRequest', 1)
jobId = _messages.StringField(2, required=True)
projectId = _messages.StringField(3, required=True)
class DataflowProjectsJobsDebugSendCaptureRequest(_messages.Message):
"""A DataflowProjectsJobsDebugSendCaptureRequest object.
Fields:
jobId: The job id.
projectId: The project id.
sendDebugCaptureRequest: A SendDebugCaptureRequest resource to be passed
as the request body.
"""
jobId = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
sendDebugCaptureRequest = _messages.MessageField('SendDebugCaptureRequest', 3)
class DataflowProjectsJobsGetMetricsRequest(_messages.Message):
"""A DataflowProjectsJobsGetMetricsRequest object.
Fields:
jobId: The job to get messages for.
location: The location which contains the job specified by job_id.
projectId: A project id.
startTime: Return only metric data that has changed since this time.
Default is to return all information about all metrics for the job.
"""
jobId = _messages.StringField(1, required=True)
location = _messages.StringField(2)
projectId = _messages.StringField(3, required=True)
startTime = _messages.StringField(4)
class DataflowProjectsJobsGetRequest(_messages.Message):
"""A DataflowProjectsJobsGetRequest object.
Enums:
ViewValueValuesEnum: The level of information requested in response.
Fields:
jobId: The job ID.
location: The location that contains this job.
projectId: The ID of the Cloud Platform project that the job belongs to.
view: The level of information requested in response.
"""
class ViewValueValuesEnum(_messages.Enum):
"""The level of information requested in response.
Values:
JOB_VIEW_UNKNOWN: <no description>
JOB_VIEW_SUMMARY: <no description>
JOB_VIEW_ALL: <no description>
JOB_VIEW_DESCRIPTION: <no description>
"""
JOB_VIEW_UNKNOWN = 0
JOB_VIEW_SUMMARY = 1
JOB_VIEW_ALL = 2
JOB_VIEW_DESCRIPTION = 3
jobId = _messages.StringField(1, required=True)
location = _messages.StringField(2)
projectId = _messages.StringField(3, required=True)
view = _messages.EnumField('ViewValueValuesEnum', 4)
class DataflowProjectsJobsListRequest(_messages.Message):
"""A DataflowProjectsJobsListRequest object.
Enums:
FilterValueValuesEnum: The kind of filter to use.
ViewValueValuesEnum: Level of information requested in response. Default
is `JOB_VIEW_SUMMARY`.
Fields:
filter: The kind of filter to use.
location: The location that contains this job.
pageSize: If there are many jobs, limit response to at most this many. The
actual number of jobs returned will be the lesser of max_responses and
an unspecified server-defined limit.
pageToken: Set this to the 'next_page_token' field of a previous response
to request additional results in a long list.
projectId: The project which owns the jobs.
view: Level of information requested in response. Default is
`JOB_VIEW_SUMMARY`.
"""
class FilterValueValuesEnum(_messages.Enum):
"""The kind of filter to use.
Values:
UNKNOWN: <no description>
ALL: <no description>
TERMINATED: <no description>
ACTIVE: <no description>
"""
UNKNOWN = 0
ALL = 1
TERMINATED = 2
ACTIVE = 3
class ViewValueValuesEnum(_messages.Enum):
"""Level of information requested in response. Default is
`JOB_VIEW_SUMMARY`.
Values:
JOB_VIEW_UNKNOWN: <no description>
JOB_VIEW_SUMMARY: <no description>
JOB_VIEW_ALL: <no description>
JOB_VIEW_DESCRIPTION: <no description>
"""
JOB_VIEW_UNKNOWN = 0
JOB_VIEW_SUMMARY = 1
JOB_VIEW_ALL = 2
JOB_VIEW_DESCRIPTION = 3
filter = _messages.EnumField('FilterValueValuesEnum', 1)
location = _messages.StringField(2)
pageSize = _messages.IntegerField(3, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(4)
projectId = _messages.StringField(5, required=True)
view = _messages.EnumField('ViewValueValuesEnum', 6)
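# Illustrative sketch (not part of the generated API surface): a list request
# asking for active jobs only, at the summary level of detail, one page at a
# time. The project id is hypothetical; pageToken would be filled in from the
# next_page_token of a previous response when paging through a long list.
def _example_jobs_list_request():
  return DataflowProjectsJobsListRequest(
      projectId='example-project',
      filter=DataflowProjectsJobsListRequest.FilterValueValuesEnum.ACTIVE,
      view=DataflowProjectsJobsListRequest.ViewValueValuesEnum.JOB_VIEW_SUMMARY,
      pageSize=25)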
class DataflowProjectsJobsMessagesListRequest(_messages.Message):
"""A DataflowProjectsJobsMessagesListRequest object.
Enums:
MinimumImportanceValueValuesEnum: Filter to only get messages with
importance >= level
Fields:
endTime: Return only messages with timestamps < end_time. The default is
now (i.e. return up to the latest messages available).
jobId: The job to get messages about.
location: The location which contains the job specified by job_id.
minimumImportance: Filter to only get messages with importance >= level
pageSize: If specified, determines the maximum number of messages to
return. If unspecified, the service may choose an appropriate default,
or may return an arbitrarily large number of results.
pageToken: If supplied, this should be the value of next_page_token
returned by an earlier call. This will cause the next page of results to
be returned.
projectId: A project id.
startTime: If specified, return only messages with timestamps >=
start_time. The default is the job creation time (i.e. beginning of
messages).
"""
class MinimumImportanceValueValuesEnum(_messages.Enum):
"""Filter to only get messages with importance >= level
Values:
JOB_MESSAGE_IMPORTANCE_UNKNOWN: <no description>
JOB_MESSAGE_DEBUG: <no description>
JOB_MESSAGE_DETAILED: <no description>
JOB_MESSAGE_BASIC: <no description>
JOB_MESSAGE_WARNING: <no description>
JOB_MESSAGE_ERROR: <no description>
"""
JOB_MESSAGE_IMPORTANCE_UNKNOWN = 0
JOB_MESSAGE_DEBUG = 1
JOB_MESSAGE_DETAILED = 2
JOB_MESSAGE_BASIC = 3
JOB_MESSAGE_WARNING = 4
JOB_MESSAGE_ERROR = 5
endTime = _messages.StringField(1)
jobId = _messages.StringField(2, required=True)
location = _messages.StringField(3)
minimumImportance = _messages.EnumField('MinimumImportanceValueValuesEnum', 4)
pageSize = _messages.IntegerField(5, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(6)
projectId = _messages.StringField(7, required=True)
startTime = _messages.StringField(8)
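# Illustrative sketch (not part of the generated API surface): requesting only
# warning-and-above messages for a single job. The project and job ids are
# hypothetical; startTime and endTime are simply left unset here, which uses
# the defaults described in the docstring above.
def _example_job_messages_list_request():
  return DataflowProjectsJobsMessagesListRequest(
      projectId='example-project',
      jobId='2017-01-01_00_00_00-1234567890123456789',
      minimumImportance=(DataflowProjectsJobsMessagesListRequest
                         .MinimumImportanceValueValuesEnum.JOB_MESSAGE_WARNING),
      pageSize=100)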
class DataflowProjectsJobsUpdateRequest(_messages.Message):
"""A DataflowProjectsJobsUpdateRequest object.
Fields:
job: A Job resource to be passed as the request body.
jobId: The job ID.
location: The location that contains this job.
projectId: The ID of the Cloud Platform project that the job belongs to.
"""
job = _messages.MessageField('Job', 1)
jobId = _messages.StringField(2, required=True)
location = _messages.StringField(3)
projectId = _messages.StringField(4, required=True)
class DataflowProjectsJobsWorkItemsLeaseRequest(_messages.Message):
"""A DataflowProjectsJobsWorkItemsLeaseRequest object.
Fields:
jobId: Identifies the workflow job this worker belongs to.
leaseWorkItemRequest: A LeaseWorkItemRequest resource to be passed as the
request body.
projectId: Identifies the project this worker belongs to.
"""
jobId = _messages.StringField(1, required=True)
leaseWorkItemRequest = _messages.MessageField('LeaseWorkItemRequest', 2)
projectId = _messages.StringField(3, required=True)
class DataflowProjectsJobsWorkItemsReportStatusRequest(_messages.Message):
"""A DataflowProjectsJobsWorkItemsReportStatusRequest object.
Fields:
jobId: The job which the WorkItem is part of.
projectId: The project which owns the WorkItem's job.
reportWorkItemStatusRequest: A ReportWorkItemStatusRequest resource to be
passed as the request body.
"""
jobId = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
reportWorkItemStatusRequest = _messages.MessageField('ReportWorkItemStatusRequest', 3)
class DataflowProjectsLocationsJobsCreateRequest(_messages.Message):
"""A DataflowProjectsLocationsJobsCreateRequest object.
Enums:
ViewValueValuesEnum: The level of information requested in response.
Fields:
job: A Job resource to be passed as the request body.
location: The location that contains this job.
projectId: The ID of the Cloud Platform project that the job belongs to.
replaceJobId: Deprecated. This field is now in the Job message.
view: The level of information requested in response.
"""
class ViewValueValuesEnum(_messages.Enum):
"""The level of information requested in response.
Values:
JOB_VIEW_UNKNOWN: <no description>
JOB_VIEW_SUMMARY: <no description>
JOB_VIEW_ALL: <no description>
JOB_VIEW_DESCRIPTION: <no description>
"""
JOB_VIEW_UNKNOWN = 0
JOB_VIEW_SUMMARY = 1
JOB_VIEW_ALL = 2
JOB_VIEW_DESCRIPTION = 3
job = _messages.MessageField('Job', 1)
location = _messages.StringField(2, required=True)
projectId = _messages.StringField(3, required=True)
replaceJobId = _messages.StringField(4)
view = _messages.EnumField('ViewValueValuesEnum', 5)
class DataflowProjectsLocationsJobsDebugGetConfigRequest(_messages.Message):
"""A DataflowProjectsLocationsJobsDebugGetConfigRequest object.
Fields:
getDebugConfigRequest: A GetDebugConfigRequest resource to be passed as
the request body.
jobId: The job id.
location: The location which contains the job specified by job_id.
projectId: The project id.
"""
getDebugConfigRequest = _messages.MessageField('GetDebugConfigRequest', 1)
jobId = _messages.StringField(2, required=True)
location = _messages.StringField(3, required=True)
projectId = _messages.StringField(4, required=True)
class DataflowProjectsLocationsJobsDebugSendCaptureRequest(_messages.Message):
"""A DataflowProjectsLocationsJobsDebugSendCaptureRequest object.
Fields:
jobId: The job id.
location: The location which contains the job specified by job_id.
projectId: The project id.
sendDebugCaptureRequest: A SendDebugCaptureRequest resource to be passed
as the request body.
"""
jobId = _messages.StringField(1, required=True)
location = _messages.StringField(2, required=True)
projectId = _messages.StringField(3, required=True)
sendDebugCaptureRequest = _messages.MessageField('SendDebugCaptureRequest', 4)
class DataflowProjectsLocationsJobsGetMetricsRequest(_messages.Message):
"""A DataflowProjectsLocationsJobsGetMetricsRequest object.
Fields:
jobId: The job to get messages for.
location: The location which contains the job specified by job_id.
projectId: A project id.
startTime: Return only metric data that has changed since this time.
Default is to return all information about all metrics for the job.
"""
jobId = _messages.StringField(1, required=True)
location = _messages.StringField(2, required=True)
projectId = _messages.StringField(3, required=True)
startTime = _messages.StringField(4)
class DataflowProjectsLocationsJobsGetRequest(_messages.Message):
"""A DataflowProjectsLocationsJobsGetRequest object.
Enums:
ViewValueValuesEnum: The level of information requested in response.
Fields:
jobId: The job ID.
location: The location that contains this job.
projectId: The ID of the Cloud Platform project that the job belongs to.
view: The level of information requested in response.
"""
class ViewValueValuesEnum(_messages.Enum):
"""The level of information requested in response.
Values:
JOB_VIEW_UNKNOWN: <no description>
JOB_VIEW_SUMMARY: <no description>
JOB_VIEW_ALL: <no description>
JOB_VIEW_DESCRIPTION: <no description>
"""
JOB_VIEW_UNKNOWN = 0
JOB_VIEW_SUMMARY = 1
JOB_VIEW_ALL = 2
JOB_VIEW_DESCRIPTION = 3
jobId = _messages.StringField(1, required=True)
location = _messages.StringField(2, required=True)
projectId = _messages.StringField(3, required=True)
view = _messages.EnumField('ViewValueValuesEnum', 4)
class DataflowProjectsLocationsJobsListRequest(_messages.Message):
"""A DataflowProjectsLocationsJobsListRequest object.
Enums:
FilterValueValuesEnum: The kind of filter to use.
ViewValueValuesEnum: Level of information requested in response. Default
is `JOB_VIEW_SUMMARY`.
Fields:
filter: The kind of filter to use.
location: The location that contains this job.
pageSize: If there are many jobs, limit response to at most this many. The
actual number of jobs returned will be the lesser of max_responses and
an unspecified server-defined limit.
pageToken: Set this to the 'next_page_token' field of a previous response
to request additional results in a long list.
projectId: The project which owns the jobs.
view: Level of information requested in response. Default is
`JOB_VIEW_SUMMARY`.
"""
class FilterValueValuesEnum(_messages.Enum):
"""The kind of filter to use.
Values:
UNKNOWN: <no description>
ALL: <no description>
TERMINATED: <no description>
ACTIVE: <no description>
"""
UNKNOWN = 0
ALL = 1
TERMINATED = 2
ACTIVE = 3
class ViewValueValuesEnum(_messages.Enum):
"""Level of information requested in response. Default is
`JOB_VIEW_SUMMARY`.
Values:
JOB_VIEW_UNKNOWN: <no description>
JOB_VIEW_SUMMARY: <no description>
JOB_VIEW_ALL: <no description>
JOB_VIEW_DESCRIPTION: <no description>
"""
JOB_VIEW_UNKNOWN = 0
JOB_VIEW_SUMMARY = 1
JOB_VIEW_ALL = 2
JOB_VIEW_DESCRIPTION = 3
filter = _messages.EnumField('FilterValueValuesEnum', 1)
location = _messages.StringField(2, required=True)
pageSize = _messages.IntegerField(3, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(4)
projectId = _messages.StringField(5, required=True)
view = _messages.EnumField('ViewValueValuesEnum', 6)
class DataflowProjectsLocationsJobsMessagesListRequest(_messages.Message):
"""A DataflowProjectsLocationsJobsMessagesListRequest object.
Enums:
MinimumImportanceValueValuesEnum: Filter to only get messages with
importance >= level
Fields:
endTime: Return only messages with timestamps < end_time. The default is
now (i.e. return up to the latest messages available).
jobId: The job to get messages about.
location: The location which contains the job specified by job_id.
minimumImportance: Filter to only get messages with importance >= level
pageSize: If specified, determines the maximum number of messages to
return. If unspecified, the service may choose an appropriate default,
or may return an arbitrarily large number of results.
pageToken: If supplied, this should be the value of next_page_token
returned by an earlier call. This will cause the next page of results to
be returned.
projectId: A project id.
startTime: If specified, return only messages with timestamps >=
start_time. The default is the job creation time (i.e. beginning of
messages).
"""
class MinimumImportanceValueValuesEnum(_messages.Enum):
"""Filter to only get messages with importance >= level
Values:
JOB_MESSAGE_IMPORTANCE_UNKNOWN: <no description>
JOB_MESSAGE_DEBUG: <no description>
JOB_MESSAGE_DETAILED: <no description>
JOB_MESSAGE_BASIC: <no description>
JOB_MESSAGE_WARNING: <no description>
JOB_MESSAGE_ERROR: <no description>
"""
JOB_MESSAGE_IMPORTANCE_UNKNOWN = 0
JOB_MESSAGE_DEBUG = 1
JOB_MESSAGE_DETAILED = 2
JOB_MESSAGE_BASIC = 3
JOB_MESSAGE_WARNING = 4
JOB_MESSAGE_ERROR = 5
endTime = _messages.StringField(1)
jobId = _messages.StringField(2, required=True)
location = _messages.StringField(3, required=True)
minimumImportance = _messages.EnumField('MinimumImportanceValueValuesEnum', 4)
pageSize = _messages.IntegerField(5, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(6)
projectId = _messages.StringField(7, required=True)
startTime = _messages.StringField(8)
class DataflowProjectsLocationsJobsUpdateRequest(_messages.Message):
"""A DataflowProjectsLocationsJobsUpdateRequest object.
Fields:
job: A Job resource to be passed as the request body.
jobId: The job ID.
location: The location that contains this job.
projectId: The ID of the Cloud Platform project that the job belongs to.
"""
job = _messages.MessageField('Job', 1)
jobId = _messages.StringField(2, required=True)
location = _messages.StringField(3, required=True)
projectId = _messages.StringField(4, required=True)
class DataflowProjectsLocationsJobsWorkItemsLeaseRequest(_messages.Message):
"""A DataflowProjectsLocationsJobsWorkItemsLeaseRequest object.
Fields:
jobId: Identifies the workflow job this worker belongs to.
leaseWorkItemRequest: A LeaseWorkItemRequest resource to be passed as the
request body.
location: The location which contains the WorkItem's job.
projectId: Identifies the project this worker belongs to.
"""
jobId = _messages.StringField(1, required=True)
leaseWorkItemRequest = _messages.MessageField('LeaseWorkItemRequest', 2)
location = _messages.StringField(3, required=True)
projectId = _messages.StringField(4, required=True)
class DataflowProjectsLocationsJobsWorkItemsReportStatusRequest(_messages.Message):
"""A DataflowProjectsLocationsJobsWorkItemsReportStatusRequest object.
Fields:
jobId: The job which the WorkItem is part of.
location: The location which contains the WorkItem's job.
projectId: The project which owns the WorkItem's job.
reportWorkItemStatusRequest: A ReportWorkItemStatusRequest resource to be
passed as the request body.
"""
jobId = _messages.StringField(1, required=True)
location = _messages.StringField(2, required=True)
projectId = _messages.StringField(3, required=True)
reportWorkItemStatusRequest = _messages.MessageField('ReportWorkItemStatusRequest', 4)
class DataflowProjectsLocationsTemplatesCreateRequest(_messages.Message):
"""A DataflowProjectsLocationsTemplatesCreateRequest object.
Fields:
createJobFromTemplateRequest: A CreateJobFromTemplateRequest resource to
be passed as the request body.
location: The location to which to direct the request.
projectId: Required. The ID of the Cloud Platform project that the job
belongs to.
"""
createJobFromTemplateRequest = _messages.MessageField('CreateJobFromTemplateRequest', 1)
location = _messages.StringField(2, required=True)
projectId = _messages.StringField(3, required=True)
class DataflowProjectsLocationsTemplatesGetRequest(_messages.Message):
"""A DataflowProjectsLocationsTemplatesGetRequest object.
Enums:
ViewValueValuesEnum: The view to retrieve. Defaults to METADATA_ONLY.
Fields:
gcsPath: Required. A Cloud Storage path to the template from which to
create the job. Must be a valid Cloud Storage URL, beginning with
`gs://`.
location: The location to which to direct the request.
projectId: Required. The ID of the Cloud Platform project that the job
belongs to.
view: The view to retrieve. Defaults to METADATA_ONLY.
"""
class ViewValueValuesEnum(_messages.Enum):
"""The view to retrieve. Defaults to METADATA_ONLY.
Values:
METADATA_ONLY: <no description>
"""
METADATA_ONLY = 0
gcsPath = _messages.StringField(1)
location = _messages.StringField(2, required=True)
projectId = _messages.StringField(3, required=True)
view = _messages.EnumField('ViewValueValuesEnum', 4)
class DataflowProjectsLocationsTemplatesLaunchRequest(_messages.Message):
"""A DataflowProjectsLocationsTemplatesLaunchRequest object.
Fields:
gcsPath: Required. A Cloud Storage path to the template from which to
      create the job. Must be a valid Cloud Storage URL, beginning with 'gs://'.
launchTemplateParameters: A LaunchTemplateParameters resource to be passed
as the request body.
location: The location to which to direct the request.
projectId: Required. The ID of the Cloud Platform project that the job
belongs to.
validateOnly: If true, the request is validated but not actually executed.
Defaults to false.
"""
gcsPath = _messages.StringField(1)
launchTemplateParameters = _messages.MessageField('LaunchTemplateParameters', 2)
location = _messages.StringField(3, required=True)
projectId = _messages.StringField(4, required=True)
validateOnly = _messages.BooleanField(5)
class DataflowProjectsLocationsWorkerMessagesRequest(_messages.Message):
"""A DataflowProjectsLocationsWorkerMessagesRequest object.
Fields:
    location: The location which contains the job.
projectId: The project to send the WorkerMessages to.
sendWorkerMessagesRequest: A SendWorkerMessagesRequest resource to be
passed as the request body.
"""
location = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
sendWorkerMessagesRequest = _messages.MessageField('SendWorkerMessagesRequest', 3)
class DataflowProjectsTemplatesCreateRequest(_messages.Message):
"""A DataflowProjectsTemplatesCreateRequest object.
Fields:
createJobFromTemplateRequest: A CreateJobFromTemplateRequest resource to
be passed as the request body.
projectId: Required. The ID of the Cloud Platform project that the job
belongs to.
"""
createJobFromTemplateRequest = _messages.MessageField('CreateJobFromTemplateRequest', 1)
projectId = _messages.StringField(2, required=True)
class DataflowProjectsTemplatesGetRequest(_messages.Message):
"""A DataflowProjectsTemplatesGetRequest object.
Enums:
ViewValueValuesEnum: The view to retrieve. Defaults to METADATA_ONLY.
Fields:
gcsPath: Required. A Cloud Storage path to the template from which to
create the job. Must be a valid Cloud Storage URL, beginning with
`gs://`.
location: The location to which to direct the request.
projectId: Required. The ID of the Cloud Platform project that the job
belongs to.
view: The view to retrieve. Defaults to METADATA_ONLY.
"""
class ViewValueValuesEnum(_messages.Enum):
"""The view to retrieve. Defaults to METADATA_ONLY.
Values:
METADATA_ONLY: <no description>
"""
METADATA_ONLY = 0
gcsPath = _messages.StringField(1)
location = _messages.StringField(2)
projectId = _messages.StringField(3, required=True)
view = _messages.EnumField('ViewValueValuesEnum', 4)
class DataflowProjectsTemplatesLaunchRequest(_messages.Message):
"""A DataflowProjectsTemplatesLaunchRequest object.
Fields:
gcsPath: Required. A Cloud Storage path to the template from which to
      create the job. Must be a valid Cloud Storage URL, beginning with 'gs://'.
launchTemplateParameters: A LaunchTemplateParameters resource to be passed
as the request body.
location: The location to which to direct the request.
projectId: Required. The ID of the Cloud Platform project that the job
belongs to.
validateOnly: If true, the request is validated but not actually executed.
Defaults to false.
"""
gcsPath = _messages.StringField(1)
launchTemplateParameters = _messages.MessageField('LaunchTemplateParameters', 2)
location = _messages.StringField(3)
projectId = _messages.StringField(4, required=True)
validateOnly = _messages.BooleanField(5)
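# Illustrative sketch (not part of the generated API surface): a dry-run launch
# of a template, validating the request without executing it. The project id
# and template path are hypothetical; a LaunchTemplateParameters message
# (defined elsewhere in this module) would normally be attached as
# launchTemplateParameters for a real launch.
def _example_templates_launch_request():
  return DataflowProjectsTemplatesLaunchRequest(
      projectId='example-project',
      gcsPath='gs://example-bucket/templates/wordcount',
      validateOnly=True)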
class DataflowProjectsWorkerMessagesRequest(_messages.Message):
"""A DataflowProjectsWorkerMessagesRequest object.
Fields:
projectId: The project to send the WorkerMessages to.
sendWorkerMessagesRequest: A SendWorkerMessagesRequest resource to be
passed as the request body.
"""
projectId = _messages.StringField(1, required=True)
sendWorkerMessagesRequest = _messages.MessageField('SendWorkerMessagesRequest', 2)
class DerivedSource(_messages.Message):
"""Specification of one of the bundles produced as a result of splitting a
Source (e.g. when executing a SourceSplitRequest, or when splitting an
active task using WorkItemStatus.dynamic_source_split), relative to the
source being split.
Enums:
DerivationModeValueValuesEnum: What source to base the produced source on
(if any).
Fields:
derivationMode: What source to base the produced source on (if any).
source: Specification of the source.
"""
class DerivationModeValueValuesEnum(_messages.Enum):
"""What source to base the produced source on (if any).
Values:
SOURCE_DERIVATION_MODE_UNKNOWN: The source derivation is unknown, or
unspecified.
SOURCE_DERIVATION_MODE_INDEPENDENT: Produce a completely independent
Source with no base.
SOURCE_DERIVATION_MODE_CHILD_OF_CURRENT: Produce a Source based on the
Source being split.
SOURCE_DERIVATION_MODE_SIBLING_OF_CURRENT: Produce a Source based on the
base of the Source being split.
"""
SOURCE_DERIVATION_MODE_UNKNOWN = 0
SOURCE_DERIVATION_MODE_INDEPENDENT = 1
SOURCE_DERIVATION_MODE_CHILD_OF_CURRENT = 2
SOURCE_DERIVATION_MODE_SIBLING_OF_CURRENT = 3
derivationMode = _messages.EnumField('DerivationModeValueValuesEnum', 1)
source = _messages.MessageField('Source', 2)
class Disk(_messages.Message):
"""Describes the data disk used by a workflow job.
Fields:
diskType: Disk storage type, as defined by Google Compute Engine. This
must be a disk type appropriate to the project and zone in which the
workers will run. If unknown or unspecified, the service will attempt
to choose a reasonable default. For example, the standard persistent
disk type is a resource name typically ending in "pd-standard". If SSD
persistent disks are available, the resource name typically ends with
"pd-ssd". The actual valid values are defined the Google Compute Engine
API, not by the Cloud Dataflow API; consult the Google Compute Engine
documentation for more information about determining the set of
available disk types for a particular project and zone. Google Compute
Engine Disk types are local to a particular project in a particular
zone, and so the resource name will typically look something like this:
compute.googleapis.com/projects/project-id/zones/zone/diskTypes/pd-
standard
mountPoint: Directory in a VM where disk is mounted.
sizeGb: Size of disk in GB. If zero or unspecified, the service will
attempt to choose a reasonable default.
"""
diskType = _messages.StringField(1)
mountPoint = _messages.StringField(2)
sizeGb = _messages.IntegerField(3, variant=_messages.Variant.INT32)
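# Illustrative sketch (not part of the generated API surface): a 250 GB
# standard persistent data disk mounted under /mnt/dataflow. The disk type
# resource name and mount point are hypothetical; see the diskType field
# documentation above for how the resource name is formed.
def _example_disk():
  return Disk(
      diskType=('compute.googleapis.com/projects/example-project/zones/'
                'us-central1-a/diskTypes/pd-standard'),
      mountPoint='/mnt/dataflow',
      sizeGb=250)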
class DisplayData(_messages.Message):
"""Data provided with a pipeline or transform to provide descriptive info.
Fields:
boolValue: Contains value if the data is of a boolean type.
durationValue: Contains value if the data is of duration type.
floatValue: Contains value if the data is of float type.
int64Value: Contains value if the data is of int64 type.
javaClassValue: Contains value if the data is of java class type.
key: The key identifying the display data. This is intended to be used as
a label for the display data when viewed in a dax monitoring system.
label: An optional label to display in a dax UI for the element.
namespace: The namespace for the key. This is usually a class name or
programming language namespace (i.e. python module) which defines the
display data. This allows a dax monitoring system to specially handle
the data and perform custom rendering.
shortStrValue: A possible additional shorter value to display. For example
a java_class_name_value of com.mypackage.MyDoFn will be stored with
MyDoFn as the short_str_value and com.mypackage.MyDoFn as the
java_class_name value. short_str_value can be displayed and
java_class_name_value will be displayed as a tooltip.
strValue: Contains value if the data is of string type.
timestampValue: Contains value if the data is of timestamp type.
url: An optional full URL.
"""
boolValue = _messages.BooleanField(1)
durationValue = _messages.StringField(2)
floatValue = _messages.FloatField(3, variant=_messages.Variant.FLOAT)
int64Value = _messages.IntegerField(4)
javaClassValue = _messages.StringField(5)
key = _messages.StringField(6)
label = _messages.StringField(7)
namespace = _messages.StringField(8)
shortStrValue = _messages.StringField(9)
strValue = _messages.StringField(10)
timestampValue = _messages.StringField(11)
url = _messages.StringField(12)
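# Illustrative sketch (not part of the generated API surface): display data for
# a hypothetical numWorkers pipeline option. Typically only the one typed
# *Value field that matches the underlying option's type is populated.
def _example_display_data():
  return DisplayData(
      key='numWorkers',
      namespace='com.example.MyPipelineOptions',
      label='Number of workers',
      int64Value=10)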
class DistributionUpdate(_messages.Message):
"""A metric value representing a distribution.
Fields:
count: The count of the number of elements present in the distribution.
logBuckets: (Optional) Logarithmic histogram of values. Each log may be in
no more than one bucket. Order does not matter.
max: The maximum value present in the distribution.
min: The minimum value present in the distribution.
sum: Use an int64 since we'd prefer the added precision. If overflow is a
common problem we can detect it and use an additional int64 or a double.
sumOfSquares: Use a double since the sum of squares is likely to overflow
int64.
"""
count = _messages.MessageField('SplitInt64', 1)
logBuckets = _messages.MessageField('LogBucket', 2, repeated=True)
max = _messages.MessageField('SplitInt64', 3)
min = _messages.MessageField('SplitInt64', 4)
sum = _messages.MessageField('SplitInt64', 5)
sumOfSquares = _messages.FloatField(6)
class DynamicSourceSplit(_messages.Message):
"""When a task splits using WorkItemStatus.dynamic_source_split, this
message describes the two parts of the split relative to the description of
the current task's input.
Fields:
primary: Primary part (continued to be processed by worker). Specified
relative to the previously-current source. Becomes current.
residual: Residual part (returned to the pool of work). Specified relative
to the previously-current source.
"""
primary = _messages.MessageField('DerivedSource', 1)
residual = _messages.MessageField('DerivedSource', 2)
class Environment(_messages.Message):
"""Describes the environment in which a Dataflow Job runs.
Messages:
InternalExperimentsValue: Experimental settings.
SdkPipelineOptionsValue: The Cloud Dataflow SDK pipeline options specified
by the user. These options are passed through the service and are used
to recreate the SDK pipeline options on the worker in a language
agnostic and platform independent way.
UserAgentValue: A description of the process that generated the request.
VersionValue: A structure describing which components and their versions
of the service are required in order to run the job.
Fields:
clusterManagerApiService: The type of cluster manager API to use. If
unknown or unspecified, the service will attempt to choose a reasonable
default. This should be in the form of the API service name, e.g.
"compute.googleapis.com".
dataset: The dataset for the current project where various workflow
related tables are stored. The supported resource type is: Google
BigQuery: bigquery.googleapis.com/{dataset}
experiments: The list of experiments to enable.
internalExperiments: Experimental settings.
sdkPipelineOptions: The Cloud Dataflow SDK pipeline options specified by
the user. These options are passed through the service and are used to
recreate the SDK pipeline options on the worker in a language agnostic
and platform independent way.
serviceAccountEmail: Identity to run virtual machines as. Defaults to the
default account.
tempStoragePrefix: The prefix of the resources the system should use for
      temporary storage. The system will append the suffix "/temp-{JOBNAME}"
to this resource prefix, where {JOBNAME} is the value of the job_name
field. The resulting bucket and object prefix is used as the prefix of
the resources used to store temporary data needed during the job
execution. NOTE: This will override the value in taskrunner_settings.
The supported resource type is: Google Cloud Storage:
storage.googleapis.com/{bucket}/{object}
bucket.storage.googleapis.com/{object}
userAgent: A description of the process that generated the request.
version: A structure describing which components and their versions of the
service are required in order to run the job.
workerPools: The worker pools. At least one "harness" worker pool must be
specified in order for the job to have workers.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class InternalExperimentsValue(_messages.Message):
"""Experimental settings.
Messages:
AdditionalProperty: An additional property for a
InternalExperimentsValue object.
Fields:
additionalProperties: Properties of the object. Contains field @type
with type URL.
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a InternalExperimentsValue object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
@encoding.MapUnrecognizedFields('additionalProperties')
class SdkPipelineOptionsValue(_messages.Message):
"""The Cloud Dataflow SDK pipeline options specified by the user. These
options are passed through the service and are used to recreate the SDK
pipeline options on the worker in a language agnostic and platform
independent way.
Messages:
AdditionalProperty: An additional property for a SdkPipelineOptionsValue
object.
Fields:
additionalProperties: Properties of the object.
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a SdkPipelineOptionsValue object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
@encoding.MapUnrecognizedFields('additionalProperties')
class UserAgentValue(_messages.Message):
"""A description of the process that generated the request.
Messages:
AdditionalProperty: An additional property for a UserAgentValue object.
Fields:
additionalProperties: Properties of the object.
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a UserAgentValue object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
@encoding.MapUnrecognizedFields('additionalProperties')
class VersionValue(_messages.Message):
"""A structure describing which components and their versions of the
service are required in order to run the job.
Messages:
AdditionalProperty: An additional property for a VersionValue object.
Fields:
additionalProperties: Properties of the object.
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a VersionValue object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
clusterManagerApiService = _messages.StringField(1)
dataset = _messages.StringField(2)
experiments = _messages.StringField(3, repeated=True)
internalExperiments = _messages.MessageField('InternalExperimentsValue', 4)
sdkPipelineOptions = _messages.MessageField('SdkPipelineOptionsValue', 5)
serviceAccountEmail = _messages.StringField(6)
tempStoragePrefix = _messages.StringField(7)
userAgent = _messages.MessageField('UserAgentValue', 8)
version = _messages.MessageField('VersionValue', 9)
workerPools = _messages.MessageField('WorkerPool', 10, repeated=True)
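# Illustrative sketch (not part of the generated API surface): a minimal
# Environment with a temp storage prefix, a worker service account, and an
# experiment flag. All values, including the experiment name, are hypothetical;
# a real job would normally also carry sdkPipelineOptions, version info, and at
# least one harness WorkerPool.
def _example_environment():
  return Environment(
      tempStoragePrefix='storage.googleapis.com/example-bucket/temp',
      serviceAccountEmail='dataflow-worker@example-project.iam.gserviceaccount.com',
      experiments=['example_experiment_flag'])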
class ExecutionStageState(_messages.Message):
"""A message describing the state of a particular execution stage.
Enums:
    ExecutionStageStateValueValuesEnum: Execution stage states allow the same
set of values as JobState.
Fields:
currentStateTime: The time at which the stage transitioned to this state.
executionStageName: The name of the execution stage.
    executionStageState: Execution stage states allow the same set of values
as JobState.
"""
class ExecutionStageStateValueValuesEnum(_messages.Enum):
"""Executions stage states allow the same set of values as JobState.
Values:
JOB_STATE_UNKNOWN: The job's run state isn't specified.
JOB_STATE_STOPPED: `JOB_STATE_STOPPED` indicates that the job has not
yet started to run.
JOB_STATE_RUNNING: `JOB_STATE_RUNNING` indicates that the job is
currently running.
JOB_STATE_DONE: `JOB_STATE_DONE` indicates that the job has successfully
completed. This is a terminal job state. This state may be set by the
Cloud Dataflow service, as a transition from `JOB_STATE_RUNNING`. It
may also be set via a Cloud Dataflow `UpdateJob` call, if the job has
not yet reached a terminal state.
JOB_STATE_FAILED: `JOB_STATE_FAILED` indicates that the job has failed.
This is a terminal job state. This state may only be set by the Cloud
Dataflow service, and only as a transition from `JOB_STATE_RUNNING`.
JOB_STATE_CANCELLED: `JOB_STATE_CANCELLED` indicates that the job has
been explicitly cancelled. This is a terminal job state. This state
may only be set via a Cloud Dataflow `UpdateJob` call, and only if the
job has not yet reached another terminal state.
JOB_STATE_UPDATED: `JOB_STATE_UPDATED` indicates that the job was
successfully updated, meaning that this job was stopped and another
job was started, inheriting state from this one. This is a terminal
job state. This state may only be set by the Cloud Dataflow service,
and only as a transition from `JOB_STATE_RUNNING`.
JOB_STATE_DRAINING: `JOB_STATE_DRAINING` indicates that the job is in
the process of draining. A draining job has stopped pulling from its
input sources and is processing any data that remains in-flight. This
state may be set via a Cloud Dataflow `UpdateJob` call, but only as a
transition from `JOB_STATE_RUNNING`. Jobs that are draining may only
transition to `JOB_STATE_DRAINED`, `JOB_STATE_CANCELLED`, or
`JOB_STATE_FAILED`.
JOB_STATE_DRAINED: `JOB_STATE_DRAINED` indicates that the job has been
      drained. A drained job has stopped pulling from its input sources and has
      processed any data that remained in-flight when draining
was requested. This state is a terminal state, may only be set by the
Cloud Dataflow service, and only as a transition from
`JOB_STATE_DRAINING`.
JOB_STATE_PENDING: 'JOB_STATE_PENDING' indicates that the job has been
created but is not yet running. Jobs that are pending may only
transition to `JOB_STATE_RUNNING`, or `JOB_STATE_FAILED`.
JOB_STATE_CANCELLING: 'JOB_STATE_CANCELLING' indicates that the job has
been explicitly cancelled and is in the process of stopping. Jobs
that are cancelling may only transition to 'JOB_STATE_CANCELLED' or
'JOB_STATE_FAILED'.
"""
JOB_STATE_UNKNOWN = 0
JOB_STATE_STOPPED = 1
JOB_STATE_RUNNING = 2
JOB_STATE_DONE = 3
JOB_STATE_FAILED = 4
JOB_STATE_CANCELLED = 5
JOB_STATE_UPDATED = 6
JOB_STATE_DRAINING = 7
JOB_STATE_DRAINED = 8
JOB_STATE_PENDING = 9
JOB_STATE_CANCELLING = 10
currentStateTime = _messages.StringField(1)
executionStageName = _messages.StringField(2)
executionStageState = _messages.EnumField('ExecutionStageStateValueValuesEnum', 3)
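# Illustrative sketch (not part of the generated API surface): recording that a
# hypothetical execution stage entered the running state. The stage name and
# timestamp string are illustrative values for the fields defined above.
def _example_execution_stage_state():
  return ExecutionStageState(
      executionStageName='F12',
      currentStateTime='2017-06-01T12:00:00Z',
      executionStageState=(ExecutionStageState
                           .ExecutionStageStateValueValuesEnum.JOB_STATE_RUNNING))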
class ExecutionStageSummary(_messages.Message):
"""Description of the composing transforms, names/ids, and input/outputs of
a stage of execution. Some composing transforms and sources may have been
generated by the Dataflow service during execution planning.
Enums:
    KindValueValuesEnum: Type of transform this stage is executing.
Fields:
componentSource: Collections produced and consumed by component transforms
of this stage.
componentTransform: Transforms that comprise this execution stage.
id: Dataflow service generated id for this stage.
inputSource: Input sources for this stage.
    kind: Type of transform this stage is executing.
name: Dataflow service generated name for this stage.
outputSource: Output sources for this stage.
"""
class KindValueValuesEnum(_messages.Enum):
"""Type of tranform this stage is executing.
Values:
UNKNOWN_KIND: Unrecognized transform type.
PAR_DO_KIND: ParDo transform.
GROUP_BY_KEY_KIND: Group By Key transform.
FLATTEN_KIND: Flatten transform.
READ_KIND: Read transform.
WRITE_KIND: Write transform.
CONSTANT_KIND: Constructs from a constant value, such as with Create.of.
SINGLETON_KIND: Creates a Singleton view of a collection.
SHUFFLE_KIND: Opening or closing a shuffle session, often as part of a
GroupByKey.
"""
UNKNOWN_KIND = 0
PAR_DO_KIND = 1
GROUP_BY_KEY_KIND = 2
FLATTEN_KIND = 3
READ_KIND = 4
WRITE_KIND = 5
CONSTANT_KIND = 6
SINGLETON_KIND = 7
SHUFFLE_KIND = 8
componentSource = _messages.MessageField('ComponentSource', 1, repeated=True)
componentTransform = _messages.MessageField('ComponentTransform', 2, repeated=True)
id = _messages.StringField(3)
inputSource = _messages.MessageField('StageSource', 4, repeated=True)
kind = _messages.EnumField('KindValueValuesEnum', 5)
name = _messages.StringField(6)
outputSource = _messages.MessageField('StageSource', 7, repeated=True)
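# Illustrative sketch (not part of the generated API surface): the skeleton of
# a read stage summary. The id and name shown are hypothetical; a summary
# produced by the service would also populate componentSource,
# componentTransform, inputSource, and outputSource.
def _example_execution_stage_summary():
  return ExecutionStageSummary(
      id='S01',
      name='ReadFromExampleSource',
      kind=ExecutionStageSummary.KindValueValuesEnum.READ_KIND)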
class FailedLocation(_messages.Message):
"""Indicates which location failed to respond to a request for data.
Fields:
name: The name of the failed location.
"""
name = _messages.StringField(1)
class FlattenInstruction(_messages.Message):
"""An instruction that copies its inputs (zero or more) to its (single)
output.
Fields:
inputs: Describes the inputs to the flatten instruction.
"""
inputs = _messages.MessageField('InstructionInput', 1, repeated=True)
class FloatingPointList(_messages.Message):
"""A metric value representing a list of floating point numbers.
Fields:
elements: Elements of the list.
"""
elements = _messages.FloatField(1, repeated=True)
class FloatingPointMean(_messages.Message):
"""A representation of a floating point mean metric contribution.
Fields:
count: The number of values being aggregated.
sum: The sum of all values being aggregated.
"""
count = _messages.MessageField('SplitInt64', 1)
sum = _messages.FloatField(2)
class GetDebugConfigRequest(_messages.Message):
"""Request to get updated debug configuration for component.
Fields:
componentId: The internal component id for which debug configuration is
requested.
location: The location which contains the job specified by job_id.
workerId: The worker id, i.e., VM hostname.
"""
componentId = _messages.StringField(1)
location = _messages.StringField(2)
workerId = _messages.StringField(3)
class GetDebugConfigResponse(_messages.Message):
"""Response to a get debug configuration request.
Fields:
config: The encoded debug configuration for the requested component.
"""
config = _messages.StringField(1)
class GetTemplateResponse(_messages.Message):
"""The response to a GetTemplate request.
Fields:
metadata: The template metadata describing the template name, available
parameters, etc.
status: The status of the get template request. Any problems with the
request will be indicated in the error_details.
"""
metadata = _messages.MessageField('TemplateMetadata', 1)
status = _messages.MessageField('Status', 2)
class InstructionInput(_messages.Message):
"""An input of an instruction, as a reference to an output of a producer
instruction.
Fields:
outputNum: The output index (origin zero) within the producer.
producerInstructionIndex: The index (origin zero) of the parallel
instruction that produces the output to be consumed by this input. This
index is relative to the list of instructions in this input's
instruction's containing MapTask.
"""
outputNum = _messages.IntegerField(1, variant=_messages.Variant.INT32)
producerInstructionIndex = _messages.IntegerField(2, variant=_messages.Variant.INT32)
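# Illustrative sketch (not part of the generated API surface): wiring an
# instruction input to the first output of the first parallel instruction in
# the containing MapTask (both indices are zero-based, as documented above).
def _example_instruction_input():
  return InstructionInput(producerInstructionIndex=0, outputNum=0)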
class InstructionOutput(_messages.Message):
"""An output of an instruction.
Messages:
CodecValue: The codec to use to encode data being written via this output.
Fields:
codec: The codec to use to encode data being written via this output.
name: The user-provided name of this output.
onlyCountKeyBytes: For system-generated byte and mean byte metrics,
certain instructions should only report the key size.
onlyCountValueBytes: For system-generated byte and mean byte metrics,
certain instructions should only report the value size.
originalName: System-defined name for this output in the original workflow
graph. Outputs that do not contribute to an original instruction do not
set this.
systemName: System-defined name of this output. Unique across the
workflow.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class CodecValue(_messages.Message):
"""The codec to use to encode data being written via this output.
Messages:
AdditionalProperty: An additional property for a CodecValue object.
Fields:
additionalProperties: Properties of the object.
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a CodecValue object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
codec = _messages.MessageField('CodecValue', 1)
name = _messages.StringField(2)
onlyCountKeyBytes = _messages.BooleanField(3)
onlyCountValueBytes = _messages.BooleanField(4)
originalName = _messages.StringField(5)
systemName = _messages.StringField(6)
class IntegerList(_messages.Message):
"""A metric value representing a list of integers.
Fields:
elements: Elements of the list.
"""
elements = _messages.MessageField('SplitInt64', 1, repeated=True)
class IntegerMean(_messages.Message):
"""A representation of an integer mean metric contribution.
Fields:
count: The number of values being aggregated.
sum: The sum of all values being aggregated.
"""
count = _messages.MessageField('SplitInt64', 1)
sum = _messages.MessageField('SplitInt64', 2)
class Job(_messages.Message):
"""Defines a job to be run by the Cloud Dataflow service.
Enums:
CurrentStateValueValuesEnum: The current state of the job. Jobs are
created in the `JOB_STATE_STOPPED` state unless otherwise specified. A
job in the `JOB_STATE_RUNNING` state may asynchronously enter a terminal
state. After a job has reached a terminal state, no further state
updates may be made. This field may be mutated by the Cloud Dataflow
service; callers cannot mutate it.
RequestedStateValueValuesEnum: The job's requested state. `UpdateJob` may
be used to switch between the `JOB_STATE_STOPPED` and
`JOB_STATE_RUNNING` states, by setting requested_state. `UpdateJob` may
also be used to directly set a job's requested state to
`JOB_STATE_CANCELLED` or `JOB_STATE_DONE`, irrevocably terminating the
job if it has not already reached a terminal state.
TypeValueValuesEnum: The type of Cloud Dataflow job.
Messages:
LabelsValue: User-defined labels for this job. The labels map can contain
no more than 64 entries. Entries of the labels map are UTF8 strings
that comply with the following restrictions: * Keys must conform to
regexp: \p{Ll}\p{Lo}{0,62} * Values must conform to regexp:
[\p{Ll}\p{Lo}\p{N}_-]{0,63} * Both keys and values are additionally
constrained to be <= 128 bytes in size.
TransformNameMappingValue: The map of transform name prefixes of the job
to be replaced to the corresponding name prefixes of the new job.
Fields:
clientRequestId: The client's unique identifier of the job, re-used across
retried attempts. If this field is set, the service will ensure its
uniqueness. The request to create a job will fail if the service has
knowledge of a previously submitted job with the same client's ID and
job name. The caller may use this field to ensure idempotence of job
creation across retried attempts to create a job. By default, the field
is empty and, in that case, the service ignores it.
createTime: The timestamp when the job was initially created. Immutable
and set by the Cloud Dataflow service.
currentState: The current state of the job. Jobs are created in the
`JOB_STATE_STOPPED` state unless otherwise specified. A job in the
`JOB_STATE_RUNNING` state may asynchronously enter a terminal state.
After a job has reached a terminal state, no further state updates may
be made. This field may be mutated by the Cloud Dataflow service;
callers cannot mutate it.
currentStateTime: The timestamp associated with the current state.
environment: The environment for the job.
executionInfo: Deprecated.
id: The unique ID of this job. This field is set by the Cloud Dataflow
service when the Job is created, and is immutable for the life of the
job.
labels: User-defined labels for this job. The labels map can contain no
more than 64 entries. Entries of the labels map are UTF8 strings that
comply with the following restrictions: * Keys must conform to regexp:
\p{Ll}\p{Lo}{0,62} * Values must conform to regexp:
[\p{Ll}\p{Lo}\p{N}_-]{0,63} * Both keys and values are additionally
constrained to be <= 128 bytes in size.
location: The location that contains this job.
name: The user-specified Cloud Dataflow job name. Only one Job with a
given name may exist in a project at any given time. If a caller
attempts to create a Job with the same name as an already-existing Job,
the attempt returns the existing Job. The name must match the regular
expression `[a-z]([-a-z0-9]{0,38}[a-z0-9])?`
pipelineDescription: Preliminary field: The format of this data may change
at any time. A description of the user pipeline and stages through which
it is executed. Created by Cloud Dataflow service. Only retrieved with
JOB_VIEW_DESCRIPTION or JOB_VIEW_ALL.
projectId: The ID of the Cloud Platform project that the job belongs to.
replaceJobId: If this job is an update of an existing job, this field is
the job ID of the job it replaced. When sending a `CreateJobRequest`,
you can update a job by specifying it here. The job named here is
stopped, and its intermediate state is transferred to this job.
replacedByJobId: If another job is an update of this job (and thus, this
job is in `JOB_STATE_UPDATED`), this field contains the ID of that job.
requestedState: The job's requested state. `UpdateJob` may be used to
switch between the `JOB_STATE_STOPPED` and `JOB_STATE_RUNNING` states,
by setting requested_state. `UpdateJob` may also be used to directly
set a job's requested state to `JOB_STATE_CANCELLED` or
`JOB_STATE_DONE`, irrevocably terminating the job if it has not already
reached a terminal state.
stageStates: This field may be mutated by the Cloud Dataflow service;
callers cannot mutate it.
steps: The top-level steps that constitute the entire job.
tempFiles: A set of files the system should be aware of that are used for
temporary storage. These temporary files will be removed on job
completion. No duplicates are allowed. No file patterns are supported.
The supported files are: Google Cloud Storage:
storage.googleapis.com/{bucket}/{object}
bucket.storage.googleapis.com/{object}
transformNameMapping: The map of transform name prefixes of the job to be
replaced to the corresponding name prefixes of the new job.
type: The type of Cloud Dataflow job.
"""
class CurrentStateValueValuesEnum(_messages.Enum):
"""The current state of the job. Jobs are created in the
`JOB_STATE_STOPPED` state unless otherwise specified. A job in the
`JOB_STATE_RUNNING` state may asynchronously enter a terminal state. After
a job has reached a terminal state, no further state updates may be made.
This field may be mutated by the Cloud Dataflow service; callers cannot
mutate it.
Values:
JOB_STATE_UNKNOWN: The job's run state isn't specified.
JOB_STATE_STOPPED: `JOB_STATE_STOPPED` indicates that the job has not
yet started to run.
JOB_STATE_RUNNING: `JOB_STATE_RUNNING` indicates that the job is
currently running.
JOB_STATE_DONE: `JOB_STATE_DONE` indicates that the job has successfully
completed. This is a terminal job state. This state may be set by the
Cloud Dataflow service, as a transition from `JOB_STATE_RUNNING`. It
may also be set via a Cloud Dataflow `UpdateJob` call, if the job has
not yet reached a terminal state.
JOB_STATE_FAILED: `JOB_STATE_FAILED` indicates that the job has failed.
This is a terminal job state. This state may only be set by the Cloud
Dataflow service, and only as a transition from `JOB_STATE_RUNNING`.
JOB_STATE_CANCELLED: `JOB_STATE_CANCELLED` indicates that the job has
been explicitly cancelled. This is a terminal job state. This state
may only be set via a Cloud Dataflow `UpdateJob` call, and only if the
job has not yet reached another terminal state.
JOB_STATE_UPDATED: `JOB_STATE_UPDATED` indicates that the job was
successfully updated, meaning that this job was stopped and another
job was started, inheriting state from this one. This is a terminal
job state. This state may only be set by the Cloud Dataflow service,
and only as a transition from `JOB_STATE_RUNNING`.
JOB_STATE_DRAINING: `JOB_STATE_DRAINING` indicates that the job is in
the process of draining. A draining job has stopped pulling from its
input sources and is processing any data that remains in-flight. This
state may be set via a Cloud Dataflow `UpdateJob` call, but only as a
transition from `JOB_STATE_RUNNING`. Jobs that are draining may only
transition to `JOB_STATE_DRAINED`, `JOB_STATE_CANCELLED`, or
`JOB_STATE_FAILED`.
JOB_STATE_DRAINED: `JOB_STATE_DRAINED` indicates that the job has been
drained. A drained job terminated by stopping pulling from its input
sources and processing any data that remained in-flight when draining
was requested. This state is a terminal state, may only be set by the
Cloud Dataflow service, and only as a transition from
`JOB_STATE_DRAINING`.
JOB_STATE_PENDING: 'JOB_STATE_PENDING' indicates that the job has been
created but is not yet running. Jobs that are pending may only
transition to `JOB_STATE_RUNNING`, or `JOB_STATE_FAILED`.
JOB_STATE_CANCELLING: 'JOB_STATE_CANCELLING' indicates that the job has
been explicitly cancelled and is in the process of stopping. Jobs
that are cancelling may only transition to 'JOB_STATE_CANCELLED' or
'JOB_STATE_FAILED'.
"""
JOB_STATE_UNKNOWN = 0
JOB_STATE_STOPPED = 1
JOB_STATE_RUNNING = 2
JOB_STATE_DONE = 3
JOB_STATE_FAILED = 4
JOB_STATE_CANCELLED = 5
JOB_STATE_UPDATED = 6
JOB_STATE_DRAINING = 7
JOB_STATE_DRAINED = 8
JOB_STATE_PENDING = 9
JOB_STATE_CANCELLING = 10
class RequestedStateValueValuesEnum(_messages.Enum):
"""The job's requested state. `UpdateJob` may be used to switch between
the `JOB_STATE_STOPPED` and `JOB_STATE_RUNNING` states, by setting
requested_state. `UpdateJob` may also be used to directly set a job's
requested state to `JOB_STATE_CANCELLED` or `JOB_STATE_DONE`, irrevocably
terminating the job if it has not already reached a terminal state.
Values:
JOB_STATE_UNKNOWN: The job's run state isn't specified.
JOB_STATE_STOPPED: `JOB_STATE_STOPPED` indicates that the job has not
yet started to run.
JOB_STATE_RUNNING: `JOB_STATE_RUNNING` indicates that the job is
currently running.
JOB_STATE_DONE: `JOB_STATE_DONE` indicates that the job has successfully
completed. This is a terminal job state. This state may be set by the
Cloud Dataflow service, as a transition from `JOB_STATE_RUNNING`. It
may also be set via a Cloud Dataflow `UpdateJob` call, if the job has
not yet reached a terminal state.
JOB_STATE_FAILED: `JOB_STATE_FAILED` indicates that the job has failed.
This is a terminal job state. This state may only be set by the Cloud
Dataflow service, and only as a transition from `JOB_STATE_RUNNING`.
JOB_STATE_CANCELLED: `JOB_STATE_CANCELLED` indicates that the job has
been explicitly cancelled. This is a terminal job state. This state
may only be set via a Cloud Dataflow `UpdateJob` call, and only if the
job has not yet reached another terminal state.
JOB_STATE_UPDATED: `JOB_STATE_UPDATED` indicates that the job was
successfully updated, meaning that this job was stopped and another
job was started, inheriting state from this one. This is a terminal
job state. This state may only be set by the Cloud Dataflow service,
and only as a transition from `JOB_STATE_RUNNING`.
JOB_STATE_DRAINING: `JOB_STATE_DRAINING` indicates that the job is in
the process of draining. A draining job has stopped pulling from its
input sources and is processing any data that remains in-flight. This
state may be set via a Cloud Dataflow `UpdateJob` call, but only as a
transition from `JOB_STATE_RUNNING`. Jobs that are draining may only
transition to `JOB_STATE_DRAINED`, `JOB_STATE_CANCELLED`, or
`JOB_STATE_FAILED`.
JOB_STATE_DRAINED: `JOB_STATE_DRAINED` indicates that the job has been
drained. A drained job terminated by stopping pulling from its input
sources and processing any data that remained in-flight when draining
was requested. This state is a terminal state, may only be set by the
Cloud Dataflow service, and only as a transition from
`JOB_STATE_DRAINING`.
JOB_STATE_PENDING: 'JOB_STATE_PENDING' indicates that the job has been
created but is not yet running. Jobs that are pending may only
transition to `JOB_STATE_RUNNING`, or `JOB_STATE_FAILED`.
JOB_STATE_CANCELLING: 'JOB_STATE_CANCELLING' indicates that the job has
been explicitly cancelled and is in the process of stopping. Jobs
that are cancelling may only transition to 'JOB_STATE_CANCELLED' or
'JOB_STATE_FAILED'.
"""
JOB_STATE_UNKNOWN = 0
JOB_STATE_STOPPED = 1
JOB_STATE_RUNNING = 2
JOB_STATE_DONE = 3
JOB_STATE_FAILED = 4
JOB_STATE_CANCELLED = 5
JOB_STATE_UPDATED = 6
JOB_STATE_DRAINING = 7
JOB_STATE_DRAINED = 8
JOB_STATE_PENDING = 9
JOB_STATE_CANCELLING = 10
class TypeValueValuesEnum(_messages.Enum):
"""The type of Cloud Dataflow job.
Values:
JOB_TYPE_UNKNOWN: The type of the job is unspecified, or unknown.
JOB_TYPE_BATCH: A batch job with a well-defined end point: data is read,
data is processed, data is written, and the job is done.
JOB_TYPE_STREAMING: A continuously streaming job with no end: data is
read, processed, and written continuously.
"""
JOB_TYPE_UNKNOWN = 0
JOB_TYPE_BATCH = 1
JOB_TYPE_STREAMING = 2
@encoding.MapUnrecognizedFields('additionalProperties')
class LabelsValue(_messages.Message):
"""User-defined labels for this job. The labels map can contain no more
than 64 entries. Entries of the labels map are UTF8 strings that comply
with the following restrictions: * Keys must conform to regexp:
\p{Ll}\p{Lo}{0,62} * Values must conform to regexp:
[\p{Ll}\p{Lo}\p{N}_-]{0,63} * Both keys and values are additionally
constrained to be <= 128 bytes in size.
Messages:
AdditionalProperty: An additional property for a LabelsValue object.
Fields:
additionalProperties: Additional properties of type LabelsValue
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a LabelsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
@encoding.MapUnrecognizedFields('additionalProperties')
class TransformNameMappingValue(_messages.Message):
"""The map of transform name prefixes of the job to be replaced to the
corresponding name prefixes of the new job.
Messages:
AdditionalProperty: An additional property for a
TransformNameMappingValue object.
Fields:
additionalProperties: Additional properties of type
TransformNameMappingValue
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a TransformNameMappingValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
clientRequestId = _messages.StringField(1)
createTime = _messages.StringField(2)
currentState = _messages.EnumField('CurrentStateValueValuesEnum', 3)
currentStateTime = _messages.StringField(4)
environment = _messages.MessageField('Environment', 5)
executionInfo = _messages.MessageField('JobExecutionInfo', 6)
id = _messages.StringField(7)
labels = _messages.MessageField('LabelsValue', 8)
location = _messages.StringField(9)
name = _messages.StringField(10)
pipelineDescription = _messages.MessageField('PipelineDescription', 11)
projectId = _messages.StringField(12)
replaceJobId = _messages.StringField(13)
replacedByJobId = _messages.StringField(14)
requestedState = _messages.EnumField('RequestedStateValueValuesEnum', 15)
stageStates = _messages.MessageField('ExecutionStageState', 16, repeated=True)
steps = _messages.MessageField('Step', 17, repeated=True)
tempFiles = _messages.StringField(18, repeated=True)
transformNameMapping = _messages.MessageField('TransformNameMappingValue', 19)
type = _messages.EnumField('TypeValueValuesEnum', 20)
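# Illustrative usage sketch (not part of the generated API surface): building a
# Job message with apitools keyword arguments. The project id and job name are
# hypothetical placeholders.
def _example_build_batch_job():
  return Job(
      name='example-wordcount',  # hypothetical job name
      projectId='my-project',  # hypothetical project id
      type=Job.TypeValueValuesEnum.JOB_TYPE_BATCH,
      requestedState=Job.RequestedStateValueValuesEnum.JOB_STATE_RUNNING)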
class JobExecutionInfo(_messages.Message):
"""Additional information about how a Cloud Dataflow job will be executed
that isn't contained in the submitted job.
Messages:
StagesValue: A mapping from each stage to the information about that
stage.
Fields:
stages: A mapping from each stage to the information about that stage.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class StagesValue(_messages.Message):
"""A mapping from each stage to the information about that stage.
Messages:
AdditionalProperty: An additional property for a StagesValue object.
Fields:
additionalProperties: Additional properties of type StagesValue
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a StagesValue object.
Fields:
key: Name of the additional property.
value: A JobExecutionStageInfo attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('JobExecutionStageInfo', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
stages = _messages.MessageField('StagesValue', 1)
class JobExecutionStageInfo(_messages.Message):
"""Contains information about how a particular google.dataflow.v1beta3.Step
will be executed.
Fields:
stepName: The steps associated with the execution stage. Note that stages
may have several steps, and that a given step might be run by more than
one stage.
"""
stepName = _messages.StringField(1, repeated=True)
class JobMessage(_messages.Message):
"""A particular message pertaining to a Dataflow job.
Enums:
MessageImportanceValueValuesEnum: Importance level of the message.
Fields:
id: Deprecated.
messageImportance: Importance level of the message.
messageText: The text of the message.
time: The timestamp of the message.
"""
class MessageImportanceValueValuesEnum(_messages.Enum):
"""Importance level of the message.
Values:
JOB_MESSAGE_IMPORTANCE_UNKNOWN: The message importance isn't specified,
or is unknown.
JOB_MESSAGE_DEBUG: The message is at the 'debug' level: typically only
useful for software engineers working on the code the job is running.
Typically, Dataflow pipeline runners do not display log messages at
this level by default.
JOB_MESSAGE_DETAILED: The message is at the 'detailed' level: somewhat
verbose, but potentially useful to users. Typically, Dataflow
pipeline runners do not display log messages at this level by default.
These messages are displayed by default in the Dataflow monitoring UI.
JOB_MESSAGE_BASIC: The message is at the 'basic' level: useful for
keeping track of the execution of a Dataflow pipeline. Typically,
Dataflow pipeline runners display log messages at this level by
default, and these messages are displayed by default in the Dataflow
monitoring UI.
JOB_MESSAGE_WARNING: The message is at the 'warning' level: indicating a
condition pertaining to a job which may require human intervention.
Typically, Dataflow pipeline runners display log messages at this
level by default, and these messages are displayed by default in the
Dataflow monitoring UI.
JOB_MESSAGE_ERROR: The message is at the 'error' level: indicating a
condition preventing a job from succeeding. Typically, Dataflow
pipeline runners display log messages at this level by default, and
these messages are displayed by default in the Dataflow monitoring UI.
"""
JOB_MESSAGE_IMPORTANCE_UNKNOWN = 0
JOB_MESSAGE_DEBUG = 1
JOB_MESSAGE_DETAILED = 2
JOB_MESSAGE_BASIC = 3
JOB_MESSAGE_WARNING = 4
JOB_MESSAGE_ERROR = 5
id = _messages.StringField(1)
messageImportance = _messages.EnumField('MessageImportanceValueValuesEnum', 2)
messageText = _messages.StringField(3)
time = _messages.StringField(4)
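# Illustrative sketch: filtering JobMessage entries down to warnings and errors
# from a list of messages (e.g. ListJobMessagesResponse.jobMessages), using
# only the enum values defined above.
def _example_warnings_and_errors(job_messages):
  important = (
      JobMessage.MessageImportanceValueValuesEnum.JOB_MESSAGE_WARNING,
      JobMessage.MessageImportanceValueValuesEnum.JOB_MESSAGE_ERROR)
  return [m for m in job_messages if m.messageImportance in important]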
class JobMetrics(_messages.Message):
"""JobMetrics contains a collection of metrics descibing the detailed
progress of a Dataflow job. Metrics correspond to user-defined and system-
defined metrics in the job. This resource captures only the most recent
values of each metric; time-series data can be queried for them (under the
same metric names) from Cloud Monitoring.
Fields:
metricTime: Timestamp as of which metric values are current.
metrics: All metrics for this job.
"""
metricTime = _messages.StringField(1)
metrics = _messages.MessageField('MetricUpdate', 2, repeated=True)
class KeyRangeDataDiskAssignment(_messages.Message):
"""Data disk assignment information for a specific key-range of a sharded
computation. Currently we only support UTF-8 character splits to simplify
encoding into JSON.
Fields:
dataDisk: The name of the data disk where data for this range is stored.
This name is local to the Google Cloud Platform project and uniquely
identifies the disk within that project, for example
"myproject-1014-104817-4c2-harness-0-disk-1".
end: The end (exclusive) of the key range.
start: The start (inclusive) of the key range.
"""
dataDisk = _messages.StringField(1)
end = _messages.StringField(2)
start = _messages.StringField(3)
class KeyRangeLocation(_messages.Message):
"""Location information for a specific key-range of a sharded computation.
Currently we only support UTF-8 character splits to simplify encoding into
JSON.
Fields:
dataDisk: The name of the data disk where data for this range is stored.
This name is local to the Google Cloud Platform project and uniquely
identifies the disk within that project, for example
"myproject-1014-104817-4c2-harness-0-disk-1".
deliveryEndpoint: The physical location of this range assignment to be
used for streaming computation cross-worker message delivery.
deprecatedPersistentDirectory: DEPRECATED. The location of the persistent
state for this range, as a persistent directory in the worker local
filesystem.
end: The end (exclusive) of the key range.
start: The start (inclusive) of the key range.
"""
dataDisk = _messages.StringField(1)
deliveryEndpoint = _messages.StringField(2)
deprecatedPersistentDirectory = _messages.StringField(3)
end = _messages.StringField(4)
start = _messages.StringField(5)
class LaunchTemplateParameters(_messages.Message):
"""Parameters to provide to the template being launched.
Messages:
ParametersValue: The runtime parameters to pass to the job.
Fields:
environment: The runtime environment for the job.
jobName: Required. The job name to use for the created job.
parameters: The runtime parameters to pass to the job.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class ParametersValue(_messages.Message):
"""The runtime parameters to pass to the job.
Messages:
AdditionalProperty: An additional property for a ParametersValue object.
Fields:
additionalProperties: Additional properties of type ParametersValue
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a ParametersValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
environment = _messages.MessageField('RuntimeEnvironment', 1)
jobName = _messages.StringField(2)
parameters = _messages.MessageField('ParametersValue', 3)
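# Illustrative sketch: populating the ParametersValue map through its
# AdditionalProperty entries before launching a template. The parameter name
# and bucket path are hypothetical.
def _example_launch_template_parameters():
  params = LaunchTemplateParameters.ParametersValue(additionalProperties=[
      LaunchTemplateParameters.ParametersValue.AdditionalProperty(
          key='inputFile', value='gs://my-bucket/input.txt'),
  ])
  return LaunchTemplateParameters(jobName='example-job', parameters=params)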
class LaunchTemplateResponse(_messages.Message):
"""Response to the request to launch a template.
Fields:
job: The job that was launched, if the request was not a dry run and the
job was successfully launched.
"""
job = _messages.MessageField('Job', 1)
class LeaseWorkItemRequest(_messages.Message):
"""Request to lease WorkItems.
Fields:
currentWorkerTime: The current timestamp at the worker.
location: The location which contains the WorkItem's job.
requestedLeaseDuration: The initial lease period.
workItemTypes: Filter for WorkItem type.
workerCapabilities: Worker capabilities. WorkItems might be limited to
workers with specific capabilities.
workerId: Identifies the worker leasing work -- typically the ID of the
virtual machine running the worker.
"""
currentWorkerTime = _messages.StringField(1)
location = _messages.StringField(2)
requestedLeaseDuration = _messages.StringField(3)
workItemTypes = _messages.StringField(4, repeated=True)
workerCapabilities = _messages.StringField(5, repeated=True)
workerId = _messages.StringField(6)
class LeaseWorkItemResponse(_messages.Message):
"""Response to a request to lease WorkItems.
Fields:
workItems: A list of the leased WorkItems.
"""
workItems = _messages.MessageField('WorkItem', 1, repeated=True)
class ListJobMessagesResponse(_messages.Message):
"""Response to a request to list job messages.
Fields:
autoscalingEvents: Autoscaling events in ascending timestamp order.
jobMessages: Messages in ascending timestamp order.
nextPageToken: The token to obtain the next page of results if there are
more.
"""
autoscalingEvents = _messages.MessageField('AutoscalingEvent', 1, repeated=True)
jobMessages = _messages.MessageField('JobMessage', 2, repeated=True)
nextPageToken = _messages.StringField(3)
class ListJobsResponse(_messages.Message):
"""Response to a request to list Cloud Dataflow jobs. This may be a partial
response, depending on the page size in the ListJobsRequest.
Fields:
failedLocation: Zero or more messages describing locations that failed to
respond.
jobs: A subset of the requested job information.
nextPageToken: Set if there may be more results than fit in this response.
"""
failedLocation = _messages.MessageField('FailedLocation', 1, repeated=True)
jobs = _messages.MessageField('Job', 2, repeated=True)
nextPageToken = _messages.StringField(3)
class LogBucket(_messages.Message):
"""Bucket of values for Distribution's logarithmic histogram.
Fields:
count: Number of values in this bucket.
log: floor(log2(value)); defined to be zero for nonpositive values.
      log(-1) = 0, log(0) = 0, log(1) = 0, log(2) = 1, log(3) = 1,
      log(4) = 2, log(5) = 2
"""
count = _messages.IntegerField(1)
log = _messages.IntegerField(2, variant=_messages.Variant.INT32)
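# Sketch of the bucketing rule described in the docstring above:
# floor(log2(value)), defined as zero for nonpositive values. This helper is
# illustrative only, not part of the generated API.
def _example_log_bucket_index(value):
  if value <= 0:
    return 0
  return value.bit_length() - 1  # floor(log2(value)) for positive integers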
class MapTask(_messages.Message):
"""MapTask consists of an ordered set of instructions, each of which
describes one particular low-level operation for the worker to perform in
order to accomplish the MapTask's WorkItem. Each instruction must appear in
  the list before any instructions that depend on its output.
Fields:
instructions: The instructions in the MapTask.
stageName: System-defined name of the stage containing this MapTask.
Unique across the workflow.
systemName: System-defined name of this MapTask. Unique across the
workflow.
"""
instructions = _messages.MessageField('ParallelInstruction', 1, repeated=True)
stageName = _messages.StringField(2)
systemName = _messages.StringField(3)
class MetricShortId(_messages.Message):
"""The metric short id is returned to the user alongside an offset into
ReportWorkItemStatusRequest
Fields:
metricIndex: The index of the corresponding metric in the
ReportWorkItemStatusRequest. Required.
shortId: The service-generated short identifier for the metric.
"""
metricIndex = _messages.IntegerField(1, variant=_messages.Variant.INT32)
shortId = _messages.IntegerField(2)
class MetricStructuredName(_messages.Message):
"""Identifies a metric, by describing the source which generated the metric.
Messages:
ContextValue: Zero or more labeled fields which identify the part of the
job this metric is associated with, such as the name of a step or
collection. For example, built-in counters associated with steps will
have context['step'] = <step-name>. Counters associated with
PCollections in the SDK will have context['pcollection'] = <pcollection-
name>.
Fields:
context: Zero or more labeled fields which identify the part of the job
this metric is associated with, such as the name of a step or
collection. For example, built-in counters associated with steps will
have context['step'] = <step-name>. Counters associated with
PCollections in the SDK will have context['pcollection'] = <pcollection-
name>.
name: Worker-defined metric name.
    origin: Origin (namespace) of metric name. May be blank for user-defined
metrics; will be "dataflow" for metrics defined by the Dataflow service
or SDK.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class ContextValue(_messages.Message):
"""Zero or more labeled fields which identify the part of the job this
metric is associated with, such as the name of a step or collection. For
example, built-in counters associated with steps will have context['step']
= <step-name>. Counters associated with PCollections in the SDK will have
context['pcollection'] = <pcollection-name>.
Messages:
AdditionalProperty: An additional property for a ContextValue object.
Fields:
additionalProperties: Additional properties of type ContextValue
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a ContextValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
context = _messages.MessageField('ContextValue', 1)
name = _messages.StringField(2)
origin = _messages.StringField(3)
class MetricUpdate(_messages.Message):
"""Describes the state of a metric.
Fields:
cumulative: True if this metric is reported as the total cumulative
aggregate value accumulated since the worker started working on this
WorkItem. By default this is false, indicating that this metric is
reported as a delta that is not associated with any WorkItem.
distribution: A struct value describing properties of a distribution of
numeric values.
internal: Worker-computed aggregate value for internal use by the Dataflow
service.
kind: Metric aggregation kind. The possible metric aggregation kinds are
"Sum", "Max", "Min", "Mean", "Set", "And", "Or", and "Distribution". The
specified aggregation kind is case-insensitive. If omitted, this is not
an aggregated value but instead a single metric sample value.
meanCount: Worker-computed aggregate value for the "Mean" aggregation
kind. This holds the count of the aggregated values and is used in
combination with mean_sum above to obtain the actual mean aggregate
value. The only possible value type is Long.
meanSum: Worker-computed aggregate value for the "Mean" aggregation kind.
This holds the sum of the aggregated values and is used in combination
with mean_count below to obtain the actual mean aggregate value. The
only possible value types are Long and Double.
name: Name of the metric.
scalar: Worker-computed aggregate value for aggregation kinds "Sum",
"Max", "Min", "And", and "Or". The possible value types are Long,
Double, and Boolean.
set: Worker-computed aggregate value for the "Set" aggregation kind. The
only possible value type is a list of Values whose type can be Long,
Double, or String, according to the metric's type. All Values in the
list must be of the same type.
updateTime: Timestamp associated with the metric value. Optional when
workers are reporting work progress; it will be filled in responses from
the metrics API.
"""
cumulative = _messages.BooleanField(1)
distribution = _messages.MessageField('extra_types.JsonValue', 2)
internal = _messages.MessageField('extra_types.JsonValue', 3)
kind = _messages.StringField(4)
meanCount = _messages.MessageField('extra_types.JsonValue', 5)
meanSum = _messages.MessageField('extra_types.JsonValue', 6)
name = _messages.MessageField('MetricStructuredName', 7)
scalar = _messages.MessageField('extra_types.JsonValue', 8)
set = _messages.MessageField('extra_types.JsonValue', 9)
updateTime = _messages.StringField(10)
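# Hedged example: reporting a cumulative "Sum" counter as a MetricUpdate.
# Assumes `extra_types` is imported at the top of this module (apitools
# convention); the step and counter names are hypothetical.
def _example_sum_metric_update():
  name = MetricStructuredName(
      name='element_count',
      origin='dataflow',
      context=MetricStructuredName.ContextValue(additionalProperties=[
          MetricStructuredName.ContextValue.AdditionalProperty(
              key='step', value='Read'),
      ]))
  return MetricUpdate(
      name=name, kind='Sum', cumulative=True,
      scalar=extra_types.JsonValue(integer_value=42))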
class MountedDataDisk(_messages.Message):
"""Describes mounted data disk.
Fields:
dataDisk: The name of the data disk. This name is local to the Google
Cloud Platform project and uniquely identifies the disk within that
project, for example "myproject-1014-104817-4c2-harness-0-disk-1".
"""
dataDisk = _messages.StringField(1)
class MultiOutputInfo(_messages.Message):
"""Information about an output of a multi-output DoFn.
Fields:
tag: The id of the tag the user code will emit to this output by; this
should correspond to the tag of some SideInputInfo.
"""
tag = _messages.StringField(1)
class NameAndKind(_messages.Message):
"""Basic metadata about a counter.
Enums:
KindValueValuesEnum: Counter aggregation kind.
Fields:
kind: Counter aggregation kind.
name: Name of the counter.
"""
class KindValueValuesEnum(_messages.Enum):
"""Counter aggregation kind.
Values:
INVALID: Counter aggregation kind was not set.
SUM: Aggregated value is the sum of all contributed values.
MAX: Aggregated value is the max of all contributed values.
MIN: Aggregated value is the min of all contributed values.
MEAN: Aggregated value is the mean of all contributed values.
OR: Aggregated value represents the logical 'or' of all contributed
values.
AND: Aggregated value represents the logical 'and' of all contributed
values.
SET: Aggregated value is a set of unique contributed values.
DISTRIBUTION: Aggregated value captures statistics about a distribution.
"""
INVALID = 0
SUM = 1
MAX = 2
MIN = 3
MEAN = 4
OR = 5
AND = 6
SET = 7
DISTRIBUTION = 8
kind = _messages.EnumField('KindValueValuesEnum', 1)
name = _messages.StringField(2)
class Package(_messages.Message):
"""The packages that must be installed in order for a worker to run the
steps of the Cloud Dataflow job that will be assigned to its worker pool.
This is the mechanism by which the Cloud Dataflow SDK causes code to be
loaded onto the workers. For example, the Cloud Dataflow Java SDK might use
this to install jars containing the user's code and all of the various
dependencies (libraries, data files, etc.) required in order for that code
to run.
Fields:
location: The resource to read the package from. The supported resource
type is: Google Cloud Storage: storage.googleapis.com/{bucket}
bucket.storage.googleapis.com/
name: The name of the package.
"""
location = _messages.StringField(1)
name = _messages.StringField(2)
class ParDoInstruction(_messages.Message):
"""An instruction that does a ParDo operation. Takes one main input and zero
or more side inputs, and produces zero or more outputs. Runs user code.
Messages:
UserFnValue: The user function to invoke.
Fields:
input: The input.
multiOutputInfos: Information about each of the outputs, if user_fn is a
MultiDoFn.
numOutputs: The number of outputs.
sideInputs: Zero or more side inputs.
userFn: The user function to invoke.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class UserFnValue(_messages.Message):
"""The user function to invoke.
Messages:
AdditionalProperty: An additional property for a UserFnValue object.
Fields:
additionalProperties: Properties of the object.
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a UserFnValue object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
input = _messages.MessageField('InstructionInput', 1)
multiOutputInfos = _messages.MessageField('MultiOutputInfo', 2, repeated=True)
numOutputs = _messages.IntegerField(3, variant=_messages.Variant.INT32)
sideInputs = _messages.MessageField('SideInputInfo', 4, repeated=True)
userFn = _messages.MessageField('UserFnValue', 5)
class ParallelInstruction(_messages.Message):
"""Describes a particular operation comprising a MapTask.
Fields:
flatten: Additional information for Flatten instructions.
name: User-provided name of this operation.
originalName: System-defined name for the operation in the original
workflow graph.
outputs: Describes the outputs of the instruction.
parDo: Additional information for ParDo instructions.
partialGroupByKey: Additional information for PartialGroupByKey
instructions.
read: Additional information for Read instructions.
systemName: System-defined name of this operation. Unique across the
workflow.
write: Additional information for Write instructions.
"""
flatten = _messages.MessageField('FlattenInstruction', 1)
name = _messages.StringField(2)
originalName = _messages.StringField(3)
outputs = _messages.MessageField('InstructionOutput', 4, repeated=True)
parDo = _messages.MessageField('ParDoInstruction', 5)
partialGroupByKey = _messages.MessageField('PartialGroupByKeyInstruction', 6)
read = _messages.MessageField('ReadInstruction', 7)
systemName = _messages.StringField(8)
write = _messages.MessageField('WriteInstruction', 9)
class Parameter(_messages.Message):
"""Structured data associated with this message.
Fields:
key: Key or name for this parameter.
value: Value for this parameter.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
class ParameterMetadata(_messages.Message):
"""Metadata for a specific parameter.
Fields:
helpText: Required. The help text to display for the parameter.
isOptional: Optional. Whether the parameter is optional. Defaults to
false.
label: Required. The label to display for the parameter.
name: Required. The name of the parameter.
regexes: Optional. Regexes that the parameter must match.
"""
helpText = _messages.StringField(1)
isOptional = _messages.BooleanField(2)
label = _messages.StringField(3)
name = _messages.StringField(4)
regexes = _messages.StringField(5, repeated=True)
class PartialGroupByKeyInstruction(_messages.Message):
"""An instruction that does a partial group-by-key. One input and one
output.
Messages:
InputElementCodecValue: The codec to use for interpreting an element in
the input PTable.
ValueCombiningFnValue: The value combining function to invoke.
Fields:
input: Describes the input to the partial group-by-key instruction.
inputElementCodec: The codec to use for interpreting an element in the
input PTable.
originalCombineValuesInputStoreName: If this instruction includes a
combining function this is the name of the intermediate store between
the GBK and the CombineValues.
originalCombineValuesStepName: If this instruction includes a combining
function, this is the name of the CombineValues instruction lifted into
this instruction.
sideInputs: Zero or more side inputs.
valueCombiningFn: The value combining function to invoke.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class InputElementCodecValue(_messages.Message):
"""The codec to use for interpreting an element in the input PTable.
Messages:
AdditionalProperty: An additional property for a InputElementCodecValue
object.
Fields:
additionalProperties: Properties of the object.
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a InputElementCodecValue object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
@encoding.MapUnrecognizedFields('additionalProperties')
class ValueCombiningFnValue(_messages.Message):
"""The value combining function to invoke.
Messages:
AdditionalProperty: An additional property for a ValueCombiningFnValue
object.
Fields:
additionalProperties: Properties of the object.
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a ValueCombiningFnValue object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
input = _messages.MessageField('InstructionInput', 1)
inputElementCodec = _messages.MessageField('InputElementCodecValue', 2)
originalCombineValuesInputStoreName = _messages.StringField(3)
originalCombineValuesStepName = _messages.StringField(4)
sideInputs = _messages.MessageField('SideInputInfo', 5, repeated=True)
valueCombiningFn = _messages.MessageField('ValueCombiningFnValue', 6)
class PipelineDescription(_messages.Message):
"""A descriptive representation of submitted pipeline as well as the
executed form. This data is provided by the Dataflow service for ease of
  visualizing the pipeline and interpreting Dataflow-provided metrics.
Fields:
displayData: Pipeline level display data.
executionPipelineStage: Description of each stage of execution of the
pipeline.
originalPipelineTransform: Description of each transform in the pipeline
and collections between them.
"""
displayData = _messages.MessageField('DisplayData', 1, repeated=True)
executionPipelineStage = _messages.MessageField('ExecutionStageSummary', 2, repeated=True)
originalPipelineTransform = _messages.MessageField('TransformSummary', 3, repeated=True)
class Position(_messages.Message):
"""Position defines a position within a collection of data. The value can
be either the end position, a key (used with ordered collections), a byte
offset, or a record index.
Fields:
byteOffset: Position is a byte offset.
concatPosition: CloudPosition is a concat position.
end: Position is past all other positions. Also useful for the end
position of an unbounded range.
key: Position is a string key, ordered lexicographically.
recordIndex: Position is a record index.
shufflePosition: CloudPosition is a base64 encoded BatchShufflePosition
(with FIXED sharding).
"""
byteOffset = _messages.IntegerField(1)
concatPosition = _messages.MessageField('ConcatPosition', 2)
end = _messages.BooleanField(3)
key = _messages.StringField(4)
recordIndex = _messages.IntegerField(5)
shufflePosition = _messages.StringField(6)
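# Illustrative sketch: two common Position forms, a byte offset within a range
# and the special end-of-range marker.
def _example_positions():
  return [Position(byteOffset=1024), Position(end=True)]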
class PubsubLocation(_messages.Message):
"""Identifies a pubsub location to use for transferring data into or out of
a streaming Dataflow job.
Fields:
dropLateData: Indicates whether the pipeline allows late-arriving data.
idLabel: If set, contains a pubsub label from which to extract record ids.
If left empty, record deduplication will be strictly best effort.
subscription: A pubsub subscription, in the form of
"pubsub.googleapis.com/subscriptions/<project-id>/<subscription-name>"
timestampLabel: If set, contains a pubsub label from which to extract
record timestamps. If left empty, record timestamps will be generated
upon arrival.
    topic: A pubsub topic, in the form of
      "pubsub.googleapis.com/topics/<project-id>/<topic-name>"
trackingSubscription: If set, specifies the pubsub subscription that will
be used for tracking custom time timestamps for watermark estimation.
withAttributes: If true, then the client has requested to get pubsub
attributes.
"""
dropLateData = _messages.BooleanField(1)
idLabel = _messages.StringField(2)
subscription = _messages.StringField(3)
timestampLabel = _messages.StringField(4)
topic = _messages.StringField(5)
trackingSubscription = _messages.StringField(6)
withAttributes = _messages.BooleanField(7)
class ReadInstruction(_messages.Message):
"""An instruction that reads records. Takes no inputs, produces one output.
Fields:
source: The source to read from.
"""
source = _messages.MessageField('Source', 1)
class ReportWorkItemStatusRequest(_messages.Message):
"""Request to report the status of WorkItems.
Fields:
currentWorkerTime: The current timestamp at the worker.
location: The location which contains the WorkItem's job.
workItemStatuses: The order is unimportant, except that the order of the
WorkItemServiceState messages in the ReportWorkItemStatusResponse
corresponds to the order of WorkItemStatus messages here.
workerId: The ID of the worker reporting the WorkItem status. If this
does not match the ID of the worker which the Dataflow service believes
currently has the lease on the WorkItem, the report will be dropped
(with an error response).
"""
currentWorkerTime = _messages.StringField(1)
location = _messages.StringField(2)
workItemStatuses = _messages.MessageField('WorkItemStatus', 3, repeated=True)
workerId = _messages.StringField(4)
class ReportWorkItemStatusResponse(_messages.Message):
"""Response from a request to report the status of WorkItems.
Fields:
workItemServiceStates: A set of messages indicating the service-side state
for each WorkItem whose status was reported, in the same order as the
WorkItemStatus messages in the ReportWorkItemStatusRequest which
      resulted in this response.
"""
workItemServiceStates = _messages.MessageField('WorkItemServiceState', 1, repeated=True)
class ReportedParallelism(_messages.Message):
"""Represents the level of parallelism in a WorkItem's input, reported by
the worker.
Fields:
isInfinite: Specifies whether the parallelism is infinite. If true,
"value" is ignored. Infinite parallelism means the service will assume
that the work item can always be split into more non-empty work items by
dynamic splitting. This is a work-around for lack of support for
infinity by the current JSON-based Java RPC stack.
value: Specifies the level of parallelism in case it is finite.
"""
isInfinite = _messages.BooleanField(1)
value = _messages.FloatField(2)
class ResourceUtilizationReport(_messages.Message):
"""Worker metrics exported from workers. This contains resource utilization
  metrics accumulated from a variety of sources. For more information, see
  go/df-resource-signals.
Fields:
cpuTime: CPU utilization samples.
"""
cpuTime = _messages.MessageField('CPUTime', 1, repeated=True)
class ResourceUtilizationReportResponse(_messages.Message):
"""Service-side response to WorkerMessage reporting resource utilization.
"""
class RuntimeEnvironment(_messages.Message):
"""The environment values to set at runtime.
Fields:
bypassTempDirValidation: Whether to bypass the safety checks for the job's
temporary directory. Use with caution.
machineType: The machine type to use for the job. Defaults to the value
from the template if not specified.
maxWorkers: The maximum number of Google Compute Engine instances to be
made available to your pipeline during execution, from 1 to 1000.
serviceAccountEmail: The email address of the service account to run the
job as.
tempLocation: The Cloud Storage path to use for temporary files. Must be a
valid Cloud Storage URL, beginning with `gs://`.
zone: The Compute Engine [availability
zone](https://cloud.google.com/compute/docs/regions-zones/regions-zones)
for launching worker instances to run your pipeline.
"""
bypassTempDirValidation = _messages.BooleanField(1)
machineType = _messages.StringField(2)
maxWorkers = _messages.IntegerField(3, variant=_messages.Variant.INT32)
serviceAccountEmail = _messages.StringField(4)
tempLocation = _messages.StringField(5)
zone = _messages.StringField(6)
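# Illustrative sketch: a RuntimeEnvironment for a template launch. The bucket,
# zone and machine type values are hypothetical placeholders.
def _example_runtime_environment():
  return RuntimeEnvironment(
      tempLocation='gs://my-bucket/temp',
      zone='us-central1-f',
      machineType='n1-standard-1',
      maxWorkers=3)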
class SendDebugCaptureRequest(_messages.Message):
"""Request to send encoded debug information.
Fields:
componentId: The internal component id for which debug information is
sent.
data: The encoded debug information.
location: The location which contains the job specified by job_id.
workerId: The worker id, i.e., VM hostname.
"""
componentId = _messages.StringField(1)
data = _messages.StringField(2)
location = _messages.StringField(3)
workerId = _messages.StringField(4)
class SendDebugCaptureResponse(_messages.Message):
"""Response to a send capture request.
nothing"""
class SendWorkerMessagesRequest(_messages.Message):
"""A request for sending worker messages to the service.
Fields:
location: The location which contains the job
workerMessages: The WorkerMessages to send.
"""
location = _messages.StringField(1)
workerMessages = _messages.MessageField('WorkerMessage', 2, repeated=True)
class SendWorkerMessagesResponse(_messages.Message):
"""The response to the worker messages.
Fields:
workerMessageResponses: The servers response to the worker messages.
"""
workerMessageResponses = _messages.MessageField('WorkerMessageResponse', 1, repeated=True)
class SeqMapTask(_messages.Message):
"""Describes a particular function to invoke.
Messages:
UserFnValue: The user function to invoke.
Fields:
inputs: Information about each of the inputs.
name: The user-provided name of the SeqDo operation.
outputInfos: Information about each of the outputs.
stageName: System-defined name of the stage containing the SeqDo
operation. Unique across the workflow.
systemName: System-defined name of the SeqDo operation. Unique across the
workflow.
userFn: The user function to invoke.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class UserFnValue(_messages.Message):
"""The user function to invoke.
Messages:
AdditionalProperty: An additional property for a UserFnValue object.
Fields:
additionalProperties: Properties of the object.
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a UserFnValue object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
inputs = _messages.MessageField('SideInputInfo', 1, repeated=True)
name = _messages.StringField(2)
outputInfos = _messages.MessageField('SeqMapTaskOutputInfo', 3, repeated=True)
stageName = _messages.StringField(4)
systemName = _messages.StringField(5)
userFn = _messages.MessageField('UserFnValue', 6)
class SeqMapTaskOutputInfo(_messages.Message):
"""Information about an output of a SeqMapTask.
Fields:
sink: The sink to write the output value to.
tag: The id of the TupleTag the user code will tag the output value by.
"""
sink = _messages.MessageField('Sink', 1)
tag = _messages.StringField(2)
class ShellTask(_messages.Message):
"""A task which consists of a shell command for the worker to execute.
Fields:
command: The shell command to run.
exitCode: Exit code for the task.
"""
command = _messages.StringField(1)
exitCode = _messages.IntegerField(2, variant=_messages.Variant.INT32)
class SideInputInfo(_messages.Message):
"""Information about a side input of a DoFn or an input of a SeqDoFn.
Messages:
KindValue: How to interpret the source element(s) as a side input value.
Fields:
kind: How to interpret the source element(s) as a side input value.
sources: The source(s) to read element(s) from to get the value of this
side input. If more than one source, then the elements are taken from
the sources, in the specified order if order matters. At least one
source is required.
tag: The id of the tag the user code will access this side input by; this
should correspond to the tag of some MultiOutputInfo.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class KindValue(_messages.Message):
"""How to interpret the source element(s) as a side input value.
Messages:
AdditionalProperty: An additional property for a KindValue object.
Fields:
additionalProperties: Properties of the object.
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a KindValue object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
kind = _messages.MessageField('KindValue', 1)
sources = _messages.MessageField('Source', 2, repeated=True)
tag = _messages.StringField(3)
class Sink(_messages.Message):
"""A sink that records can be encoded and written to.
Messages:
CodecValue: The codec to use to encode data written to the sink.
SpecValue: The sink to write to, plus its parameters.
Fields:
codec: The codec to use to encode data written to the sink.
spec: The sink to write to, plus its parameters.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class CodecValue(_messages.Message):
"""The codec to use to encode data written to the sink.
Messages:
AdditionalProperty: An additional property for a CodecValue object.
Fields:
additionalProperties: Properties of the object.
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a CodecValue object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
@encoding.MapUnrecognizedFields('additionalProperties')
class SpecValue(_messages.Message):
"""The sink to write to, plus its parameters.
Messages:
AdditionalProperty: An additional property for a SpecValue object.
Fields:
additionalProperties: Properties of the object.
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a SpecValue object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
codec = _messages.MessageField('CodecValue', 1)
spec = _messages.MessageField('SpecValue', 2)
class Source(_messages.Message):
"""A source that records can be read and decoded from.
Messages:
BaseSpecsValueListEntry: A BaseSpecsValueListEntry object.
CodecValue: The codec to use to decode data read from the source.
SpecValue: The source to read from, plus its parameters.
Fields:
baseSpecs: While splitting, sources may specify the produced bundles as
differences against another source, in order to save backend-side memory
and allow bigger jobs. For details, see SourceSplitRequest. To support
this use case, the full set of parameters of the source is logically
obtained by taking the latest explicitly specified value of each
parameter in the order: base_specs (later items win), spec (overrides
anything in base_specs).
codec: The codec to use to decode data read from the source.
doesNotNeedSplitting: Setting this value to true hints to the framework
that the source doesn't need splitting, and using SourceSplitRequest on
it would yield SOURCE_SPLIT_OUTCOME_USE_CURRENT. E.g. a file splitter
may set this to true when splitting a single file into a set of byte
ranges of appropriate size, and set this to false when splitting a
filepattern into individual files. However, for efficiency, a file
splitter may decide to produce file subranges directly from the
filepattern to avoid a splitting round-trip. See SourceSplitRequest for
an overview of the splitting process. This field is meaningful only in
the Source objects populated by the user (e.g. when filling in a
DerivedSource). Source objects supplied by the framework to the user
don't have this field populated.
metadata: Optionally, metadata for this source can be supplied right away,
avoiding a SourceGetMetadataOperation roundtrip (see
SourceOperationRequest). This field is meaningful only in the Source
objects populated by the user (e.g. when filling in a DerivedSource).
Source objects supplied by the framework to the user don't have this
field populated.
spec: The source to read from, plus its parameters.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class BaseSpecsValueListEntry(_messages.Message):
"""A BaseSpecsValueListEntry object.
Messages:
AdditionalProperty: An additional property for a BaseSpecsValueListEntry
object.
Fields:
additionalProperties: Properties of the object.
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a BaseSpecsValueListEntry object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
@encoding.MapUnrecognizedFields('additionalProperties')
class CodecValue(_messages.Message):
"""The codec to use to decode data read from the source.
Messages:
AdditionalProperty: An additional property for a CodecValue object.
Fields:
additionalProperties: Properties of the object.
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a CodecValue object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
@encoding.MapUnrecognizedFields('additionalProperties')
class SpecValue(_messages.Message):
"""The source to read from, plus its parameters.
Messages:
AdditionalProperty: An additional property for a SpecValue object.
Fields:
additionalProperties: Properties of the object.
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a SpecValue object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
baseSpecs = _messages.MessageField('BaseSpecsValueListEntry', 1, repeated=True)
codec = _messages.MessageField('CodecValue', 2)
doesNotNeedSplitting = _messages.BooleanField(3)
metadata = _messages.MessageField('SourceMetadata', 4)
spec = _messages.MessageField('SpecValue', 5)
class SourceFork(_messages.Message):
"""DEPRECATED in favor of DynamicSourceSplit.
Fields:
primary: DEPRECATED
primarySource: DEPRECATED
residual: DEPRECATED
residualSource: DEPRECATED
"""
primary = _messages.MessageField('SourceSplitShard', 1)
primarySource = _messages.MessageField('DerivedSource', 2)
residual = _messages.MessageField('SourceSplitShard', 3)
residualSource = _messages.MessageField('DerivedSource', 4)
class SourceGetMetadataRequest(_messages.Message):
"""A request to compute the SourceMetadata of a Source.
Fields:
source: Specification of the source whose metadata should be computed.
"""
source = _messages.MessageField('Source', 1)
class SourceGetMetadataResponse(_messages.Message):
"""The result of a SourceGetMetadataOperation.
Fields:
metadata: The computed metadata.
"""
metadata = _messages.MessageField('SourceMetadata', 1)
class SourceMetadata(_messages.Message):
"""Metadata about a Source useful for automatically optimizing and tuning
the pipeline, etc.
Fields:
estimatedSizeBytes: An estimate of the total size (in bytes) of the data
that would be read from this source. This estimate is in terms of
external storage size, before any decompression or other processing done
by the reader.
infinite: Specifies that the size of this source is known to be infinite
(this is a streaming source).
producesSortedKeys: Whether this source is known to produce key/value
pairs with the (encoded) keys in lexicographically sorted order.
"""
estimatedSizeBytes = _messages.IntegerField(1)
infinite = _messages.BooleanField(2)
producesSortedKeys = _messages.BooleanField(3)
class SourceOperationRequest(_messages.Message):
"""A work item that represents the different operations that can be
performed on a user-defined Source specification.
Fields:
getMetadata: Information about a request to get metadata about a source.
split: Information about a request to split a source.
"""
getMetadata = _messages.MessageField('SourceGetMetadataRequest', 1)
split = _messages.MessageField('SourceSplitRequest', 2)
class SourceOperationResponse(_messages.Message):
"""The result of a SourceOperationRequest, specified in
ReportWorkItemStatusRequest.source_operation when the work item is
completed.
Fields:
getMetadata: A response to a request to get metadata about a source.
split: A response to a request to split a source.
"""
getMetadata = _messages.MessageField('SourceGetMetadataResponse', 1)
split = _messages.MessageField('SourceSplitResponse', 2)
class SourceSplitOptions(_messages.Message):
"""Hints for splitting a Source into bundles (parts for parallel processing)
using SourceSplitRequest.
Fields:
desiredBundleSizeBytes: The source should be split into a set of bundles
where the estimated size of each is approximately this many bytes.
desiredShardSizeBytes: DEPRECATED in favor of desired_bundle_size_bytes.
"""
desiredBundleSizeBytes = _messages.IntegerField(1)
desiredShardSizeBytes = _messages.IntegerField(2)
class SourceSplitRequest(_messages.Message):
"""Represents the operation to split a high-level Source specification into
bundles (parts for parallel processing). At a high level, splitting of a
source into bundles happens as follows: SourceSplitRequest is applied to the
source. If it returns SOURCE_SPLIT_OUTCOME_USE_CURRENT, no further splitting
happens and the source is used "as is". Otherwise, splitting is applied
recursively to each produced DerivedSource. As an optimization, for any
Source, if its does_not_need_splitting is true, the framework assumes that
splitting this source would return SOURCE_SPLIT_OUTCOME_USE_CURRENT, and
doesn't initiate a SourceSplitRequest. This applies both to the initial
source being split and to bundles produced from it.
Fields:
options: Hints for tuning the splitting process.
source: Specification of the source to be split.
"""
options = _messages.MessageField('SourceSplitOptions', 1)
source = _messages.MessageField('Source', 2)
class SourceSplitResponse(_messages.Message):
"""The response to a SourceSplitRequest.
Enums:
OutcomeValueValuesEnum: Indicates whether splitting happened and produced
a list of bundles. If this is USE_CURRENT_SOURCE_AS_IS, the current
source should be processed "as is" without splitting. "bundles" is
ignored in this case. If this is SPLITTING_HAPPENED, then "bundles"
contains a list of bundles into which the source was split.
Fields:
bundles: If outcome is SPLITTING_HAPPENED, then this is a list of bundles
into which the source was split. Otherwise this field is ignored. This
list can be empty, which means the source represents an empty input.
outcome: Indicates whether splitting happened and produced a list of
bundles. If this is USE_CURRENT_SOURCE_AS_IS, the current source should
be processed "as is" without splitting. "bundles" is ignored in this
case. If this is SPLITTING_HAPPENED, then "bundles" contains a list of
bundles into which the source was split.
shards: DEPRECATED in favor of bundles.
"""
class OutcomeValueValuesEnum(_messages.Enum):
"""Indicates whether splitting happened and produced a list of bundles. If
this is USE_CURRENT_SOURCE_AS_IS, the current source should be processed
"as is" without splitting. "bundles" is ignored in this case. If this is
SPLITTING_HAPPENED, then "bundles" contains a list of bundles into which
the source was split.
Values:
SOURCE_SPLIT_OUTCOME_UNKNOWN: The source split outcome is unknown, or
unspecified.
SOURCE_SPLIT_OUTCOME_USE_CURRENT: The current source should be processed
"as is" without splitting.
SOURCE_SPLIT_OUTCOME_SPLITTING_HAPPENED: Splitting produced a list of
bundles.
"""
SOURCE_SPLIT_OUTCOME_UNKNOWN = 0
SOURCE_SPLIT_OUTCOME_USE_CURRENT = 1
SOURCE_SPLIT_OUTCOME_SPLITTING_HAPPENED = 2
bundles = _messages.MessageField('DerivedSource', 1, repeated=True)
outcome = _messages.EnumField('OutcomeValueValuesEnum', 2)
shards = _messages.MessageField('SourceSplitShard', 3, repeated=True)
class SourceSplitShard(_messages.Message):
"""DEPRECATED in favor of DerivedSource.
Enums:
DerivationModeValueValuesEnum: DEPRECATED
Fields:
derivationMode: DEPRECATED
source: DEPRECATED
"""
class DerivationModeValueValuesEnum(_messages.Enum):
"""DEPRECATED
Values:
SOURCE_DERIVATION_MODE_UNKNOWN: The source derivation is unknown, or
unspecified.
SOURCE_DERIVATION_MODE_INDEPENDENT: Produce a completely independent
Source with no base.
SOURCE_DERIVATION_MODE_CHILD_OF_CURRENT: Produce a Source based on the
Source being split.
SOURCE_DERIVATION_MODE_SIBLING_OF_CURRENT: Produce a Source based on the
base of the Source being split.
"""
SOURCE_DERIVATION_MODE_UNKNOWN = 0
SOURCE_DERIVATION_MODE_INDEPENDENT = 1
SOURCE_DERIVATION_MODE_CHILD_OF_CURRENT = 2
SOURCE_DERIVATION_MODE_SIBLING_OF_CURRENT = 3
derivationMode = _messages.EnumField('DerivationModeValueValuesEnum', 1)
source = _messages.MessageField('Source', 2)
class SplitInt64(_messages.Message):
"""A representation of an int64, n, that is immune to precision loss when
encoded in JSON.
Fields:
highBits: The high order bits, including the sign: n >> 32.
lowBits: The low order bits: n & 0xffffffff.
"""
highBits = _messages.IntegerField(1, variant=_messages.Variant.INT32)
lowBits = _messages.IntegerField(2, variant=_messages.Variant.UINT32)
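# --- Editor's illustrative sketch; not part of the generated API surface. ---
# SplitInt64 avoids the precision loss large integers suffer as JSON numbers;
# the two fields recombine as (highBits << 32) | lowBits.
def _split_int64_example(n):
  """Hypothetical helper: build a SplitInt64 from an ordinary Python integer."""
  return SplitInt64(highBits=(n >> 32), lowBits=(n & 0xffffffff))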
class StageSource(_messages.Message):
"""Description of an input or output of an execution stage.
Fields:
name: Dataflow service generated name for this source.
originalTransformOrCollection: User name for the original user transform
or collection with which this source is most closely associated.
sizeBytes: Size of the source, if measurable.
userName: Human-readable name for this source; may be user or system
generated.
"""
name = _messages.StringField(1)
originalTransformOrCollection = _messages.StringField(2)
sizeBytes = _messages.IntegerField(3)
userName = _messages.StringField(4)
class StandardQueryParameters(_messages.Message):
"""Query parameters accepted by all methods.
Enums:
FXgafvValueValuesEnum: V1 error format.
AltValueValuesEnum: Data format for response.
Fields:
f__xgafv: V1 error format.
access_token: OAuth access token.
alt: Data format for response.
bearer_token: OAuth bearer token.
callback: JSONP
fields: Selector specifying which fields to include in a partial response.
key: API key. Your API key identifies your project and provides you with
API access, quota, and reports. Required unless you provide an OAuth 2.0
token.
oauth_token: OAuth 2.0 token for the current user.
pp: Pretty-print response.
prettyPrint: Returns response with indentations and line breaks.
quotaUser: Available to use for quota purposes for server-side
applications. Can be any arbitrary string assigned to a user, but should
not exceed 40 characters.
trace: A tracing token of the form "token:<tokenid>" to include in api
requests.
uploadType: Legacy upload protocol for media (e.g. "media", "multipart").
upload_protocol: Upload protocol for media (e.g. "raw", "multipart").
"""
class AltValueValuesEnum(_messages.Enum):
"""Data format for response.
Values:
json: Responses with Content-Type of application/json
media: Media download with context-dependent Content-Type
proto: Responses with Content-Type of application/x-protobuf
"""
json = 0
media = 1
proto = 2
class FXgafvValueValuesEnum(_messages.Enum):
"""V1 error format.
Values:
_1: v1 error format
_2: v2 error format
"""
_1 = 0
_2 = 1
f__xgafv = _messages.EnumField('FXgafvValueValuesEnum', 1)
access_token = _messages.StringField(2)
alt = _messages.EnumField('AltValueValuesEnum', 3, default=u'json')
bearer_token = _messages.StringField(4)
callback = _messages.StringField(5)
fields = _messages.StringField(6)
key = _messages.StringField(7)
oauth_token = _messages.StringField(8)
pp = _messages.BooleanField(9, default=True)
prettyPrint = _messages.BooleanField(10, default=True)
quotaUser = _messages.StringField(11)
trace = _messages.StringField(12)
uploadType = _messages.StringField(13)
upload_protocol = _messages.StringField(14)
class StateFamilyConfig(_messages.Message):
"""State family configuration.
Fields:
isRead: If true, this family corresponds to a read operation.
stateFamily: The state family value.
"""
isRead = _messages.BooleanField(1)
stateFamily = _messages.StringField(2)
class Status(_messages.Message):
"""The `Status` type defines a logical error model that is suitable for
different programming environments, including REST APIs and RPC APIs. It is
used by [gRPC](https://github.com/grpc). The error model is designed to be:
- Simple to use and understand for most users - Flexible enough to meet
unexpected needs # Overview The `Status` message contains three pieces of
data: error code, error message, and error details. The error code should be
an enum value of google.rpc.Code, but it may accept additional error codes
if needed. The error message should be a developer-facing English message
that helps developers *understand* and *resolve* the error. If a localized
user-facing error message is needed, put the localized message in the error
details or localize it in the client. The optional error details may contain
arbitrary information about the error. There is a predefined set of error
detail types in the package `google.rpc` that can be used for common error
conditions. # Language mapping The `Status` message is the logical
representation of the error model, but it is not necessarily the actual wire
format. When the `Status` message is exposed in different client libraries
and different wire protocols, it can be mapped differently. For example, it
will likely be mapped to some exceptions in Java, but more likely mapped to
some error codes in C. # Other uses The error model and the `Status`
message can be used in a variety of environments, either with or without
APIs, to provide a consistent developer experience across different
environments. Example uses of this error model include: - Partial errors.
If a service needs to return partial errors to the client, it may embed
the `Status` in the normal response to indicate the partial errors. -
Workflow errors. A typical workflow has multiple steps. Each step may
have a `Status` message for error reporting. - Batch operations. If a
client uses batch request and batch response, the `Status` message
should be used directly inside batch response, one for each error sub-
response. - Asynchronous operations. If an API call embeds asynchronous
operation results in its response, the status of those operations should
be represented directly using the `Status` message. - Logging. If some
API errors are stored in logs, the message `Status` could be used
directly after any stripping needed for security/privacy reasons.
Messages:
DetailsValueListEntry: A DetailsValueListEntry object.
Fields:
code: The status code, which should be an enum value of google.rpc.Code.
details: A list of messages that carry the error details. There is a
common set of message types for APIs to use.
message: A developer-facing error message, which should be in English. Any
user-facing error message should be localized and sent in the
google.rpc.Status.details field, or localized by the client.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class DetailsValueListEntry(_messages.Message):
"""A DetailsValueListEntry object.
Messages:
AdditionalProperty: An additional property for a DetailsValueListEntry
object.
Fields:
additionalProperties: Properties of the object. Contains field @type
with type URL.
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a DetailsValueListEntry object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
code = _messages.IntegerField(1, variant=_messages.Variant.INT32)
details = _messages.MessageField('DetailsValueListEntry', 2, repeated=True)
message = _messages.StringField(3)
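# --- Editor's illustrative sketch; not part of the generated API surface. ---
# protorpc-style messages accept keyword arguments, so a minimal Status for a
# partial error can be built roughly as below; the numeric code 3
# (INVALID_ARGUMENT in google.rpc.Code) is an assumption, not defined here.
def _invalid_argument_status_example(text):
  """Hypothetical helper: a Status carrying a developer-facing English message."""
  return Status(code=3, message=text)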
class Step(_messages.Message):
"""Defines a particular step within a Cloud Dataflow job. A job consists of
multiple steps, each of which performs some specific operation as part of
the overall job. Data is typically passed from one step to another as part
of the job. Here's an example of a sequence of steps which together
implement a Map-Reduce job: * Read a collection of data from some source,
parsing the collection's elements. * Validate the elements. *
Apply a user-defined function to map each element to some value and
extract an element-specific key value. * Group elements with the same key
into a single element with that key, transforming a multiply-keyed
collection into a uniquely-keyed collection. * Write the elements out
to some data sink. Note that the Cloud Dataflow service may be used to run
many different types of jobs, not just Map-Reduce.
Messages:
PropertiesValue: Named properties associated with the step. Each kind of
predefined step has its own required set of properties. Must be provided
on Create. Only retrieved with JOB_VIEW_ALL.
Fields:
kind: The kind of step in the Cloud Dataflow job.
name: The name that identifies the step. This must be unique for each step
with respect to all other steps in the Cloud Dataflow job.
properties: Named properties associated with the step. Each kind of
predefined step has its own required set of properties. Must be provided
on Create. Only retrieved with JOB_VIEW_ALL.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class PropertiesValue(_messages.Message):
"""Named properties associated with the step. Each kind of predefined step
has its own required set of properties. Must be provided on Create. Only
retrieved with JOB_VIEW_ALL.
Messages:
AdditionalProperty: An additional property for a PropertiesValue object.
Fields:
additionalProperties: Properties of the object.
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a PropertiesValue object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
kind = _messages.StringField(1)
name = _messages.StringField(2)
properties = _messages.MessageField('PropertiesValue', 3)
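# --- Editor's illustrative sketch; not part of the generated API surface. ---
# A minimal Step, omitting the properties a real predefined step would require
# (the kind string used here is an assumption, not defined above):
def _minimal_step_example():
  """Hypothetical helper: a named step of some predefined kind."""
  return Step(kind='ParallelRead', name='ReadFromSource')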
class StreamLocation(_messages.Message):
"""Describes a stream of data, either as input to be processed or as output
of a streaming Dataflow job.
Fields:
customSourceLocation: The stream is a custom source.
pubsubLocation: The stream is a pubsub stream.
sideInputLocation: The stream is a streaming side input.
streamingStageLocation: The stream is part of another computation within
the current streaming Dataflow job.
"""
customSourceLocation = _messages.MessageField('CustomSourceLocation', 1)
pubsubLocation = _messages.MessageField('PubsubLocation', 2)
sideInputLocation = _messages.MessageField('StreamingSideInputLocation', 3)
streamingStageLocation = _messages.MessageField('StreamingStageLocation', 4)
class StreamingComputationConfig(_messages.Message):
"""Configuration information for a single streaming computation.
Fields:
computationId: Unique identifier for this computation.
instructions: Instructions that comprise the computation.
stageName: Stage name of this computation.
systemName: System defined name for this computation.
"""
computationId = _messages.StringField(1)
instructions = _messages.MessageField('ParallelInstruction', 2, repeated=True)
stageName = _messages.StringField(3)
systemName = _messages.StringField(4)
class StreamingComputationRanges(_messages.Message):
"""Describes full or partial data disk assignment information of the
computation ranges.
Fields:
computationId: The ID of the computation.
rangeAssignments: Data disk assignments for ranges from this computation.
"""
computationId = _messages.StringField(1)
rangeAssignments = _messages.MessageField('KeyRangeDataDiskAssignment', 2, repeated=True)
class StreamingComputationTask(_messages.Message):
"""A task which describes what action should be performed for the specified
streaming computation ranges.
Enums:
TaskTypeValueValuesEnum: A type of streaming computation task.
Fields:
computationRanges: Contains ranges of a streaming computation this task
should apply to.
dataDisks: Describes the set of data disks this task should apply to.
taskType: A type of streaming computation task.
"""
class TaskTypeValueValuesEnum(_messages.Enum):
"""A type of streaming computation task.
Values:
STREAMING_COMPUTATION_TASK_UNKNOWN: The streaming computation task is
unknown, or unspecified.
STREAMING_COMPUTATION_TASK_STOP: Stop processing specified streaming
computation range(s).
STREAMING_COMPUTATION_TASK_START: Start processing specified streaming
computation range(s).
"""
STREAMING_COMPUTATION_TASK_UNKNOWN = 0
STREAMING_COMPUTATION_TASK_STOP = 1
STREAMING_COMPUTATION_TASK_START = 2
computationRanges = _messages.MessageField('StreamingComputationRanges', 1, repeated=True)
dataDisks = _messages.MessageField('MountedDataDisk', 2, repeated=True)
taskType = _messages.EnumField('TaskTypeValueValuesEnum', 3)
class StreamingConfigTask(_messages.Message):
"""A task that carries configuration information for streaming computations.
Messages:
UserStepToStateFamilyNameMapValue: Map from user step names to state
families.
Fields:
streamingComputationConfigs: Set of computation configuration information.
userStepToStateFamilyNameMap: Map from user step names to state families.
windmillServiceEndpoint: If present, the worker must use this endpoint to
communicate with Windmill Service dispatchers, otherwise the worker must
continue to use whatever endpoint it had been using.
windmillServicePort: If present, the worker must use this port to
communicate with Windmill Service dispatchers. Only applicable when
windmill_service_endpoint is specified.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class UserStepToStateFamilyNameMapValue(_messages.Message):
"""Map from user step names to state families.
Messages:
AdditionalProperty: An additional property for a
UserStepToStateFamilyNameMapValue object.
Fields:
additionalProperties: Additional properties of type
UserStepToStateFamilyNameMapValue
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a UserStepToStateFamilyNameMapValue
object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
streamingComputationConfigs = _messages.MessageField('StreamingComputationConfig', 1, repeated=True)
userStepToStateFamilyNameMap = _messages.MessageField('UserStepToStateFamilyNameMapValue', 2)
windmillServiceEndpoint = _messages.StringField(3)
windmillServicePort = _messages.IntegerField(4)
class StreamingSetupTask(_messages.Message):
"""A task which initializes part of a streaming Dataflow job.
Fields:
drain: The user has requested drain.
receiveWorkPort: The TCP port on which the worker should listen for
messages from other streaming computation workers.
streamingComputationTopology: The global topology of the streaming
Dataflow job.
workerHarnessPort: The TCP port used by the worker to communicate with the
Dataflow worker harness.
"""
drain = _messages.BooleanField(1)
receiveWorkPort = _messages.IntegerField(2, variant=_messages.Variant.INT32)
streamingComputationTopology = _messages.MessageField('TopologyConfig', 3)
workerHarnessPort = _messages.IntegerField(4, variant=_messages.Variant.INT32)
class StreamingSideInputLocation(_messages.Message):
"""Identifies the location of a streaming side input.
Fields:
stateFamily: Identifies the state family where this side input is stored.
tag: Identifies the particular side input within the streaming Dataflow
job.
"""
stateFamily = _messages.StringField(1)
tag = _messages.StringField(2)
class StreamingStageLocation(_messages.Message):
"""Identifies the location of a streaming computation stage, for stage-to-
stage communication.
Fields:
streamId: Identifies the particular stream within the streaming Dataflow
job.
"""
streamId = _messages.StringField(1)
class StringList(_messages.Message):
"""A metric value representing a list of strings.
Fields:
elements: Elements of the list.
"""
elements = _messages.StringField(1, repeated=True)
class StructuredMessage(_messages.Message):
"""A rich message format, including a human readable string, a key for
identifying the message, and structured data associated with the message for
programmatic consumption.
Fields:
messageKey: Identifier for this message type. Used by external systems to
internationalize or personalize the message.
messageText: Human-readable version of message.
parameters: The structured data associated with this message.
"""
messageKey = _messages.StringField(1)
messageText = _messages.StringField(2)
parameters = _messages.MessageField('Parameter', 3, repeated=True)
class TaskRunnerSettings(_messages.Message):
"""Taskrunner configuration settings.
Fields:
alsologtostderr: Whether to also send taskrunner log info to stderr.
baseTaskDir: The location on the worker for task-specific subdirectories.
baseUrl: The base URL for the taskrunner to use when accessing Google
Cloud APIs. When workers access Google Cloud APIs, they logically do so
via relative URLs. If this field is specified, it supplies the base URL
to use for resolving these relative URLs. The normative algorithm used
is defined by RFC 1808, "Relative Uniform Resource Locators". If not
specified, the default value is "http://www.googleapis.com/"
commandlinesFileName: The file to store preprocessing commands in.
continueOnException: Whether to continue taskrunner if an exception is
hit.
dataflowApiVersion: The API version of endpoint, e.g. "v1b3"
harnessCommand: The command to launch the worker harness.
languageHint: The suggested backend language.
logDir: The directory on the VM to store logs.
logToSerialconsole: Whether to send taskrunner log info to Google Compute
Engine VM serial console.
logUploadLocation: Indicates where to put logs. If this is not specified,
the logs will not be uploaded. The supported resource type is: Google
Cloud Storage: storage.googleapis.com/{bucket}/{object}
bucket.storage.googleapis.com/{object}
oauthScopes: The OAuth2 scopes to be requested by the taskrunner in order
to access the Cloud Dataflow API.
parallelWorkerSettings: The settings to pass to the parallel worker
harness.
streamingWorkerMainClass: The streaming worker main class name.
taskGroup: The UNIX group ID on the worker VM to use for tasks launched by
taskrunner; e.g. "wheel".
taskUser: The UNIX user ID on the worker VM to use for tasks launched by
taskrunner; e.g. "root".
tempStoragePrefix: The prefix of the resources the taskrunner should use
for temporary storage. The supported resource type is: Google Cloud
Storage: storage.googleapis.com/{bucket}/{object}
bucket.storage.googleapis.com/{object}
vmId: The ID string of the VM.
workflowFileName: The file to store the workflow in.
"""
alsologtostderr = _messages.BooleanField(1)
baseTaskDir = _messages.StringField(2)
baseUrl = _messages.StringField(3)
commandlinesFileName = _messages.StringField(4)
continueOnException = _messages.BooleanField(5)
dataflowApiVersion = _messages.StringField(6)
harnessCommand = _messages.StringField(7)
languageHint = _messages.StringField(8)
logDir = _messages.StringField(9)
logToSerialconsole = _messages.BooleanField(10)
logUploadLocation = _messages.StringField(11)
oauthScopes = _messages.StringField(12, repeated=True)
parallelWorkerSettings = _messages.MessageField('WorkerSettings', 13)
streamingWorkerMainClass = _messages.StringField(14)
taskGroup = _messages.StringField(15)
taskUser = _messages.StringField(16)
tempStoragePrefix = _messages.StringField(17)
vmId = _messages.StringField(18)
workflowFileName = _messages.StringField(19)
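# --- Editor's illustrative sketch; not part of the generated API surface. ---
# The baseUrl field above is combined with relative URLs per RFC 1808, the same
# resolution the standard library performs, e.g. (Python 2 names assumed):
#   import urlparse
#   urlparse.urljoin('http://www.googleapis.com/', 'dataflow/v1b3/projects')
#   # -> 'http://www.googleapis.com/dataflow/v1b3/projects'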
class TemplateMetadata(_messages.Message):
"""Metadata describing a template.
Fields:
description: Optional. A description of the template.
name: Required. The name of the template.
parameters: The parameters for the template.
"""
description = _messages.StringField(1)
name = _messages.StringField(2)
parameters = _messages.MessageField('ParameterMetadata', 3, repeated=True)
class TopologyConfig(_messages.Message):
"""Global topology of the streaming Dataflow job, including all computations
and their sharded locations.
Messages:
UserStageToComputationNameMapValue: Maps user stage names to stable
computation names.
Fields:
computations: The computations associated with a streaming Dataflow job.
dataDiskAssignments: The disks assigned to a streaming Dataflow job.
forwardingKeyBits: The size (in bits) of keys that will be assigned to
source messages.
persistentStateVersion: Version number for persistent state.
userStageToComputationNameMap: Maps user stage names to stable computation
names.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class UserStageToComputationNameMapValue(_messages.Message):
"""Maps user stage names to stable computation names.
Messages:
AdditionalProperty: An additional property for a
UserStageToComputationNameMapValue object.
Fields:
additionalProperties: Additional properties of type
UserStageToComputationNameMapValue
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a UserStageToComputationNameMapValue
object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
computations = _messages.MessageField('ComputationTopology', 1, repeated=True)
dataDiskAssignments = _messages.MessageField('DataDiskAssignment', 2, repeated=True)
forwardingKeyBits = _messages.IntegerField(3, variant=_messages.Variant.INT32)
persistentStateVersion = _messages.IntegerField(4, variant=_messages.Variant.INT32)
userStageToComputationNameMap = _messages.MessageField('UserStageToComputationNameMapValue', 5)
class TransformSummary(_messages.Message):
"""Description of the type, names/ids, and input/outputs for a transform.
Enums:
KindValueValuesEnum: Type of transform.
Fields:
displayData: Transform-specific display data.
id: SDK generated id of this transform instance.
inputCollectionName: User names for all collection inputs to this
transform.
kind: Type of transform.
name: User provided name for this transform instance.
outputCollectionName: User names for all collection outputs to this
transform.
"""
class KindValueValuesEnum(_messages.Enum):
"""Type of transform.
Values:
UNKNOWN_KIND: Unrecognized transform type.
PAR_DO_KIND: ParDo transform.
GROUP_BY_KEY_KIND: Group By Key transform.
FLATTEN_KIND: Flatten transform.
READ_KIND: Read transform.
WRITE_KIND: Write transform.
CONSTANT_KIND: Constructs from a constant value, such as with Create.of.
SINGLETON_KIND: Creates a Singleton view of a collection.
SHUFFLE_KIND: Opening or closing a shuffle session, often as part of a
GroupByKey.
"""
UNKNOWN_KIND = 0
PAR_DO_KIND = 1
GROUP_BY_KEY_KIND = 2
FLATTEN_KIND = 3
READ_KIND = 4
WRITE_KIND = 5
CONSTANT_KIND = 6
SINGLETON_KIND = 7
SHUFFLE_KIND = 8
displayData = _messages.MessageField('DisplayData', 1, repeated=True)
id = _messages.StringField(2)
inputCollectionName = _messages.StringField(3, repeated=True)
kind = _messages.EnumField('KindValueValuesEnum', 4)
name = _messages.StringField(5)
outputCollectionName = _messages.StringField(6, repeated=True)
class WorkItem(_messages.Message):
"""WorkItem represents basic information about a WorkItem to be executed in
the cloud.
Fields:
configuration: Work item-specific configuration as an opaque blob.
id: Identifies this WorkItem.
initialReportIndex: The initial index to use when reporting the status of
the WorkItem.
jobId: Identifies the workflow job this WorkItem belongs to.
leaseExpireTime: Time when the lease on this Work will expire.
mapTask: Additional information for MapTask WorkItems.
packages: Any required packages that need to be fetched in order to
execute this WorkItem.
projectId: Identifies the cloud project this WorkItem belongs to.
reportStatusInterval: Recommended reporting interval.
seqMapTask: Additional information for SeqMapTask WorkItems.
shellTask: Additional information for ShellTask WorkItems.
sourceOperationTask: Additional information for source operation
WorkItems.
streamingComputationTask: Additional information for
StreamingComputationTask WorkItems.
streamingConfigTask: Additional information for StreamingConfigTask
WorkItems.
streamingSetupTask: Additional information for StreamingSetupTask
WorkItems.
"""
configuration = _messages.StringField(1)
id = _messages.IntegerField(2)
initialReportIndex = _messages.IntegerField(3)
jobId = _messages.StringField(4)
leaseExpireTime = _messages.StringField(5)
mapTask = _messages.MessageField('MapTask', 6)
packages = _messages.MessageField('Package', 7, repeated=True)
projectId = _messages.StringField(8)
reportStatusInterval = _messages.StringField(9)
seqMapTask = _messages.MessageField('SeqMapTask', 10)
shellTask = _messages.MessageField('ShellTask', 11)
sourceOperationTask = _messages.MessageField('SourceOperationRequest', 12)
streamingComputationTask = _messages.MessageField('StreamingComputationTask', 13)
streamingConfigTask = _messages.MessageField('StreamingConfigTask', 14)
streamingSetupTask = _messages.MessageField('StreamingSetupTask', 15)
class WorkItemServiceState(_messages.Message):
"""The Dataflow service's idea of the current state of a WorkItem being
processed by a worker.
Messages:
HarnessDataValue: Other data returned by the service, specific to the
particular worker harness.
Fields:
harnessData: Other data returned by the service, specific to the
particular worker harness.
leaseExpireTime: Time at which the current lease will expire.
metricShortId: The short ids that workers should use in subsequent metric
updates. Workers should strive to use short ids whenever possible, but
it is ok to request the short_id again if a worker lost track of it
(e.g. if the worker is recovering from a crash). NOTE: it is possible
that the response may have short ids for a subset of the metrics.
nextReportIndex: The index value to use for the next report sent by the
worker. Note: If the report call fails for whatever reason, the worker
should reuse this index for subsequent report attempts.
reportStatusInterval: New recommended reporting interval.
splitRequest: The progress point in the WorkItem where the Dataflow
service suggests that the worker truncate the task.
suggestedStopPoint: DEPRECATED in favor of split_request.
suggestedStopPosition: Obsolete, always empty.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class HarnessDataValue(_messages.Message):
"""Other data returned by the service, specific to the particular worker
harness.
Messages:
AdditionalProperty: An additional property for a HarnessDataValue
object.
Fields:
additionalProperties: Properties of the object.
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a HarnessDataValue object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
harnessData = _messages.MessageField('HarnessDataValue', 1)
leaseExpireTime = _messages.StringField(2)
metricShortId = _messages.MessageField('MetricShortId', 3, repeated=True)
nextReportIndex = _messages.IntegerField(4)
reportStatusInterval = _messages.StringField(5)
splitRequest = _messages.MessageField('ApproximateSplitRequest', 6)
suggestedStopPoint = _messages.MessageField('ApproximateProgress', 7)
suggestedStopPosition = _messages.MessageField('Position', 8)
class WorkItemStatus(_messages.Message):
"""Conveys a worker's progress through the work described by a WorkItem.
Fields:
completed: True if the WorkItem was completed (successfully or
unsuccessfully).
counterUpdates: Worker output counters for this WorkItem.
dynamicSourceSplit: See documentation of stop_position.
errors: Specifies errors which occurred during processing. If errors are
provided, and completed = true, then the WorkItem is considered to have
failed.
metricUpdates: DEPRECATED in favor of counter_updates.
progress: DEPRECATED in favor of reported_progress.
reportIndex: The report index. When a WorkItem is leased, the lease will
contain an initial report index. When a WorkItem's status is reported
to the system, the report should be sent with that report index, and the
response will contain the index the worker should use for the next
report. Reports received with unexpected index values will be rejected
by the service. In order to preserve idempotency, the worker should not
alter the contents of a report, even if the worker must submit the same
report multiple times before getting back a response. The worker should
not submit a subsequent report until the response for the previous
report had been received from the service.
reportedProgress: The worker's progress through this WorkItem.
requestedLeaseDuration: Amount of time the worker requests for its lease.
sourceFork: DEPRECATED in favor of dynamic_source_split.
sourceOperationResponse: If the work item represented a
SourceOperationRequest, and the work is completed, contains the result
of the operation.
stopPosition: A worker may split an active map task in two parts,
"primary" and "residual", continuing to process the primary part and
returning the residual part into the pool of available work. This event
is called a "dynamic split" and is critical to the dynamic work
rebalancing feature. The two obtained sub-tasks are called "parts" of
the split. The parts, if concatenated, must represent the same input as
would be read by the current task if the split did not happen. The exact
way in which the original task is decomposed into the two parts is
specified either as a position demarcating them (stop_position), or
explicitly as two DerivedSources, if this task consumes a user-defined
source type (dynamic_source_split). The "current" task is adjusted as a
result of the split: after a task with range [A, B) sends a
stop_position update at C, its range is considered to be [A, C), e.g.: *
Progress should be interpreted relative to the new range, e.g. "75%
completed" means "75% of [A, C) completed" * The worker should interpret
proposed_stop_position relative to the new range, e.g. "split at 68%"
should be interpreted as "split at 68% of [A, C)". * If the worker
chooses to split again using stop_position, only stop_positions in [A,
C) will be accepted. * Etc. dynamic_source_split has similar semantics:
e.g., if a task with source S splits using dynamic_source_split into {P,
R} (where P and R must be together equivalent to S), then subsequent
progress and proposed_stop_position should be interpreted relative to P,
and in a potential subsequent dynamic_source_split into {P', R'}, P' and
R' must be together equivalent to P, etc.
workItemId: Identifies the WorkItem.
"""
completed = _messages.BooleanField(1)
counterUpdates = _messages.MessageField('CounterUpdate', 2, repeated=True)
dynamicSourceSplit = _messages.MessageField('DynamicSourceSplit', 3)
errors = _messages.MessageField('Status', 4, repeated=True)
metricUpdates = _messages.MessageField('MetricUpdate', 5, repeated=True)
progress = _messages.MessageField('ApproximateProgress', 6)
reportIndex = _messages.IntegerField(7)
reportedProgress = _messages.MessageField('ApproximateReportedProgress', 8)
requestedLeaseDuration = _messages.StringField(9)
sourceFork = _messages.MessageField('SourceFork', 10)
sourceOperationResponse = _messages.MessageField('SourceOperationResponse', 11)
stopPosition = _messages.MessageField('Position', 12)
workItemId = _messages.StringField(13)
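# --- Editor's illustrative sketch; not part of the generated API surface. ---
# Worked example of the stop_position semantics described above, using simple
# numeric offsets: a task over [0, 200) that reports stop_position 100 shrinks
# to [0, 100), so a later "75% completed" report means offset 75, not 150.
def _fraction_consumed_after_split_example(start, stop, current):
  """Hypothetical helper: fraction of the (possibly shrunken) range consumed."""
  return float(current - start) / float(stop - start)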
class WorkerHealthReport(_messages.Message):
"""WorkerHealthReport contains information about the health of a worker.
The VM should be identified by the labels attached to the WorkerMessage that
this health ping belongs to.
Messages:
PodsValueListEntry: A PodsValueListEntry object.
Fields:
pods: The pods running on the worker. See: http://kubernetes.io/v1.1/docs
/api-reference/v1/definitions.html#_v1_pod This field is used by the
worker to send the status of the individual containers running on each
worker.
reportInterval: The interval at which the worker is sending health
reports. The default value of 0 should be interpreted as the field is
not being explicitly set by the worker.
vmIsHealthy: Whether the VM is healthy.
vmStartupTime: The time the VM was booted.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class PodsValueListEntry(_messages.Message):
"""A PodsValueListEntry object.
Messages:
AdditionalProperty: An additional property for a PodsValueListEntry
object.
Fields:
additionalProperties: Properties of the object.
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a PodsValueListEntry object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
pods = _messages.MessageField('PodsValueListEntry', 1, repeated=True)
reportInterval = _messages.StringField(2)
vmIsHealthy = _messages.BooleanField(3)
vmStartupTime = _messages.StringField(4)
class WorkerHealthReportResponse(_messages.Message):
"""WorkerHealthReportResponse contains information returned to the worker in
response to a health ping.
Fields:
reportInterval: A positive value indicates the worker should change its
reporting interval to the specified value. The default value of zero
means no change in report rate is requested by the server.
"""
reportInterval = _messages.StringField(1)
class WorkerMessage(_messages.Message):
"""WorkerMessage provides information to the backend about a worker.
Messages:
LabelsValue: Labels are used to group WorkerMessages. For example, a
worker_message about a particular container might have the labels: {
"JOB_ID": "2015-04-22", "WORKER_ID": "wordcount-vm-2015\u2026"
"CONTAINER_TYPE": "worker", "CONTAINER_ID": "ac1234def"} Label tags
typically correspond to Label enum values. However, for ease of
development other strings can be used as tags. LABEL_UNSPECIFIED should
not be used here.
Fields:
labels: Labels are used to group WorkerMessages. For example, a
worker_message about a particular container might have the labels: {
"JOB_ID": "2015-04-22", "WORKER_ID": "wordcount-vm-2015\u2026"
"CONTAINER_TYPE": "worker", "CONTAINER_ID": "ac1234def"} Label tags
typically correspond to Label enum values. However, for ease of
development other strings can be used as tags. LABEL_UNSPECIFIED should
not be used here.
time: The timestamp of the worker_message.
workerHealthReport: The health of a worker.
workerMessageCode: A worker message code.
workerMetrics: Resource metrics reported by workers.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class LabelsValue(_messages.Message):
"""Labels are used to group WorkerMessages. For example, a worker_message
about a particular container might have the labels: { "JOB_ID":
"2015-04-22", "WORKER_ID": "wordcount-vm-2015\u2026" "CONTAINER_TYPE":
"worker", "CONTAINER_ID": "ac1234def"} Label tags typically correspond
to Label enum values. However, for ease of development other strings can
be used as tags. LABEL_UNSPECIFIED should not be used here.
Messages:
AdditionalProperty: An additional property for a LabelsValue object.
Fields:
additionalProperties: Additional properties of type LabelsValue
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a LabelsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
labels = _messages.MessageField('LabelsValue', 1)
time = _messages.StringField(2)
workerHealthReport = _messages.MessageField('WorkerHealthReport', 3)
workerMessageCode = _messages.MessageField('WorkerMessageCode', 4)
workerMetrics = _messages.MessageField('ResourceUtilizationReport', 5)
class WorkerMessageCode(_messages.Message):
"""A message code is used to report status and error messages to the
service. The message codes are intended to be machine readable. The service
will take care of translating these into user understandable messages if
necessary. Example use cases: 1. Worker processes reporting successful
startup. 2. Worker processes reporting specific errors (e.g. package
staging failure).
Messages:
ParametersValue: Parameters contains specific information about the code.
This is a struct to allow parameters of different types. Examples: 1.
For a "HARNESS_STARTED" message parameters might provide the name of
the worker and additional data like timing information. 2. For a
"GCS_DOWNLOAD_ERROR" parameters might contain fields listing the GCS
objects being downloaded and fields containing errors. In general
complex data structures should be avoided. If a worker needs to send a
specific and complicated data structure then please consider defining a
new proto and adding it to the data oneof in WorkerMessageResponse.
Conventions: Parameters should only be used for information that isn't
typically passed as a label. hostname and other worker identifiers
should almost always be passed as labels since they will be included on
most messages.
Fields:
code: The code is a string intended for consumption by a machine that
identifies the type of message being sent. Examples: 1.
"HARNESS_STARTED" might be used to indicate the worker harness has
started. 2. "GCS_DOWNLOAD_ERROR" might be used to indicate an error
downloading a GCS file as part of the boot process of one of the
worker containers. This is a string and not an enum to make it easy to
add new codes without waiting for an API change.
parameters: Parameters contains specific information about the code. This
is a struct to allow parameters of different types. Examples: 1. For a
"HARNESS_STARTED" message parameters might provide the name of the
worker and additional data like timing information. 2. For a
"GCS_DOWNLOAD_ERROR" parameters might contain fields listing the GCS
objects being downloaded and fields containing errors. In general
complex data structures should be avoided. If a worker needs to send a
specific and complicated data structure then please consider defining a
new proto and adding it to the data oneof in WorkerMessageResponse.
Conventions: Parameters should only be used for information that isn't
typically passed as a label. hostname and other worker identifiers
should almost always be passed as labels since they will be included on
most messages.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class ParametersValue(_messages.Message):
"""Parameters contains specific information about the code. This is a
struct to allow parameters of different types. Examples: 1. For a
"HARNESS_STARTED" message parameters might provide the name of the
worker and additional data like timing information. 2. For a
"GCS_DOWNLOAD_ERROR" parameters might contain fields listing the GCS
objects being downloaded and fields containing errors. In general complex
data structures should be avoided. If a worker needs to send a specific
and complicated data structure then please consider defining a new proto
and adding it to the data oneof in WorkerMessageResponse. Conventions:
Parameters should only be used for information that isn't typically passed
as a label. hostname and other worker identifiers should almost always be
passed as labels since they will be included on most messages.
Messages:
AdditionalProperty: An additional property for a ParametersValue object.
Fields:
additionalProperties: Properties of the object.
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a ParametersValue object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
code = _messages.StringField(1)
parameters = _messages.MessageField('ParametersValue', 2)
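# --- Editor's illustrative sketch; not part of the generated API surface. ---
# Following the conventions above, worker identity belongs in WorkerMessage
# labels and only code-specific details go in parameters; the simplest
# well-formed code uses one of the documented example strings:
def _harness_started_code_example():
  """Hypothetical helper: a machine-readable 'worker harness started' code."""
  return WorkerMessageCode(code='HARNESS_STARTED')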
class WorkerMessageResponse(_messages.Message):
"""A worker_message response allows the server to pass information to the
sender.
Fields:
workerHealthReportResponse: The service's response to a worker's health
report.
workerMetricsResponse: Service's response to reporting worker metrics
(currently empty).
"""
workerHealthReportResponse = _messages.MessageField('WorkerHealthReportResponse', 1)
workerMetricsResponse = _messages.MessageField('ResourceUtilizationReportResponse', 2)
class WorkerPool(_messages.Message):
"""Describes one particular pool of Cloud Dataflow workers to be
instantiated by the Cloud Dataflow service in order to perform the
computations required by a job. Note that a workflow job may use multiple
pools, in order to match the various computational requirements of the
various stages of the job.
Enums:
DefaultPackageSetValueValuesEnum: The default package set to install.
This allows the service to select a default set of packages which are
useful to worker harnesses written in a particular language.
IpConfigurationValueValuesEnum: Configuration for VM IPs.
TeardownPolicyValueValuesEnum: Sets the policy for determining when to
turn down the worker pool. Allowed values are: `TEARDOWN_ALWAYS`,
`TEARDOWN_ON_SUCCESS`, and `TEARDOWN_NEVER`. `TEARDOWN_ALWAYS` means
workers are always torn down regardless of whether the job succeeds.
`TEARDOWN_ON_SUCCESS` means workers are torn down if the job succeeds.
`TEARDOWN_NEVER` means the workers are never torn down. If the workers
are not torn down by the service, they will continue to run and use
Google Compute Engine VM resources in the user's project until they are
explicitly terminated by the user. Because of this, Google recommends
using the `TEARDOWN_ALWAYS` policy except for small, manually supervised
test jobs. If unknown or unspecified, the service will attempt to
choose a reasonable default.
Messages:
MetadataValue: Metadata to set on the Google Compute Engine VMs.
PoolArgsValue: Extra arguments for this worker pool.
Fields:
autoscalingSettings: Settings for autoscaling of this WorkerPool.
dataDisks: Data disks that are used by a VM in this workflow.
defaultPackageSet: The default package set to install. This allows the
service to select a default set of packages which are useful to worker
harnesses written in a particular language.
diskSizeGb: Size of root disk for VMs, in GB. If zero or unspecified, the
service will attempt to choose a reasonable default.
diskSourceImage: Fully qualified source image for disks.
diskType: Type of root disk for VMs. If empty or unspecified, the service
will attempt to choose a reasonable default.
ipConfiguration: Configuration for VM IPs.
kind: The kind of the worker pool; currently only `harness` and `shuffle`
are supported.
machineType: Machine type (e.g. "n1-standard-1"). If empty or
unspecified, the service will attempt to choose a reasonable default.
metadata: Metadata to set on the Google Compute Engine VMs.
network: Network to which VMs will be assigned. If empty or unspecified,
the service will use the network "default".
numThreadsPerWorker: The number of threads per worker harness. If empty or
unspecified, the service will choose a number of threads (according to
the number of cores on the selected machine type for batch, or 1 by
convention for streaming).
numWorkers: Number of Google Compute Engine workers in this pool needed to
execute the job. If zero or unspecified, the service will attempt to
choose a reasonable default.
onHostMaintenance: The action to take on host maintenance, as defined by
the Google Compute Engine API.
packages: Packages to be installed on workers.
poolArgs: Extra arguments for this worker pool.
subnetwork: Subnetwork to which VMs will be assigned, if desired.
Expected to be of the form "regions/REGION/subnetworks/SUBNETWORK".
taskrunnerSettings: Settings passed through to Google Compute Engine
workers when using the standard Dataflow task runner. Users should
ignore this field.
teardownPolicy: Sets the policy for determining when to turn down the worker
pool. Allowed values are: `TEARDOWN_ALWAYS`, `TEARDOWN_ON_SUCCESS`, and
`TEARDOWN_NEVER`. `TEARDOWN_ALWAYS` means workers are always torn down
regardless of whether the job succeeds. `TEARDOWN_ON_SUCCESS` means
workers are torn down if the job succeeds. `TEARDOWN_NEVER` means the
workers are never torn down. If the workers are not torn down by the
service, they will continue to run and use Google Compute Engine VM
resources in the user's project until they are explicitly terminated by
the user. Because of this, Google recommends using the `TEARDOWN_ALWAYS`
policy except for small, manually supervised test jobs. If unknown or
unspecified, the service will attempt to choose a reasonable default.
workerHarnessContainerImage: Required. Docker container image that
executes the Cloud Dataflow worker harness, residing in Google Container
Registry.
zone: Zone to run the worker pools in. If empty or unspecified, the
service will attempt to choose a reasonable default.
"""
class DefaultPackageSetValueValuesEnum(_messages.Enum):
"""The default package set to install. This allows the service to select
a default set of packages which are useful to worker harnesses written in
a particular language.
Values:
DEFAULT_PACKAGE_SET_UNKNOWN: The default set of packages to stage is
unknown, or unspecified.
DEFAULT_PACKAGE_SET_NONE: Indicates that no packages should be staged at
the worker unless explicitly specified by the job.
DEFAULT_PACKAGE_SET_JAVA: Stage packages typically useful to workers
written in Java.
DEFAULT_PACKAGE_SET_PYTHON: Stage packages typically useful to workers
written in Python.
"""
DEFAULT_PACKAGE_SET_UNKNOWN = 0
DEFAULT_PACKAGE_SET_NONE = 1
DEFAULT_PACKAGE_SET_JAVA = 2
DEFAULT_PACKAGE_SET_PYTHON = 3
class IpConfigurationValueValuesEnum(_messages.Enum):
"""Configuration for VM IPs.
Values:
WORKER_IP_UNSPECIFIED: The configuration is unknown, or unspecified.
WORKER_IP_PUBLIC: Workers should have public IP addresses.
WORKER_IP_PRIVATE: Workers should have private IP addresses.
"""
WORKER_IP_UNSPECIFIED = 0
WORKER_IP_PUBLIC = 1
WORKER_IP_PRIVATE = 2
class TeardownPolicyValueValuesEnum(_messages.Enum):
"""Sets the policy for determining when to turndown worker pool. Allowed
values are: `TEARDOWN_ALWAYS`, `TEARDOWN_ON_SUCCESS`, and
`TEARDOWN_NEVER`. `TEARDOWN_ALWAYS` means workers are always torn down
regardless of whether the job succeeds. `TEARDOWN_ON_SUCCESS` means
workers are torn down if the job succeeds. `TEARDOWN_NEVER` means the
workers are never torn down. If the workers are not torn down by the
service, they will continue to run and use Google Compute Engine VM
resources in the user's project until they are explicitly terminated by
the user. Because of this, Google recommends using the `TEARDOWN_ALWAYS`
policy except for small, manually supervised test jobs. If unknown or
unspecified, the service will attempt to choose a reasonable default.
Values:
TEARDOWN_POLICY_UNKNOWN: The teardown policy isn't specified, or is
unknown.
TEARDOWN_ALWAYS: Always teardown the resource.
TEARDOWN_ON_SUCCESS: Teardown the resource on success. This is useful
for debugging failures.
TEARDOWN_NEVER: Never teardown the resource. This is useful for
debugging and development.
"""
TEARDOWN_POLICY_UNKNOWN = 0
TEARDOWN_ALWAYS = 1
TEARDOWN_ON_SUCCESS = 2
TEARDOWN_NEVER = 3
@encoding.MapUnrecognizedFields('additionalProperties')
class MetadataValue(_messages.Message):
"""Metadata to set on the Google Compute Engine VMs.
Messages:
AdditionalProperty: An additional property for a MetadataValue object.
Fields:
additionalProperties: Additional properties of type MetadataValue
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a MetadataValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
@encoding.MapUnrecognizedFields('additionalProperties')
class PoolArgsValue(_messages.Message):
"""Extra arguments for this worker pool.
Messages:
AdditionalProperty: An additional property for a PoolArgsValue object.
Fields:
additionalProperties: Properties of the object. Contains field @type
with type URL.
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a PoolArgsValue object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
autoscalingSettings = _messages.MessageField('AutoscalingSettings', 1)
dataDisks = _messages.MessageField('Disk', 2, repeated=True)
defaultPackageSet = _messages.EnumField('DefaultPackageSetValueValuesEnum', 3)
diskSizeGb = _messages.IntegerField(4, variant=_messages.Variant.INT32)
diskSourceImage = _messages.StringField(5)
diskType = _messages.StringField(6)
ipConfiguration = _messages.EnumField('IpConfigurationValueValuesEnum', 7)
kind = _messages.StringField(8)
machineType = _messages.StringField(9)
metadata = _messages.MessageField('MetadataValue', 10)
network = _messages.StringField(11)
numThreadsPerWorker = _messages.IntegerField(12, variant=_messages.Variant.INT32)
numWorkers = _messages.IntegerField(13, variant=_messages.Variant.INT32)
onHostMaintenance = _messages.StringField(14)
packages = _messages.MessageField('Package', 15, repeated=True)
poolArgs = _messages.MessageField('PoolArgsValue', 16)
subnetwork = _messages.StringField(17)
taskrunnerSettings = _messages.MessageField('TaskRunnerSettings', 18)
teardownPolicy = _messages.EnumField('TeardownPolicyValueValuesEnum', 19)
workerHarnessContainerImage = _messages.StringField(20)
zone = _messages.StringField(21)
class WorkerSettings(_messages.Message):
"""Provides data to pass through to the worker harness.
Fields:
baseUrl: The base URL for accessing Google Cloud APIs. When workers
access Google Cloud APIs, they logically do so via relative URLs. If
this field is specified, it supplies the base URL to use for resolving
these relative URLs. The normative algorithm used is defined by RFC
1808, "Relative Uniform Resource Locators". If not specified, the
default value is "http://www.googleapis.com/"
reportingEnabled: Whether to send work progress updates to the service.
servicePath: The Cloud Dataflow service path relative to the root URL, for
example, "dataflow/v1b3/projects".
shuffleServicePath: The Shuffle service path relative to the root URL, for
example, "shuffle/v1beta1".
tempStoragePrefix: The prefix of the resources the system should use for
temporary storage. The supported resource type is: Google Cloud
Storage: storage.googleapis.com/{bucket}/{object}
bucket.storage.googleapis.com/{object}
workerId: The ID of the worker running this pipeline.
"""
baseUrl = _messages.StringField(1)
reportingEnabled = _messages.BooleanField(2)
servicePath = _messages.StringField(3)
shuffleServicePath = _messages.StringField(4)
tempStoragePrefix = _messages.StringField(5)
workerId = _messages.StringField(6)
class WriteInstruction(_messages.Message):
"""An instruction that writes records. Takes one input, produces no outputs.
Fields:
input: The input.
sink: The sink to write to.
"""
input = _messages.MessageField('InstructionInput', 1)
sink = _messages.MessageField('Sink', 2)
encoding.AddCustomJsonFieldMapping(
StandardQueryParameters, 'f__xgafv', '$.xgafv',
package=u'dataflow')
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_1', '1',
package=u'dataflow')
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_2', '2',
package=u'dataflow')
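# --- Editor's illustrative note; not part of the generated API surface. ---
# With the mappings above, the Python-safe names serialize under their wire
# names; roughly, and assuming apitools' JSON encoding helpers:
#   params = StandardQueryParameters(
#       f__xgafv=StandardQueryParameters.FXgafvValueValuesEnum._1)
#   encoding.MessageToJson(params)  # -> '{"$.xgafv": "1"}'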
| apache-2.0 | 2,631,222,436,543,351,000 | 38.667545 | 102 | 0.732394 | false |
meteokid/python-rpn | share/examples/fst_to_lalo.py | 1 | 4082 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Barbara Casati <[email protected]>
# Author: Stephane Chamberland <[email protected]>
"""
Interpolate RPNSTD rec to latlon points
"""
import sys
import optparse
import numpy as np
from scipy import interpolate
import rpnpy.librmn.all as rmn
if __name__ == "__main__":
inttypelist = {
'n' : rmn.EZ_INTERP_NEAREST,
'l' : rmn.EZ_INTERP_LINEAR,
'c' : rmn.EZ_INTERP_CUBIC
}
# Command line arguments
desc="Interpolate RPNSTD rec to latlon points"
usage = """
%prog -f FSTFILE -n VARNAME -o OUTFILE [-l LOLAFILE] [-t INTTYPE]
LOLAFILE format, one destination point per line:
lon1 lat1
lon2 lat2
...
OUTPUT format
lon1, lat1, value1, extrap
lon2, lat2, value2, extrap
"""
parser = optparse.OptionParser(usage=usage,description=desc)
parser.add_option("-f","--fstfile",dest="fstfile",default="",
help="Name of RPN STD file containing records")
parser.add_option("-n","--varname",dest="varname",default="",
help="Varname of the record to interpolate")
parser.add_option("-l","--lolafile",dest="lolafile",default="/cnfs/dev/mrb/armn/armnbca/MesoVIC/VERA/VERA_8km_coordinates_lam_phi.txt",
help="Name of text file with destination coordinates, one 'lon lat' per line")
parser.add_option("-t","--inttype",dest="inttype",default="linear",
help="Interpolation type: nearest, linear or cubic")
parser.add_option("-o","--outfile",dest="outfile",default="",
help="Output file name")
(options,args) = parser.parse_args()
if not (options.varname and options.fstfile and options.outfile and options.lolafile and options.inttype):
sys.stderr.write('Error: You need to specify a varname, an fst filename, an outfile name and a lolafile name.\n')
parser.print_help()
sys.exit(1)
inttype = options.inttype[0].lower()
if not (inttype in inttypelist.keys()):
sys.stderr.write('Error: INTTYPE should be one of: nearest, linear or cubic.\n')
parser.print_help()
sys.exit(1)
# Open and Read RPN STD file
try:
rmn.fstopt(rmn.FSTOP_MSGLVL, rmn.FSTOPI_MSG_CATAST)
funit = rmn.fstopenall(options.fstfile,rmn.FST_RO)
k = rmn.fstinf(funit,nomvar=options.varname)['key']
data = rmn.fstluk(k)['d']
meta = rmn.fstprm(k)
except:
raise rmn.RMNError('Problem opening/reading var=%s in File=%s' % (options.varname,options.fstfile))
# Define input record grid
try:
meta['iunit'] = funit
grid = rmn.ezqkdef(meta)
except:
raise rmn.RMNError('Problem defining input grid for var=%s in File=%s' % (options.varname,options.fstfile))
# Read lat lon file
try:
(lon,lat) = np.loadtxt(options.lolafile, dtype=np.float32, unpack=True)
## lat = np.asfortranarray(lat, dtype=np.float32)
## lon = np.asfortranarray(lon, dtype=np.float32)
except:
raise IOError('Problem reading the lola file: %s' % (options.lolafile))
# Interpolate input data to lat lon and print
rmn.ezsetopt(rmn.EZ_OPT_INTERP_DEGREE,inttypelist[inttype])
#rmn.ezsetopt(rmn.EZ_OPT_EXTRAP_DEGREE,rmn.EZ_EXTRAP_MAX)
(ni,nj) = data.shape
outfile = open(options.outfile, 'w')
for n in range(lat.size):
(lat2,lon2) = (np.asarray([lat[n]]),np.asarray([lon[n]]))
lldata2 = rmn.gdllsval(grid, lat2, lon2, data)
xypos2 = rmn.gdxyfll(grid, lat2, lon2)
extrap = ''
if (xypos2['x'][0] < 1. or xypos2['x'][0] > ni or
xypos2['y'][0] < 1. or xypos2['y'][0] > nj):
extrap='extrap'
outfile.write("%9.5f, %9.5f, %9.5f, %s\n" %
(lon[n], lat[n], lldata2[0], extrap))
del lldata2, lat2, lon2, xypos2
outfile.close()
# Close the RPN STD file
try:
rmn.fstcloseall(funit)
except:
pass
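# Example invocation (hypothetical file names): interpolate TT with cubic
# interpolation to the points listed one "lon lat" pair per line:
#   python fst_to_lalo.py -f 2009042700_000 -n TT -l points.txt -t cubic -o tt_points.txt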
| lgpl-2.1 | 2,521,296,638,424,310,300 | 36.796296 | 139 | 0.60877 | false |
tlangerak/Multi-Agent-Systems | build/lib.win-amd64-2.7/tlslite/utils/cipherfactory.py | 357 | 3177 | """Factory functions for symmetric cryptography."""
import os
import Python_AES
import Python_RC4
import cryptomath
tripleDESPresent = False
if cryptomath.m2cryptoLoaded:
import OpenSSL_AES
import OpenSSL_RC4
import OpenSSL_TripleDES
tripleDESPresent = True
if cryptomath.cryptlibpyLoaded:
import Cryptlib_AES
import Cryptlib_RC4
import Cryptlib_TripleDES
tripleDESPresent = True
if cryptomath.pycryptoLoaded:
import PyCrypto_AES
import PyCrypto_RC4
import PyCrypto_TripleDES
tripleDESPresent = True
# **************************************************************************
# Factory Functions for AES
# **************************************************************************
def createAES(key, IV, implList=None):
"""Create a new AES object.
@type key: str
@param key: A 16, 24, or 32 byte string.
@type IV: str
@param IV: A 16 byte string
@rtype: L{tlslite.utils.AES}
@return: An AES object.
"""
if implList == None:
implList = ["cryptlib", "openssl", "pycrypto", "python"]
for impl in implList:
if impl == "cryptlib" and cryptomath.cryptlibpyLoaded:
return Cryptlib_AES.new(key, 2, IV)
elif impl == "openssl" and cryptomath.m2cryptoLoaded:
return OpenSSL_AES.new(key, 2, IV)
elif impl == "pycrypto" and cryptomath.pycryptoLoaded:
return PyCrypto_AES.new(key, 2, IV)
elif impl == "python":
return Python_AES.new(key, 2, IV)
raise NotImplementedError()
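# Illustrative usage sketch (not part of the original module); assumes a
# 16-byte key and 16-byte IV have already been derived elsewhere:
#   aes = createAES('\x00' * 16, '\x01' * 16)
#   ciphertext = aes.encrypt('sixteen byte msg')  # length must be a multiple of 16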
def createRC4(key, IV, implList=None):
"""Create a new RC4 object.
@type key: str
@param key: A 16 to 32 byte string.
@type IV: object
@param IV: Ignored, whatever it is.
@rtype: L{tlslite.utils.RC4}
@return: An RC4 object.
"""
if implList == None:
implList = ["cryptlib", "openssl", "pycrypto", "python"]
if len(IV) != 0:
raise AssertionError()
for impl in implList:
if impl == "cryptlib" and cryptomath.cryptlibpyLoaded:
return Cryptlib_RC4.new(key)
elif impl == "openssl" and cryptomath.m2cryptoLoaded:
return OpenSSL_RC4.new(key)
elif impl == "pycrypto" and cryptomath.pycryptoLoaded:
return PyCrypto_RC4.new(key)
elif impl == "python":
return Python_RC4.new(key)
raise NotImplementedError()
#Create a new TripleDES instance
def createTripleDES(key, IV, implList=None):
"""Create a new 3DES object.
@type key: str
@param key: A 24 byte string.
@type IV: str
@param IV: An 8 byte string
@rtype: L{tlslite.utils.TripleDES}
@return: A 3DES object.
"""
if implList == None:
implList = ["cryptlib", "openssl", "pycrypto"]
for impl in implList:
if impl == "cryptlib" and cryptomath.cryptlibpyLoaded:
return Cryptlib_TripleDES.new(key, 2, IV)
elif impl == "openssl" and cryptomath.m2cryptoLoaded:
return OpenSSL_TripleDES.new(key, 2, IV)
elif impl == "pycrypto" and cryptomath.pycryptoLoaded:
return PyCrypto_TripleDES.new(key, 2, IV)
raise NotImplementedError() | lgpl-2.1 | 8,611,247,989,210,163,000 | 27.630631 | 76 | 0.60938 | false |
ff94315/hiwifi-openwrt-HC5661-HC5761 | staging_dir/target-mipsel_r2_uClibc-0.9.33.2/usr/lib/python2.7/idlelib/configSectionNameDialog.py | 150 | 3720 | """
Dialog that allows user to specify a new config file section name.
Used to get new highlight theme and keybinding set names.
"""
from Tkinter import *
import tkMessageBox
class GetCfgSectionNameDialog(Toplevel):
def __init__(self,parent,title,message,usedNames):
"""
message - string, informational message to display
usedNames - list, list of names already in use for validity check
"""
Toplevel.__init__(self, parent)
self.configure(borderwidth=5)
self.resizable(height=FALSE,width=FALSE)
self.title(title)
self.transient(parent)
self.grab_set()
self.protocol("WM_DELETE_WINDOW", self.Cancel)
self.parent = parent
self.message=message
self.usedNames=usedNames
self.result=''
self.CreateWidgets()
self.withdraw() #hide while setting geometry
self.update_idletasks()
#needs to be done here so that the winfo_reqwidth is valid
self.messageInfo.config(width=self.frameMain.winfo_reqwidth())
self.geometry("+%d+%d" %
((parent.winfo_rootx()+((parent.winfo_width()/2)
-(self.winfo_reqwidth()/2)),
parent.winfo_rooty()+((parent.winfo_height()/2)
-(self.winfo_reqheight()/2)) )) ) #centre dialog over parent
self.deiconify() #geometry set, unhide
self.wait_window()
def CreateWidgets(self):
self.name=StringVar(self)
self.fontSize=StringVar(self)
self.frameMain = Frame(self,borderwidth=2,relief=SUNKEN)
self.frameMain.pack(side=TOP,expand=TRUE,fill=BOTH)
self.messageInfo=Message(self.frameMain,anchor=W,justify=LEFT,padx=5,pady=5,
text=self.message)#,aspect=200)
entryName=Entry(self.frameMain,textvariable=self.name,width=30)
entryName.focus_set()
self.messageInfo.pack(padx=5,pady=5)#,expand=TRUE,fill=BOTH)
entryName.pack(padx=5,pady=5)
frameButtons=Frame(self)
frameButtons.pack(side=BOTTOM,fill=X)
self.buttonOk = Button(frameButtons,text='Ok',
width=8,command=self.Ok)
self.buttonOk.grid(row=0,column=0,padx=5,pady=5)
self.buttonCancel = Button(frameButtons,text='Cancel',
width=8,command=self.Cancel)
self.buttonCancel.grid(row=0,column=1,padx=5,pady=5)
def NameOk(self):
#simple validity check for a sensible
#ConfigParser file section name
nameOk=1
name=self.name.get()
name.strip()
if not name: #no name specified
tkMessageBox.showerror(title='Name Error',
message='No name specified.', parent=self)
nameOk=0
elif len(name)>30: #name too long
tkMessageBox.showerror(title='Name Error',
message='Name too long. It should be no more than '+
'30 characters.', parent=self)
nameOk=0
elif name in self.usedNames:
tkMessageBox.showerror(title='Name Error',
message='This name is already in use.', parent=self)
nameOk=0
return nameOk
def Ok(self, event=None):
if self.NameOk():
self.result=self.name.get().strip()
self.destroy()
def Cancel(self, event=None):
self.result=''
self.destroy()
if __name__ == '__main__':
#test the dialog
root=Tk()
def run():
keySeq=''
        dlg=GetCfgSectionNameDialog(root,'Get Name',
                'The information here should need to be word wrapped. Test.',
                ['used','names']) #usedNames argument is required by __init__
print dlg.result
Button(root,text='Dialog',command=run).pack()
root.mainloop()
| gpl-2.0 | -5,951,943,864,343,026,000 | 37.350515 | 84 | 0.606452 | false |
osiell/server-tools | base_user_reset_access/tests/test_base_user_reset_access.py | 21 | 1728 | # -*- coding: utf-8 -*-
##############################################################################
#
# This file is part of base_user_reset_access,
# an Odoo module.
#
# Copyright (c) 2015 ACSONE SA/NV (<http://acsone.eu>)
#
# base_user_reset_access is free software:
# you can redistribute it and/or modify it under the terms of the GNU
# Affero General Public License as published by the Free Software
# Foundation,either version 3 of the License, or (at your option) any
# later version.
#
# base_user_reset_access is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with base_user_reset_access.
# If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.tests import common
class TestResetUserAccessRight(common.TransactionCase):
def setUp(self):
super(TestResetUserAccessRight, self).setUp()
self.user_obj = self.env['res.users']
def test_reset_demo_user_access_right(self):
# I get the demo user
demo_user = self.env.ref('base.user_demo')
demo_user.groups_id = [(4, self.ref('base.group_no_one'))]
demo_user.reset_access_right()
default_groups_ids = self.user_obj._get_group()
# I check if access right on this user are reset
self.assertEquals(set(demo_user.groups_id.ids),
set(default_groups_ids))
| agpl-3.0 | 8,368,598,698,231,628,000 | 39.186047 | 78 | 0.605324 | false |
Shnatsel/cjdns | node_build/dependencies/libuv/build/gyp/test/win/gyptest-link-generate-manifest.py | 238 | 4708 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure we generate a manifest file when linking binaries, including
handling AdditionalManifestFiles.
"""
import TestGyp
import sys
if sys.platform == 'win32':
import pywintypes
import win32api
import winerror
RT_MANIFEST = 24
class LoadLibrary(object):
"""Context manager for loading and releasing binaries in Windows.
Yields the handle of the binary loaded."""
def __init__(self, path):
self._path = path
self._handle = None
def __enter__(self):
self._handle = win32api.LoadLibrary(self._path)
return self._handle
def __exit__(self, type, value, traceback):
win32api.FreeLibrary(self._handle)
def extract_manifest(path, resource_name):
"""Reads manifest from |path| and returns it as a string.
    Returns None if there is no such manifest."""
with LoadLibrary(path) as handle:
try:
return win32api.LoadResource(handle, RT_MANIFEST, resource_name)
except pywintypes.error as error:
if error.args[0] == winerror.ERROR_RESOURCE_DATA_NOT_FOUND:
return None
else:
raise
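  # Illustrative example (not part of the original test): extract_manifest()
  # can be used on its own to dump the default (id 1) manifest of a linked
  # binary; the path below is hypothetical.
  #
  #   xml = extract_manifest(r'out\Default\some_app.exe', 1)
  #   if xml is not None:
  #     print xml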
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
CHDIR = 'linker-flags'
test.run_gyp('generate-manifest.gyp', chdir=CHDIR)
test.build('generate-manifest.gyp', test.ALL, chdir=CHDIR)
# Make sure that generation of .generated.manifest does not cause a relink.
test.run_gyp('generate-manifest.gyp', chdir=CHDIR)
test.up_to_date('generate-manifest.gyp', test.ALL, chdir=CHDIR)
def test_manifest(filename, generate_manifest, embedded_manifest,
extra_manifest):
exe_file = test.built_file_path(filename, chdir=CHDIR)
if not generate_manifest:
test.must_not_exist(exe_file + '.manifest')
manifest = extract_manifest(exe_file, 1)
test.fail_test(manifest)
return
if embedded_manifest:
manifest = extract_manifest(exe_file, 1)
test.fail_test(not manifest)
else:
test.must_exist(exe_file + '.manifest')
manifest = test.read(exe_file + '.manifest')
test.fail_test(not manifest)
test.fail_test(extract_manifest(exe_file, 1))
if generate_manifest:
test.must_contain_any_line(manifest, 'requestedExecutionLevel')
if extra_manifest:
test.must_contain_any_line(manifest,
'35138b9a-5d96-4fbd-8e2d-a2440225f93a')
test.must_contain_any_line(manifest,
'e2011457-1546-43c5-a5fe-008deee3d3f0')
test_manifest('test_generate_manifest_true.exe',
generate_manifest=True,
embedded_manifest=False,
extra_manifest=False)
test_manifest('test_generate_manifest_false.exe',
generate_manifest=False,
embedded_manifest=False,
extra_manifest=False)
test_manifest('test_generate_manifest_default.exe',
generate_manifest=True,
embedded_manifest=False,
extra_manifest=False)
test_manifest('test_generate_manifest_true_as_embedded.exe',
generate_manifest=True,
embedded_manifest=True,
extra_manifest=False)
test_manifest('test_generate_manifest_false_as_embedded.exe',
generate_manifest=False,
embedded_manifest=True,
extra_manifest=False)
test_manifest('test_generate_manifest_default_as_embedded.exe',
generate_manifest=True,
embedded_manifest=True,
extra_manifest=False)
test_manifest('test_generate_manifest_true_with_extra_manifest.exe',
generate_manifest=True,
embedded_manifest=False,
extra_manifest=True)
test_manifest('test_generate_manifest_false_with_extra_manifest.exe',
generate_manifest=False,
embedded_manifest=False,
extra_manifest=True)
test_manifest('test_generate_manifest_true_with_extra_manifest_list.exe',
generate_manifest=True,
embedded_manifest=False,
extra_manifest=True)
test_manifest('test_generate_manifest_false_with_extra_manifest_list.exe',
generate_manifest=False,
embedded_manifest=False,
extra_manifest=True)
test_manifest('test_generate_manifest_default_embed_default.exe',
generate_manifest=True,
embedded_manifest=True,
extra_manifest=False)
test.pass_test()
| gpl-3.0 | 1,823,264,268,685,988,400 | 36.070866 | 77 | 0.636151 | false |
cedadev/cloudhands-web | cloudhands/web/main.py | 1 | 41523 | #!/usr/bin/env python3
# encoding: UTF-8
import argparse
import datetime
import functools
import logging
import operator
import os.path
import platform
import re
import sqlite3
import sys
import uuid
import bcrypt
from pyramid.authentication import AuthTktAuthenticationPolicy
from pyramid.authorization import ACLAuthorizationPolicy
from pyramid.config import Configurator
from pyramid.exceptions import Forbidden
from pyramid.exceptions import NotFound
from pyramid.httpexceptions import HTTPBadRequest
from pyramid.httpexceptions import HTTPClientError
from pyramid.httpexceptions import HTTPCreated
from pyramid.httpexceptions import HTTPFound
from pyramid.httpexceptions import HTTPInternalServerError
from pyramid.interfaces import IAuthenticationPolicy
from pyramid.renderers import JSON
from pyramid.security import authenticated_userid
from pyramid.security import forget
from pyramid.security import remember
from pyramid_authstack import AuthenticationStackPolicy
from pyramid_macauth import MACAuthenticationPolicy
from sqlalchemy import desc
from waitress import serve
from cloudhands.common.connectors import initialise
from cloudhands.common.connectors import Registry
from cloudhands.common.discovery import settings
import cloudhands.common.factories
from cloudhands.common.pipes import SimplePipeQueue
from cloudhands.common.schema import Appliance
from cloudhands.common.schema import BcryptedPassword
from cloudhands.common.schema import CatalogueChoice
from cloudhands.common.schema import CatalogueItem
from cloudhands.common.schema import EmailAddress
from cloudhands.common.schema import Host
from cloudhands.common.schema import Label
from cloudhands.common.schema import Membership
from cloudhands.common.schema import Organisation
from cloudhands.common.schema import OSImage
from cloudhands.common.schema import PosixUId
from cloudhands.common.schema import PosixUIdNumber
from cloudhands.common.schema import PosixGId
from cloudhands.common.schema import Provider
from cloudhands.common.schema import PublicKey
from cloudhands.common.schema import Registration
from cloudhands.common.schema import Resource
from cloudhands.common.schema import Serializable
from cloudhands.common.schema import Subscription
from cloudhands.common.schema import State
from cloudhands.common.schema import Touch
from cloudhands.common.schema import User
from cloudhands.common.states import ApplianceState
from cloudhands.common.states import HostState
from cloudhands.common.states import MembershipState
from cloudhands.common.states import RegistrationState
import cloudhands.web
from cloudhands.identity.ldap_account import change_password
from cloudhands.identity.ldap_account import next_uidnumber
from cloudhands.identity.membership import handle_from_email
from cloudhands.identity.membership import Acceptance
from cloudhands.identity.membership import Invitation
from cloudhands.identity.registration import NewAccount
from cloudhands.identity.registration import NewPassword
from cloudhands.web.catalogue import CatalogueItemView
from cloudhands.web.indexer import people
from cloudhands.web import __version__
from cloudhands.web.model import BcryptedPasswordView
from cloudhands.web.model import HostView
from cloudhands.web.model import LabelView
from cloudhands.web.model import MembershipView
from cloudhands.web.model import Page
from cloudhands.web.model import PageInfo
from cloudhands.web.model import PeoplePage
from cloudhands.web.model import PublicKeyView
from cloudhands.web.model import RegistrationView
from cloudhands.web.model import StateView
DFLT_PORT = 8080
DFLT_DB = ":memory:"
DFLT_IX = "cloudhands.wsh"
CRED_TABLE = {}
def cfg_paths(request, cfg=None):
cfg = cfg or {
"paths.assets": dict(
css = "cloudhands.web:static/css",
html = "cloudhands.web:static/html",
img = "cloudhands.web:static/img",
js = "cloudhands.web:static/js")
}
return {p: os.path.dirname(request.static_url(
'/'.join((cfg["paths.assets"][p], f))))
for p, f in (
("css", "any.css"), ("js", "any.js"), ("img", "any.png"))}
def registered_connection(request):
r = Registry()
return r.connect(*next(iter(r.items)))
def authenticate_user(request, refuse:Exception=None):
userId = authenticated_userid(request)
if refuse and userId is None:
raise refuse("Authentication failure")
con = registered_connection(request)
# Persona's user ids are email addresses, whereas Pyramid auth uses
# user names. We want to test for either.
user = (con.session.query(User).filter(User.handle == userId).first() or
con.session.query(User).join(Touch).join(
EmailAddress).filter(EmailAddress.value == userId).first())
if refuse and not user:
nf = refuse("User not found for {}".format(userId))
nf.userId = userId
raise nf
return user
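# Illustrative call patterns (not part of the original module), mirroring how
# the views below use this helper:
#
#   user = authenticate_user(request)              # returns None if anonymous
#   user = authenticate_user(request, Forbidden)   # raises Forbidden instead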
def create_membership_resources(session, m, rTyp, vals):
provider = session.query(Provider).first() # FIXME
latest = m.changes[-1]
for v in vals:
resource = rTyp(value=v, provider=provider)
now = datetime.datetime.utcnow()
act = Touch(artifact=m, actor=latest.actor, state=latest.state, at=now)
m.changes.append(act)
resource.touch = act
try:
session.add(resource)
session.commit()
except Exception as e:
session.rollback()
finally:
yield session.query(rTyp).filter(
rTyp.value == v, rTyp.provider == provider).first()
def datetime_adapter(obj, request):
return str(obj)
def regex_adapter(obj, request):
return obj.pattern
def record_adapter(obj, request):
rv = obj.as_dict()
try:
del rv["id"]
except KeyError:
pass
return rv
def touch_adapter(obj, request):
return {
"at": obj.at,
"state": {
"fsm": obj.state.fsm,
"name": obj.state.name
}
}
class LoginForbidden(Forbidden): pass
class RegistrationForbidden(Forbidden): pass
def top_read(request):
log = logging.getLogger("cloudhands.web.top_read")
con = registered_connection(request)
user = authenticate_user(request)
page = Page(
session=con.session, user=user,
paths=cfg_paths(request, request.registry.settings.get("cfg", None)))
page.layout.info.push(PageInfo(refresh=30))
if user:
mships = con.session.query(Membership).join(Touch).join(User).filter(
User.id==user.id).all()
reg = con.session.query(Registration).join(Touch).join(User).filter(
User.uuid == user.uuid).first()
if reg:
page.layout.nav.push(reg)
else:
mships = []
for org in sorted(
{i.organisation for i in mships}, key=operator.attrgetter("name")
):
page.layout.nav.push(org)
for act in con.session.query(Touch).order_by(desc(Touch.at)).limit(5):
page.layout.items.push(act)
return dict(page.termination())
def appliance_read(request):
log = logging.getLogger("cloudhands.web.appliance_read")
con = registered_connection(request)
user = authenticate_user(request)
appUuid = request.matchdict["app_uuid"]
app = con.session.query(Appliance).filter(
Appliance.uuid == appUuid).first()
if not app:
raise NotFound("Appliance {} not found".format(appUuid))
page = Page(
session=con.session, user=user,
paths=cfg_paths(request, request.registry.settings.get("cfg", None)))
page.layout.info.push(PageInfo(
title="Configure appliance",
url=request.route_url("appliance", app_uuid=appUuid)))
if user is not None:
user = con.session.merge(user)
mships = con.session.query(Membership).join(Touch).join(User).filter(
User.id==user.id).all()
for o in sorted(
{i.organisation for i in mships}, key=operator.attrgetter("name")
):
page.layout.nav.push(o)
else:
page.layout.nav.push(app.organisation)
resources = [r for c in app.changes for r in c.resources]
for i in resources:
page.layout.items.push(i)
if not any(i for i in resources if isinstance(i, Label)):
label = Label()
label.uuid = appUuid
page.layout.items.push(label)
# option for public IP address
return dict(page.termination())
def appliance_modify(request):
log = logging.getLogger("cloudhands.web.appliance_modify")
con = registered_connection(request)
user = con.session.merge(authenticate_user(request, Forbidden))
appUuid = request.matchdict["app_uuid"]
app = con.session.query(Appliance).filter(
Appliance.uuid == appUuid).first()
if not app:
raise NotFound("Appliance {} not found".format(appUuid))
now = datetime.datetime.utcnow()
data = StateView(request.POST)
if data.invalid:
data = LabelView(request.POST)
if data.invalid:
raise HTTPBadRequest(
"Bad value in '{}' field".format(data.invalid[0].name))
else:
pre_provision = con.session.query(ApplianceState).filter(
ApplianceState.name == "pre_provision").one()
act = Touch(artifact=app, actor=user, state=pre_provision, at=now)
label = Label(
name=data["name"], description=data["description"],
touch=act)
con.session.add(label)
con.session.commit()
else:
state = con.session.query(State).filter(
State.fsm == data["fsm"]).filter(
State.name == data["name"]).first()
if state is None:
raise HTTPBadRequest(
"No such state {fsm} {name}".format(**data))
else:
act = Touch(artifact=app, actor=user, state=state, at=now)
con.session.add(act)
con.session.commit()
raise HTTPFound(
location=request.route_url(
"organisation", org_name=app.organisation.name))
def host_update(request):
log = logging.getLogger("cloudhands.web.host_update")
con = registered_connection(request)
user = con.session.merge(authenticate_user(request))
hUuid = request.matchdict["host_uuid"]
host = con.session.query(Host).filter(
Host.uuid == hUuid).first()
if not host:
raise NotFound("Host {} not found".format(hUuid))
try:
oN = host.organisation.name
except Exception as e:
log.debug(e)
raise NotFound("Organisation not found for host {}".format(hUuid))
data = StateView(request.POST)
try:
badField = data.invalid[0].name
log.debug(request.POST)
log.debug(data)
raise HTTPBadRequest(
"Bad value in '{}' field".format(badField))
except (IndexError, AttributeError):
if data["fsm"] != "host":
raise HTTPBadRequest(
"Bad FSM value: {}".format(data["fsm"]))
state = con.session.query(HostState).filter(
HostState.name==data["name"]).first()
if not state:
raise NotFound("No such state '{}'".format(data["name"]))
now = datetime.datetime.utcnow()
act = Touch(artifact=host, actor=user, state=state, at=now)
host.changes.append(act)
try:
con.session.commit()
except Exception as e:
log.debug(e)
con.session.rollback()
raise HTTPFound(
location=request.route_url("organisation", org_name=oN))
def login_read(request):
log = logging.getLogger("cloudhands.web.login_read")
username = dict(request.GET).get("username", "")
page = Page(
paths=cfg_paths(request, request.registry.settings.get("cfg", None)))
if getattr(request, "exception", None) is not None:
page.layout.info.push(request.exception)
user = User(
uuid=uuid.uuid4().hex,
handle=username)
page.layout.options.push(user)
return dict(page.termination())
def login_update(request):
log = logging.getLogger("cloudhands.web.login_update")
con = registered_connection(request)
data = RegistrationView(request.POST)
if data.invalid:
raise HTTPBadRequest(
"Bad value in '{}' field".format(data.invalid[0].name))
user = con.session.query(User).filter(
User.handle == data["username"]).first()
if not user:
raise HTTPClientError("User {} not found".format(data["username"]))
# Find the most recent valid registration for this user
reg = con.session.query(Registration).join(Touch).join(User).join(
State).filter(User.handle == data["username"]).filter(
        ~State.name.in_(("expired", "withdrawn"))).order_by(  # SQL-level NOT IN; Python 'not in' would not filter
desc(Touch.at)).first()
if not reg:
raise HTTPInternalServerError(
"No valid registration found for {}".format(user.handle))
try:
passwords = sorted(
((c.at, r) for c in reg.changes for r in c.resources
if isinstance(r, BcryptedPassword)),
reverse=True)
hash = passwords[0][1].value
except (AttributeError, IndexError):
raise HTTPInternalServerError(
"Registration {} is missing a password".format(reg.uuid))
if bcrypt.checkpw(data["password"], hash):
headers = remember(request, user.handle)
latest = reg.changes[-1]
if latest.state.name == "pre_user_posixaccount":
taken = {i.value for i in con.session.query(PosixUIdNumber).all()}
uidN = next_uidnumber(taken=taken)
if uidN is None:
raise HTTPInternalServerError(
"UIdNumber could not be allocated")
else:
log.info("Allocating user id number {}".format(uidN))
latest = NewAccount(user, uidN, reg)(con.session)
# TODO: check state and report error
if latest.state.name in ("user_posixaccount", "valid"):
# FIXME: Temporary workaround for race condition (bug #380)
try:
uids = sorted(
((c.at, r) for c in reg.changes for r in c.resources
if isinstance(r, PosixUId)),
reverse=True)
uid = uids[0][1].value
status = change_password(uid, data["password"], timeout=3)
except (AttributeError, IndexError):
raise HTTPInternalServerError(
"Registration {} is missing a uid".format(reg.uuid))
else:
if status is None:
raise HTTPInternalServerError(
"Unable to create password-protected account")
try:
config = request.registry.settings["cfg"]
pxUId = con.session.query(PosixUId).join(Touch).join(
Registration).filter(Registration.uuid == reg.uuid).first()
providers = con.session.query(Provider).join(Subscription).join(
Organisation).join(Membership).join(Touch).join(User).filter(
User.id == user.id).all()
for provider in providers:
# TODO: pipes will be one per provider
path = os.path.expanduser(config["pipe.tokens"]["vcloud"])
msg = (reg.uuid, provider.name, pxUId.value, data["password"])
pq = SimplePipeQueue.pipequeue(path)
pq.put_nowait(msg)
pq.close()
except Exception as e:
log.error(e)
raise HTTPFound(
location = request.route_url("top"), headers = headers)
else:
raise LoginForbidden("Login failed. Please try again.")
def logout_update(request):
log = logging.getLogger("cloudhands.web.logout_update")
headers = forget(request)
log.debug(headers)
raise HTTPFound(
location = request.route_url("top"), headers = headers)
def membership_read(request):
log = logging.getLogger("cloudhands.web.membership_read")
con = registered_connection(request)
user = authenticate_user(request) # NB: may be None
m_uuid = request.matchdict["mship_uuid"]
mship = con.session.query(Membership).filter(
Membership.uuid == m_uuid).first()
if mship is None:
raise NotFound("Membership {} not found".format(m_uuid))
if mship.changes and mship.changes[-1].state.name == "invited":
act = Acceptance(mship, user)(con.session)
log.debug(act)
guest_uuid = act.actor.uuid
reg = con.session.query(Registration).join(Touch).join(User).filter(
User.uuid == guest_uuid).first()
if not reg:
raise NotFound("Registration not found for {}".format(guest_uuid))
else:
raise HTTPFound(
location=request.route_url("registration", reg_uuid=reg.uuid))
page = Page(
session=con.session, user=user,
paths=cfg_paths(request, request.registry.settings.get("cfg", None)))
rsrcs = con.session.query(Resource).join(Touch).join(Membership).filter(
Membership.uuid == m_uuid).all()
for r in rsrcs:
page.layout.items.push(r)
page.layout.options.push(mship)
return dict(page.termination())
def membership_update(request):
log = logging.getLogger("cloudhands.web.membership_update")
user = authenticate_user(request)
con = registered_connection(request)
m_uuid = request.matchdict["mship_uuid"]
mship = con.session.query(Membership).filter(
Membership.uuid == m_uuid).first()
if not mship:
raise NotFound()
prvlg = con.session.query(Membership).join(Organisation).join(
Touch).join(User).filter(
User.id == user.id).filter(
Organisation.id == mship.organisation.id).filter(
Membership.role == "admin").first()
if not prvlg or not prvlg.changes[-1].state.name in ("accepted", "active"):
raise Forbidden("Admin privilege is required to update membership.")
index = request.registry.settings["args"].index
query = dict(request.POST).get("designator", "") # TODO: validate
try:
p = next(people(index, query, field="id"))
except:
raise Forbidden("LDAP record not accessible.")
for typ, vals in zip(
(PosixUId, PosixGId, PublicKey), ([p.uid], p.gids, p.keys)
):
for r in create_membership_resources(con.session, mship, typ, vals):
log.debug(r)
raise HTTPFound(
location=request.route_url("membership", mship_uuid=m_uuid))
def organisation_read(request):
log = logging.getLogger("cloudhands.web.organisation_read")
con = registered_connection(request)
user = con.session.merge(authenticate_user(request, Forbidden))
page = Page(
session=con.session, user=user,
paths=cfg_paths(request, request.registry.settings.get("cfg", None)))
mships = con.session.query(Membership).join(Touch).join(User).filter(
User.id==user.id).all()
oN = request.matchdict["org_name"]
org = con.session.query(Organisation).filter(
Organisation.name == oN).first()
if not org:
raise NotFound("Organisation not found for {}".format(oN))
reg = con.session.query(Registration).join(Touch).join(User).filter(
User.uuid == user.uuid).first()
page.layout.nav.push(reg)
for o in sorted(
{i.organisation for i in mships},
key=operator.attrgetter("name")
):
page.layout.nav.push(o, isSelf=o is org)
refresh = 300
seconds = {
"pre_provision": 5,
"provisioning": 15,
"pre_check": 2,
"pre_delete": 2,
"pre_start": 2,
"pre_stop": 2,
"pre_operational": 5,
"operational": 60,
}
for t, s, a in sorted((
(a.changes[-1].at, a.changes[-1].state.name, a)
for a in org.appliances),
reverse=True
):
refresh = min(refresh, seconds.get(s, 300))
page.layout.items.push(a)
page.layout.info.push(PageInfo(title=oN, refresh=refresh))
mships = con.session.query(Membership).join(Organisation).join(
Touch).join(State).join(User).filter(
User.id == user.id).filter(
Organisation.id == org.id).all()
for m in mships:
page.layout.options.push(m, session=con.session)
return dict(page.termination())
def organisation_catalogue_read(request):
log = logging.getLogger("cloudhands.web.organisation_catalogue_read")
con = registered_connection(request)
user = con.session.merge(authenticate_user(request, Forbidden))
page = Page(
session=con.session, user=user,
paths=cfg_paths(request, request.registry.settings.get("cfg", None)))
mships = con.session.query(Membership).join(Touch).join(User).filter(
User.id==user.id).all()
oN = request.matchdict["org_name"]
org = con.session.query(Organisation).filter(
Organisation.name == oN).first()
if not org:
raise NotFound("Organisation not found for {}".format(oN))
else:
page.layout.info.push(PageInfo(title=oN))
reg = con.session.query(Registration).join(Touch).join(User).filter(
User.uuid == user.uuid).first()
page.layout.nav.push(reg)
for o in sorted(
{i.organisation for i in mships}, key=operator.attrgetter("name")
):
page.layout.nav.push(o, isSelf=o is org)
for i in org.catalogue:
page.layout.items.push(i)
return dict(page.termination())
# TODO: Remove
def organisation_hosts_create(request):
log = logging.getLogger("cloudhands.web.organisation_hosts_create")
userId = authenticated_userid(request)
if userId is None:
raise Forbidden()
con = registered_connection(request)
user = con.session.query(User).join(Touch).join(
EmailAddress).filter(EmailAddress.value == userId).first()
if not user:
raise NotFound("User not found for {}".format(userId))
data = HostView(request.POST)
if data.invalid:
log.debug(request.POST)
log.debug(data)
raise HTTPBadRequest(
"Bad value in '{}' field".format(data.invalid[0].name))
oN = request.matchdict["org_name"]
if data["jvo"] != oN:
raise HTTPBadRequest("Mismatched organisation field")
org = con.session.query(Organisation).filter(
Organisation.name == oN).first()
if not org:
raise NotFound("Organisation '{}' not found".format(oN))
now = datetime.datetime.utcnow()
requested = con.session.query(HostState).filter(
HostState.name == "requested").one()
host = Host(
uuid=uuid.uuid4().hex,
model=cloudhands.common.__version__,
organisation=org,
name=data["name"]
)
act = Touch(artifact=host, actor=user, state=requested, at=now)
host.changes.append(act)
con.session.add(OSImage(name=data["image"], touch=act))
log.info(host)
con.session.add(host)
con.session.commit()
raise HTTPFound(
location=request.route_url("organisation", org_name=oN))
def organisation_appliances_create(request):
log = logging.getLogger("cloudhands.web.organisation_appliances_create")
con = registered_connection(request)
user = con.session.merge(authenticate_user(request, Forbidden))
data = CatalogueItemView(request.POST)
if data.invalid:
log.debug(request.POST)
log.debug(data)
raise HTTPBadRequest(
"Bad value in '{}' field".format(data.invalid[0].name))
oN = request.matchdict["org_name"]
org = con.session.query(Organisation).filter(
Organisation.name == oN).first()
if not org:
raise NotFound("Organisation '{}' not found".format(oN))
now = datetime.datetime.utcnow()
configuring = con.session.query(ApplianceState).filter(
ApplianceState.name == "configuring").one()
app = Appliance(
uuid=uuid.uuid4().hex,
model=cloudhands.common.__version__,
organisation=org,
)
act = Touch(artifact=app, actor=user, state=configuring, at=now)
tmplt = con.session.query(CatalogueItem).filter(
CatalogueItem.uuid == data["uuid"]).first()
choice = CatalogueChoice(
provider=None, touch=act,
**{k: getattr(tmplt, k, None)
for k in ("name", "description", "logo", "natrouted")})
con.session.add(choice)
con.session.commit()
raise HTTPFound(
location=request.route_url("appliance", app_uuid=app.uuid))
def organisation_memberships_create(request):
log = logging.getLogger("cloudhands.web.organisation_memberships_create")
cfg = request.registry.settings.get("cfg", None)
con = registered_connection(request)
data = MembershipView(request.POST)
if data.invalid:
log.debug(request.POST)
log.debug(data)
raise HTTPBadRequest(
"Bad value in '{}' field".format(data.invalid[0].name))
oN = request.matchdict["org_name"]
org = con.session.query(Organisation).filter(
Organisation.name == oN).first()
if not org:
raise NotFound("Organisation '{}' not found".format(oN))
admin = con.session.merge(authenticate_user(request, Forbidden))
invite = Invitation(
admin, org,
data["username"], data["surname"], data["email"]
)(con.session)
if not invite:
raise Forbidden("User {} lacks permission.".format(admin.handle))
else:
log.debug(invite.artifact)
# TODO: calculate this location from membership_read view
locn = request.route_url(
"membership", mship_uuid=invite.artifact.uuid)
raise HTTPFound(location=request.static_url(
"{}/membership-confirm.html".format(
cfg["paths.assets"]["html"])))
def people_read(request):
log = logging.getLogger("cloudhands.web.people")
userId = authenticated_userid(request)
if userId is None:
raise Forbidden()
con = registered_connection(request)
user = con.session.query(User).join(Touch).join(
EmailAddress).filter(EmailAddress.value == userId).first()
page = PeoplePage(
session=con.session, user=user,
paths=cfg_paths(request, request.registry.settings.get("cfg", None)))
index = request.registry.settings["args"].index
query = dict(request.GET).get("description", "") # TODO: validate
try:
for p in people(index, query):
page.layout.items.push(p)
except Exception:
log.warning("No access to index {}".format(index))
raise HTTPInternalServerError(
location=request.route_url("people"),
detail="Temporary loss of index. Please try again later.")
return dict(page.termination())
def macauth_creds(request):
userId = authenticated_userid(request)
if userId is None:
raise Forbidden()
# Get a reference to the MACAuthenticationPolicy plugin.
stack = request.registry.getUtility(IAuthenticationPolicy)
policy = stack.policies["apimac"]
try:
id, key = CRED_TABLE[userId]
except KeyError:
id, key = policy.encode_mac_id(request, userId)
CRED_TABLE[userId] = (id, key)
return {"id": id, "key": key}
def registration_passwords(request):
log = logging.getLogger("cloudhands.web.registration_passwords")
con = registered_connection(request)
cfg = request.registry.settings.get("cfg", None)
reg_uuid = request.matchdict["reg_uuid"]
reg = con.session.query(Registration).filter(
Registration.uuid == reg_uuid).first()
if not reg:
raise NotFound("Registration {} not found".format(reg_uuid))
# This page can be visited while unauthenticated but only in the
# first phase of the onboarding process.
sName = reg.changes[-1].state.name
if sName == "pre_registration_person":
user = reg.changes[0].actor
else:
user = con.session.merge(authenticate_user(request, Forbidden))
if not user is reg.changes[0].actor:
raise Forbidden(
"You are not authorized to modify this registration.")
data = BcryptedPasswordView(request.POST)
if data.invalid:
bad = data.invalid[0].name
if bad == "password":
raise RegistrationForbidden(
"The password you entered does not conform to requirements."
" Please choose again.")
else:
raise HTTPBadRequest("Bad value in '{}' field".format(bad))
act = NewPassword(user, data["password"], reg)(con.session)
raise HTTPFound(location=request.route_url(
"login", _query={"username": user.handle}))
def registration_keys(request):
log = logging.getLogger("cloudhands.web.registration_keys")
reg_uuid = request.matchdict["reg_uuid"]
con = registered_connection(request)
reg = con.session.query(Registration).filter(
Registration.uuid == reg_uuid).first()
if not reg:
raise NotFound("Registration {} not found".format(reg_uuid))
user = con.session.merge(authenticate_user(request, Forbidden))
if not user is reg.changes[0].actor:
raise Forbidden(
"You are not authorized to modify this registration.")
data = PublicKeyView(request.POST)
if data.invalid:
raise HTTPBadRequest(
"Bad value in '{}' field".format(data.invalid[0].name))
now = datetime.datetime.utcnow()
state = reg.changes[-1].state
act = Touch(artifact=reg, actor=user, state=state, at=now)
key = PublicKey(touch=act, value=data["value"].strip())
con.session.add(key)
con.session.commit()
raise HTTPFound(
location=request.route_url("registration", reg_uuid=reg.uuid))
def registration_read(request):
log = logging.getLogger("cloudhands.web.registration_read")
con = registered_connection(request)
reg_uuid = request.matchdict["reg_uuid"]
reg = con.session.query(Registration).filter(
Registration.uuid == reg_uuid).first()
if not reg:
raise NotFound("Registration {} not found".format(reg_uuid))
page = Page(
session=con.session,
paths=cfg_paths(request, request.registry.settings.get("cfg", None)))
page.layout.nav.push(reg)
# This page can be visited while unauthenticated but only in the
# first phase of the onboarding process.
sName = reg.changes[-1].state.name
if sName == "pre_registration_person":
# TODO: Check TimeInterval hasn't expired
user = reg.changes[0].actor
else:
user = con.session.merge(authenticate_user(request, Forbidden))
page.layout.info.push(PageInfo(title=user.handle))
mships = con.session.query(Membership).join(Touch).join(User).filter(
User.id==user.id).all()
for o in sorted(
{i.organisation for i in mships},
key=operator.attrgetter("name")
):
page.layout.nav.push(o)
if sName == "pre_user_inetorgperson_dn":
page.layout.options.push(PosixUId())
return dict(page.termination())
display = (
(PosixUIdNumber, False),
(PosixUId, False),
(EmailAddress, False),
(BcryptedPassword, True),
(PosixGId, False),
(PublicKey, True)
)
for class_, isCreatable in display:
rsrcs = con.session.query(class_).join(Touch).join(Registration).filter(
Registration.uuid == reg_uuid).order_by(desc(Touch.at)).all()
if not rsrcs and isCreatable:
blank = class_()
blank.uuid = reg_uuid
page.layout.options.push(blank)
for r in rsrcs:
page.layout.items.push(r)
return dict(page.termination())
# TODO: Remove this view. Everything happens in registration
def user_read(request):
log = logging.getLogger("cloudhands.web.user_read")
con = registered_connection(request)
user = con.session.merge(authenticate_user(request, Forbidden))
u_uuid = request.matchdict["user_uuid"]
actor = con.session.query(User).filter(User.uuid == u_uuid).first()
mships = con.session.query(Membership).join(Touch).join(User).filter(
User.id==user.id).all()
page = Page(
session=con.session, user=user,
paths=cfg_paths(request, request.registry.settings.get("cfg", None)))
for o in sorted(
{i.organisation for i in mships}, key=operator.attrgetter("name")
):
page.layout.nav.push(o)
regs = con.session.query(Registration).join(Touch).join(User).filter(
User.uuid == u_uuid).all()
resources = [r for reg in regs for c in reg.changes for r in c.resources]
for i in resources:
page.layout.items.push(i)
#if not any(i for i in resources if isinstance(i, Label)):
# label = Label()
# label.uuid = appUuid
# page.layout.items.push(label)
return dict(page.termination())
def wsgi_app(args, cfg):
attribs = {
"macauth.master_secret": cfg["auth.macauth"]["secret"],
"args": args,
"cfg": cfg
}
config = Configurator(settings=attribs)
config.include("pyramid_chameleon")
if (cfg.has_section("auth.persona")
and cfg.getboolean("auth.persona", "enable")):
config.add_settings({
"persona.secret": cfg["auth.persona"]["secret"],
"persona.audiences": [
cfg["auth.persona"]["host"],
"http://{}:{}".format(platform.node(), args.port)],
})
config.include("pyramid_persona")
hateoas = JSON(indent=4)
hateoas.add_adapter(datetime.datetime, datetime_adapter)
hateoas.add_adapter(type(re.compile("")), regex_adapter)
hateoas.add_adapter(Serializable, record_adapter)
hateoas.add_adapter(Touch, touch_adapter)
config.add_renderer("hateoas", hateoas)
config.add_route(
"appliance", "/appliance/{app_uuid}")
config.add_view(
appliance_read,
route_name="appliance", request_method="GET",
renderer=cfg["paths.templates"]["appliance"])
config.add_view(
appliance_read,
route_name="appliance", request_method="GET",
renderer="hateoas", accept="application/json", xhr=True)
config.add_view(
appliance_modify,
route_name="appliance", request_method="POST",
renderer=cfg["paths.templates"]["appliance"])
config.add_route("top", "/")
config.add_view(
top_read, route_name="top", request_method="GET",
#renderer="hateoas", accept="application/json", xhr=None)
renderer=cfg["paths.templates"]["home"])
config.add_route("login", "/login")
config.add_view(
login_read,
route_name="login", request_method="GET",
#renderer="hateoas", accept="application/json", xhr=None)
renderer=cfg["paths.templates"]["login"])
config.add_view(
login_read, context=LoginForbidden,
#renderer="hateoas", accept="application/json", xhr=None)
renderer=cfg["paths.templates"]["login"])
config.add_view(
login_update, route_name="login", request_method="POST")
#renderer="hateoas", accept="application/json", xhr=None)
config.add_route("logout", "/logout")
config.add_view(
logout_update, route_name="logout", request_method="GET")
config.add_route("host", "/host/{host_uuid}")
config.add_view(
host_update, route_name="host", request_method="POST",
renderer="hateoas", accept="application/json", xhr=None)
config.add_route("membership", "/membership/{mship_uuid}")
config.add_view(
membership_read, route_name="membership", request_method="GET",
#renderer="hateoas", accept="application/json", xhr=None)
renderer=cfg["paths.templates"]["membership"])
config.add_view(
membership_update,
route_name="membership", request_method="POST",
renderer="hateoas", accept="application/json", xhr=None)
config.add_route("organisation", "/organisation/{org_name}")
config.add_view(
organisation_read, route_name="organisation", request_method="GET",
#renderer="hateoas", accept="application/json", xhr=None)
renderer=cfg["paths.templates"]["organisation"])
config.add_route(
"organisation_appliances", "/organisation/{org_name}/appliances")
config.add_view(
organisation_appliances_create,
route_name="organisation_appliances", request_method="POST")
config.add_route(
"organisation_memberships", "/organisation/{org_name}/memberships")
config.add_view(
organisation_memberships_create,
route_name="organisation_memberships", request_method="POST",
renderer="hateoas", accept="application/json", xhr=None)
config.add_route(
"organisation_catalogue", "/organisation/{org_name}/catalogue")
config.add_view(
organisation_catalogue_read,
route_name="organisation_catalogue", request_method="GET",
#renderer="hateoas", accept="application/json", xhr=None)
renderer=cfg["paths.templates"]["catalogue"])
config.add_route("people", "/people")
config.add_view(
people_read, route_name="people", request_method="GET",
#renderer="hateoas", accept="application/json", xhr=None)
renderer=cfg["paths.templates"]["people"])
config.add_view(
login_read, context=RegistrationForbidden,
renderer=cfg["paths.templates"]["registration"])
config.add_route("account", "/account/{reg_uuid}")
config.add_route("registration", "/registration/{reg_uuid}")
config.add_view(
registration_read, route_name="account", request_method="GET",
#renderer="hateoas", accept="application/json", xhr=None)
renderer=cfg["paths.templates"]["registration"])
config.add_view(
registration_read, route_name="registration", request_method="GET",
#renderer="hateoas", accept="application/json", xhr=None)
renderer=cfg["paths.templates"]["registration"])
config.add_route("registration_keys", "/registration/{reg_uuid}/keys")
config.add_view(
registration_keys,
route_name="registration_keys", request_method="POST")
#renderer="hateoas", accept="application/json", xhr=None)
config.add_route(
"registration_passwords",
"/registration/{reg_uuid}/passwords"
)
config.add_view(
registration_passwords, route_name="registration_passwords",
request_method="POST")
config.add_route("creds", "/creds")
config.add_view(
macauth_creds, route_name="creds", request_method="GET",
renderer="json", accept="application/json")
#renderer="cloudhands.web:templates/creds.pt")
config.add_route("user", "/user/{user_uuid}")
config.add_view(
user_read, route_name="user", request_method="GET",
#renderer="hateoas", accept="application/json", xhr=None)
renderer=cfg["paths.templates"]["user"])
config.add_static_view(name="css", path=cfg["paths.assets"]["css"])
config.add_static_view(name="html", path=cfg["paths.assets"]["html"])
config.add_static_view(name="js", path=cfg["paths.assets"]["js"])
config.add_static_view(name="img", path=cfg["paths.assets"]["img"])
authn_policy = AuthenticationStackPolicy()
authn_policy.add_policy(
"auth_tkt",
AuthTktAuthenticationPolicy(
cfg["auth.macauth"]["secret"],
callback=None)
)
authn_policy.add_policy(
"apimac",
MACAuthenticationPolicy(
attribs["macauth.master_secret"],
))
authz_policy = ACLAuthorizationPolicy()
config.set_authentication_policy(authn_policy)
config.set_authorization_policy(authz_policy)
config.scan()
app = config.make_wsgi_app()
return app
def configure(args):
logging.basicConfig(
level=args.log_level,
format="%(asctime)s %(levelname)-7s %(name)s|%(message)s")
cfgN, cfg = next(iter(settings.items()))
r = Registry()
session = r.connect(sqlite3, args.db).session
initialise(session)
return cfg, session
def main(args):
cfg, session = configure(args)
app = wsgi_app(args, cfg)
serve(app, host=platform.node(), port=args.port, url_scheme="http")
return 1
def parser(description=__doc__):
rv = argparse.ArgumentParser(description)
rv.add_argument(
"--version", action="store_true", default=False,
help="Print the current version number")
rv.add_argument(
"-v", "--verbose", required=False,
action="store_const", dest="log_level",
const=logging.DEBUG, default=logging.INFO,
help="Increase the verbosity of output")
rv.add_argument(
"--port", type=int, default=DFLT_PORT,
help="Set the port number [{}]".format(DFLT_PORT))
rv.add_argument(
"--db", default=DFLT_DB,
help="Set the path to the database [{}]".format(DFLT_DB))
rv.add_argument(
"--index", default=DFLT_IX,
help="Set the path to the index directory [{}]".format(DFLT_IX))
rv.add_argument(
"--log", default=None, dest="log_path",
help="Set a file path for log output")
return rv
def run():
p = parser()
args = p.parse_args()
if args.version:
sys.stdout.write(__version__ + "\n")
rv = 0
else:
rv = main(args)
sys.exit(rv)
if __name__ == "__main__":
run()
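# Illustrative invocation (not part of the original module); assumes the
# package is importable and the option values are examples only:
#
#   python3 -m cloudhands.web.main --port 8080 --db /tmp/cloudhands.sl3 -v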
| bsd-3-clause | -7,116,761,765,409,089,000 | 33.981466 | 80 | 0.645305 | false |
fedora-infra/packagedb | pkgdb/lib/validators.py | 2 | 10016 | # -*- coding: utf-8 -*-
#
# Copyright © 2008, 2010 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2, or (at your option) any later version. This
# program is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the GNU
# General Public License along with this program; if not, write to the Free
# Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the source
# code or documentation are not subject to the GNU General Public License and
# may only be used or replicated with the express permission of Red Hat, Inc.
#
# Red Hat Author(s): Toshio Kuratomi <[email protected]>
#
'''
Collection of validators for parameters coming to pkgdb URLs.
'''
#
#pylint Explanations
#
# :E1101: SQLAlchemy monkey patches database fields into the mapper classes so
# we have to disable this when accessing an attribute of a mapped class.
# Validators also have a message() method which FormEncode adds in a way
# that pylint can't detect.
# :W0232: Validators don't need an __init__ method
# :W0613: Only a few validators use the state parameter
# :W0622: We have to redefine _ due to a FormEncode limitation
# :R0201: Validators are following an API specification so need certain
# methods that would otherwise be functions
# :R0903: Validators will usually only have two methods
#pylint:disable-msg=W0232,R0201,R0903,W0613
import re
from turbogears.validators import Invalid, FancyValidator, Set, Regex, \
UnicodeString
from sqlalchemy.exceptions import InvalidRequestError
try:
from fedora.textutils import to_unicode
except ImportError:
from pkgdb.lib.utils import to_unicode
from pkgdb.model import Collection
from pkgdb.lib.utils import STATUS
#pylint:disable-msg=W0622
def _(string):
''' *HACK*: TurboGears/FormEncode requires that we use a dummy _ function.
Internationalizing error messages won't work otherwise.
http://docs.turbogears.org/1.0/Internationalization#id13
'''
return string
#pylint:enable-msg=W0622
#
# SetOf validator can validate its elements
#
class SetOf(Set):
'''formencode Set() validator with the ability to validate its elements.
:kwarg element_validator: Validator to run on each of the elements of the set.
'''
element_validator = None
messages = {'incorrect_value': 'list values did not satisfy the element_validator'}
def validate_python(self, value, state):
if self.element_validator:
try:
value = map(self.element_validator.to_python, value)
except Invalid:
raise
except:
# Just in case the element validator doesn't throw an Invalid
# exception
raise Invalid(self.message('incorrect_value', state),
value, state)
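# Illustrative usage (not part of the original module): a SetOf validator that
# runs UnicodeString over every element of a submitted list:
#
#   SetOf(element_validator=UnicodeString()).to_python([u'devel', u'F-13'])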
#
# Three sorts of validators:
#
# 1) does minimal checking that a string looks sort of right
# - For these we'll mostly just use the standard tg and formencode
# validators.
# 2) Hits the db to verify that the string exists in the proper field
# - These are appropriate where we're going to use the string anyways. For
# instance, in a select statement.
# - These should be checked by making calls against something that's easily
# sent to a memcached or redis server.
# 3) Looks in the db and transforms the string into the type of thing that it
# is a key for
# - This will do an actual call into the database and load an ORM mapped
# object.
#
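# Illustrative sketch of the three kinds above (not part of the original
# module; results depend on database content):
#
#   IsCollectionSimpleNameRegex().to_python('F-13')  # checked string
#   IsCollectionSimpleName().to_python('F-13')       # string verified in the db
#   IsCollection().to_python('F-13')                 # Collection instance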
class IsCollectionSimpleNameRegex(Regex):
'''Test the collection simple name against a simple heuristic
    :kwarg strip: If True, strips whitespace from the beginning and end of the
value. (default True)
:kwarg regex: regular expression object or string to be compiled to match
        the simple name against. Default: r'^((FC|fc|f|F|EL|el|)-?[0-9]+|devel)$'
'''
strip = True
regex = re.compile(r'^((FC|fc|f|F|EL|el|)-?[0-9]+|devel)$')
messages = {'no_collection': _('%(collection)s does not match the pattern'
' for collection names')}
def _to_python(self, value, state):
value = Regex._to_python(self, value, state)
return to_unicode(value)
def validate_python(self, value, state):
if not self.regex.match(value):
raise Invalid(self.message('no_collection', state,
collection=value), value, state)
class IsCollectionSimpleName(UnicodeString):
'''Test that the value is a recognized collection short name.
:kwarg eol: If True, include eol releases. (default False)
    :kwarg strip: If True, strips whitespace from the beginning and end of the
value. (default True)
'''
strip = True
eol = False
messages = {'no_collection': _('A collection named %(collection)s does'
' not exist'),
'eol_collection': _('Collection named %(collection)s is eol')
}
def validate_python(self, value, state):
try:
collection = Collection.by_simple_name(value)
except InvalidRequestError:
raise Invalid(self.message('no_collection', state,
collection=value), value, state)
if not self.eol and (collection.statuscode ==
STATUS['EOL']):
raise Invalid(self.message('eol_collection', state,
collection=value), value, state)
return value
class IsCollection(IsCollectionSimpleName):
'''Transforms a Collection simplename into a Collection.
:kwarg eol: If True, include eol releases. (default False)
    :kwarg strip: If True, strips whitespace from the beginning and end of the
value. (default True)
:rtype: Collection
:returns: Collection that the simplename we were given references.
'''
messages = {'no_collection': _('A collection named %(collection)s does'
' not exist'),
'eol_collection': _('Collection named %(collection)s is eol')
}
def validate_python(self, value, state):
try:
collection = Collection.by_simple_name(value)
except InvalidRequestError:
raise Invalid(self.message('no_collection', state,
collection=value), value, state)
if not self.eol and (collection.statuscode ==
STATUS['EOL']):
raise Invalid(self.message('eol_collection', state,
collection=value), value, state)
return collection
#
# Legacy -- Remove when we update the API
#
class CollectionName(FancyValidator):
'''Test that the value is a recognized collection name.'''
messages = {'no_collection': _('A collection named %(collection)s does'
' not exist.')}
def _to_python(self, value, state):
'''Just remove leading and trailing whitespace.'''
return value.strip()
def validate_python(self, value, state):
'''Make sure the collection is in the database.'''
#pylint:disable-msg=E1101
try:
Collection.query.filter_by(name=value).first()
except InvalidRequestError:
raise Invalid(self.message('no_collection', state,
collection=value), value, state)
#pylint:enable-msg=E1101
#
# Chained Validators
#
# Note: Chained validators take different params so they are not interchangable
# with normal validators:
# validate_python: field_dict instead of value. This is a dictionary of the
# fields passed into the schema.
#
# raising Invalid: error_dict. In addition to the other values to Invalid()
# we send an error_dict that maps the field to display an error with to the
# message.
class CollectionNameVersion(FancyValidator):
'''Test the combination of a Collection and Version for validity.'''
messages = {'nameless_version': _('Version specified without a collection'
' name'),
'no_version': _('There is no collection for %(name)s-%(version)s'),
'no_collection': _('Collection named %(name)s does not exist')}
def validate_python(self, field_dict, state):
'''Make sure the Collection with the given `name` and `version` exists.
We want to allow for:
1) Neither to be set
2) Name to exist in the db and version unset
3) Name and version to exist in the db
'''
if not field_dict:
# It's okay for both to be none
return
errors = {}
name = field_dict.get('name')
version = field_dict.get('version')
if (not name) and version:
#pylint:disable-msg=E1101
errors['version'] = self.message('nameless_version', state)
elif name and version:
#pylint:disable-msg=E1101
try:
Collection.query.filter_by(name=name, version=version).one()
except InvalidRequestError:
errors['version'] = self.message('no_version', state,
name=name, version=version)
elif name and not version:
#pylint:disable-msg=E1101
try:
Collection.query.filter_by(name=name).first()
except InvalidRequestError:
errors['name'] = self.message('no_collection', state, name=name)
if errors:
error_list = sorted(errors.iteritems())
error_message = '\n'.join([u'%s: %s' % (error, msg)
for error, msg in error_list])
raise Invalid(error_message, field_dict, state,
error_dict=errors)
| gpl-2.0 | -560,606,515,482,172,000 | 37.079848 | 87 | 0.650325 | false |
ekg/multichoose | multipermute.py | 6 | 2680 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# multipermute.py - permutations of a multiset
# Erik Garrison <[email protected]> 2010
"""
This module encodes functions to generate the permutations of a multiset
following this algorithm:
Algorithm 1
Visits the permutations of multiset E. The permutations are stored
in a singly-linked list pointed to by head pointer h. Each node in the linked
list has a value field v and a next field n. The init(E) call creates a
singly-linked list storing the elements of E in non-increasing order with h, i,
and j pointing to its first, second-last, and last nodes, respectively. The
null pointer is given by φ. Note: If E is empty, then init(E) should exit.
Also, if E contains only one element, then init(E) does not need to provide a
value for i.
[h, i, j] ← init(E)
visit(h)
while j.n ≠ φ or j.v < h.v do
if j.n ≠ φ and i.v ≥ j.n.v then
s←j
else
s←i
end if
t←s.n
s.n ← t.n
t.n ← h
if t.v < h.v then
i←t
end if
j←i.n
h←t
visit(h)
end while
... from "Loopless Generation of Multiset Permutations using a Constant Number
of Variables by Prefix Shifts." Aaron Williams, 2009
"""
class ListElement:
def __init__(self, value, next):
self.value = value
self.next = next
def nth(self, n):
o = self
i = 0
while i < n and o.next is not None:
o = o.next
i += 1
return o
def init(multiset):
    multiset.sort()  # ascending sort; the head-first list build below yields non-increasing order
h = ListElement(multiset[0], None)
for item in multiset[1:]:
h = ListElement(item, h)
return h, h.nth(len(multiset) - 2), h.nth(len(multiset) - 1)
def visit(h):
"""Converts our bespoke linked list to a python list."""
o = h
l = []
while o is not None:
l.append(o.value)
o = o.next
return l
def permutations(multiset):
"""Generator providing all multiset permutations of a multiset."""
h, i, j = init(multiset)
yield visit(h)
while j.next is not None or j.value < h.value:
if j.next is not None and i.value >= j.next.value:
s = j
else:
s = i
t = s.next
s.next = t.next
t.next = h
if t.value < h.value:
i = t
j = i.next
h = t
yield visit(h)
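# Illustrative example (not part of the original module): each distinct
# ordering is produced exactly once, starting from the non-increasing
# arrangement, e.g.
#
#   >>> list(permutations([1, 1, 2]))
#   [[2, 1, 1], [1, 2, 1], [1, 1, 2]]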
if __name__ == '__main__':
import sys
multiset = sys.argv[1:]
if multiset != []:
for permutation in permutations(multiset):
for item in permutation:
print item,
print
else:
print "usage", sys.argv[0], "<multiset>"
| mit | 6,965,374,735,325,676,000 | 26.091837 | 79 | 0.587947 | false |
eltonsantos/django | django/core/management/validation.py | 107 | 23840 | import collections
import sys
from django.conf import settings
from django.core.management.color import color_style
from django.utils.encoding import force_str
from django.utils.itercompat import is_iterable
from django.utils import six
class ModelErrorCollection:
def __init__(self, outfile=sys.stdout):
self.errors = []
self.outfile = outfile
self.style = color_style()
def add(self, context, error):
self.errors.append((context, error))
self.outfile.write(self.style.ERROR(force_str("%s: %s\n" % (context, error))))
def get_validation_errors(outfile, app=None):
"""
Validates all models that are part of the specified app. If no app name is provided,
validates all models of all installed apps. Writes errors, if any, to outfile.
Returns number of errors.
"""
from django.db import models, connection
from django.db.models.loading import get_app_errors
from django.db.models.deletion import SET_NULL, SET_DEFAULT
e = ModelErrorCollection(outfile)
for (app_name, error) in get_app_errors().items():
e.add(app_name, error)
for cls in models.get_models(app, include_swapped=True):
opts = cls._meta
# Check swappable attribute.
if opts.swapped:
try:
app_label, model_name = opts.swapped.split('.')
except ValueError:
e.add(opts, "%s is not of the form 'app_label.app_name'." % opts.swappable)
continue
if not models.get_model(app_label, model_name):
e.add(opts, "Model has been swapped out for '%s' which has not been installed or is abstract." % opts.swapped)
# No need to perform any other validation checks on a swapped model.
continue
# If this is the current User model, check known validation problems with User models
if settings.AUTH_USER_MODEL == '%s.%s' % (opts.app_label, opts.object_name):
# Check that REQUIRED_FIELDS is a list
if not isinstance(cls.REQUIRED_FIELDS, (list, tuple)):
e.add(opts, 'The REQUIRED_FIELDS must be a list or tuple.')
# Check that the USERNAME FIELD isn't included in REQUIRED_FIELDS.
if cls.USERNAME_FIELD in cls.REQUIRED_FIELDS:
e.add(opts, 'The field named as the USERNAME_FIELD should not be included in REQUIRED_FIELDS on a swappable User model.')
# Check that the username field is unique
if not opts.get_field(cls.USERNAME_FIELD).unique:
e.add(opts, 'The USERNAME_FIELD must be unique. Add unique=True to the field parameters.')
# Model isn't swapped; do field-specific validation.
for f in opts.local_fields:
if f.name == 'id' and not f.primary_key and opts.pk.name == 'id':
e.add(opts, '"%s": You can\'t use "id" as a field name, because each model automatically gets an "id" field if none of the fields have primary_key=True. You need to either remove/rename your "id" field or add primary_key=True to a field.' % f.name)
if f.name.endswith('_'):
e.add(opts, '"%s": Field names cannot end with underscores, because this would lead to ambiguous queryset filters.' % f.name)
if (f.primary_key and f.null and
not connection.features.interprets_empty_strings_as_nulls):
# We cannot reliably check this for backends like Oracle which
# consider NULL and '' to be equal (and thus set up
# character-based fields a little differently).
e.add(opts, '"%s": Primary key fields cannot have null=True.' % f.name)
if isinstance(f, models.CharField):
try:
max_length = int(f.max_length)
if max_length <= 0:
e.add(opts, '"%s": CharFields require a "max_length" attribute that is a positive integer.' % f.name)
except (ValueError, TypeError):
e.add(opts, '"%s": CharFields require a "max_length" attribute that is a positive integer.' % f.name)
if isinstance(f, models.DecimalField):
decimalp_ok, mdigits_ok = False, False
decimalp_msg = '"%s": DecimalFields require a "decimal_places" attribute that is a non-negative integer.'
try:
decimal_places = int(f.decimal_places)
if decimal_places < 0:
e.add(opts, decimalp_msg % f.name)
else:
decimalp_ok = True
except (ValueError, TypeError):
e.add(opts, decimalp_msg % f.name)
mdigits_msg = '"%s": DecimalFields require a "max_digits" attribute that is a positive integer.'
try:
max_digits = int(f.max_digits)
if max_digits <= 0:
e.add(opts, mdigits_msg % f.name)
else:
mdigits_ok = True
except (ValueError, TypeError):
e.add(opts, mdigits_msg % f.name)
invalid_values_msg = '"%s": DecimalFields require a "max_digits" attribute value that is greater than or equal to the value of the "decimal_places" attribute.'
if decimalp_ok and mdigits_ok:
if decimal_places > max_digits:
e.add(opts, invalid_values_msg % f.name)
if isinstance(f, models.FileField) and not f.upload_to:
e.add(opts, '"%s": FileFields require an "upload_to" attribute.' % f.name)
if isinstance(f, models.ImageField):
try:
from django.utils.image import Image
except ImportError:
e.add(opts, '"%s": To use ImageFields, you need to install Pillow. Get it at https://pypi.python.org/pypi/Pillow.' % f.name)
if isinstance(f, models.BooleanField) and getattr(f, 'null', False):
e.add(opts, '"%s": BooleanFields do not accept null values. Use a NullBooleanField instead.' % f.name)
if isinstance(f, models.FilePathField) and not (f.allow_files or f.allow_folders):
e.add(opts, '"%s": FilePathFields must have either allow_files or allow_folders set to True.' % f.name)
if isinstance(f, models.GenericIPAddressField) and not getattr(f, 'null', False) and getattr(f, 'blank', False):
e.add(opts, '"%s": GenericIPAddressField can not accept blank values if null values are not allowed, as blank values are stored as null.' % f.name)
if f.choices:
if isinstance(f.choices, six.string_types) or not is_iterable(f.choices):
e.add(opts, '"%s": "choices" should be iterable (e.g., a tuple or list).' % f.name)
else:
for c in f.choices:
if isinstance(c, six.string_types) or not is_iterable(c) or len(c) != 2:
e.add(opts, '"%s": "choices" should be a sequence of two-item iterables (e.g. list of 2 item tuples).' % f.name)
if f.db_index not in (None, True, False):
e.add(opts, '"%s": "db_index" should be either None, True or False.' % f.name)
# Perform any backend-specific field validation.
connection.validation.validate_field(e, opts, f)
# Check if the on_delete behavior is sane
if f.rel and hasattr(f.rel, 'on_delete'):
if f.rel.on_delete == SET_NULL and not f.null:
e.add(opts, "'%s' specifies on_delete=SET_NULL, but cannot be null." % f.name)
elif f.rel.on_delete == SET_DEFAULT and not f.has_default():
e.add(opts, "'%s' specifies on_delete=SET_DEFAULT, but has no default value." % f.name)
# Check to see if the related field will clash with any existing
# fields, m2m fields, m2m related objects or related objects
if f.rel:
if f.rel.to not in models.get_models():
# If the related model is swapped, provide a hint;
# otherwise, the model just hasn't been installed.
if not isinstance(f.rel.to, six.string_types) and f.rel.to._meta.swapped:
e.add(opts, "'%s' defines a relation with the model '%s.%s', which has been swapped out. Update the relation to point at settings.%s." % (f.name, f.rel.to._meta.app_label, f.rel.to._meta.object_name, f.rel.to._meta.swappable))
else:
e.add(opts, "'%s' has a relation with model %s, which has either not been installed or is abstract." % (f.name, f.rel.to))
# it is a string and we could not find the model it refers to
# so skip the next section
if isinstance(f.rel.to, six.string_types):
continue
# Make sure the related field specified by a ForeignKey is unique
if f.requires_unique_target:
if len(f.foreign_related_fields) > 1:
has_unique_field = False
for rel_field in f.foreign_related_fields:
has_unique_field = has_unique_field or rel_field.unique
if not has_unique_field:
e.add(opts, "Field combination '%s' under model '%s' must have a unique=True constraint" % (','.join([rel_field.name for rel_field in f.foreign_related_fields]), f.rel.to.__name__))
else:
if not f.foreign_related_fields[0].unique:
e.add(opts, "Field '%s' under model '%s' must have a unique=True constraint." % (f.foreign_related_fields[0].name, f.rel.to.__name__))
rel_opts = f.rel.to._meta
rel_name = f.related.get_accessor_name()
rel_query_name = f.related_query_name()
if not f.rel.is_hidden():
for r in rel_opts.fields:
if r.name == rel_name:
e.add(opts, "Accessor for field '%s' clashes with field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
if r.name == rel_query_name:
e.add(opts, "Reverse query name for field '%s' clashes with field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
for r in rel_opts.local_many_to_many:
if r.name == rel_name:
e.add(opts, "Accessor for field '%s' clashes with m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
if r.name == rel_query_name:
e.add(opts, "Reverse query name for field '%s' clashes with m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
for r in rel_opts.get_all_related_many_to_many_objects():
if r.get_accessor_name() == rel_name:
e.add(opts, "Accessor for field '%s' clashes with related m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
if r.get_accessor_name() == rel_query_name:
e.add(opts, "Reverse query name for field '%s' clashes with related m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
for r in rel_opts.get_all_related_objects():
if r.field is not f:
if r.get_accessor_name() == rel_name:
e.add(opts, "Accessor for field '%s' clashes with related field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
if r.get_accessor_name() == rel_query_name:
e.add(opts, "Reverse query name for field '%s' clashes with related field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
seen_intermediary_signatures = []
for i, f in enumerate(opts.local_many_to_many):
# Check to see if the related m2m field will clash with any
# existing fields, m2m fields, m2m related objects or related
# objects
if f.rel.to not in models.get_models():
# If the related model is swapped, provide a hint;
# otherwise, the model just hasn't been installed.
if not isinstance(f.rel.to, six.string_types) and f.rel.to._meta.swapped:
e.add(opts, "'%s' defines a relation with the model '%s.%s', which has been swapped out. Update the relation to point at settings.%s." % (f.name, f.rel.to._meta.app_label, f.rel.to._meta.object_name, f.rel.to._meta.swappable))
else:
e.add(opts, "'%s' has an m2m relation with model %s, which has either not been installed or is abstract." % (f.name, f.rel.to))
# it is a string and we could not find the model it refers to
# so skip the next section
if isinstance(f.rel.to, six.string_types):
continue
# Check that the field is not set to unique. ManyToManyFields do not support unique.
if f.unique:
e.add(opts, "ManyToManyFields cannot be unique. Remove the unique argument on '%s'." % f.name)
if f.rel.through is not None and not isinstance(f.rel.through, six.string_types):
from_model, to_model = cls, f.rel.to
if from_model == to_model and f.rel.symmetrical and not f.rel.through._meta.auto_created:
e.add(opts, "Many-to-many fields with intermediate tables cannot be symmetrical.")
seen_from, seen_to, seen_self = False, False, 0
for inter_field in f.rel.through._meta.fields:
rel_to = getattr(inter_field.rel, 'to', None)
if from_model == to_model: # relation to self
if rel_to == from_model:
seen_self += 1
if seen_self > 2:
e.add(opts, "Intermediary model %s has more than "
"two foreign keys to %s, which is ambiguous "
"and is not permitted." % (
f.rel.through._meta.object_name,
from_model._meta.object_name
)
)
else:
if rel_to == from_model:
if seen_from:
e.add(opts, "Intermediary model %s has more "
"than one foreign key to %s, which is "
"ambiguous and is not permitted." % (
f.rel.through._meta.object_name,
from_model._meta.object_name
)
)
else:
seen_from = True
elif rel_to == to_model:
if seen_to:
e.add(opts, "Intermediary model %s has more "
"than one foreign key to %s, which is "
"ambiguous and is not permitted." % (
f.rel.through._meta.object_name,
rel_to._meta.object_name
)
)
else:
seen_to = True
if f.rel.through not in models.get_models(include_auto_created=True):
e.add(opts, "'%s' specifies an m2m relation through model "
"%s, which has not been installed." % (f.name, f.rel.through)
)
signature = (f.rel.to, cls, f.rel.through)
if signature in seen_intermediary_signatures:
e.add(opts, "The model %s has two manually-defined m2m "
"relations through the model %s, which is not "
"permitted. Please consider using an extra field on "
"your intermediary model instead." % (
cls._meta.object_name,
f.rel.through._meta.object_name
)
)
else:
seen_intermediary_signatures.append(signature)
if not f.rel.through._meta.auto_created:
seen_related_fk, seen_this_fk = False, False
for field in f.rel.through._meta.fields:
if field.rel:
if not seen_related_fk and field.rel.to == f.rel.to:
seen_related_fk = True
elif field.rel.to == cls:
seen_this_fk = True
if not seen_related_fk or not seen_this_fk:
e.add(opts, "'%s' is a manually-defined m2m relation "
"through model %s, which does not have foreign keys "
"to %s and %s" % (f.name, f.rel.through._meta.object_name,
f.rel.to._meta.object_name, cls._meta.object_name)
)
elif isinstance(f.rel.through, six.string_types):
e.add(opts, "'%s' specifies an m2m relation through model %s, "
"which has not been installed" % (f.name, f.rel.through)
)
rel_opts = f.rel.to._meta
rel_name = f.related.get_accessor_name()
rel_query_name = f.related_query_name()
# If rel_name is none, there is no reverse accessor (this only
# occurs for symmetrical m2m relations to self). If this is the
# case, there are no clashes to check for this field, as there are
# no reverse descriptors for this field.
if rel_name is not None:
for r in rel_opts.fields:
if r.name == rel_name:
e.add(opts, "Accessor for m2m field '%s' clashes with field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
if r.name == rel_query_name:
e.add(opts, "Reverse query name for m2m field '%s' clashes with field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
for r in rel_opts.local_many_to_many:
if r.name == rel_name:
e.add(opts, "Accessor for m2m field '%s' clashes with m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
if r.name == rel_query_name:
e.add(opts, "Reverse query name for m2m field '%s' clashes with m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
for r in rel_opts.get_all_related_many_to_many_objects():
if r.field is not f:
if r.get_accessor_name() == rel_name:
e.add(opts, "Accessor for m2m field '%s' clashes with related m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
if r.get_accessor_name() == rel_query_name:
e.add(opts, "Reverse query name for m2m field '%s' clashes with related m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
for r in rel_opts.get_all_related_objects():
if r.get_accessor_name() == rel_name:
e.add(opts, "Accessor for m2m field '%s' clashes with related field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
if r.get_accessor_name() == rel_query_name:
e.add(opts, "Reverse query name for m2m field '%s' clashes with related field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
# Check ordering attribute.
if opts.ordering:
for field_name in opts.ordering:
if field_name == '?':
continue
if field_name.startswith('-'):
field_name = field_name[1:]
if opts.order_with_respect_to and field_name == '_order':
continue
# Skip ordering in the format field1__field2 (FIXME: checking
# this format would be nice, but it's a little fiddly).
if '__' in field_name:
continue
# Skip ordering on pk. This is always a valid order_by field
# but is an alias and therefore won't be found by opts.get_field.
if field_name == 'pk':
continue
try:
opts.get_field(field_name, many_to_many=False)
except models.FieldDoesNotExist:
e.add(opts, '"ordering" refers to "%s", a field that doesn\'t exist.' % field_name)
# Check unique_together.
for ut in opts.unique_together:
validate_local_fields(e, opts, "unique_together", ut)
if not isinstance(opts.index_together, collections.Sequence):
e.add(opts, '"index_together" must a sequence')
else:
for it in opts.index_together:
validate_local_fields(e, opts, "index_together", it)
return len(e.errors)
def validate_local_fields(e, opts, field_name, fields):
from django.db import models
if not isinstance(fields, collections.Sequence):
e.add(opts, 'all %s elements must be sequences' % field_name)
else:
for field in fields:
try:
f = opts.get_field(field, many_to_many=True)
except models.FieldDoesNotExist:
e.add(opts, '"%s" refers to %s, a field that doesn\'t exist.' % (field_name, field))
else:
if isinstance(f.rel, models.ManyToManyRel):
e.add(opts, '"%s" refers to %s. ManyToManyFields are not supported in %s.' % (field_name, f.name, field_name))
if f not in opts.local_fields:
e.add(opts, '"%s" refers to %s. This is not in the same model as the %s statement.' % (field_name, f.name, field_name))
| bsd-3-clause | -1,712,950,021,564,630,800 | 63.086022 | 264 | 0.536242 | false |
vedujoshi/tempest | tempest/lib/services/network/ports_client.py | 2 | 3105 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import exceptions as lib_exc
from tempest.lib.services.network import base
class PortsClient(base.BaseNetworkClient):
def create_port(self, **kwargs):
"""Creates a port on a network.
For a full list of available parameters, please refer to the official
API reference:
http://developer.openstack.org/api-ref/networking/v2/index.html#create-port
"""
uri = '/ports'
post_data = {'port': kwargs}
return self.create_resource(uri, post_data)
def update_port(self, port_id, **kwargs):
"""Updates a port.
For a full list of available parameters, please refer to the official
API reference:
http://developer.openstack.org/api-ref/networking/v2/index.html#update-port
"""
uri = '/ports/%s' % port_id
post_data = {'port': kwargs}
return self.update_resource(uri, post_data)
def show_port(self, port_id, **fields):
"""Shows details for a port.
For a full list of available parameters, please refer to the official
API reference:
http://developer.openstack.org/api-ref/networking/v2/index.html#show-port-details
"""
uri = '/ports/%s' % port_id
return self.show_resource(uri, **fields)
def delete_port(self, port_id):
"""Deletes a port.
For a full list of available parameters, please refer to the official
API reference:
http://developer.openstack.org/api-ref/networking/v2/index.html#delete-port
"""
uri = '/ports/%s' % port_id
return self.delete_resource(uri)
def list_ports(self, **filters):
"""Lists ports to which the tenant has access.
For a full list of available parameters, please refer to the official
API reference:
http://developer.openstack.org/api-ref/networking/v2/index.html#list-ports
"""
uri = '/ports'
return self.list_resources(uri, **filters)
def create_bulk_ports(self, **kwargs):
"""Create multiple ports in a single request.
For a full list of available parameters, please refer to the official
API reference:
http://developer.openstack.org/api-ref/networking/v2/index.html#bulk-create-ports
"""
uri = '/ports'
return self.create_resource(uri, kwargs)
def is_resource_deleted(self, id):
try:
self.show_port(id)
except lib_exc.NotFound:
return True
return False
| apache-2.0 | -2,296,830,568,639,693,000 | 35.104651 | 89 | 0.643156 | false |
xbezdick/tempest | tempest/services/compute/json/floating_ip_pools_client.py | 6 | 1314 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils as json
from six.moves.urllib import parse as urllib
from tempest.api_schema.response.compute.v2_1 import floating_ips as schema
from tempest.common import service_client
class FloatingIPPoolsClient(service_client.ServiceClient):
def list_floating_ip_pools(self, params=None):
"""Gets all floating IP Pools list."""
url = 'os-floating-ip-pools'
if params:
url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
body = json.loads(body)
self.validate_response(schema.list_floating_ip_pools, resp, body)
return service_client.ResponseBody(resp, body)
| apache-2.0 | 2,848,230,105,475,330,000 | 37.647059 | 78 | 0.71309 | false |
tudorian/eden | tests/unit_tests/modules/s3/s3gis/YahooLayer.py | 43 | 1342 |
s3gis_tests = load_module("tests.unit_tests.modules.s3.s3gis")
test_utils = local_import("test_utils")
yahoo_layer = dict(
name = "Test Yahoo Layer",
description = "Test Yahoo",
enabled = True,
created_on = datetime.datetime.now(),
modified_on = datetime.datetime.now(),
satellite_enabled = True,
maps_enabled = True,
hybrid_enabled = True,
apikey = "FAKEAPIKEY",
)
def test_YahooLayer():
s3gis_tests.layer_test(
db,
db.gis_layer_yahoo,
yahoo_layer,
"S3.gis.Yahoo",
{
"Hybrid": u"Yahoo Hybrid",
"Maps": u"Yahoo Maps",
"Satellite": u"Yahoo Satellite",
},
session = session,
request = request,
)
def test_yahoo_scripts():
with s3gis_tests.InsertedRecord(db, db.gis_layer_yahoo, yahoo_layer):
with s3gis_tests.AddedRole(session, session.s3.system_roles.MAP_ADMIN):
actual_output = str(
s3base.GIS().show_map(
catalogue_layers = True,
projection = 900913,
)
)
s3gis_tests.check_scripts(
actual_output,
[
"http://api.maps.yahoo.com/ajaxymap?v=3.8&appid=FAKEAPIKEY"
],
request
)
| mit | 8,925,445,179,543,354,000 | 26.958333 | 83 | 0.528316 | false |
qedsoftware/commcare-hq | corehq/apps/reports/standard/__init__.py | 1 | 8604 | from datetime import datetime
import dateutil
from django.core.cache import cache
from django.core.urlresolvers import reverse
from corehq.apps.casegroups.models import CommCareCaseGroup
from corehq.apps.groups.models import Group
from corehq.apps.reports import util
from corehq.apps.reports.dispatcher import ProjectReportDispatcher, CustomProjectReportDispatcher
from corehq.apps.reports.exceptions import BadRequestError
from corehq.apps.reports.filters.users import UserTypeFilter
from corehq.apps.reports.generic import GenericReportView
from corehq.apps.reports.filters.select import MonthFilter, YearFilter
from corehq.apps.users.models import CommCareUser
from dimagi.utils.dates import DateSpan
from django.utils.translation import ugettext_noop
from dimagi.utils.decorators.memoized import memoized
class ProjectReport(GenericReportView):
# overriding properties from GenericReportView
section_name = ugettext_noop("Project Reports")
base_template = 'reports/base_template.html'
dispatcher = ProjectReportDispatcher
asynchronous = True
@property
def default_report_url(self):
return reverse('reports_home', args=[self.request.project])
class CustomProjectReport(ProjectReport):
dispatcher = CustomProjectReportDispatcher
emailable = True
class CommCareUserMemoizer(object):
@memoized
def by_domain(self, domain, is_active=True):
users = CommCareUser.by_domain(domain, is_active=is_active)
for user in users:
# put users in the cache for get_by_user_id
# so that function never has to touch the database
self.get_by_user_id.get_cache(self)[(self, user.user_id)] = user
return users
@memoized
def get_by_user_id(self, user_id):
return CommCareUser.get_by_user_id(user_id)
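    # Illustrative usage (editor's sketch, not in the original source; "some-domain"
    # is a made-up domain name). Because by_domain() primes the get_by_user_id
    # cache, the second lookup below is served from memory rather than the database:
    #
    #     users = CommCareUserMemoizer()
    #     workers = users.by_domain("some-domain")
    #     first = users.get_by_user_id(workers[0].user_id)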
class ProjectReportParametersMixin(object):
"""
All the parameters necessary for the project reports.
Intended to be mixed in with a GenericReportView object.
"""
default_case_type = None
filter_group_name = None
filter_users_field_class = UserTypeFilter
include_inactive = False
# set this to set the report's user ids from within the report
# (i.e. based on a filter's return value).
override_user_ids = None
@property
@memoized
def CommCareUser(self):
return CommCareUserMemoizer()
@memoized
def get_all_users_by_domain(self, group=None, user_ids=None, user_filter=None, simplified=False):
return list(util.get_all_users_by_domain(
domain=self.domain,
group=group,
user_ids=user_ids,
user_filter=user_filter,
simplified=simplified,
CommCareUser=self.CommCareUser
))
@property
@memoized
def user_filter(self):
return self.filter_users_field_class.get_user_filter(self.request)[0]
@property
@memoized
def default_user_filter(self):
return self.filter_users_field_class.get_user_filter(None)[0]
@property
def group_id(self):
return self.request.GET.get('group', '')
@property
@memoized
def group(self):
return Group.get(self.group_id) if self.group_id else None
@property
def individual(self):
"""
todo: remember this: if self.individual and self.users:
self.name = "%s for %s" % (self.name, self.users[0].raw_username)
"""
return self.request_params.get('individual', '')
@property
def mobile_worker_ids(self):
ids = self.request.GET.getlist('select_mw')
if '_all' in ids or self.request.GET.get('all_mws', 'off') == 'on':
cache_str = "mw_ids:%s" % self.domain
ids = cache.get(cache_str)
if not ids:
cc_users = CommCareUser.by_domain(self.domain)
if self.include_inactive:
cc_users += CommCareUser.by_domain(self.domain, is_active=False)
ids = [ccu._id for ccu in cc_users]
cache.set(cache_str, ids, 24*60*60)
return ids
@property
@memoized
def users(self):
if self.filter_group_name and not (self.group_id or self.individual):
group = Group.by_name(self.domain, self.filter_group_name)
else:
group = self.group
if self.override_user_ids is not None:
user_ids = self.override_user_ids
else:
user_ids = [self.individual]
return self.get_all_users_by_domain(
group=group,
user_ids=tuple(user_ids),
user_filter=tuple(self.user_filter),
simplified=True
)
@property
@memoized
def user_ids(self):
return [user.user_id for user in self.users]
@property
@memoized
def usernames(self):
return {user.user_id: user.username_in_report for user in self.users}
@property
def history(self):
history = self.request_params.get('history', '')
if history:
try:
return dateutil.parser.parse(history)
except ValueError:
pass
@property
def case_type(self):
return self.default_case_type or self.request_params.get('case_type', '')
@property
def case_status(self):
from corehq.apps.reports.filters.select import SelectOpenCloseFilter
return self.request_params.get(SelectOpenCloseFilter.slug, '')
@property
def case_group_ids(self):
return filter(None, self.request.GET.getlist('case_group'))
@property
@memoized
def case_groups(self):
return [CommCareCaseGroup.get(g) for g in self.case_group_ids]
@property
@memoized
def cases_by_case_group(self):
case_ids = []
for group in self.case_groups:
case_ids.extend(group.cases)
return case_ids
class CouchCachedReportMixin(object):
"""
Use this mixin for caching reports as objects in couch.
"""
_cached_report = None
@property
def cached_report(self):
if not self._cached_report:
self._cached_report = self.fetch_cached_report()
return self._cached_report
def fetch_cached_report(self):
"""
Here's where you generate your cached report.
"""
raise NotImplementedError
class DatespanMixin(object):
"""
Use this where you'd like to include the datespan field.
"""
datespan_field = 'corehq.apps.reports.filters.dates.DatespanFilter'
datespan_default_days = 7
datespan_max_days = None
inclusive = True
_datespan = None
@property
def datespan(self):
if self._datespan is None:
datespan = self.default_datespan
if self.request.datespan.is_valid() and not self.request.datespan.is_default:
datespan.enddate = self.request.datespan.enddate
datespan.startdate = self.request.datespan.startdate
datespan.is_default = False
elif self.request.datespan.get_validation_reason() == "You can't use dates earlier than the year 1900":
raise BadRequestError()
self.request.datespan = datespan
# todo: don't update self.context here. find a better place! AGH! Sorry, sorry.
self.context.update(dict(datespan=datespan))
self._datespan = datespan
return self._datespan
@property
def default_datespan(self):
datespan = DateSpan.since(self.datespan_default_days, timezone=self.timezone, inclusive=self.inclusive)
datespan.max_days = self.datespan_max_days
datespan.is_default = True
return datespan
class MonthYearMixin(object):
"""
Similar to DatespanMixin, but works with MonthField and YearField
"""
fields = [MonthFilter, YearFilter]
_datespan = None
@property
def datespan(self):
if self._datespan is None:
datespan = DateSpan.from_month(self.month, self.year)
self.request.datespan = datespan
self.context.update(dict(datespan=datespan))
self._datespan = datespan
return self._datespan
@property
def month(self):
if 'month' in self.request_params:
return int(self.request_params['month'])
else:
return datetime.utcnow().month
@property
def year(self):
if 'year' in self.request_params:
return int(self.request_params['year'])
else:
return datetime.utcnow().year
| bsd-3-clause | 1,340,669,533,181,470,000 | 30.516484 | 115 | 0.640748 | false |
yanchen036/tensorflow | tensorflow/contrib/distributions/python/ops/bijectors/matrix_inverse_tril.py | 3 | 5272 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""MatrixInverseTriL bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import bijector
__all__ = [
"MatrixInverseTriL",
]
class MatrixInverseTriL(bijector.Bijector):
"""Computes `g(L) = inv(L)`, where `L` is a lower-triangular matrix.
`L` must be nonsingular; equivalently, all diagonal entries of `L` must be
nonzero.
The input must have `rank >= 2`. The input is treated as a batch of matrices
with batch shape `input.shape[:-2]`, where each matrix has dimensions
`input.shape[-2]` by `input.shape[-1]` (hence `input.shape[-2]` must equal
`input.shape[-1]`).
#### Examples
```python
tfd.bijectors.MatrixInverseTriL().forward(x=[[1., 0], [2, 1]])
# Result: [[1., 0], [-2, 1]], i.e., inv(x)
tfd.bijectors.MatrixInverseTriL().inverse(y=[[1., 0], [-2, 1]])
# Result: [[1., 0], [2, 1]], i.e., inv(y).
```
"""
def __init__(self, validate_args=False, name="matrix_inverse_tril"):
"""Instantiates the `MatrixInverseTriL` bijector.
Args:
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
name: Python `str` name given to ops managed by this object.
"""
self._graph_parents = []
self._name = name
super(MatrixInverseTriL, self).__init__(
forward_min_event_ndims=2,
validate_args=validate_args,
name=name)
def _forward(self, x):
with ops.control_dependencies(self._assertions(x)):
shape = array_ops.shape(x)
return linalg_ops.matrix_triangular_solve(
x, linalg_ops.eye(shape[-1], batch_shape=shape[:-2]), lower=True)
def _inverse(self, y):
return self._forward(y)
def _forward_log_det_jacobian(self, x):
# Calculation of the Jacobian:
#
# Let X = (x_{ij}), 0 <= i,j < n, be a matrix of indeterminates. Let Z =
# X^{-1} where Z = (z_{ij}). Then
#
# dZ/dx_{ij} = (d/dt | t=0) Y(t)^{-1},
#
# where Y(t) = X + t*E_{ij} and E_{ij} is the matrix with a 1 in the (i,j)
# entry and zeros elsewhere. By the product rule,
#
# 0 = d/dt [Identity matrix]
# = d/dt [Y Y^{-1}]
# = Y d/dt[Y^{-1}] + dY/dt Y^{-1}
#
# so
#
# d/dt[Y^{-1}] = -Y^{-1} dY/dt Y^{-1}
# = -Y^{-1} E_{ij} Y^{-1}.
#
# Evaluating at t=0,
#
# dZ/dx_{ij} = -Z E_{ij} Z.
#
# Taking the (r,s) entry of each side,
#
# dz_{rs}/dx_{ij} = -z_{ri}z_{sj}.
#
# Now, let J be the Jacobian dZ/dX, arranged as the n^2-by-n^2 matrix whose
# (r*n + s, i*n + j) entry is dz_{rs}/dx_{ij}. Considering J as an n-by-n
# block matrix with n-by-n blocks, the above expression for dz_{rs}/dx_{ij}
# shows that the block at position (r,i) is -z_{ri}Z. Hence
#
# J = -KroneckerProduct(Z, Z),
# det(J) = (-1)^(n^2) (det Z)^(2n)
# = (-1)^n (det X)^(-2n).
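    # Editor's sanity check (not in the original source): for a 1x1 input [[a]]
    # the forward map is a -> 1/a with derivative -1/a**2, so
    # log|det J| = -2*log|a|, which matches the expression below with n = 1.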
with ops.control_dependencies(self._assertions(x)):
return (-2. * math_ops.cast(array_ops.shape(x)[-1], x.dtype.base_dtype) *
math_ops.reduce_sum(
math_ops.log(math_ops.abs(array_ops.matrix_diag_part(x))),
axis=-1))
def _assertions(self, x):
if not self.validate_args:
return []
shape = array_ops.shape(x)
is_matrix = check_ops.assert_rank_at_least(
x, 2, message="Input must have rank at least 2.")
is_square = check_ops.assert_equal(
shape[-2], shape[-1], message="Input must be a square matrix.")
above_diagonal = array_ops.matrix_band_part(
array_ops.matrix_set_diag(
x, array_ops.zeros(shape[:-1], dtype=dtypes.float32)),
0, -1)
is_lower_triangular = check_ops.assert_equal(
above_diagonal, array_ops.zeros_like(above_diagonal),
message="Input must be lower triangular.")
# A lower triangular matrix is nonsingular iff all its diagonal entries are
# nonzero.
diag_part = array_ops.matrix_diag_part(x)
is_nonsingular = check_ops.assert_none_equal(
diag_part, array_ops.zeros_like(diag_part),
message="Input must have all diagonal entries nonzero.")
return [is_matrix, is_square, is_lower_triangular, is_nonsingular]
| apache-2.0 | -1,486,163,193,245,675,300 | 35.358621 | 80 | 0.609067 | false |
webgeodatavore/django | django/templatetags/static.py | 197 | 4052 | from django import template
from django.utils.encoding import iri_to_uri
from django.utils.six.moves.urllib.parse import urljoin
register = template.Library()
class PrefixNode(template.Node):
def __repr__(self):
return "<PrefixNode for %r>" % self.name
def __init__(self, varname=None, name=None):
if name is None:
raise template.TemplateSyntaxError(
"Prefix nodes must be given a name to return.")
self.varname = varname
self.name = name
@classmethod
def handle_token(cls, parser, token, name):
"""
Class method to parse prefix node and return a Node.
"""
        # token.split_contents() isn't useful here because tags using this method don't accept variables as arguments
tokens = token.contents.split()
if len(tokens) > 1 and tokens[1] != 'as':
raise template.TemplateSyntaxError(
"First argument in '%s' must be 'as'" % tokens[0])
if len(tokens) > 1:
varname = tokens[2]
else:
varname = None
return cls(varname, name)
@classmethod
def handle_simple(cls, name):
try:
from django.conf import settings
except ImportError:
prefix = ''
else:
prefix = iri_to_uri(getattr(settings, name, ''))
return prefix
def render(self, context):
prefix = self.handle_simple(self.name)
if self.varname is None:
return prefix
context[self.varname] = prefix
return ''
@register.tag
def get_static_prefix(parser, token):
"""
Populates a template variable with the static prefix,
``settings.STATIC_URL``.
Usage::
{% get_static_prefix [as varname] %}
Examples::
{% get_static_prefix %}
{% get_static_prefix as static_prefix %}
"""
return PrefixNode.handle_token(parser, token, "STATIC_URL")
@register.tag
def get_media_prefix(parser, token):
"""
Populates a template variable with the media prefix,
``settings.MEDIA_URL``.
Usage::
{% get_media_prefix [as varname] %}
Examples::
{% get_media_prefix %}
{% get_media_prefix as media_prefix %}
"""
return PrefixNode.handle_token(parser, token, "MEDIA_URL")
class StaticNode(template.Node):
def __init__(self, varname=None, path=None):
if path is None:
raise template.TemplateSyntaxError(
"Static template nodes must be given a path to return.")
self.path = path
self.varname = varname
def url(self, context):
path = self.path.resolve(context)
return self.handle_simple(path)
def render(self, context):
url = self.url(context)
if self.varname is None:
return url
context[self.varname] = url
return ''
@classmethod
def handle_simple(cls, path):
return urljoin(PrefixNode.handle_simple("STATIC_URL"), path)
@classmethod
def handle_token(cls, parser, token):
"""
Class method to parse prefix node and return a Node.
"""
bits = token.split_contents()
if len(bits) < 2:
raise template.TemplateSyntaxError(
"'%s' takes at least one argument (path to file)" % bits[0])
path = parser.compile_filter(bits[1])
if len(bits) >= 2 and bits[-2] == 'as':
varname = bits[3]
else:
varname = None
return cls(varname, path)
@register.tag('static')
def do_static(parser, token):
"""
Joins the given path with the STATIC_URL setting.
Usage::
{% static path [as varname] %}
Examples::
{% static "myapp/css/base.css" %}
{% static variable_with_path %}
{% static "myapp/css/base.css" as admin_base_css %}
{% static variable_with_path as varname %}
"""
return StaticNode.handle_token(parser, token)
def static(path):
return StaticNode.handle_simple(path)
| bsd-3-clause | -8,520,686,526,535,047,000 | 25.311688 | 116 | 0.586624 | false |
sammyshj/gci | modules/s3db/fire.py | 4 | 21906 | # -*- coding: utf-8 -*-
""" Sahana Eden Fire Models
@copyright: 2009-2013 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ["S3FireModel",
"S3FireStationModel",
]
from gluon import *
from gluon.dal import Row
from gluon.storage import Storage
from ..s3 import *
from s3layouts import S3AddResourceLink
# =============================================================================
class S3FireModel(S3Model):
"""
Fire Zones: Burn Perimeter, Burnt zone, Evacuation Zone, etc
"""
names = ["fire_zone_type",
"fire_zone",
]
def model(self):
T = current.T
db = current.db
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
# -----------------------------------------------------------
# Fire Zone Types
tablename = "fire_zone_type"
table = define_table(tablename,
Field("name",
label=T("Name")),
# @ToDo: Currently unused - apply in layer_feature for now
Field("style", "text",
label=T("Style")),
s3_comments(),
*s3_meta_fields())
# CRUD strings
ADD_ZONE_TYPE = T("Add Zone Type")
crud_strings[tablename] = Storage(
title_create = ADD_ZONE_TYPE,
title_display = T("Zone Type Details"),
title_list = T("Zone Types"),
title_update = T("Edit Zone Type"),
title_search = T("Search Zone Types"),
title_upload = T("Import Zone Types"),
subtitle_create = T("Add New Zone Type"),
label_list_button = T("List Zone Types"),
label_create_button = T("Add New Zone Type"),
label_delete_button = T("Delete Zone Type"),
msg_record_created = T("Zone Type added"),
msg_record_modified = T("Zone Type updated"),
msg_record_deleted = T("Zone Type deleted"),
msg_list_empty = T("No Zone Types currently registered"))
zone_type_represent = S3Represent(lookup=tablename)
self.configure(tablename,
deduplicate = self.fire_zone_type_duplicate,
)
# -----------------------------------------------------------
# Fire Zones
tablename = "fire_zone"
table = define_table(tablename,
Field("name",
label=T("Name")),
Field("zone_type_id", db.fire_zone_type,
requires = IS_NULL_OR(
IS_ONE_OF(db, "fire_zone_type.id",
zone_type_represent,
sort=True)),
represent = zone_type_represent,
comment = S3AddResourceLink(c="fire",
f="zone_type",
label=ADD_ZONE_TYPE,
tooltip=T("Select a Zone Type from the list or click 'Add Zone Type'")),
label=T("Type")),
self.gis_location_id(
widget = S3LocationSelectorWidget(
catalog_layers=True,
polygon=True
)
),
s3_comments(),
*s3_meta_fields())
# CRUD strings
crud_strings[tablename] = Storage(
title_create = T("Add Zone"),
title_display = T("Zone Details"),
title_list = T("Zones"),
title_update = T("Edit Zone"),
title_search = T("Search Zones"),
title_upload = T("Import Zones"),
subtitle_create = T("Add New Zone"),
label_list_button = T("List Zones"),
label_create_button = T("Add New Zone"),
label_delete_button = T("Delete Zone"),
msg_record_created = T("Zone added"),
msg_record_modified = T("Zone updated"),
msg_record_deleted = T("Zone deleted"),
msg_list_empty = T("No Zones currently registered"))
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return Storage()
# -------------------------------------------------------------------------
@staticmethod
def fire_zone_type_duplicate(item):
"""
Zone Type record duplicate detection, used for the deduplicate hook
@param item: the S3ImportItem to check
"""
if item.tablename == "fire_zone_type":
table = item.table
query = (table.name == item.data.name)
row = current.db(query).select(table.id,
limitby=(0, 1)).first()
if row:
item.id = row.id
item.method = item.METHOD.UPDATE
# =============================================================================
class S3FireStationModel(S3Model):
"""
A Model to manage Fire Stations:
http://eden.sahanafoundation.org/wiki/Deployments/Bombeiros
"""
names = ["fire_station",
"fire_station_vehicle",
"fire_water_source",
"fire_hazard_point",
"fire_staff_on_duty"
]
def model(self):
T = current.T
db = current.db
request = current.request
person_id = self.pr_person_id
location_id = self.gis_location_id
organisation_id = self.org_organisation_id
human_resource_id = self.hrm_human_resource_id
ireport_id = self.irs_ireport_id
vehicle_id = self.vehicle_vehicle_id
add_component = self.add_component
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
# =====================================================================
# Fire Station
#
fire_station_types = {
1: T("Fire Station"),
9: T("Unknown type of facility"),
}
tablename = "fire_station"
table = define_table(tablename,
self.super_link("site_id", "org_site"),
Field("name", notnull=True, length=64,
label = T("Name")),
Field("code", unique=True, length=64,
label = T("Code")),
Field("facility_type", "integer",
label = T("Facility Type"),
requires = IS_NULL_OR(IS_IN_SET(fire_station_types)),
default = 1,
represent = lambda opt: \
fire_station_types.get(opt, T("not specified"))
),
organisation_id(),
location_id(),
Field("phone", label = T("Phone"),
requires = IS_NULL_OR(s3_phone_requires)),
Field("website", label=T("Website"),
requires = IS_NULL_OR(IS_URL()),
represent = lambda url: s3_url_represent(url)),
Field("email", label = T("Email"),
requires = IS_NULL_OR(IS_EMAIL())
),
Field("fax", label = T("Fax"),
requires = IS_NULL_OR(s3_phone_requires)),
Field("obsolete", "boolean",
label = T("Obsolete"),
represent = lambda bool: \
(bool and [T("Obsolete")] or [current.messages["NONE"]])[0],
default = False,
readable = False,
writable = False),
s3_comments(),
*s3_meta_fields())
self.configure("fire_station",
super_entity="org_site")
station_id = S3ReusableField("station_id", table,
requires = IS_NULL_OR(
IS_ONE_OF(db, "fire_station.id",
self.fire_station_represent)),
represent = self.fire_station_represent,
label = T("Station"),
ondelete = "CASCADE"
)
# CRUD strings
ADD_FIRE_STATION = T("Add Fire Station")
crud_strings[tablename] = Storage(
title_create = ADD_FIRE_STATION,
title_display = T("Fire Station Details"),
title_list = T("Fire Stations"),
title_update = T("Edit Station Details"),
title_search = T("Search for Fire Station"),
title_upload = T("Upload Fire Stations List"),
title_map = T("Map of Fire Stations"),
subtitle_create = T("Add New Fire Station"),
label_list_button = T("List Fire Stations"),
label_create_button = ADD_FIRE_STATION,
label_delete_button = T("Delete Fire Station"),
msg_record_created = T("Fire Station added"),
msg_record_modified = T("Fire Station updated"),
msg_record_deleted = T("Fire Station deleted"),
msg_no_match = T("No Fire Stations could be found"),
msg_list_empty = T("No Fire Stations currently registered"))
add_component("vehicle_vehicle",
fire_station = Storage(link="fire_station_vehicle",
joinby="station_id",
key="vehicle_id",
actuate="replace"))
add_component("fire_shift",
fire_station = "station_id")
add_component("fire_shift_staff",
fire_station = "station_id")
# =====================================================================
# Vehicles of Fire stations
#
tablename = "fire_station_vehicle"
table = define_table(tablename,
station_id(),
vehicle_id(),
*s3_meta_fields()
)
# CRUD strings
ADD_VEHICLE = T("Add Vehicle")
crud_strings[tablename] = Storage(
title_create = ADD_VEHICLE,
title_display = T("Vehicle Details"),
title_list = T("Vehicles"),
title_update = T("Edit Vehicle Details"),
title_search = T("Search for Vehicles"),
title_upload = T("Upload Vehicles List"),
subtitle_create = T("Add New Vehicle"),
label_list_button = T("List Vehicles"),
label_create_button = ADD_VEHICLE,
label_delete_button = T("Delete Vehicle"),
msg_record_created = T("Vehicle added"),
msg_record_modified = T("Vehicle updated"),
msg_record_deleted = T("Vehicle deleted"),
msg_no_match = T("No Vehicles could be found"),
msg_list_empty = T("No Vehicles currently registered"))
self.set_method("fire", "station",
method="vehicle_report",
action=self.vehicle_report)
# =====================================================================
# Water Sources
#
tablename = "fire_water_source"
table = define_table(tablename,
Field("name", "string"),
location_id(),
#Field("good_for_human_usage", "boolean"),
#Field("fresh", "boolean"),
#Field("Salt", "boolean"),
#Field("toponymy", "string"),
#Field("parish", "string"),
#Field("type", "string"),
#Field("owner", "string"),
#person_id(),
#organisation_id(),
#Field("shape", "string"),
#Field("diameter", "string"),
#Field("depth", "string"),
#Field("volume", "integer"),
#Field("lenght", "integer"),
#Field("height", "integer"),
#Field("usefull_volume", "integer"),
#Field("catchment", "integer"),
#Field("area", "integer"),
#Field("date", "date"),
#Field("access_type", "string"),
#Field("previews_usage", "boolean"),
#Field("car_access", "string"),
#Field("mid_truck_access", "string"),
#Field("truck_access", "string"),
#Field("distance_from_trees", "integer"),
#Field("distance_from_buildings", "integer"),
#Field("helicopter_access", "string"),
#Field("previews_usage_air", "boolean"),
#Field("car_movment_conditions", "string"),
#Field("midtruck_movment_conditions", "string"),
#Field("truck_movment_conditions", "string"),
#Field("powerline_distance", "integer"),
#Field("distance_other_risks", "integer"),
#Field("anti_seismic_construction", "boolean"),
#Field("isolated_from_air", "boolean"),
#Field("hermetic", "boolean"),
s3_comments(),
*s3_meta_fields())
# =====================================================================
# Hazards
# - this is long-term hazards, not incidents
#
tablename = "fire_hazard_point"
table = define_table(tablename,
location_id(),
Field("name", "string"),
# What are the Org & Person for? Contacts?
organisation_id(),
person_id(),
s3_comments(),
*s3_meta_fields())
# =====================================================================
# Shifts
#
tablename = "fire_shift"
table = define_table(tablename,
station_id(),
Field("name"),
s3_datetime("start_time",
empty=False,
default="now"
),
s3_datetime("end_time",
empty=False,
default="now"
),
*s3_meta_fields())
shift_id = S3ReusableField("shift_id", table,
requires = IS_NULL_OR(
IS_ONE_OF(db, "fire_shift.id",
self.fire_shift_represent)),
represent = self.fire_shift_represent,
label = T("Shift"),
ondelete = "CASCADE")
# ---------------------------------------------------------------------
tablename = "fire_shift_staff"
table = define_table(tablename,
station_id(),
#shift_id(),
human_resource_id(),
*s3_meta_fields())
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return Storage(
# used by IRS
fire_staff_on_duty = self.fire_staff_on_duty
)
# -------------------------------------------------------------------------
@staticmethod
def fire_station_represent(id, row=None):
""" FK representation """
if row:
return row.name
elif not id:
return current.messages["NONE"]
db = current.db
table = db.fire_station
r = db(table.id == id).select(table.name,
limitby = (0, 1)).first()
try:
return r.name
except:
return current.messages.UNKNOWN_OPT
# -------------------------------------------------------------------------
@staticmethod
def fire_shift_represent(id, row=None):
"""
Represent a Shift by Start and End times
"""
if row:
pass
elif not id:
return current.messages["NONE"]
else:
db = current.db
table = db.fire_shift
row = db(table.id == id).select(table.start_time,
table.end_time,
limitby=(0, 1)).first()
try:
return "%s - %s" % (row.start_time, row.end_time)
except:
            return current.messages.UNKNOWN_OPT
# -------------------------------------------------------------------------
@staticmethod
def fire_staff_on_duty(station_id=None):
"""
Return a query for hrm_human_resource filtering
for entries which are linked to a current shift
"""
db = current.db
staff = db.hrm_human_resource
roster = db.fire_shift_staff
query = (staff.id == roster.human_resource_id) & \
(roster.deleted != True)
if station_id is not None:
query &= (roster.station_id == station_id)
return query
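        # Illustrative usage (editor's sketch, not part of the original module;
        # the station id is a made-up value):
        #     query = fire_staff_on_duty(station_id=5)
        #     on_duty = current.db(query).select(current.db.hrm_human_resource.ALL)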
# -------------------------------------------------------------------------
@staticmethod
def vehicle_report(r, **attr):
"""
Custom method to provide a report on Vehicle Deployment Times
- this is one of the main tools currently used to manage an Incident
"""
rheader = attr.get("rheader", None)
if rheader:
rheader = rheader(r)
station_id = r.id
if station_id:
s3db = current.s3db
dtable = s3db.irs_ireport_vehicle
vtable = s3db.vehicle_vehicle
stable = s3db.fire_station_vehicle
query = (stable.station_id == station_id) & \
(stable.vehicle_id == vtable.id) & \
(vtable.asset_id == dtable.asset_id)
current.response.s3.crud_strings["irs_ireport_vehicle"] = Storage(
title_report = "Vehicle Deployment Times"
)
req = r.factory(prefix="irs",
name="ireport_vehicle",
args=["report"],
vars=Storage(
rows = "asset_id",
cols = "ireport_id",
fact = "minutes",
aggregate = "sum")
)
req.set_handler("report", S3Report())
req.resource.add_filter(query)
return req(rheader=rheader)
# END =========================================================================
| mit | 944,320,506,550,705,900 | 41.618677 | 135 | 0.411988 | false |
thilaire/CodingGameServer | games/Networks/server/AliceRandomPlayer.py | 1 | 4279 | """
* --------------------- *
| |
| Coding Game Server |
| |
* --------------------- *
Authors: M. Pecheux (based on T. Hilaire and J. Brajard template file)
Licence: GPL
File: aliceRandomPlayer.py
Contains the class aliceRandomPlayer
-> defines a dummy Alice player that play randomly every time (but do not loose)
Copyright 2017 M. Pecheux
"""
from CGSserver.Player import TrainingPlayer
from random import choice
from .Constants import CAPTURE, DESTROY, LINK_H, LINK_V, DO_NOTHING, \
LINK_ENERGY, DESTROY_ENERGY
boolConv = {'true': True, 'false': False}
def check_type(element, typecheck):
"""Function that checks for class type (class is not yet
defined, so cannot use type() built-in...)"""
return element is not None and element.__class__.__name__ == typecheck
class AliceRandomPlayer(TrainingPlayer):
"""
This class implements Alice: a training player that plays... randomly
Every player should be able to beat him
"""
def __init__(self, **options):
"""
Initialize the training player
The option "advanced=true" (default) or "advanced=false" is possible
This option indicates if the player can also destroy/create links
"""
super().__init__('ALICE')
# check "advanced" option
if "advanced" not in options:
self.advanced = True
elif options["advanced"].lower() in boolConv:
self.advanced = boolConv[options["advanced"].lower()]
else:
raise ValueError("The option advanced=%s is incorrect." % options["advanced"])
def neighbours(self, x, y, us):
"""
:param x: coordinate of a point
:param y: coordinate of a point
:return: list of neighbours of the point (x,y)
"""
neighbours = []
if x > 1:
n = self.game.board[x-2][y]
l = self.game.board[x-1][y]
if check_type(n, "Node") and ( n.owner != us) and \
check_type(l, "Link") and l.direction == 0:
neighbours.append(n)
if x < self.game.L-2:
n = self.game.board[x+2][y]
l = self.game.board[x+1][y]
if check_type(n, "Node") and (n.owner != us) and \
check_type(l, "Link") and l.direction == 0:
neighbours.append(n)
if y > 1:
n = self.game.board[x][y-2]
l = self.game.board[x][y-1]
if check_type(n, "Node") and (n.owner != us) and \
check_type(l, "Link") and l.direction == 1:
neighbours.append(n)
if y < self.game.H-2:
n = self.game.board[x][y+2]
l = self.game.board[x][y+1]
if check_type(n, "Node") and (n.owner != us) and \
check_type(l, "Link") and l.direction == 1:
neighbours.append(n)
return neighbours
def playMove(self):
"""
Plays the move -> here a random move
Returns the move (string %d %d %d)
"""
# get our player number
us = 0 if (self.game.players[0] is self) else 1
# build the list of the possible moves
moves = []
# capture node
# get currently owned nodes neighbours and add them to the moved list
for node in self.game.playerNode[us]:
for n in self.neighbours(node.x, node.y, us):
moves.append("%d %d %d" % (CAPTURE, n.x, n.y))
# advanced moves
if self.advanced:
# destroy link
if self.game.playerEnergy[us] >= DESTROY_ENERGY:
                linkCells = [(x, y) for x in range(self.game.L-1) for y in range(self.game.H-1) if check_type(self.game.board[x][y], "Link")]
if len(linkCells) > 0:
lx, ly = choice(linkCells)
moves.append("%d %d %d" % (DESTROY, lx, ly))
# create link
if self.game.playerEnergy[us] >= LINK_ENERGY:
blankCells = []
for x in range(1, self.game.L-1):
for y in range(1, self.game.H-1):
if self.game.board[x][y] is None:
if check_type(self.game.board[x-1][y], "Node") and \
check_type(self.game.board[x+1][y], "Node"):
blankCells.append((x, y, 0))
elif check_type(self.game.board[x][y-1], "Node") and \
check_type(self.game.board[x][y+1], "Node"):
blankCells.append((x, y, 1))
if len(blankCells) > 0:
cx, cy, d = choice(blankCells)
if d == 0:
moves.append("%d %d %d" % (LINK_H, cx, cy))
elif d == 1:
moves.append("%d %d %d" % (LINK_V, cx, cy))
# choose one possible move
if moves:
return choice(moves)
else:
# sometimes, we cannot move...
self.game.sendComment(self, "I am blocked... I cannot play...")
return "%d 0 0" % DO_NOTHING
| gpl-3.0 | -3,197,027,953,746,433,500 | 29.564286 | 128 | 0.618369 | false |
mmnelemane/nova | nova/api/openstack/compute/legacy_v2/contrib/agents.py | 7 | 7919 | # Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob.exc
from nova.api.openstack import extensions
from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova import objects
from nova import utils
authorize = extensions.extension_authorizer('compute', 'agents')
class AgentController(object):
"""The agent is talking about guest agent.The host can use this for
things like accessing files on the disk, configuring networking,
or running other applications/scripts in the guest while it is
running. Typically this uses some hypervisor-specific transport
to avoid being dependent on a working network configuration.
Xen, VMware, and VirtualBox have guest agents,although the Xen
driver is the only one with an implementation for managing them
in openstack. KVM doesn't really have a concept of a guest agent
(although one could be written).
You can find the design of agent update in this link:
http://wiki.openstack.org/AgentUpdate
and find the code in nova.virt.xenapi.vmops.VMOps._boot_new_instance.
In this design We need update agent in guest from host, so we need
some interfaces to update the agent info in host.
You can find more information about the design of the GuestAgent in
the following link:
http://wiki.openstack.org/GuestAgent
http://wiki.openstack.org/GuestAgentXenStoreCommunication
"""
def index(self, req):
"""Return a list of all agent builds. Filter by hypervisor."""
context = req.environ['nova.context']
authorize(context)
# NOTE(alex_xu): back-compatible with db layer hard-code admin
# permission checks.
nova_context.require_admin_context(context)
hypervisor = None
agents = []
if 'hypervisor' in req.GET:
hypervisor = req.GET['hypervisor']
builds = objects.AgentList.get_all(context, hypervisor=hypervisor)
for agent_build in builds:
agents.append({'hypervisor': agent_build.hypervisor,
'os': agent_build.os,
'architecture': agent_build.architecture,
'version': agent_build.version,
'md5hash': agent_build.md5hash,
'agent_id': agent_build.id,
'url': agent_build.url})
return {'agents': agents}
def update(self, req, id, body):
"""Update an existing agent build."""
context = req.environ['nova.context']
authorize(context)
# NOTE(alex_xu): back-compatible with db layer hard-code admin
# permission checks.
nova_context.require_admin_context(context)
try:
para = body['para']
url = para['url']
md5hash = para['md5hash']
version = para['version']
except (TypeError, KeyError) as ex:
msg = _("Invalid request body: %s") % ex
raise webob.exc.HTTPBadRequest(explanation=msg)
try:
utils.validate_integer(id, 'id')
utils.check_string_length(url, 'url', max_length=255)
utils.check_string_length(md5hash, 'md5hash', max_length=255)
utils.check_string_length(version, 'version', max_length=255)
except exception.InvalidInput as exc:
raise webob.exc.HTTPBadRequest(explanation=exc.format_message())
try:
agent = objects.Agent(context=context, id=id)
agent.obj_reset_changes()
agent.version = version
agent.url = url
agent.md5hash = md5hash
agent.save()
except exception.AgentBuildNotFound as ex:
raise webob.exc.HTTPNotFound(explanation=ex.format_message())
# NOTE(alex_xu): The agent_id should be integer that consistent with
# create/index actions. But parameter 'id' is string type that parsed
# from url. This is a bug, but because back-compatibility, it can't be
# fixed for v2 API. This will be fixed after v3 API feature exposed by
# micro-version in the future. lp bug #1333494
return {"agent": {'agent_id': id, 'version': version,
'url': url, 'md5hash': md5hash}}
def delete(self, req, id):
"""Deletes an existing agent build."""
context = req.environ['nova.context']
authorize(context)
# NOTE(alex_xu): back-compatible with db layer hard-code admin
# permission checks.
nova_context.require_admin_context(context)
try:
utils.validate_integer(id, 'id')
except exception.InvalidInput as exc:
raise webob.exc.HTTPBadRequest(explanation=exc.format_message())
try:
agent = objects.Agent(context=context, id=id)
agent.destroy()
except exception.AgentBuildNotFound as ex:
raise webob.exc.HTTPNotFound(explanation=ex.format_message())
def create(self, req, body):
"""Creates a new agent build."""
context = req.environ['nova.context']
authorize(context)
# NOTE(alex_xu): back-compatible with db layer hard-code admin
# permission checks.
nova_context.require_admin_context(context)
try:
agent = body['agent']
hypervisor = agent['hypervisor']
os = agent['os']
architecture = agent['architecture']
version = agent['version']
url = agent['url']
md5hash = agent['md5hash']
except (TypeError, KeyError) as ex:
msg = _("Invalid request body: %s") % ex
raise webob.exc.HTTPBadRequest(explanation=msg)
try:
utils.check_string_length(hypervisor, 'hypervisor', max_length=255)
utils.check_string_length(os, 'os', max_length=255)
utils.check_string_length(architecture, 'architecture',
max_length=255)
utils.check_string_length(version, 'version', max_length=255)
utils.check_string_length(url, 'url', max_length=255)
utils.check_string_length(md5hash, 'md5hash', max_length=255)
except exception.InvalidInput as exc:
raise webob.exc.HTTPBadRequest(explanation=exc.format_message())
try:
agent_obj = objects.Agent(context=context)
agent_obj.hypervisor = hypervisor
agent_obj.os = os
agent_obj.architecture = architecture
agent_obj.version = version
agent_obj.url = url
agent_obj.md5hash = md5hash
agent_obj.create()
agent['agent_id'] = agent_obj.id
except exception.AgentBuildExists as ex:
raise webob.exc.HTTPConflict(explanation=ex.format_message())
return {'agent': agent}
class Agents(extensions.ExtensionDescriptor):
"""Agents support."""
name = "Agents"
alias = "os-agents"
namespace = "http://docs.openstack.org/compute/ext/agents/api/v2"
updated = "2012-10-28T00:00:00Z"
def get_resources(self):
resources = []
resource = extensions.ResourceExtension('os-agents',
AgentController())
resources.append(resource)
return resources
| apache-2.0 | 2,919,998,625,518,551,600 | 40.031088 | 79 | 0.623185 | false |
pyhmsa/pyhmsa | pyhmsa/fileformat/xmlhandler/condition/region.py | 1 | 1491 | """
XML handler for the region of interest condition
"""
# Standard library modules.
# Third party modules.
# Local modules.
from pyhmsa.spec.condition.region import RegionOfInterest
from pyhmsa.fileformat.xmlhandler.condition.condition import _ConditionXMLHandler
# Globals and constants variables.
class RegionOfInterestXMLHandler(_ConditionXMLHandler):
def __init__(self, version):
super().__init__(RegionOfInterest, version)
def parse(self, element):
obj = super().parse(element)
subelement = element.find('StartChannel')
if subelement is None:
raise ValueError('Element StartChannel is missing')
start = self._parse_numerical_attribute(subelement)
subelement = element.find('EndChannel')
if subelement is None:
raise ValueError('Element EndChannel is missing')
end = self._parse_numerical_attribute(subelement)
obj.channels = (start, end)
return obj
def convert(self, obj):
element = super().convert(obj)
value = obj.start_channel
attrib = type('MockAttribute', (object,), {'xmlname': 'StartChannel'})
subelements = self._convert_numerical_attribute(value, attrib)
element.extend(subelements)
value = obj.end_channel
attrib = type('MockAttribute', (object,), {'xmlname': 'EndChannel'})
subelements = self._convert_numerical_attribute(value, attrib)
element.extend(subelements)
return element
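# Minimal usage sketch (hypothetical values; assumes the RegionOfInterest condition
# exposes start_channel/end_channel and a (start, end) constructor, as the handler
# above expects):
#
#   handler = RegionOfInterestXMLHandler(version='1.0')
#   element = handler.convert(RegionOfInterest(50, 150))
#   roi = handler.parse(element)  # roi.channels == (50, 150)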
| mit | -4,484,722,228,571,843,600 | 29.428571 | 81 | 0.669349 | false |
CXQERP/ODOOERP | addons/mail/tests/test_mail_features.py | 76 | 59326 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.addons.mail.mail_mail import mail_mail
from openerp.addons.mail.mail_thread import mail_thread
from openerp.addons.mail.tests.common import TestMail
from openerp.tools import mute_logger, email_split, html2plaintext
from openerp.tools.mail import html_sanitize
class test_mail(TestMail):
def test_000_alias_setup(self):
""" Test basic mail.alias setup works, before trying to use them for routing """
cr, uid = self.cr, self.uid
self.user_valentin_id = self.res_users.create(cr, uid,
{'name': 'Valentin Cognito', 'email': '[email protected]', 'login': 'valentin.cognito', 'alias_name': 'valentin.cognito'})
self.user_valentin = self.res_users.browse(cr, uid, self.user_valentin_id)
self.assertEquals(self.user_valentin.alias_name, self.user_valentin.login, "Login should be used as alias")
self.user_pagan_id = self.res_users.create(cr, uid,
{'name': 'Pagan Le Marchant', 'email': '[email protected]', 'login': '[email protected]', 'alias_name': '[email protected]'})
self.user_pagan = self.res_users.browse(cr, uid, self.user_pagan_id)
self.assertEquals(self.user_pagan.alias_name, 'plmarchant', "If login is an email, the alias should keep only the local part")
self.user_barty_id = self.res_users.create(cr, uid,
{'name': 'Bartholomew Ironside', 'email': '[email protected]', 'login': 'b4r+_#_R3wl$$', 'alias_name': 'b4r+_#_R3wl$$'})
self.user_barty = self.res_users.browse(cr, uid, self.user_barty_id)
self.assertEquals(self.user_barty.alias_name, 'b4r+_-_r3wl-', 'Disallowed chars should be replaced by hyphens')
def test_00_followers_function_field(self):
""" Tests designed for the many2many function field 'follower_ids'.
We will test to perform writes using the many2many commands 0, 3, 4,
5 and 6. """
cr, uid, user_admin, partner_bert_id, group_pigs = self.cr, self.uid, self.user_admin, self.partner_bert_id, self.group_pigs
# Data: create 'disturbing' values in mail.followers: same res_id, other res_model; same res_model, other res_id
group_dummy_id = self.mail_group.create(cr, uid,
{'name': 'Dummy group'}, {'mail_create_nolog': True})
self.mail_followers.create(cr, uid,
{'res_model': 'mail.thread', 'res_id': self.group_pigs_id, 'partner_id': partner_bert_id})
self.mail_followers.create(cr, uid,
{'res_model': 'mail.group', 'res_id': group_dummy_id, 'partner_id': partner_bert_id})
# Pigs just created: should be only Admin as follower
follower_ids = set([follower.id for follower in group_pigs.message_follower_ids])
self.assertEqual(follower_ids, set([user_admin.partner_id.id]), 'Admin should be the only Pigs fan')
# Subscribe Bert through a '4' command
group_pigs.write({'message_follower_ids': [(4, partner_bert_id)]})
group_pigs.refresh()
follower_ids = set([follower.id for follower in group_pigs.message_follower_ids])
self.assertEqual(follower_ids, set([partner_bert_id, user_admin.partner_id.id]), 'Bert and Admin should be the only Pigs fans')
# Unsubscribe Bert through a '3' command
group_pigs.write({'message_follower_ids': [(3, partner_bert_id)]})
group_pigs.refresh()
follower_ids = set([follower.id for follower in group_pigs.message_follower_ids])
self.assertEqual(follower_ids, set([user_admin.partner_id.id]), 'Admin should be the only Pigs fan')
# Set followers through a '6' command
group_pigs.write({'message_follower_ids': [(6, 0, [partner_bert_id])]})
group_pigs.refresh()
follower_ids = set([follower.id for follower in group_pigs.message_follower_ids])
self.assertEqual(follower_ids, set([partner_bert_id]), 'Bert should be the only Pigs fan')
# Add a follower created on the fly through a '0' command
group_pigs.write({'message_follower_ids': [(0, 0, {'name': 'Patrick Fiori'})]})
partner_patrick_id = self.res_partner.search(cr, uid, [('name', '=', 'Patrick Fiori')])[0]
group_pigs.refresh()
follower_ids = set([follower.id for follower in group_pigs.message_follower_ids])
self.assertEqual(follower_ids, set([partner_bert_id, partner_patrick_id]), 'Bert and Patrick should be the only Pigs fans')
# Finally, unlink through a '5' command
group_pigs.write({'message_follower_ids': [(5, 0)]})
group_pigs.refresh()
follower_ids = set([follower.id for follower in group_pigs.message_follower_ids])
self.assertFalse(follower_ids, 'Pigs group should not have fans anymore')
# Test dummy data has not been altered
fol_obj_ids = self.mail_followers.search(cr, uid, [('res_model', '=', 'mail.thread'), ('res_id', '=', self.group_pigs_id)])
follower_ids = set([follower.partner_id.id for follower in self.mail_followers.browse(cr, uid, fol_obj_ids)])
self.assertEqual(follower_ids, set([partner_bert_id]), 'Bert should be the follower of dummy mail.thread data')
fol_obj_ids = self.mail_followers.search(cr, uid, [('res_model', '=', 'mail.group'), ('res_id', '=', group_dummy_id)])
follower_ids = set([follower.partner_id.id for follower in self.mail_followers.browse(cr, uid, fol_obj_ids)])
self.assertEqual(follower_ids, set([partner_bert_id, user_admin.partner_id.id]), 'Bert and Admin should be the followers of dummy mail.group data')
def test_05_message_followers_and_subtypes(self):
""" Tests designed for the subscriber API as well as message subtypes """
cr, uid, user_admin, user_raoul, group_pigs = self.cr, self.uid, self.user_admin, self.user_raoul, self.group_pigs
# Data: message subtypes
self.mail_message_subtype.create(cr, uid, {'name': 'mt_mg_def', 'default': True, 'res_model': 'mail.group'})
self.mail_message_subtype.create(cr, uid, {'name': 'mt_other_def', 'default': True, 'res_model': 'crm.lead'})
self.mail_message_subtype.create(cr, uid, {'name': 'mt_all_def', 'default': True, 'res_model': False})
mt_mg_nodef = self.mail_message_subtype.create(cr, uid, {'name': 'mt_mg_nodef', 'default': False, 'res_model': 'mail.group'})
mt_all_nodef = self.mail_message_subtype.create(cr, uid, {'name': 'mt_all_nodef', 'default': False, 'res_model': False})
default_group_subtypes = self.mail_message_subtype.search(cr, uid, [('default', '=', True), '|', ('res_model', '=', 'mail.group'), ('res_model', '=', False)])
# ----------------------------------------
# CASE1: test subscriptions with subtypes
# ----------------------------------------
# Do: subscribe Raoul, should have default subtypes
group_pigs.message_subscribe_users([user_raoul.id])
group_pigs.refresh()
# Test: 2 followers (Admin and Raoul)
follower_ids = [follower.id for follower in group_pigs.message_follower_ids]
self.assertEqual(set(follower_ids), set([user_raoul.partner_id.id, user_admin.partner_id.id]),
'message_subscribe: Admin and Raoul should be the only 2 Pigs fans')
# Raoul follows default subtypes
fol_ids = self.mail_followers.search(cr, uid, [
('res_model', '=', 'mail.group'),
('res_id', '=', self.group_pigs_id),
('partner_id', '=', user_raoul.partner_id.id)
])
fol_obj = self.mail_followers.browse(cr, uid, fol_ids)[0]
fol_subtype_ids = set([subtype.id for subtype in fol_obj.subtype_ids])
self.assertEqual(set(fol_subtype_ids), set(default_group_subtypes),
'message_subscribe: Raoul subscription subtypes are incorrect, should be all default ones')
# Do: subscribe Raoul with specified new subtypes
group_pigs.message_subscribe_users([user_raoul.id], subtype_ids=[mt_mg_nodef])
# Test: 2 followers (Admin and Raoul)
follower_ids = [follower.id for follower in group_pigs.message_follower_ids]
self.assertEqual(set(follower_ids), set([user_raoul.partner_id.id, user_admin.partner_id.id]),
'message_subscribe: Admin and Raoul should be the only 2 Pigs fans')
# Test: 2 lines in mail.followers (no duplicate for Raoul)
fol_ids = self.mail_followers.search(cr, uid, [
('res_model', '=', 'mail.group'),
('res_id', '=', self.group_pigs_id),
])
self.assertEqual(len(fol_ids), 2,
'message_subscribe: subscribing an already-existing follower should not create new entries in mail.followers')
# Test: Raoul follows only specified subtypes
fol_ids = self.mail_followers.search(cr, uid, [
('res_model', '=', 'mail.group'),
('res_id', '=', self.group_pigs_id),
('partner_id', '=', user_raoul.partner_id.id)
])
fol_obj = self.mail_followers.browse(cr, uid, fol_ids)[0]
fol_subtype_ids = set([subtype.id for subtype in fol_obj.subtype_ids])
self.assertEqual(set(fol_subtype_ids), set([mt_mg_nodef]),
'message_subscribe: Raoul subscription subtypes are incorrect, should be only specified')
# Do: Subscribe Raoul without specified subtypes: should not erase existing subscription subtypes
group_pigs.message_subscribe_users([user_raoul.id, user_raoul.id])
group_pigs.message_subscribe_users([user_raoul.id])
group_pigs.refresh()
# Test: 2 followers (Admin and Raoul)
follower_ids = [follower.id for follower in group_pigs.message_follower_ids]
self.assertEqual(set(follower_ids), set([user_raoul.partner_id.id, user_admin.partner_id.id]),
'message_subscribe: Admin and Raoul should be the only 2 Pigs fans')
# Test: Raoul follows default subtypes
fol_ids = self.mail_followers.search(cr, uid, [
('res_model', '=', 'mail.group'),
('res_id', '=', self.group_pigs_id),
('partner_id', '=', user_raoul.partner_id.id)
])
fol_obj = self.mail_followers.browse(cr, uid, fol_ids)[0]
fol_subtype_ids = set([subtype.id for subtype in fol_obj.subtype_ids])
self.assertEqual(set(fol_subtype_ids), set([mt_mg_nodef]),
'message_subscribe: Raoul subscription subtypes are incorrect, should be only specified')
# Do: Unsubscribe Raoul twice through message_unsubscribe_users
group_pigs.message_unsubscribe_users([user_raoul.id, user_raoul.id])
group_pigs.refresh()
# Test: 1 follower (Admin)
follower_ids = [follower.id for follower in group_pigs.message_follower_ids]
self.assertEqual(follower_ids, [user_admin.partner_id.id], 'Admin must be the only Pigs fan')
# Test: 1 lines in mail.followers (no duplicate for Raoul)
fol_ids = self.mail_followers.search(cr, uid, [
('res_model', '=', 'mail.group'),
('res_id', '=', self.group_pigs_id)
])
self.assertEqual(len(fol_ids), 1,
'message_subscribe: group should have only 1 entry in mail.follower for 1 follower')
# Do: subscribe Admin with subtype_ids
group_pigs.message_subscribe_users([uid], [mt_mg_nodef, mt_all_nodef])
fol_ids = self.mail_followers.search(cr, uid, [('res_model', '=', 'mail.group'), ('res_id', '=', self.group_pigs_id), ('partner_id', '=', user_admin.partner_id.id)])
fol_obj = self.mail_followers.browse(cr, uid, fol_ids)[0]
fol_subtype_ids = set([subtype.id for subtype in fol_obj.subtype_ids])
self.assertEqual(set(fol_subtype_ids), set([mt_mg_nodef, mt_all_nodef]), 'subscription subtypes are incorrect')
# ----------------------------------------
# CASE2: test mail_thread fields
# ----------------------------------------
subtype_data = group_pigs._get_subscription_data(None, None)[group_pigs.id]['message_subtype_data']
self.assertEqual(set(subtype_data.keys()), set(['Discussions', 'mt_mg_def', 'mt_all_def', 'mt_mg_nodef', 'mt_all_nodef']), 'mail.group available subtypes incorrect')
self.assertFalse(subtype_data['Discussions']['followed'], 'Admin should not follow Discussions in pigs')
self.assertTrue(subtype_data['mt_mg_nodef']['followed'], 'Admin should follow mt_mg_nodef in pigs')
self.assertTrue(subtype_data['mt_all_nodef']['followed'], 'Admin should follow mt_all_nodef in pigs')
def test_11_notification_url(self):
""" Tests designed to test the URL added in notification emails. """
cr, uid, group_pigs = self.cr, self.uid, self.group_pigs
# Test URL formatting
base_url = self.registry('ir.config_parameter').get_param(cr, uid, 'web.base.url')
# Partner data
partner_raoul = self.res_partner.browse(cr, uid, self.partner_raoul_id)
partner_bert_id = self.res_partner.create(cr, uid, {'name': 'bert'})
partner_bert = self.res_partner.browse(cr, uid, partner_bert_id)
# Mail data
mail_mail_id = self.mail_mail.create(cr, uid, {'state': 'exception'})
mail = self.mail_mail.browse(cr, uid, mail_mail_id)
# Test: link for nobody -> None
url = mail_mail._get_partner_access_link(self.mail_mail, cr, uid, mail)
self.assertEqual(url, None,
'notification email: mails not send to a specific partner should not have any URL')
# Test: link for partner -> None
url = mail_mail._get_partner_access_link(self.mail_mail, cr, uid, mail, partner=partner_bert)
self.assertEqual(url, None,
'notification email: mails send to a not-user partner should not have any URL')
# Test: link for user -> signin
url = mail_mail._get_partner_access_link(self.mail_mail, cr, uid, mail, partner=partner_raoul)
self.assertIn(base_url, url,
'notification email: link should contain web.base.url')
self.assertIn('db=%s' % cr.dbname, url,
'notification email: link should contain database name')
self.assertIn('action=mail.action_mail_redirect', url,
'notification email: link should contain the redirect action')
self.assertIn('login=%s' % partner_raoul.user_ids[0].login, url,
'notification email: link should contain the user login')
# Test: link for user -> with model and res_id
mail_mail_id = self.mail_mail.create(cr, uid, {'model': 'mail.group', 'res_id': group_pigs.id})
mail = self.mail_mail.browse(cr, uid, mail_mail_id)
url = mail_mail._get_partner_access_link(self.mail_mail, cr, uid, mail, partner=partner_raoul)
self.assertIn(base_url, url,
'notification email: link should contain web.base.url')
self.assertIn('db=%s' % cr.dbname, url,
'notification email: link should contain database name')
self.assertIn('action=mail.action_mail_redirect', url,
'notification email: link should contain the redirect action')
self.assertIn('login=%s' % partner_raoul.user_ids[0].login, url,
'notification email: link should contain the user login')
self.assertIn('model=mail.group', url,
'notification email: link should contain the model when having not notification email on a record')
self.assertIn('res_id=%s' % group_pigs.id, url,
'notification email: link should contain the res_id when having not notification email on a record')
# Test: link for user -> with model and res_id
mail_mail_id = self.mail_mail.create(cr, uid, {'notification': True, 'model': 'mail.group', 'res_id': group_pigs.id})
mail = self.mail_mail.browse(cr, uid, mail_mail_id)
url = mail_mail._get_partner_access_link(self.mail_mail, cr, uid, mail, partner=partner_raoul)
self.assertIn(base_url, url,
'notification email: link should contain web.base.url')
self.assertIn('db=%s' % cr.dbname, url,
'notification email: link should contain database name')
self.assertIn('action=mail.action_mail_redirect', url,
'notification email: link should contain the redirect action')
self.assertIn('login=%s' % partner_raoul.user_ids[0].login, url,
'notification email: link should contain the user login')
self.assertIn('message_id=%s' % mail.mail_message_id.id, url,
'notification email: link based on message should contain the mail_message id')
self.assertNotIn('model=mail.group', url,
'notification email: link based on message should not contain model')
self.assertNotIn('res_id=%s' % group_pigs.id, url,
'notification email: link based on message should not contain res_id')
@mute_logger('openerp.addons.mail.mail_thread', 'openerp.models')
def test_12_inbox_redirection(self):
""" Tests designed to test the inbox redirection of emails notification URLs. """
cr, uid, user_admin, group_pigs = self.cr, self.uid, self.user_admin, self.group_pigs
model, act_id = self.ir_model_data.get_object_reference(cr, uid, 'mail', 'action_mail_inbox_feeds')
# Data: post a message on pigs
msg_id = self.group_pigs.message_post(body='My body', partner_ids=[self.partner_bert_id], type='comment', subtype='mail.mt_comment')
# No specific parameters -> should redirect to Inbox
action = mail_thread.message_redirect_action(self.mail_thread, cr, self.user_raoul_id, {'params': {}})
self.assertEqual(
action.get('type'), 'ir.actions.client',
'URL redirection: action without parameters should redirect to client action Inbox'
)
self.assertEqual(
action.get('id'), act_id,
'URL redirection: action without parameters should redirect to client action Inbox'
)
# Raoul has read access to Pigs -> should redirect to form view of Pigs
action = mail_thread.message_redirect_action(self.mail_thread, cr, self.user_raoul_id, {'params': {'message_id': msg_id}})
self.assertEqual(
action.get('type'), 'ir.actions.act_window',
'URL redirection: action with message_id for read-accredited user should redirect to Pigs'
)
self.assertEqual(
action.get('res_id'), group_pigs.id,
'URL redirection: action with message_id for read-accredited user should redirect to Pigs'
)
action = mail_thread.message_redirect_action(self.mail_thread, cr, self.user_raoul_id, {'params': {'model': 'mail.group', 'res_id': group_pigs.id}})
self.assertEqual(
action.get('type'), 'ir.actions.act_window',
'URL redirection: action with message_id for read-accredited user should redirect to Pigs'
)
self.assertEqual(
action.get('res_id'), group_pigs.id,
'URL redirection: action with message_id for read-accredited user should redirect to Pigs'
)
# Bert has no read access to Pigs -> should redirect to Inbox
action = mail_thread.message_redirect_action(self.mail_thread, cr, self.user_bert_id, {'params': {'message_id': msg_id}})
self.assertEqual(
action.get('type'), 'ir.actions.client',
'URL redirection: action without parameters should redirect to client action Inbox'
)
self.assertEqual(
action.get('id'), act_id,
'URL redirection: action without parameters should redirect to client action Inbox'
)
action = mail_thread.message_redirect_action(self.mail_thread, cr, self.user_bert_id, {'params': {'model': 'mail.group', 'res_id': group_pigs.id}})
self.assertEqual(
action.get('type'), 'ir.actions.client',
'URL redirection: action without parameters should redirect to client action Inbox'
)
self.assertEqual(
action.get('id'), act_id,
'URL redirection: action without parameters should redirect to client action Inbox'
)
def test_20_message_post(self):
""" Tests designed for message_post. """
cr, uid, user_raoul, group_pigs = self.cr, self.uid, self.user_raoul, self.group_pigs
# --------------------------------------------------
# Data creation
# --------------------------------------------------
# 0 - Update existing users-partners
self.res_users.write(cr, uid, [uid], {'email': 'a@a', 'notify_email': 'always'})
self.res_users.write(cr, uid, [self.user_raoul_id], {'email': 'r@r'})
# 1 - Bert Tartopoils, with email, should receive emails for comments and emails
p_b_id = self.res_partner.create(cr, uid, {'name': 'Bert Tartopoils', 'email': 'b@b'})
# 2 - Carine Poilvache, with email, should receive emails for emails
p_c_id = self.res_partner.create(cr, uid, {'name': 'Carine Poilvache', 'email': 'c@c', 'notify_email': 'none'})
        # 3 - Dédé Grosbedon, with email and notify_email set to 'always'; should receive emails for every message
p_d_id = self.res_partner.create(cr, uid, {'name': 'Dédé Grosbedon', 'email': 'd@d', 'notify_email': 'always'})
# 4 - Attachments
attach1_id = self.ir_attachment.create(cr, user_raoul.id, {
'name': 'Attach1', 'datas_fname': 'Attach1',
'datas': 'bWlncmF0aW9uIHRlc3Q=',
'res_model': 'mail.compose.message', 'res_id': 0})
attach2_id = self.ir_attachment.create(cr, user_raoul.id, {
'name': 'Attach2', 'datas_fname': 'Attach2',
'datas': 'bWlncmF0aW9uIHRlc3Q=',
'res_model': 'mail.compose.message', 'res_id': 0})
attach3_id = self.ir_attachment.create(cr, user_raoul.id, {
'name': 'Attach3', 'datas_fname': 'Attach3',
'datas': 'bWlncmF0aW9uIHRlc3Q=',
'res_model': 'mail.compose.message', 'res_id': 0})
# 5 - Mail data
_subject = 'Pigs'
_mail_subject = 'Re: %s' % (group_pigs.name)
_body1 = '<p>Pigs rules</p>'
_body2 = '<html>Pigs rocks</html>'
_attachments = [
('List1', 'My first attachment'),
('List2', 'My second attachment')
]
# --------------------------------------------------
# CASE1: post comment + partners + attachments
# --------------------------------------------------
# Data: set alias_domain to see emails with alias
self.registry('ir.config_parameter').set_param(self.cr, self.uid, 'mail.catchall.domain', 'schlouby.fr')
# Data: change Pigs name to test reply_to
self.mail_group.write(cr, uid, [self.group_pigs_id], {'name': '"Pigs" !ù $%-'})
# Do: subscribe Raoul
new_follower_ids = [self.partner_raoul_id]
group_pigs.message_subscribe(new_follower_ids)
# Test: group followers = Raoul + uid
group_fids = [follower.id for follower in group_pigs.message_follower_ids]
test_fids = new_follower_ids + [self.partner_admin_id]
self.assertEqual(set(test_fids), set(group_fids),
'message_subscribe: incorrect followers after subscribe')
# Do: Raoul message_post on Pigs
self._init_mock_build_email()
msg1_id = self.mail_group.message_post(cr, user_raoul.id, self.group_pigs_id,
body=_body1, subject=_subject, partner_ids=[p_b_id, p_c_id],
attachment_ids=[attach1_id, attach2_id], attachments=_attachments,
type='comment', subtype='mt_comment')
msg = self.mail_message.browse(cr, uid, msg1_id)
msg_message_id = msg.message_id
msg_pids = [partner.id for partner in msg.notified_partner_ids]
msg_aids = [attach.id for attach in msg.attachment_ids]
sent_emails = self._build_email_kwargs_list
# Test: mail_message: subject and body not modified
self.assertEqual(_subject, msg.subject, 'message_post: mail.message subject incorrect')
self.assertEqual(_body1, msg.body, 'message_post: mail.message body incorrect')
# Test: mail_message: notified_partner_ids = group followers + partner_ids - author
test_pids = set([self.partner_admin_id, p_b_id, p_c_id])
self.assertEqual(test_pids, set(msg_pids), 'message_post: mail.message notified partners incorrect')
# Test: mail_message: attachments (4, attachment_ids + attachments)
test_aids = set([attach1_id, attach2_id])
msg_attach_names = set([attach.name for attach in msg.attachment_ids])
test_attach_names = set(['Attach1', 'Attach2', 'List1', 'List2'])
self.assertEqual(len(msg_aids), 4,
'message_post: mail.message wrong number of attachments')
self.assertEqual(msg_attach_names, test_attach_names,
'message_post: mail.message attachments incorrectly added')
self.assertTrue(test_aids.issubset(set(msg_aids)),
'message_post: mail.message attachments duplicated')
for attach in msg.attachment_ids:
self.assertEqual(attach.res_model, 'mail.group',
'message_post: mail.message attachments were not linked to the document')
self.assertEqual(attach.res_id, group_pigs.id,
'message_post: mail.message attachments were not linked to the document')
if 'List' in attach.name:
self.assertIn((attach.name, attach.datas.decode('base64')), _attachments,
'message_post: mail.message attachment name / data incorrect')
dl_attach = self.mail_message.download_attachment(cr, user_raoul.id, id_message=msg.id, attachment_id=attach.id)
self.assertIn((dl_attach['filename'], dl_attach['base64'].decode('base64')), _attachments,
'message_post: mail.message download_attachment is incorrect')
# Test: followers: same as before (author was already subscribed)
group_pigs.refresh()
group_fids = [follower.id for follower in group_pigs.message_follower_ids]
test_fids = new_follower_ids + [self.partner_admin_id]
self.assertEqual(set(test_fids), set(group_fids),
'message_post: wrong followers after posting')
# Test: mail_mail: notifications have been deleted
self.assertFalse(self.mail_mail.search(cr, uid, [('mail_message_id', '=', msg1_id)]),
'message_post: mail.mail notifications should have been auto-deleted!')
# Test: notifications emails: to a and b, c is email only, r is author
test_emailto = ['Administrator <a@a>', 'Bert Tartopoils <b@b>']
# test_emailto = ['"Followers of -Pigs-" <a@a>', '"Followers of -Pigs-" <b@b>']
self.assertEqual(len(sent_emails), 2,
'message_post: notification emails wrong number of send emails')
self.assertEqual(set([m['email_to'][0] for m in sent_emails]), set(test_emailto),
'message_post: notification emails wrong recipients (email_to)')
for sent_email in sent_emails:
self.assertEqual(sent_email['email_from'], 'Raoul Grosbedon <[email protected]>',
'message_post: notification email wrong email_from: should use alias of sender')
self.assertEqual(len(sent_email['email_to']), 1,
'message_post: notification email sent to more than one email address instead of a precise partner')
self.assertIn(sent_email['email_to'][0], test_emailto,
'message_post: notification email email_to incorrect')
self.assertEqual(sent_email['reply_to'], u'"YourCompany \\"Pigs\\" !ù $%-" <[email protected]>',
'message_post: notification email reply_to incorrect')
self.assertEqual(_subject, sent_email['subject'],
'message_post: notification email subject incorrect')
self.assertIn(_body1, sent_email['body'],
'message_post: notification email body incorrect')
self.assertIn('Pigs rules', sent_email['body_alternative'],
'message_post: notification email body alternative should contain the body')
self.assertNotIn('<p>', sent_email['body_alternative'],
'message_post: notification email body alternative still contains html')
self.assertFalse(sent_email['references'],
'message_post: references should be False when sending a message that is not a reply')
# Test: notification linked to this message = group followers = notified_partner_ids
notif_ids = self.mail_notification.search(cr, uid, [('message_id', '=', msg1_id)])
notif_pids = set([notif.partner_id.id for notif in self.mail_notification.browse(cr, uid, notif_ids)])
self.assertEqual(notif_pids, test_pids,
'message_post: mail.message created mail.notification incorrect')
# Data: Pigs name back to normal
self.mail_group.write(cr, uid, [self.group_pigs_id], {'name': 'Pigs'})
# --------------------------------------------------
# CASE2: reply + parent_id + parent notification
# --------------------------------------------------
        # Data: remove alias_domain to see emails without alias
param_ids = self.registry('ir.config_parameter').search(cr, uid, [('key', '=', 'mail.catchall.domain')])
self.registry('ir.config_parameter').unlink(cr, uid, param_ids)
# Do: Raoul message_post on Pigs
self._init_mock_build_email()
msg2_id = self.mail_group.message_post(cr, user_raoul.id, self.group_pigs_id,
body=_body2, type='email', subtype='mt_comment',
partner_ids=[p_d_id], parent_id=msg1_id, attachment_ids=[attach3_id],
context={'mail_post_autofollow': True})
msg = self.mail_message.browse(cr, uid, msg2_id)
msg_pids = [partner.id for partner in msg.notified_partner_ids]
msg_aids = [attach.id for attach in msg.attachment_ids]
sent_emails = self._build_email_kwargs_list
# Test: mail_message: subject is False, body, parent_id is msg_id
self.assertEqual(msg.subject, False, 'message_post: mail.message subject incorrect')
self.assertEqual(msg.body, html_sanitize(_body2), 'message_post: mail.message body incorrect')
self.assertEqual(msg.parent_id.id, msg1_id, 'message_post: mail.message parent_id incorrect')
# Test: mail_message: notified_partner_ids = group followers
test_pids = [self.partner_admin_id, p_d_id]
self.assertEqual(set(test_pids), set(msg_pids), 'message_post: mail.message partners incorrect')
# Test: mail_message: notifications linked to this message = group followers = notified_partner_ids
notif_ids = self.mail_notification.search(cr, uid, [('message_id', '=', msg2_id)])
notif_pids = [notif.partner_id.id for notif in self.mail_notification.browse(cr, uid, notif_ids)]
self.assertEqual(set(test_pids), set(notif_pids), 'message_post: mail.message notification partners incorrect')
# Test: mail_mail: notifications deleted
self.assertFalse(self.mail_mail.search(cr, uid, [('mail_message_id', '=', msg2_id)]), 'mail.mail notifications should have been auto-deleted!')
# Test: emails send by server (to a, b, c, d)
test_emailto = [u'Administrator <a@a>', u'Bert Tartopoils <b@b>', u'Carine Poilvache <c@c>', u'D\xe9d\xe9 Grosbedon <d@d>']
# test_emailto = [u'"Followers of Pigs" <a@a>', u'"Followers of Pigs" <b@b>', u'"Followers of Pigs" <c@c>', u'"Followers of Pigs" <d@d>']
# self.assertEqual(len(sent_emails), 3, 'sent_email number of sent emails incorrect')
for sent_email in sent_emails:
self.assertEqual(sent_email['email_from'], 'Raoul Grosbedon <r@r>',
'message_post: notification email wrong email_from: should use email of sender when no alias domain set')
self.assertEqual(len(sent_email['email_to']), 1,
'message_post: notification email sent to more than one email address instead of a precise partner')
self.assertIn(sent_email['email_to'][0], test_emailto,
'message_post: notification email email_to incorrect')
self.assertEqual(email_split(sent_email['reply_to']), ['r@r'], # was '"Followers of Pigs" <r@r>', but makes no sense
'message_post: notification email reply_to incorrect: should have raoul email')
self.assertEqual(_mail_subject, sent_email['subject'],
'message_post: notification email subject incorrect')
self.assertIn(html_sanitize(_body2), sent_email['body'],
'message_post: notification email does not contain the body')
self.assertIn('Pigs rocks', sent_email['body_alternative'],
'message_post: notification email body alternative should contain the body')
self.assertNotIn('<p>', sent_email['body_alternative'],
'message_post: notification email body alternative still contains html')
self.assertIn(msg_message_id, sent_email['references'],
'message_post: notification email references lacks parent message message_id')
# Test: attachments + download
for attach in msg.attachment_ids:
self.assertEqual(attach.res_model, 'mail.group',
'message_post: mail.message attachment res_model incorrect')
self.assertEqual(attach.res_id, self.group_pigs_id,
'message_post: mail.message attachment res_id incorrect')
# Test: Dédé has been notified -> should also have been notified of the parent message
msg = self.mail_message.browse(cr, uid, msg1_id)
msg_pids = set([partner.id for partner in msg.notified_partner_ids])
test_pids = set([self.partner_admin_id, p_b_id, p_c_id, p_d_id])
self.assertEqual(test_pids, msg_pids, 'message_post: mail.message parent notification not created')
# Do: reply to last message
msg3_id = self.mail_group.message_post(cr, user_raoul.id, self.group_pigs_id, body='Test', parent_id=msg2_id)
msg = self.mail_message.browse(cr, uid, msg3_id)
# Test: check that its parent will be the first message
self.assertEqual(msg.parent_id.id, msg1_id, 'message_post did not flatten the thread structure')
def test_25_message_compose_wizard(self):
""" Tests designed for the mail.compose.message wizard. """
cr, uid, user_raoul, group_pigs = self.cr, self.uid, self.user_raoul, self.group_pigs
mail_compose = self.registry('mail.compose.message')
# --------------------------------------------------
# Data creation
# --------------------------------------------------
# 0 - Update existing users-partners
self.res_users.write(cr, uid, [uid], {'email': 'a@a'})
self.res_users.write(cr, uid, [self.user_raoul_id], {'email': 'r@r'})
# 1 - Bert Tartopoils, with email, should receive emails for comments and emails
p_b_id = self.res_partner.create(cr, uid, {'name': 'Bert Tartopoils', 'email': 'b@b'})
# 2 - Carine Poilvache, with email, should receive emails for emails
p_c_id = self.res_partner.create(cr, uid, {'name': 'Carine Poilvache', 'email': 'c@c', 'notify_email': 'always'})
        # 3 - Dédé Grosbedon, with email and notify_email set to 'always'; should receive emails for every message
p_d_id = self.res_partner.create(cr, uid, {'name': 'Dédé Grosbedon', 'email': 'd@d', 'notify_email': 'always'})
# 4 - Create a Bird mail.group, that will be used to test mass mailing
group_bird_id = self.mail_group.create(cr, uid,
{
'name': 'Bird',
'description': 'Bird resistance',
}, context={'mail_create_nolog': True})
group_bird = self.mail_group.browse(cr, uid, group_bird_id)
# 5 - Mail data
_subject = 'Pigs'
_body = 'Pigs <b>rule</b>'
_reply_subject = 'Re: %s' % _subject
_attachments = [
{'name': 'First', 'datas_fname': 'first.txt', 'datas': 'My first attachment'.encode('base64')},
{'name': 'Second', 'datas_fname': 'second.txt', 'datas': 'My second attachment'.encode('base64')}
]
_attachments_test = [('first.txt', 'My first attachment'), ('second.txt', 'My second attachment')]
# 6 - Subscribe Bert to Pigs
group_pigs.message_subscribe([p_b_id])
# --------------------------------------------------
# CASE1: wizard + partners + context keys
# --------------------------------------------------
# Do: Raoul wizard-composes on Pigs with auto-follow for partners, not for author
compose_id = mail_compose.create(cr, user_raoul.id,
{
'subject': _subject,
'body': _body,
'partner_ids': [(4, p_c_id), (4, p_d_id)],
}, context={
'default_composition_mode': 'comment',
'default_model': 'mail.group',
'default_res_id': self.group_pigs_id,
})
compose = mail_compose.browse(cr, uid, compose_id)
# Test: mail.compose.message: composition_mode, model, res_id
self.assertEqual(compose.composition_mode, 'comment', 'compose wizard: mail.compose.message incorrect composition_mode')
self.assertEqual(compose.model, 'mail.group', 'compose wizard: mail.compose.message incorrect model')
self.assertEqual(compose.res_id, self.group_pigs_id, 'compose wizard: mail.compose.message incorrect res_id')
# Do: Post the comment
mail_compose.send_mail(cr, user_raoul.id, [compose_id], {'mail_post_autofollow': True, 'mail_create_nosubscribe': True})
group_pigs.refresh()
message = group_pigs.message_ids[0]
# Test: mail.group: followers (c and d added by auto follow key; raoul not added by nosubscribe key)
pigs_pids = [p.id for p in group_pigs.message_follower_ids]
test_pids = [self.partner_admin_id, p_b_id, p_c_id, p_d_id]
self.assertEqual(set(pigs_pids), set(test_pids),
'compose wizard: mail_post_autofollow and mail_create_nosubscribe context keys not correctly taken into account')
# Test: mail.message: subject, body inside p
self.assertEqual(message.subject, _subject, 'compose wizard: mail.message incorrect subject')
self.assertEqual(message.body, '<p>%s</p>' % _body, 'compose wizard: mail.message incorrect body')
# Test: mail.message: notified_partner_ids = admin + bert (followers) + c + d (recipients)
msg_pids = [partner.id for partner in message.notified_partner_ids]
test_pids = [self.partner_admin_id, p_b_id, p_c_id, p_d_id]
self.assertEqual(set(msg_pids), set(test_pids),
'compose wizard: mail.message notified_partner_ids incorrect')
# --------------------------------------------------
# CASE2: reply + attachments
# --------------------------------------------------
# Do: Reply with attachments
compose_id = mail_compose.create(cr, user_raoul.id,
{
'attachment_ids': [(0, 0, _attachments[0]), (0, 0, _attachments[1])]
}, context={
'default_composition_mode': 'comment',
'default_res_id': self.group_pigs_id,
'default_parent_id': message.id
})
compose = mail_compose.browse(cr, uid, compose_id)
# Test: mail.compose.message: model, res_id, parent_id
self.assertEqual(compose.model, 'mail.group', 'compose wizard: mail.compose.message incorrect model')
self.assertEqual(compose.res_id, self.group_pigs_id, 'compose wizard: mail.compose.message incorrect res_id')
self.assertEqual(compose.parent_id.id, message.id, 'compose wizard: mail.compose.message incorrect parent_id')
# Test: mail.compose.message: subject as Re:.., body, parent_id
self.assertEqual(compose.subject, _reply_subject, 'compose wizard: mail.compose.message incorrect subject')
self.assertFalse(compose.body, 'compose wizard: mail.compose.message body should not contain parent message body')
self.assertEqual(compose.parent_id and compose.parent_id.id, message.id, 'compose wizard: mail.compose.message parent_id incorrect')
# Test: mail.compose.message: attachments
for attach in compose.attachment_ids:
self.assertIn((attach.datas_fname, attach.datas.decode('base64')), _attachments_test,
'compose wizard: mail.message attachment name / data incorrect')
# --------------------------------------------------
# CASE3: mass_mail on Pigs and Bird
# --------------------------------------------------
# Do: Compose in mass_mail_mode on pigs and bird
compose_id = mail_compose.create(
cr, user_raoul.id, {
'subject': _subject,
'body': '${object.description}',
'partner_ids': [(4, p_c_id), (4, p_d_id)],
}, context={
'default_composition_mode': 'mass_mail',
'default_model': 'mail.group',
'default_res_id': False,
'active_ids': [self.group_pigs_id, group_bird_id],
})
compose = mail_compose.browse(cr, uid, compose_id)
# Do: Post the comment, get created message for each group
mail_compose.send_mail(cr, user_raoul.id, [compose_id], context={
'default_res_id': -1,
'active_ids': [self.group_pigs_id, group_bird_id]
})
# check mail_mail
mail_mail_ids = self.mail_mail.search(cr, uid, [('subject', '=', _subject)])
for mail_mail in self.mail_mail.browse(cr, uid, mail_mail_ids):
self.assertEqual(set([p.id for p in mail_mail.recipient_ids]), set([p_c_id, p_d_id]),
'compose wizard: mail_mail mass mailing: mail.mail in mass mail incorrect recipients')
# check logged messages
group_pigs.refresh()
group_bird.refresh()
message1 = group_pigs.message_ids[0]
message2 = group_bird.message_ids[0]
# Test: Pigs and Bird did receive their message
test_msg_ids = self.mail_message.search(cr, uid, [], limit=2)
self.assertIn(message1.id, test_msg_ids, 'compose wizard: Pigs did not receive its mass mailing message')
self.assertIn(message2.id, test_msg_ids, 'compose wizard: Bird did not receive its mass mailing message')
# Test: mail.message: subject, body, subtype, notified partners (nobody + specific recipients)
self.assertEqual(message1.subject, _subject,
'compose wizard: message_post: mail.message in mass mail subject incorrect')
self.assertEqual(message1.body, '<p>%s</p>' % group_pigs.description,
'compose wizard: message_post: mail.message in mass mail body incorrect')
# self.assertEqual(set([p.id for p in message1.notified_partner_ids]), set([p_c_id, p_d_id]),
# 'compose wizard: message_post: mail.message in mass mail incorrect notified partners')
self.assertEqual(message2.subject, _subject,
'compose wizard: message_post: mail.message in mass mail subject incorrect')
self.assertEqual(message2.body, '<p>%s</p>' % group_bird.description,
'compose wizard: message_post: mail.message in mass mail body incorrect')
# self.assertEqual(set([p.id for p in message2.notified_partner_ids]), set([p_c_id, p_d_id]),
# 'compose wizard: message_post: mail.message in mass mail incorrect notified partners')
# Test: mail.group followers: author not added as follower in mass mail mode
pigs_pids = [p.id for p in group_pigs.message_follower_ids]
test_pids = [self.partner_admin_id, p_b_id, p_c_id, p_d_id]
self.assertEqual(set(pigs_pids), set(test_pids),
'compose wizard: mail_post_autofollow and mail_create_nosubscribe context keys not correctly taken into account')
bird_pids = [p.id for p in group_bird.message_follower_ids]
test_pids = [self.partner_admin_id]
self.assertEqual(set(bird_pids), set(test_pids),
'compose wizard: mail_post_autofollow and mail_create_nosubscribe context keys not correctly taken into account')
# Do: Compose in mass_mail, coming from list_view, we have an active_domain that should be supported
compose_id = mail_compose.create(cr, user_raoul.id,
{
'subject': _subject,
'body': '${object.description}',
'partner_ids': [(4, p_c_id), (4, p_d_id)],
}, context={
'default_composition_mode': 'mass_mail',
'default_model': 'mail.group',
'default_res_id': False,
'active_ids': [self.group_pigs_id],
'active_domain': [('name', 'in', ['Pigs', 'Bird'])],
})
compose = mail_compose.browse(cr, uid, compose_id)
# Do: Post the comment, get created message for each group
mail_compose.send_mail(
cr, user_raoul.id, [compose_id], context={
'default_res_id': -1,
'active_ids': [self.group_pigs_id, group_bird_id]
})
group_pigs.refresh()
group_bird.refresh()
message1 = group_pigs.message_ids[0]
message2 = group_bird.message_ids[0]
# Test: Pigs and Bird did receive their message
test_msg_ids = self.mail_message.search(cr, uid, [], limit=2)
self.assertIn(message1.id, test_msg_ids, 'compose wizard: Pigs did not receive its mass mailing message')
self.assertIn(message2.id, test_msg_ids, 'compose wizard: Bird did not receive its mass mailing message')
def test_30_needaction(self):
""" Tests for mail.message needaction. """
cr, uid, user_admin, user_raoul, group_pigs = self.cr, self.uid, self.user_admin, self.user_raoul, self.group_pigs
na_admin_base = self.mail_message._needaction_count(cr, uid, domain=[])
na_demo_base = self.mail_message._needaction_count(cr, user_raoul.id, domain=[])
# Test: number of unread notification = needaction on mail.message
notif_ids = self.mail_notification.search(cr, uid, [
('partner_id', '=', user_admin.partner_id.id),
('is_read', '=', False)
])
na_count = self.mail_message._needaction_count(cr, uid, domain=[])
self.assertEqual(len(notif_ids), na_count, 'unread notifications count does not match needaction count')
# Do: post 2 message on group_pigs as admin, 3 messages as demo user
for dummy in range(2):
group_pigs.message_post(body='My Body', subtype='mt_comment')
raoul_pigs = group_pigs.sudo(user_raoul)
for dummy in range(3):
raoul_pigs.message_post(body='My Demo Body', subtype='mt_comment')
# Test: admin has 3 new notifications (from demo), and 3 new needaction
notif_ids = self.mail_notification.search(cr, uid, [
('partner_id', '=', user_admin.partner_id.id),
('is_read', '=', False)
])
self.assertEqual(len(notif_ids), na_admin_base + 3, 'Admin should have 3 new unread notifications')
na_admin = self.mail_message._needaction_count(cr, uid, domain=[])
na_admin_group = self.mail_message._needaction_count(cr, uid, domain=[('model', '=', 'mail.group'), ('res_id', '=', self.group_pigs_id)])
self.assertEqual(na_admin, na_admin_base + 3, 'Admin should have 3 new needaction')
self.assertEqual(na_admin_group, 3, 'Admin should have 3 needaction related to Pigs')
# Test: demo has 0 new notifications (not a follower, not receiving its own messages), and 0 new needaction
notif_ids = self.mail_notification.search(cr, uid, [
('partner_id', '=', user_raoul.partner_id.id),
('is_read', '=', False)
])
self.assertEqual(len(notif_ids), na_demo_base + 0, 'Demo should have 0 new unread notifications')
na_demo = self.mail_message._needaction_count(cr, user_raoul.id, domain=[])
na_demo_group = self.mail_message._needaction_count(cr, user_raoul.id, domain=[('model', '=', 'mail.group'), ('res_id', '=', self.group_pigs_id)])
self.assertEqual(na_demo, na_demo_base + 0, 'Demo should have 0 new needaction')
self.assertEqual(na_demo_group, 0, 'Demo should have 0 needaction related to Pigs')
def test_40_track_field(self):
""" Testing auto tracking of fields. """
def _strip_string_spaces(body):
return body.replace(' ', '').replace('\n', '')
        # Data: subscribe Raoul to Pigs, because he will change the public attribute and may lose access to the record
cr, uid = self.cr, self.uid
self.mail_group.message_subscribe_users(cr, uid, [self.group_pigs_id], [self.user_raoul_id])
# Data: res.users.group, to test group_public_id automatic logging
group_system_ref = self.registry('ir.model.data').get_object_reference(cr, uid, 'base', 'group_system')
group_system_id = group_system_ref and group_system_ref[1] or False
# Data: custom subtypes
mt_private_id = self.mail_message_subtype.create(cr, uid, {'name': 'private', 'description': 'Private public'})
self.ir_model_data.create(cr, uid, {'name': 'mt_private', 'model': 'mail.message.subtype', 'module': 'mail', 'res_id': mt_private_id})
mt_name_supername_id = self.mail_message_subtype.create(cr, uid, {'name': 'name_supername', 'description': 'Supername name'})
self.ir_model_data.create(cr, uid, {'name': 'mt_name_supername', 'model': 'mail.message.subtype', 'module': 'mail', 'res_id': mt_name_supername_id})
mt_group_public_set_id = self.mail_message_subtype.create(cr, uid, {'name': 'group_public_set', 'description': 'Group set'})
self.ir_model_data.create(cr, uid, {'name': 'mt_group_public_set', 'model': 'mail.message.subtype', 'module': 'mail', 'res_id': mt_group_public_set_id})
mt_group_public_id = self.mail_message_subtype.create(cr, uid, {'name': 'group_public', 'description': 'Group changed'})
self.ir_model_data.create(cr, uid, {'name': 'mt_group_public', 'model': 'mail.message.subtype', 'module': 'mail', 'res_id': mt_group_public_id})
# Data: alter mail_group model for testing purposes (test on classic, selection and many2one fields)
cls = type(self.mail_group)
self.assertNotIn('_track', cls.__dict__)
cls._track = {
'public': {
'mail.mt_private': lambda self, cr, uid, obj, ctx=None: obj.public == 'private',
},
'name': {
'mail.mt_name_supername': lambda self, cr, uid, obj, ctx=None: obj.name == 'supername',
},
'group_public_id': {
'mail.mt_group_public_set': lambda self, cr, uid, obj, ctx=None: obj.group_public_id,
'mail.mt_group_public': lambda self, cr, uid, obj, ctx=None: True,
},
}
visibility = {'public': 'onchange', 'name': 'always', 'group_public_id': 'onchange'}
for key in visibility:
self.assertFalse(hasattr(getattr(cls, key), 'track_visibility'))
getattr(cls, key).track_visibility = visibility[key]
@self.addCleanup
def cleanup():
delattr(cls, '_track')
for key in visibility:
del getattr(cls, key).track_visibility
# Test: change name -> always tracked, not related to a subtype
self.mail_group.write(cr, self.user_raoul_id, [self.group_pigs_id], {'public': 'public'})
self.group_pigs.refresh()
self.assertEqual(len(self.group_pigs.message_ids), 1, 'tracked: a message should have been produced')
# Test: first produced message: no subtype, name change tracked
last_msg = self.group_pigs.message_ids[-1]
self.assertFalse(last_msg.subtype_id, 'tracked: message should not have been linked to a subtype')
self.assertIn(u'SelectedGroupOnly\u2192Public', _strip_string_spaces(last_msg.body), 'tracked: message body incorrect')
self.assertIn('Pigs', _strip_string_spaces(last_msg.body), 'tracked: message body does not hold always tracked field')
# Test: change name as supername, public as private -> 2 subtypes
self.mail_group.write(cr, self.user_raoul_id, [self.group_pigs_id], {'name': 'supername', 'public': 'private'})
self.group_pigs.refresh()
self.assertEqual(len(self.group_pigs.message_ids), 3, 'tracked: two messages should have been produced')
# Test: first produced message: mt_name_supername
last_msg = self.group_pigs.message_ids[-2]
self.assertEqual(last_msg.subtype_id.id, mt_private_id, 'tracked: message should be linked to mt_private subtype')
self.assertIn('Private public', last_msg.body, 'tracked: message body does not hold the subtype description')
self.assertIn(u'Pigs\u2192supername', _strip_string_spaces(last_msg.body), 'tracked: message body incorrect')
# Test: second produced message: mt_name_supername
last_msg = self.group_pigs.message_ids[-3]
self.assertEqual(last_msg.subtype_id.id, mt_name_supername_id, 'tracked: message should be linked to mt_name_supername subtype')
self.assertIn('Supername name', last_msg.body, 'tracked: message body does not hold the subtype description')
self.assertIn(u'Public\u2192Private', _strip_string_spaces(last_msg.body), 'tracked: message body incorrect')
self.assertIn(u'Pigs\u2192supername', _strip_string_spaces(last_msg.body), 'tracked feature: message body does not hold always tracked field')
# Test: change public as public, group_public_id -> 2 subtypes, name always tracked
self.mail_group.write(cr, self.user_raoul_id, [self.group_pigs_id], {'public': 'public', 'group_public_id': group_system_id})
self.group_pigs.refresh()
        self.assertEqual(len(self.group_pigs.message_ids), 5, 'tracked: two messages should have been produced')
# Test: first produced message: mt_group_public_set_id, with name always tracked, public tracked on change
last_msg = self.group_pigs.message_ids[-4]
self.assertEqual(last_msg.subtype_id.id, mt_group_public_set_id, 'tracked: message should be linked to mt_group_public_set_id')
self.assertIn('Group set', last_msg.body, 'tracked: message body does not hold the subtype description')
self.assertIn(u'Private\u2192Public', _strip_string_spaces(last_msg.body), 'tracked: message body does not hold changed tracked field')
self.assertIn(u'HumanResources/Employee\u2192Administration/Settings', _strip_string_spaces(last_msg.body), 'tracked: message body does not hold always tracked field')
# Test: second produced message: mt_group_public_id, with name always tracked, public tracked on change
last_msg = self.group_pigs.message_ids[-5]
self.assertEqual(last_msg.subtype_id.id, mt_group_public_id, 'tracked: message should be linked to mt_group_public_id')
self.assertIn('Group changed', last_msg.body, 'tracked: message body does not hold the subtype description')
self.assertIn(u'Private\u2192Public', _strip_string_spaces(last_msg.body), 'tracked: message body does not hold changed tracked field')
self.assertIn(u'HumanResources/Employee\u2192Administration/Settings', _strip_string_spaces(last_msg.body), 'tracked: message body does not hold always tracked field')
# Test: change group_public_id to False -> 1 subtype, name always tracked
self.mail_group.write(cr, self.user_raoul_id, [self.group_pigs_id], {'group_public_id': False})
self.group_pigs.refresh()
self.assertEqual(len(self.group_pigs.message_ids), 6, 'tracked: one message should have been produced')
# Test: produced message: mt_group_public_id, tracking the group_public_id change
last_msg = self.group_pigs.message_ids[-6]
self.assertEqual(last_msg.subtype_id.id, mt_group_public_id, 'tracked: message should be linked to mt_group_public_id')
self.assertIn('Group changed', last_msg.body, 'tracked: message body does not hold the subtype description')
self.assertIn(u'Administration/Settings\u2192', _strip_string_spaces(last_msg.body), 'tracked: message body does not hold always tracked field')
# Test: change not tracked field, no tracking message
self.mail_group.write(cr, self.user_raoul_id, [self.group_pigs_id], {'description': 'Dummy'})
self.group_pigs.refresh()
self.assertEqual(len(self.group_pigs.message_ids), 6, 'tracked: No message should have been produced')
| agpl-3.0 | 518,284,203,778,857,500 | 64.108672 | 175 | 0.612924 | false |
jbaiter/plugin.video.brmediathek | resources/lib/xbmcswift2/__init__.py | 2 | 2442 | '''
xbmcswift2
----------
A micro framework to enable rapid development of XBMC plugins.
:copyright: (c) 2012 by Jonathan Beluch
:license: GPLv3, see LICENSE for more details.
'''
from types import ModuleType
class module(ModuleType):
'''A wrapper class for a module used to override __getattr__. This class
will behave normally for any existing module attributes. For any attributes
which do not exist in the wrapped module, a mock function will be
returned. This function will also return itself enabling multiple mock
function calls.
'''
def __init__(self, wrapped=None):
self.wrapped = wrapped
if wrapped:
self.__dict__.update(wrapped.__dict__)
def __getattr__(self, name):
'''Returns any existing attr for the wrapped module or returns a mock
function for anything else. Never raises an AttributeError.
'''
try:
return getattr(self.wrapped, name)
except AttributeError:
def func(*args, **kwargs):
'''A mock function which returns itself, enabling chainable
function calls.
'''
log.warning('The %s method has not been implemented on the CLI. '
'Your code might not work properly when calling '
'it.', name)
return self
return func
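# Illustrative sketch (not part of the original module): because the mock
# function above returns the wrapper itself, chained calls against a wrapped
# (or empty) module degrade gracefully in CLI mode, e.g.
#
#   fake_xbmcgui = module()                     # nothing wrapped at all
#   fake_xbmcgui.Dialog().ok('Title', 'Line')   # logs a warning per call,
#                                               # never raises AttributeError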
try:
import xbmc
import xbmcgui
import xbmcplugin
import xbmcaddon
import xbmcvfs
CLI_MODE = False
except ImportError:
CLI_MODE = True
import sys
from logger import log
# Mock the XBMC modules
from mockxbmc import xbmc, xbmcgui, xbmcplugin, xbmcaddon, xbmcvfs
xbmc = module(xbmc)
xbmcgui = module(xbmcgui)
xbmcplugin = module(xbmcplugin)
xbmcaddon = module(xbmcaddon)
xbmcvfs = module(xbmcvfs)
from xbmcswift2.storage import TimedStorage
from xbmcswift2.request import Request
from xbmcswift2.common import (xbmc_url, enum, clean_dict, pickle_dict,
unpickle_args, unpickle_dict, download_page, unhex)
from xbmcswift2.constants import SortMethod, VIEW_MODES
from xbmcswift2.listitem import ListItem
from xbmcswift2.logger import setup_log
from xbmcswift2.module import Module
from xbmcswift2.urls import AmbiguousUrlException, NotFoundException, UrlRule
from xbmcswift2.xbmcmixin import XBMCMixin
from xbmcswift2.plugin import Plugin
| gpl-3.0 | -3,516,369,758,686,775,000 | 31.131579 | 79 | 0.667895 | false |
jsirois/commons | src/python/twitter/checkstyle/plugins/missing_contextmanager.py | 14 | 1726 | # ==================================================================================================
# Copyright 2014 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
# TODO(wickman)
#
# 1. open(foo) should always be done in a with context.
#
# 2. if you see acquire/release on the same variable in a particular ast
# body, warn about context manager use.
import ast
from ..common import CheckstylePlugin
class MissingContextManager(CheckstylePlugin):
"""Recommend the use of contextmanagers when it seems appropriate."""
def nits(self):
with_contexts = set(self.iter_ast_types(ast.With))
with_context_calls = set(node.context_expr for node in with_contexts
if isinstance(node.context_expr, ast.Call))
for call in self.iter_ast_types(ast.Call):
if isinstance(call.func, ast.Name) and call.func.id == 'open' and (
call not in with_context_calls):
yield self.warning('T802', 'open() calls should be made within a contextmanager.', call)
| apache-2.0 | -4,280,281,676,137,923,600 | 42.15 | 100 | 0.60197 | false |
eandersson/amqp-storm | amqpstorm/exception.py | 2 | 4462 | """AMQPStorm Exception."""
AMQP_ERROR_MAPPING = {
311: ('CONTENT-TOO-LARGE',
'The client attempted to transfer content larger than the '
'server could accept at the present time. The client may '
'retry at a later time.'),
312: ('NO-ROUTE', 'Undocumented AMQP Soft Error'),
313: ('NO-CONSUMERS',
'When the exchange cannot deliver to a consumer when the '
'immediate flag is set. As a result of pending data on '
'the queue or the absence of any consumers of the queue.'),
320: ('CONNECTION-FORCED',
'An operator intervened to close the connection for some reason. '
'The client may retry at some later date.'),
402: ('INVALID-PATH',
'The client tried to work with an unknown virtual host.'),
403: ('ACCESS-REFUSED',
'The client attempted to work with a server entity to which '
'has no access due to security settings.'),
404: ('NOT-FOUND',
'The client attempted to work with a server '
'entity that does not exist.'),
405: ('RESOURCE-LOCKED',
'The client attempted to work with a server entity to which it '
'has no access because another client is working with it.'),
406: ('PRECONDITION-FAILED',
'The client requested a method that was not '
'allowed because some precondition failed.'),
501: ('FRAME-ERROR',
'The sender sent a malformed frame that the recipient could '
'not decode. This strongly implies a programming error in '
'the sending peer.'),
502: ('SYNTAX-ERROR',
'The sender sent a frame that contained illegal values for '
'one or more fields. This strongly implies a programming '
'error in the sending peer.'),
503: ('COMMAND-INVALID',
'The client sent an invalid sequence of frames, attempting to '
'perform an operation that was considered invalid by the server. '
'This usually implies a programming error in the client.'),
504: ('CHANNEL-ERROR',
'The client attempted to work with a channel that had not '
'been correctly opened. This most likely indicates a '
'fault in the client layer.'),
505: ('UNEXPECTED-FRAME',
'The peer sent a frame that was not expected, usually in the '
'context of a content header and body. This strongly '
'indicates a fault in the peer\'s content processing.'),
506: ('RESOURCE-ERROR',
'The server could not complete the method because it lacked '
'sufficient resources. This may be due to the client '
'creating too many of some type of entity.'),
530: ('NOT-ALLOWED',
'The client tried to work with some entity in a manner '
'that is prohibited by the server, due to security '
'settings or by some other criteria.'),
540: ('NOT-IMPLEMENTED',
'The client tried to use functionality that is '
'not implemented in the server.'),
541: ('INTERNAL-ERROR',
'The server could not complete the method because of an '
'internal error. The server may require intervention by '
'an operator in order to resume normal operations.')
}
class AMQPError(IOError):
"""General AMQP Error"""
_documentation = None
_error_code = None
_error_type = None
@property
def documentation(self):
"""AMQP Documentation string."""
return self._documentation or bytes()
@property
def error_code(self):
"""AMQP Error Code - A 3-digit reply code."""
return self._error_code
@property
def error_type(self):
"""AMQP Error Type e.g. NOT-FOUND."""
return self._error_type
def __init__(self, *args, **kwargs):
self._error_code = kwargs.pop('reply_code', None)
super(AMQPError, self).__init__(*args, **kwargs)
if self._error_code not in AMQP_ERROR_MAPPING:
return
self._error_type = AMQP_ERROR_MAPPING[self._error_code][0]
self._documentation = AMQP_ERROR_MAPPING[self._error_code][1]
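# Illustrative usage sketch (assumption, not part of the original module):
# callers can branch on the decoded reply code and read the spec text, e.g.
#
#   try:
#       channel.queue.declare('missing-queue', passive=True)  # hypothetical call
#   except AMQPChannelError as why:
#       if why.error_code == 404:
#           print(why.error_type)        # 'NOT-FOUND'
#           print(why.documentation)     # text from AMQP_ERROR_MAPPING[404]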
class AMQPConnectionError(AMQPError):
"""AMQP Connection Error"""
pass
class AMQPChannelError(AMQPError):
"""AMQP Channel Error"""
pass
class AMQPMessageError(AMQPChannelError):
"""AMQP Message Error"""
pass
class AMQPInvalidArgument(AMQPError):
"""AMQP Argument Error"""
| mit | 6,412,930,167,972,811,000 | 38.140351 | 76 | 0.625504 | false |
hyperNURb/ggrc-core | src/ggrc_workflows/services/workflow_cycle_calculator/annually_cycle_calculator.py | 5 | 2706 | # Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: [email protected]
# Maintained By: [email protected]
import datetime
from dateutil import relativedelta
from cycle_calculator import CycleCalculator
class AnnuallyCycleCalculator(CycleCalculator):
"""CycleCalculator implementation for annual workflows.
Month domain is 1-12, date domain is 1-31.
"""
time_delta = relativedelta.relativedelta(years=1)
date_domain = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
month_domain = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}
def __init__(self, workflow, base_date=None):
super(AnnuallyCycleCalculator, self).__init__(workflow)
base_date = self.get_base_date(base_date)
self.reified_tasks = {}
for task in self.tasks:
start_date, end_date = self.non_adjusted_task_date_range(
task, base_date, initialisation=True)
self.reified_tasks[task.id] = {
'start_date': start_date,
'end_date': end_date,
'relative_start': (task.relative_start_month, task.relative_start_day),
'relative_end': (task.relative_end_month, task.relative_end_day)
}
def relative_day_to_date(self, relative_day, relative_month=None,
base_date=None):
"""Converts an annual relative day representation to concrete date object
First we ensure that we have both relative_day and relative_month or,
alternatively, that relative_day carries month information as well.
While task_date_range calls with explicit relative_month, reified_tasks
stores relative days as MM/DD and we must first convert these values so
that it can sort and get min and max values for tasks.
Afterwards we repeat the math similar to monthly cycle calculator and
ensure that the day is not overflowing to the next month.
"""
today = datetime.date.today()
relative_day = int(relative_day)
relative_month = int(relative_month)
if relative_day not in AnnuallyCycleCalculator.date_domain:
raise ValueError
if relative_month not in AnnuallyCycleCalculator.month_domain:
raise ValueError
base_date = self.get_base_date(base_date)
start_month = datetime.date(base_date.year, relative_month, 1)
ddate = start_month + relativedelta.relativedelta(days=relative_day - 1)
# We want to go up to the end of the month and not over
if ddate.month != start_month.month:
ddate = ddate - relativedelta.relativedelta(days=ddate.day)
return ddate
| apache-2.0 | -5,756,818,069,688,133,000 | 37.657143 | 79 | 0.691057 | false |
goodwinnk/intellij-community | python/helpers/python-skeletons/nose/tools/__init__.py | 80 | 5457 | """Skeleton for 'nose.tools' module.
Project: nose 1.3 <https://nose.readthedocs.org/>
Skeleton by: Andrey Vlasovskikh <[email protected]>
"""
import sys
def assert_equal(first, second, msg=None):
"""Fail if the two objects are unequal as determined by the '==' operator.
"""
pass
def assert_not_equal(first, second, msg=None):
"""Fail if the two objects are equal as determined by the '==' operator.
"""
pass
def assert_true(expr, msg=None):
"""Check that the expression is true."""
pass
def assert_false(expr, msg=None):
"""Check that the expression is false."""
pass
if sys.version_info >= (2, 7):
def assert_is(expr1, expr2, msg=None):
"""Just like assert_true(a is b), but with a nicer default message."""
pass
def assert_is_not(expr1, expr2, msg=None):
"""Just like assert_true(a is not b), but with a nicer default message.
"""
pass
def assert_is_none(obj, msg=None):
"""Same as assert_true(obj is None), with a nicer default message.
"""
pass
def assert_is_not_none(obj, msg=None):
"""Included for symmetry with assert_is_none."""
pass
def assert_in(member, container, msg=None):
"""Just like assert_true(a in b), but with a nicer default message."""
pass
def assert_not_in(member, container, msg=None):
"""Just like assert_true(a not in b), but with a nicer default message.
"""
pass
def assert_is_instance(obj, cls, msg=None):
"""Same as assert_true(isinstance(obj, cls)), with a nicer default
message.
"""
pass
def assert_not_is_instance(obj, cls, msg=None):
"""Included for symmetry with assert_is_instance."""
pass
def assert_raises(excClass, callableObj=None, *args, **kwargs):
"""Fail unless an exception of class excClass is thrown by callableObj when
invoked with arguments args and keyword arguments kwargs.
If called with callableObj omitted or None, will return a
context object used like this::
with assert_raises(SomeException):
do_something()
:rtype: unittest.case._AssertRaisesContext | None
"""
pass
if sys.version_info >= (2, 7):
def assert_raises_regexp(expected_exception, expected_regexp,
callable_obj=None, *args, **kwargs):
"""Asserts that the message in a raised exception matches a regexp.
:rtype: unittest.case._AssertRaisesContext | None
"""
pass
def assert_almost_equal(first, second, places=None, msg=None, delta=None):
"""Fail if the two objects are unequal as determined by their difference
rounded to the given number of decimal places (default 7) and comparing to
zero, or by comparing that the difference between the two objects is more than the
given delta.
"""
pass
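# Illustrative sketch (not part of the original skeleton): places and delta
# express different tolerances, e.g.
#
#   assert_almost_equal(1.0, 1.004, places=2)     # passes: round(0.004, 2) == 0
#   assert_almost_equal(1.0, 1.004, delta=0.001)  # fails: 0.004 > 0.001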
def assert_not_almost_equal(first, second, places=None, msg=None, delta=None):
"""Fail if the two objects are equal as determined by their difference
rounded to the given number of decimal places (default 7) and comparing to
zero, or by comparing that the difference between the two objects is less than the
given delta.
"""
pass
if sys.version_info >= (2, 7):
def assert_greater(a, b, msg=None):
"""Just like assert_true(a > b), but with a nicer default message."""
pass
def assert_greater_equal(a, b, msg=None):
"""Just like assert_true(a >= b), but with a nicer default message."""
pass
def assert_less(a, b, msg=None):
"""Just like assert_true(a < b), but with a nicer default message."""
pass
def assert_less_equal(a, b, msg=None):
"""Just like self.assertTrue(a <= b), but with a nicer default
message.
"""
pass
def assert_regexp_matches(text, expected_regexp, msg=None):
"""Fail the test unless the text matches the regular expression."""
pass
def assert_not_regexp_matches(text, unexpected_regexp, msg=None):
"""Fail the test if the text matches the regular expression."""
pass
def assert_items_equal(expected_seq, actual_seq, msg=None):
"""An unordered sequence specific comparison. It asserts that
actual_seq and expected_seq have the same element counts.
"""
pass
def assert_dict_contains_subset(expected, actual, msg=None):
"""Checks whether actual is a superset of expected."""
pass
def assert_multi_line_equal(first, second, msg=None):
"""Assert that two multi-line strings are equal."""
pass
def assert_sequence_equal(seq1, seq2, msg=None, seq_type=None):
"""An equality assertion for ordered sequences (like lists and tuples).
"""
pass
def assert_list_equal(list1, list2, msg=None):
"""A list-specific equality assertion."""
pass
def assert_tuple_equal(tuple1, tuple2, msg=None):
"""A tuple-specific equality assertion."""
pass
def assert_set_equal(set1, set2, msg=None):
"""A set-specific equality assertion."""
pass
def assert_dict_equal(d1, d2, msg=None):
"""A dict-specific equality assertion."""
pass
assert_equals = assert_equal
assert_not_equals = assert_not_equal
assert_almost_equals = assert_almost_equal
assert_not_almost_equals = assert_not_almost_equal
| apache-2.0 | 6,184,661,431,717,149,000 | 29.149171 | 79 | 0.634415 | false |
rabipanda/tensorflow | tensorflow/python/training/training_ops.py | 131 | 1046 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrappers for training ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.training import gen_training_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.training.gen_training_ops import *
# pylint: enable=wildcard-import
| apache-2.0 | 4,086,360,359,569,015,000 | 39.230769 | 80 | 0.711281 | false |
tacrow/tacrow | node_modules/node-gyp/gyp/pylib/gyp/generator/android.py | 960 | 45344 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Notes:
#
# This generates makefiles suitable for inclusion into the Android build system
# via an Android.mk file. It is based on make.py, the standard makefile
# generator.
#
# The code below generates a separate .mk file for each target, but
# all are sourced by the top-level GypAndroid.mk. This means that all
# variables in .mk-files clobber one another, and furthermore that any
# variables set potentially clash with other Android build system variables.
# Try to avoid setting global variables where possible.
import gyp
import gyp.common
import gyp.generator.make as make # Reuse global functions from make backend.
import os
import re
import subprocess
generator_default_variables = {
'OS': 'android',
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '',
'STATIC_LIB_PREFIX': 'lib',
'SHARED_LIB_PREFIX': 'lib',
'STATIC_LIB_SUFFIX': '.a',
'SHARED_LIB_SUFFIX': '.so',
'INTERMEDIATE_DIR': '$(gyp_intermediate_dir)',
'SHARED_INTERMEDIATE_DIR': '$(gyp_shared_intermediate_dir)',
'PRODUCT_DIR': '$(gyp_shared_intermediate_dir)',
'SHARED_LIB_DIR': '$(builddir)/lib.$(TOOLSET)',
'LIB_DIR': '$(obj).$(TOOLSET)',
'RULE_INPUT_ROOT': '%(INPUT_ROOT)s', # This gets expanded by Python.
'RULE_INPUT_DIRNAME': '%(INPUT_DIRNAME)s', # This gets expanded by Python.
'RULE_INPUT_PATH': '$(RULE_SOURCES)',
'RULE_INPUT_EXT': '$(suffix $<)',
'RULE_INPUT_NAME': '$(notdir $<)',
'CONFIGURATION_NAME': '$(GYP_CONFIGURATION)',
}
# Make supports multiple toolsets
generator_supports_multiple_toolsets = True
# Generator-specific gyp specs.
generator_additional_non_configuration_keys = [
# Boolean to declare that this target does not want its name mangled.
'android_unmangled_name',
# Map of android build system variables to set.
'aosp_build_settings',
]
generator_additional_path_sections = []
generator_extra_sources_for_rules = []
ALL_MODULES_FOOTER = """\
# "gyp_all_modules" is a concatenation of the "gyp_all_modules" targets from
# all the included sub-makefiles. This is just here to clarify.
gyp_all_modules:
"""
header = """\
# This file is generated by gyp; do not edit.
"""
# Map gyp target types to Android module classes.
MODULE_CLASSES = {
'static_library': 'STATIC_LIBRARIES',
'shared_library': 'SHARED_LIBRARIES',
'executable': 'EXECUTABLES',
}
def IsCPPExtension(ext):
return make.COMPILABLE_EXTENSIONS.get(ext) == 'cxx'
def Sourceify(path):
"""Convert a path to its source directory form. The Android backend does not
support options.generator_output, so this function is a noop."""
return path
# Map from qualified target to path to output.
# For Android, the target of these maps is a tuple ('static', 'modulename'),
# ('dynamic', 'modulename'), or ('path', 'some/path') instead of a string,
# since we link by module.
target_outputs = {}
# Map from qualified target to any linkable output. A subset
# of target_outputs. E.g. when mybinary depends on liba, we want to
# include liba in the linker line; when otherbinary depends on
# mybinary, we just want to build mybinary first.
target_link_deps = {}
class AndroidMkWriter(object):
"""AndroidMkWriter packages up the writing of one target-specific Android.mk.
Its only real entry point is Write(), and is mostly used for namespacing.
"""
def __init__(self, android_top_dir):
self.android_top_dir = android_top_dir
def Write(self, qualified_target, relative_target, base_path, output_filename,
spec, configs, part_of_all, write_alias_target, sdk_version):
"""The main entry point: writes a .mk file for a single target.
Arguments:
qualified_target: target we're generating
relative_target: qualified target name relative to the root
base_path: path relative to source root we're building in, used to resolve
target-relative paths
output_filename: output .mk file name to write
spec, configs: gyp info
part_of_all: flag indicating this target is part of 'all'
write_alias_target: flag indicating whether to create short aliases for
this target
sdk_version: what to emit for LOCAL_SDK_VERSION in output
"""
gyp.common.EnsureDirExists(output_filename)
self.fp = open(output_filename, 'w')
self.fp.write(header)
self.qualified_target = qualified_target
self.relative_target = relative_target
self.path = base_path
self.target = spec['target_name']
self.type = spec['type']
self.toolset = spec['toolset']
deps, link_deps = self.ComputeDeps(spec)
# Some of the generation below can add extra output, sources, or
# link dependencies. All of the out params of the functions that
# follow use names like extra_foo.
extra_outputs = []
extra_sources = []
self.android_class = MODULE_CLASSES.get(self.type, 'GYP')
self.android_module = self.ComputeAndroidModule(spec)
(self.android_stem, self.android_suffix) = self.ComputeOutputParts(spec)
self.output = self.output_binary = self.ComputeOutput(spec)
# Standard header.
self.WriteLn('include $(CLEAR_VARS)\n')
# Module class and name.
self.WriteLn('LOCAL_MODULE_CLASS := ' + self.android_class)
self.WriteLn('LOCAL_MODULE := ' + self.android_module)
# Only emit LOCAL_MODULE_STEM if it's different to LOCAL_MODULE.
# The library module classes fail if the stem is set. ComputeOutputParts
# makes sure that stem == modulename in these cases.
if self.android_stem != self.android_module:
self.WriteLn('LOCAL_MODULE_STEM := ' + self.android_stem)
self.WriteLn('LOCAL_MODULE_SUFFIX := ' + self.android_suffix)
if self.toolset == 'host':
self.WriteLn('LOCAL_IS_HOST_MODULE := true')
self.WriteLn('LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)')
elif sdk_version > 0:
self.WriteLn('LOCAL_MODULE_TARGET_ARCH := '
'$(TARGET_$(GYP_VAR_PREFIX)ARCH)')
self.WriteLn('LOCAL_SDK_VERSION := %s' % sdk_version)
# Grab output directories; needed for Actions and Rules.
if self.toolset == 'host':
self.WriteLn('gyp_intermediate_dir := '
'$(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))')
else:
self.WriteLn('gyp_intermediate_dir := '
'$(call local-intermediates-dir,,$(GYP_VAR_PREFIX))')
self.WriteLn('gyp_shared_intermediate_dir := '
'$(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))')
self.WriteLn()
# List files this target depends on so that actions/rules/copies/sources
# can depend on the list.
# TODO: doesn't pull in things through transitive link deps; needed?
target_dependencies = [x[1] for x in deps if x[0] == 'path']
self.WriteLn('# Make sure our deps are built first.')
self.WriteList(target_dependencies, 'GYP_TARGET_DEPENDENCIES',
local_pathify=True)
# Actions must come first, since they can generate more OBJs for use below.
if 'actions' in spec:
self.WriteActions(spec['actions'], extra_sources, extra_outputs)
# Rules must be early like actions.
if 'rules' in spec:
self.WriteRules(spec['rules'], extra_sources, extra_outputs)
if 'copies' in spec:
self.WriteCopies(spec['copies'], extra_outputs)
# GYP generated outputs.
self.WriteList(extra_outputs, 'GYP_GENERATED_OUTPUTS', local_pathify=True)
# Set LOCAL_ADDITIONAL_DEPENDENCIES so that Android's build rules depend
# on both our dependency targets and our generated files.
self.WriteLn('# Make sure our deps and generated files are built first.')
self.WriteLn('LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) '
'$(GYP_GENERATED_OUTPUTS)')
self.WriteLn()
# Sources.
if spec.get('sources', []) or extra_sources:
self.WriteSources(spec, configs, extra_sources)
self.WriteTarget(spec, configs, deps, link_deps, part_of_all,
write_alias_target)
# Update global list of target outputs, used in dependency tracking.
target_outputs[qualified_target] = ('path', self.output_binary)
# Update global list of link dependencies.
if self.type == 'static_library':
target_link_deps[qualified_target] = ('static', self.android_module)
elif self.type == 'shared_library':
target_link_deps[qualified_target] = ('shared', self.android_module)
self.fp.close()
return self.android_module
def WriteActions(self, actions, extra_sources, extra_outputs):
"""Write Makefile code for any 'actions' from the gyp input.
extra_sources: a list that will be filled in with newly generated source
files, if any
extra_outputs: a list that will be filled in with any outputs of these
actions (used to make other pieces dependent on these
actions)
"""
for action in actions:
name = make.StringToMakefileVariable('%s_%s' % (self.relative_target,
action['action_name']))
self.WriteLn('### Rules for action "%s":' % action['action_name'])
inputs = action['inputs']
outputs = action['outputs']
# Build up a list of outputs.
# Collect the output dirs we'll need.
dirs = set()
for out in outputs:
if not out.startswith('$'):
print ('WARNING: Action for target "%s" writes output to local path '
'"%s".' % (self.target, out))
dir = os.path.split(out)[0]
if dir:
dirs.add(dir)
if int(action.get('process_outputs_as_sources', False)):
extra_sources += outputs
# Prepare the actual command.
command = gyp.common.EncodePOSIXShellList(action['action'])
if 'message' in action:
quiet_cmd = 'Gyp action: %s ($@)' % action['message']
else:
quiet_cmd = 'Gyp action: %s ($@)' % name
if len(dirs) > 0:
command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command
cd_action = 'cd $(gyp_local_path)/%s; ' % self.path
command = cd_action + command
# The makefile rules are all relative to the top dir, but the gyp actions
# are defined relative to their containing dir. This replaces the gyp_*
# variables for the action rule with an absolute version so that the
# output goes in the right place.
# Only write the gyp_* rules for the "primary" output (:1);
# it's superfluous for the "extra outputs", and this avoids accidentally
# writing duplicate dummy rules for those outputs.
main_output = make.QuoteSpaces(self.LocalPathify(outputs[0]))
self.WriteLn('%s: gyp_local_path := $(LOCAL_PATH)' % main_output)
self.WriteLn('%s: gyp_var_prefix := $(GYP_VAR_PREFIX)' % main_output)
self.WriteLn('%s: gyp_intermediate_dir := '
'$(abspath $(gyp_intermediate_dir))' % main_output)
self.WriteLn('%s: gyp_shared_intermediate_dir := '
'$(abspath $(gyp_shared_intermediate_dir))' % main_output)
# Android's envsetup.sh adds a number of directories to the path including
# the built host binary directory. This causes actions/rules invoked by
# gyp to sometimes use these instead of system versions, e.g. bison.
# The built host binaries may not be suitable, and can cause errors.
# So, we remove them from the PATH using the ANDROID_BUILD_PATHS variable
# set by envsetup.
self.WriteLn('%s: export PATH := $(subst $(ANDROID_BUILD_PATHS),,$(PATH))'
% main_output)
# Don't allow spaces in input/output filenames, but make an exception for
# filenames which start with '$(' since it's okay for there to be spaces
# inside of make function/macro invocations.
for input in inputs:
if not input.startswith('$(') and ' ' in input:
raise gyp.common.GypError(
'Action input filename "%s" in target %s contains a space' %
(input, self.target))
for output in outputs:
if not output.startswith('$(') and ' ' in output:
raise gyp.common.GypError(
'Action output filename "%s" in target %s contains a space' %
(output, self.target))
self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES)' %
(main_output, ' '.join(map(self.LocalPathify, inputs))))
self.WriteLn('\t@echo "%s"' % quiet_cmd)
self.WriteLn('\t$(hide)%s\n' % command)
for output in outputs[1:]:
# Make each output depend on the main output, with an empty command
# to force make to notice that the mtime has changed.
self.WriteLn('%s: %s ;' % (self.LocalPathify(output), main_output))
extra_outputs += outputs
self.WriteLn()
self.WriteLn()
def WriteRules(self, rules, extra_sources, extra_outputs):
"""Write Makefile code for any 'rules' from the gyp input.
extra_sources: a list that will be filled in with newly generated source
files, if any
extra_outputs: a list that will be filled in with any outputs of these
rules (used to make other pieces dependent on these rules)
"""
if len(rules) == 0:
return
for rule in rules:
if len(rule.get('rule_sources', [])) == 0:
continue
name = make.StringToMakefileVariable('%s_%s' % (self.relative_target,
rule['rule_name']))
self.WriteLn('\n### Generated for rule "%s":' % name)
self.WriteLn('# "%s":' % rule)
inputs = rule.get('inputs')
for rule_source in rule.get('rule_sources', []):
(rule_source_dirname, rule_source_basename) = os.path.split(rule_source)
(rule_source_root, rule_source_ext) = \
os.path.splitext(rule_source_basename)
outputs = [self.ExpandInputRoot(out, rule_source_root,
rule_source_dirname)
for out in rule['outputs']]
dirs = set()
for out in outputs:
if not out.startswith('$'):
print ('WARNING: Rule for target %s writes output to local path %s'
% (self.target, out))
dir = os.path.dirname(out)
if dir:
dirs.add(dir)
extra_outputs += outputs
if int(rule.get('process_outputs_as_sources', False)):
extra_sources.extend(outputs)
components = []
for component in rule['action']:
component = self.ExpandInputRoot(component, rule_source_root,
rule_source_dirname)
if '$(RULE_SOURCES)' in component:
component = component.replace('$(RULE_SOURCES)',
rule_source)
components.append(component)
command = gyp.common.EncodePOSIXShellList(components)
cd_action = 'cd $(gyp_local_path)/%s; ' % self.path
command = cd_action + command
if dirs:
command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command
# We set up a rule to build the first output, and then set up
# a rule for each additional output to depend on the first.
outputs = map(self.LocalPathify, outputs)
main_output = outputs[0]
self.WriteLn('%s: gyp_local_path := $(LOCAL_PATH)' % main_output)
self.WriteLn('%s: gyp_var_prefix := $(GYP_VAR_PREFIX)' % main_output)
self.WriteLn('%s: gyp_intermediate_dir := '
'$(abspath $(gyp_intermediate_dir))' % main_output)
self.WriteLn('%s: gyp_shared_intermediate_dir := '
'$(abspath $(gyp_shared_intermediate_dir))' % main_output)
# See explanation in WriteActions.
self.WriteLn('%s: export PATH := '
'$(subst $(ANDROID_BUILD_PATHS),,$(PATH))' % main_output)
main_output_deps = self.LocalPathify(rule_source)
if inputs:
main_output_deps += ' '
main_output_deps += ' '.join([self.LocalPathify(f) for f in inputs])
self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES)' %
(main_output, main_output_deps))
self.WriteLn('\t%s\n' % command)
for output in outputs[1:]:
# Make each output depend on the main output, with an empty command
# to force make to notice that the mtime has changed.
self.WriteLn('%s: %s ;' % (output, main_output))
self.WriteLn()
self.WriteLn()
def WriteCopies(self, copies, extra_outputs):
"""Write Makefile code for any 'copies' from the gyp input.
extra_outputs: a list that will be filled in with any outputs of this action
(used to make other pieces dependent on this action)
"""
self.WriteLn('### Generated for copy rule.')
variable = make.StringToMakefileVariable(self.relative_target + '_copies')
outputs = []
for copy in copies:
for path in copy['files']:
# The Android build system does not allow generation of files into the
# source tree. The destination should start with a variable, which will
# typically be $(gyp_intermediate_dir) or
# $(gyp_shared_intermediate_dir). Note that we can't use an assertion
# because some of the gyp tests depend on this.
if not copy['destination'].startswith('$'):
print ('WARNING: Copy rule for target %s writes output to '
'local path %s' % (self.target, copy['destination']))
# LocalPathify() calls normpath, stripping trailing slashes.
path = Sourceify(self.LocalPathify(path))
filename = os.path.split(path)[1]
output = Sourceify(self.LocalPathify(os.path.join(copy['destination'],
filename)))
self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES) | $(ACP)' %
(output, path))
self.WriteLn('\t@echo Copying: $@')
self.WriteLn('\t$(hide) mkdir -p $(dir $@)')
self.WriteLn('\t$(hide) $(ACP) -rpf $< $@')
self.WriteLn()
outputs.append(output)
self.WriteLn('%s = %s' % (variable,
' '.join(map(make.QuoteSpaces, outputs))))
extra_outputs.append('$(%s)' % variable)
self.WriteLn()
def WriteSourceFlags(self, spec, configs):
"""Write out the flags and include paths used to compile source files for
the current target.
Args:
spec, configs: input from gyp.
"""
for configname, config in sorted(configs.iteritems()):
extracted_includes = []
self.WriteLn('\n# Flags passed to both C and C++ files.')
cflags, includes_from_cflags = self.ExtractIncludesFromCFlags(
config.get('cflags', []) + config.get('cflags_c', []))
extracted_includes.extend(includes_from_cflags)
self.WriteList(cflags, 'MY_CFLAGS_%s' % configname)
self.WriteList(config.get('defines'), 'MY_DEFS_%s' % configname,
prefix='-D', quoter=make.EscapeCppDefine)
self.WriteLn('\n# Include paths placed before CFLAGS/CPPFLAGS')
includes = list(config.get('include_dirs', []))
includes.extend(extracted_includes)
includes = map(Sourceify, map(self.LocalPathify, includes))
includes = self.NormalizeIncludePaths(includes)
self.WriteList(includes, 'LOCAL_C_INCLUDES_%s' % configname)
self.WriteLn('\n# Flags passed to only C++ (and not C) files.')
self.WriteList(config.get('cflags_cc'), 'LOCAL_CPPFLAGS_%s' % configname)
self.WriteLn('\nLOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) '
'$(MY_DEFS_$(GYP_CONFIGURATION))')
# Undefine ANDROID for host modules
# TODO: the source code should not use macro ANDROID to tell if it's host
# or target module.
if self.toolset == 'host':
self.WriteLn('# Undefine ANDROID for host modules')
self.WriteLn('LOCAL_CFLAGS += -UANDROID')
self.WriteLn('LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) '
'$(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))')
self.WriteLn('LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))')
# Android uses separate flags for assembly file invocations, but gyp expects
# the same CFLAGS to be applied:
self.WriteLn('LOCAL_ASFLAGS := $(LOCAL_CFLAGS)')
def WriteSources(self, spec, configs, extra_sources):
"""Write Makefile code for any 'sources' from the gyp input.
These are source files necessary to build the current target.
We need to handle shared_intermediate directory source files as
a special case by copying them to the intermediate directory and
treating them as generated sources. Otherwise the Android build
rules won't pick them up.
Args:
spec, configs: input from gyp.
extra_sources: Sources generated from Actions or Rules.
"""
sources = filter(make.Compilable, spec.get('sources', []))
generated_not_sources = [x for x in extra_sources if not make.Compilable(x)]
extra_sources = filter(make.Compilable, extra_sources)
# Determine and output the C++ extension used by these sources.
# We simply find the first C++ file and use that extension.
all_sources = sources + extra_sources
local_cpp_extension = '.cpp'
for source in all_sources:
(root, ext) = os.path.splitext(source)
if IsCPPExtension(ext):
local_cpp_extension = ext
break
if local_cpp_extension != '.cpp':
self.WriteLn('LOCAL_CPP_EXTENSION := %s' % local_cpp_extension)
# We need to move any non-generated sources that are coming from the
# shared intermediate directory out of LOCAL_SRC_FILES and put them
# into LOCAL_GENERATED_SOURCES. We also need to move over any C++ files
# that don't match our local_cpp_extension, since Android will only
# generate Makefile rules for a single LOCAL_CPP_EXTENSION.
local_files = []
for source in sources:
(root, ext) = os.path.splitext(source)
if '$(gyp_shared_intermediate_dir)' in source:
extra_sources.append(source)
elif '$(gyp_intermediate_dir)' in source:
extra_sources.append(source)
elif IsCPPExtension(ext) and ext != local_cpp_extension:
extra_sources.append(source)
else:
local_files.append(os.path.normpath(os.path.join(self.path, source)))
# For any generated source, if it is coming from the shared intermediate
# directory then we add a Make rule to copy them to the local intermediate
# directory first. This is because the Android LOCAL_GENERATED_SOURCES
# must be in the local module intermediate directory for the compile rules
# to work properly. If the file has the wrong C++ extension, then we add
# a rule to copy that to intermediates and use the new version.
final_generated_sources = []
# If a source file gets copied, we still need to add the original source
# directory as a header search path, since GCC by default searches for
# headers in the directory that contains the source file.
origin_src_dirs = []
for source in extra_sources:
local_file = source
if not '$(gyp_intermediate_dir)/' in local_file:
basename = os.path.basename(local_file)
local_file = '$(gyp_intermediate_dir)/' + basename
(root, ext) = os.path.splitext(local_file)
if IsCPPExtension(ext) and ext != local_cpp_extension:
local_file = root + local_cpp_extension
if local_file != source:
self.WriteLn('%s: %s' % (local_file, self.LocalPathify(source)))
self.WriteLn('\tmkdir -p $(@D); cp $< $@')
origin_src_dirs.append(os.path.dirname(source))
final_generated_sources.append(local_file)
# We add back in all of the non-compilable stuff to make sure that the
# make rules have dependencies on them.
final_generated_sources.extend(generated_not_sources)
self.WriteList(final_generated_sources, 'LOCAL_GENERATED_SOURCES')
origin_src_dirs = gyp.common.uniquer(origin_src_dirs)
origin_src_dirs = map(Sourceify, map(self.LocalPathify, origin_src_dirs))
self.WriteList(origin_src_dirs, 'GYP_COPIED_SOURCE_ORIGIN_DIRS')
self.WriteList(local_files, 'LOCAL_SRC_FILES')
# Write out the flags used to compile the source; this must be done last
# so that GYP_COPIED_SOURCE_ORIGIN_DIRS can be used as an include path.
self.WriteSourceFlags(spec, configs)
def ComputeAndroidModule(self, spec):
"""Return the Android module name used for a gyp spec.
We use the complete qualified target name to avoid collisions between
duplicate targets in different directories. We also add a suffix to
distinguish gyp-generated module names.
"""
if int(spec.get('android_unmangled_name', 0)):
assert self.type != 'shared_library' or self.target.startswith('lib')
return self.target
if self.type == 'shared_library':
# For reasons of convention, the Android build system requires that all
# shared library modules are named 'libfoo' when generating -l flags.
prefix = 'lib_'
else:
prefix = ''
if spec['toolset'] == 'host':
suffix = '_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp'
else:
suffix = '_gyp'
if self.path:
middle = make.StringToMakefileVariable('%s_%s' % (self.path, self.target))
else:
middle = make.StringToMakefileVariable(self.target)
return ''.join([prefix, middle, suffix])
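# Illustrative sketch (assumed example, not from the original generator):
# for a gyp target 'baz' defined in 'foo/bar' this roughly yields
#   shared_library, target toolset -> 'lib_foo_bar_baz_gyp'
#   static_library, target toolset -> 'foo_bar_baz_gyp'
# while a host toolset uses the '_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp'
# suffix instead.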
def ComputeOutputParts(self, spec):
"""Return the 'output basename' of a gyp spec, split into filename + ext.
Android libraries must be named the same thing as their module name,
otherwise the linker can't find them, so product_name and so on must be
ignored if we are building a library, and the "lib" prepending is
not done for Android.
"""
assert self.type != 'loadable_module' # TODO: not supported?
target = spec['target_name']
target_prefix = ''
target_ext = ''
if self.type == 'static_library':
target = self.ComputeAndroidModule(spec)
target_ext = '.a'
elif self.type == 'shared_library':
target = self.ComputeAndroidModule(spec)
target_ext = '.so'
elif self.type == 'none':
target_ext = '.stamp'
elif self.type != 'executable':
print ("ERROR: What output file should be generated?",
"type", self.type, "target", target)
if self.type != 'static_library' and self.type != 'shared_library':
target_prefix = spec.get('product_prefix', target_prefix)
target = spec.get('product_name', target)
product_ext = spec.get('product_extension')
if product_ext:
target_ext = '.' + product_ext
target_stem = target_prefix + target
return (target_stem, target_ext)
def ComputeOutputBasename(self, spec):
"""Return the 'output basename' of a gyp spec.
E.g., the loadable module 'foobar' in directory 'baz' will produce
'libfoobar.so'
"""
return ''.join(self.ComputeOutputParts(spec))
def ComputeOutput(self, spec):
"""Return the 'output' (full output path) of a gyp spec.
E.g., the loadable module 'foobar' in directory 'baz' will produce
'$(obj)/baz/libfoobar.so'
"""
if self.type == 'executable':
# We install host executables into shared_intermediate_dir so they can be
# run by gyp rules that refer to PRODUCT_DIR.
path = '$(gyp_shared_intermediate_dir)'
elif self.type == 'shared_library':
if self.toolset == 'host':
path = '$($(GYP_HOST_VAR_PREFIX)HOST_OUT_INTERMEDIATE_LIBRARIES)'
else:
path = '$($(GYP_VAR_PREFIX)TARGET_OUT_INTERMEDIATE_LIBRARIES)'
else:
# Other targets just get built into their intermediate dir.
if self.toolset == 'host':
path = ('$(call intermediates-dir-for,%s,%s,true,,'
'$(GYP_HOST_VAR_PREFIX))' % (self.android_class,
self.android_module))
else:
path = ('$(call intermediates-dir-for,%s,%s,,,$(GYP_VAR_PREFIX))'
% (self.android_class, self.android_module))
assert spec.get('product_dir') is None # TODO: not supported?
return os.path.join(path, self.ComputeOutputBasename(spec))
def NormalizeIncludePaths(self, include_paths):
""" Normalize include_paths.
Convert absolute paths to relative to the Android top directory.
Args:
include_paths: A list of unprocessed include paths.
Returns:
A list of normalized include paths.
"""
normalized = []
for path in include_paths:
if path[0] == '/':
path = gyp.common.RelativePath(path, self.android_top_dir)
normalized.append(path)
return normalized
def ExtractIncludesFromCFlags(self, cflags):
"""Extract includes "-I..." out from cflags
Args:
cflags: A list of compiler flags, which may be mixed with "-I.."
Returns:
A tuple of lists: (clean_clfags, include_paths). "-I.." is trimmed.
"""
clean_cflags = []
include_paths = []
for flag in cflags:
if flag.startswith('-I'):
include_paths.append(flag[2:])
else:
clean_cflags.append(flag)
return (clean_cflags, include_paths)
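# Illustrative sketch (assumed input, not from the original generator):
#   self.ExtractIncludesFromCFlags(['-Wall', '-Ifoo/bar', '-I/abs/include'])
#   -> (['-Wall'], ['foo/bar', '/abs/include'])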
def FilterLibraries(self, libraries):
"""Filter the 'libraries' key to separate things that shouldn't be ldflags.
Library entries that look like filenames should be converted to android
module names instead of being passed to the linker as flags.
Args:
libraries: the value of spec.get('libraries')
Returns:
A tuple (static_lib_modules, dynamic_lib_modules, ldflags)
"""
static_lib_modules = []
dynamic_lib_modules = []
ldflags = []
for libs in libraries:
# Libs can have multiple words.
for lib in libs.split():
# Filter the system libraries, which are added by default by the Android
# build system.
if (lib == '-lc' or lib == '-lstdc++' or lib == '-lm' or
lib.endswith('libgcc.a')):
continue
match = re.search(r'([^/]+)\.a$', lib)
if match:
static_lib_modules.append(match.group(1))
continue
match = re.search(r'([^/]+)\.so$', lib)
if match:
dynamic_lib_modules.append(match.group(1))
continue
if lib.startswith('-l'):
ldflags.append(lib)
return (static_lib_modules, dynamic_lib_modules, ldflags)
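# Illustrative sketch (assumed input, not from the original generator):
#   self.FilterLibraries(['-lc -llog', 'path/to/libfoo.a', 'out/libbar.so'])
#   -> (['libfoo'], ['libbar'], ['-llog'])
# '-lc' is dropped because the Android build system links libc implicitly.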
def ComputeDeps(self, spec):
"""Compute the dependencies of a gyp spec.
Returns a tuple (deps, link_deps), where each is a list of
filenames that will need to be put in front of make for either
building (deps) or linking (link_deps).
"""
deps = []
link_deps = []
if 'dependencies' in spec:
deps.extend([target_outputs[dep] for dep in spec['dependencies']
if target_outputs[dep]])
for dep in spec['dependencies']:
if dep in target_link_deps:
link_deps.append(target_link_deps[dep])
deps.extend(link_deps)
return (gyp.common.uniquer(deps), gyp.common.uniquer(link_deps))
def WriteTargetFlags(self, spec, configs, link_deps):
"""Write Makefile code to specify the link flags and library dependencies.
spec, configs: input from gyp.
link_deps: link dependency list; see ComputeDeps()
"""
# Libraries (i.e. -lfoo)
# These must be included even for static libraries as some of them provide
# implicit include paths through the build system.
libraries = gyp.common.uniquer(spec.get('libraries', []))
static_libs, dynamic_libs, ldflags_libs = self.FilterLibraries(libraries)
if self.type != 'static_library':
for configname, config in sorted(configs.iteritems()):
ldflags = list(config.get('ldflags', []))
self.WriteLn('')
self.WriteList(ldflags, 'LOCAL_LDFLAGS_%s' % configname)
self.WriteList(ldflags_libs, 'LOCAL_GYP_LIBS')
self.WriteLn('LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION)) '
'$(LOCAL_GYP_LIBS)')
# Link dependencies (i.e. other gyp targets this target depends on)
# These need not be included for static libraries as within the gyp build
# we do not use the implicit include path mechanism.
if self.type != 'static_library':
static_link_deps = [x[1] for x in link_deps if x[0] == 'static']
shared_link_deps = [x[1] for x in link_deps if x[0] == 'shared']
else:
static_link_deps = []
shared_link_deps = []
# Only write the lists if they are non-empty.
if static_libs or static_link_deps:
self.WriteLn('')
self.WriteList(static_libs + static_link_deps,
'LOCAL_STATIC_LIBRARIES')
self.WriteLn('# Enable grouping to fix circular references')
self.WriteLn('LOCAL_GROUP_STATIC_LIBRARIES := true')
if dynamic_libs or shared_link_deps:
self.WriteLn('')
self.WriteList(dynamic_libs + shared_link_deps,
'LOCAL_SHARED_LIBRARIES')
def WriteTarget(self, spec, configs, deps, link_deps, part_of_all,
write_alias_target):
"""Write Makefile code to produce the final target of the gyp spec.
spec, configs: input from gyp.
deps, link_deps: dependency lists; see ComputeDeps()
part_of_all: flag indicating this target is part of 'all'
write_alias_target: flag indicating whether to create short aliases for this
target
"""
self.WriteLn('### Rules for final target.')
if self.type != 'none':
self.WriteTargetFlags(spec, configs, link_deps)
settings = spec.get('aosp_build_settings', {})
if settings:
self.WriteLn('### Set directly by aosp_build_settings.')
for k, v in settings.iteritems():
if isinstance(v, list):
self.WriteList(v, k)
else:
self.WriteLn('%s := %s' % (k, make.QuoteIfNecessary(v)))
self.WriteLn('')
# Add to the set of targets which represent the gyp 'all' target. We use the
# name 'gyp_all_modules' as the Android build system doesn't allow the use
# of the Make target 'all' and because 'all_modules' is the equivalent of
# the Make target 'all' on Android.
if part_of_all and write_alias_target:
self.WriteLn('# Add target alias to "gyp_all_modules" target.')
self.WriteLn('.PHONY: gyp_all_modules')
self.WriteLn('gyp_all_modules: %s' % self.android_module)
self.WriteLn('')
# Add an alias from the gyp target name to the Android module name. This
# simplifies manual builds of the target, and is required by the test
# framework.
if self.target != self.android_module and write_alias_target:
self.WriteLn('# Alias gyp target name.')
self.WriteLn('.PHONY: %s' % self.target)
self.WriteLn('%s: %s' % (self.target, self.android_module))
self.WriteLn('')
# Add the command to trigger build of the target type depending
# on the toolset. Ex: BUILD_STATIC_LIBRARY vs. BUILD_HOST_STATIC_LIBRARY
# NOTE: This has to come last!
modifier = ''
if self.toolset == 'host':
modifier = 'HOST_'
if self.type == 'static_library':
self.WriteLn('include $(BUILD_%sSTATIC_LIBRARY)' % modifier)
elif self.type == 'shared_library':
self.WriteLn('LOCAL_PRELINK_MODULE := false')
self.WriteLn('include $(BUILD_%sSHARED_LIBRARY)' % modifier)
elif self.type == 'executable':
self.WriteLn('LOCAL_CXX_STL := libc++_static')
# Executables are for build and test purposes only, so they're installed
# to a directory that doesn't get included in the system image.
self.WriteLn('LOCAL_MODULE_PATH := $(gyp_shared_intermediate_dir)')
self.WriteLn('include $(BUILD_%sEXECUTABLE)' % modifier)
else:
self.WriteLn('LOCAL_MODULE_PATH := $(PRODUCT_OUT)/gyp_stamp')
self.WriteLn('LOCAL_UNINSTALLABLE_MODULE := true')
if self.toolset == 'target':
self.WriteLn('LOCAL_2ND_ARCH_VAR_PREFIX := $(GYP_VAR_PREFIX)')
else:
self.WriteLn('LOCAL_2ND_ARCH_VAR_PREFIX := $(GYP_HOST_VAR_PREFIX)')
self.WriteLn()
self.WriteLn('include $(BUILD_SYSTEM)/base_rules.mk')
self.WriteLn()
self.WriteLn('$(LOCAL_BUILT_MODULE): $(LOCAL_ADDITIONAL_DEPENDENCIES)')
self.WriteLn('\t$(hide) echo "Gyp timestamp: $@"')
self.WriteLn('\t$(hide) mkdir -p $(dir $@)')
self.WriteLn('\t$(hide) touch $@')
self.WriteLn()
self.WriteLn('LOCAL_2ND_ARCH_VAR_PREFIX :=')
def WriteList(self, value_list, variable=None, prefix='',
quoter=make.QuoteIfNecessary, local_pathify=False):
"""Write a variable definition that is a list of values.
E.g. WriteList(['a','b'], 'foo', prefix='blah') writes out
foo = blaha blahb
but in a pretty-printed style.
"""
values = ''
if value_list:
value_list = [quoter(prefix + l) for l in value_list]
if local_pathify:
value_list = [self.LocalPathify(l) for l in value_list]
values = ' \\\n\t' + ' \\\n\t'.join(value_list)
self.fp.write('%s :=%s\n\n' % (variable, values))
def WriteLn(self, text=''):
self.fp.write(text + '\n')
def LocalPathify(self, path):
"""Convert a subdirectory-relative path into a normalized path which starts
with the make variable $(LOCAL_PATH) (i.e. the top of the project tree).
Absolute paths, or paths that contain variables, are just normalized."""
if '$(' in path or os.path.isabs(path):
# path is not a file in the project tree in this case, but calling
# normpath is still important for trimming trailing slashes.
return os.path.normpath(path)
local_path = os.path.join('$(LOCAL_PATH)', self.path, path)
local_path = os.path.normpath(local_path)
# Check that normalizing the path didn't ../ itself out of $(LOCAL_PATH)
# - i.e. that the resulting path is still inside the project tree. The
# path may legitimately have ended up containing just $(LOCAL_PATH), though,
# so we don't look for a slash.
assert local_path.startswith('$(LOCAL_PATH)'), (
'Path %s attempts to escape from gyp path %s !)' % (path, self.path))
return local_path
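# Illustrative sketch (assuming self.path == 'foo/bar'; not from the original):
#   self.LocalPathify('baz/qux.c')   -> '$(LOCAL_PATH)/foo/bar/baz/qux.c'
#   self.LocalPathify('../common.c') -> '$(LOCAL_PATH)/foo/common.c'
#   self.LocalPathify('$(gyp_intermediate_dir)/gen.c') is returned unchanged
#   (apart from normpath), since it is not a project-tree path.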
def ExpandInputRoot(self, template, expansion, dirname):
if '%(INPUT_ROOT)s' not in template and '%(INPUT_DIRNAME)s' not in template:
return template
path = template % {
'INPUT_ROOT': expansion,
'INPUT_DIRNAME': dirname,
}
return os.path.normpath(path)
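# Illustrative sketch (assumed template values, not from the original):
#   self.ExpandInputRoot('%(INPUT_ROOT)s.pb.cc', 'foo', 'proto/dir')
#   -> 'foo.pb.cc'
#   self.ExpandInputRoot('%(INPUT_DIRNAME)s/gen.cc', 'foo', 'proto/dir')
#   -> 'proto/dir/gen.cc'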
def PerformBuild(data, configurations, params):
# The android backend only supports the default configuration.
options = params['options']
makefile = os.path.abspath(os.path.join(options.toplevel_dir,
'GypAndroid.mk'))
env = dict(os.environ)
env['ONE_SHOT_MAKEFILE'] = makefile
arguments = ['make', '-C', os.environ['ANDROID_BUILD_TOP'], 'gyp_all_modules']
print 'Building: %s' % arguments
subprocess.check_call(arguments, env=env)
def GenerateOutput(target_list, target_dicts, data, params):
options = params['options']
generator_flags = params.get('generator_flags', {})
builddir_name = generator_flags.get('output_dir', 'out')
limit_to_target_all = generator_flags.get('limit_to_target_all', False)
write_alias_targets = generator_flags.get('write_alias_targets', True)
sdk_version = generator_flags.get('aosp_sdk_version', 0)
android_top_dir = os.environ.get('ANDROID_BUILD_TOP')
assert android_top_dir, '$ANDROID_BUILD_TOP not set; you need to run lunch.'
def CalculateMakefilePath(build_file, base_name):
"""Determine where to write a Makefile for a given gyp file."""
# Paths in gyp files are relative to the .gyp file, but we want
# paths relative to the source root for the master makefile. Grab
# the path of the .gyp file as the base to relativize against.
# E.g. "foo/bar" when we're constructing targets for "foo/bar/baz.gyp".
base_path = gyp.common.RelativePath(os.path.dirname(build_file),
options.depth)
# We write the file in the base_path directory.
output_file = os.path.join(options.depth, base_path, base_name)
assert not options.generator_output, (
'The Android backend does not support options.generator_output.')
base_path = gyp.common.RelativePath(os.path.dirname(build_file),
options.toplevel_dir)
return base_path, output_file
# TODO: search for the first non-'Default' target. This can go
# away when we add verification that all targets have the
# necessary configurations.
default_configuration = None
toolsets = set([target_dicts[target]['toolset'] for target in target_list])
for target in target_list:
spec = target_dicts[target]
if spec['default_configuration'] != 'Default':
default_configuration = spec['default_configuration']
break
if not default_configuration:
default_configuration = 'Default'
srcdir = '.'
makefile_name = 'GypAndroid' + options.suffix + '.mk'
makefile_path = os.path.join(options.toplevel_dir, makefile_name)
assert not options.generator_output, (
'The Android backend does not support options.generator_output.')
gyp.common.EnsureDirExists(makefile_path)
root_makefile = open(makefile_path, 'w')
root_makefile.write(header)
# We set LOCAL_PATH just once, here, to the top of the project tree. This
# allows all the other paths we use to be relative to the Android.mk file,
# as the Android build system expects.
root_makefile.write('\nLOCAL_PATH := $(call my-dir)\n')
# Find the list of targets that derive from the gyp file(s) being built.
needed_targets = set()
for build_file in params['build_files']:
for target in gyp.common.AllTargets(target_list, target_dicts, build_file):
needed_targets.add(target)
build_files = set()
include_list = set()
android_modules = {}
for qualified_target in target_list:
build_file, target, toolset = gyp.common.ParseQualifiedTarget(
qualified_target)
relative_build_file = gyp.common.RelativePath(build_file,
options.toplevel_dir)
build_files.add(relative_build_file)
included_files = data[build_file]['included_files']
for included_file in included_files:
# The included_files entries are relative to the dir of the build file
# that included them, so we have to undo that and then make them relative
# to the root dir.
relative_include_file = gyp.common.RelativePath(
gyp.common.UnrelativePath(included_file, build_file),
options.toplevel_dir)
abs_include_file = os.path.abspath(relative_include_file)
# If the include file is from the ~/.gyp dir, we should use absolute path
# so that relocating the src dir doesn't break the path.
if (params['home_dot_gyp'] and
abs_include_file.startswith(params['home_dot_gyp'])):
build_files.add(abs_include_file)
else:
build_files.add(relative_include_file)
base_path, output_file = CalculateMakefilePath(build_file,
target + '.' + toolset + options.suffix + '.mk')
spec = target_dicts[qualified_target]
configs = spec['configurations']
part_of_all = qualified_target in needed_targets
if limit_to_target_all and not part_of_all:
continue
relative_target = gyp.common.QualifiedTarget(relative_build_file, target,
toolset)
writer = AndroidMkWriter(android_top_dir)
android_module = writer.Write(qualified_target, relative_target, base_path,
output_file, spec, configs,
part_of_all=part_of_all,
write_alias_target=write_alias_targets,
sdk_version=sdk_version)
if android_module in android_modules:
print ('ERROR: Android module names must be unique. The following '
'targets both generate Android module name %s.\n %s\n %s' %
(android_module, android_modules[android_module],
qualified_target))
return
android_modules[android_module] = qualified_target
# Our root_makefile lives at the source root. Compute the relative path
# from there to the output_file for including.
mkfile_rel_path = gyp.common.RelativePath(output_file,
os.path.dirname(makefile_path))
include_list.add(mkfile_rel_path)
root_makefile.write('GYP_CONFIGURATION ?= %s\n' % default_configuration)
root_makefile.write('GYP_VAR_PREFIX ?=\n')
root_makefile.write('GYP_HOST_VAR_PREFIX ?=\n')
root_makefile.write('GYP_HOST_MULTILIB ?= first\n')
# Write out the sorted list of includes.
root_makefile.write('\n')
for include_file in sorted(include_list):
root_makefile.write('include $(LOCAL_PATH)/' + include_file + '\n')
root_makefile.write('\n')
if write_alias_targets:
root_makefile.write(ALL_MODULES_FOOTER)
root_makefile.close()
| mit | -1,713,747,280,117,781,500 | 40.410046 | 80 | 0.640371 | false |
fxfitz/ansible | lib/ansible/modules/cloud/google/gcp_compute_backend_service.py | 14 | 36795 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ["preview"],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_compute_backend_service
description:
- Creates a BackendService resource in the specified project using the data included
in the request.
short_description: Creates a GCP BackendService
version_added: 2.6
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
state:
description:
- Whether the given object should exist in GCP
choices: ['present', 'absent']
default: 'present'
affinity_cookie_ttl_sec:
description:
- Lifetime of cookies in seconds if session_affinity is GENERATED_COOKIE. If set to
0, the cookie is non-persistent and lasts only until the end of the browser session
(or equivalent). The maximum allowed value for TTL is one day.
- When the load balancing scheme is INTERNAL, this field is not used.
required: false
backends:
description:
- The list of backends that serve this BackendService.
required: false
suboptions:
balancing_mode:
description:
- Specifies the balancing mode for this backend.
- For global HTTP(S) or TCP/SSL load balancing, the default is UTILIZATION. Valid
values are UTILIZATION, RATE (for HTTP(S)) and CONNECTION (for TCP/SSL).
- This cannot be used for internal load balancing.
required: false
choices: ['UTILIZATION', 'RATE', 'CONNECTION']
capacity_scaler:
description:
- A multiplier applied to the group's maximum servicing capacity (based on UTILIZATION,
RATE or CONNECTION).
- Default value is 1, which means the group will serve up to 100% of its configured
capacity (depending on balancingMode). A setting of 0 means the group is completely
drained, offering 0% of its available Capacity. Valid range is [0.0,1.0].
- This cannot be used for internal load balancing.
required: false
description:
description:
- An optional description of this resource.
- Provide this property when you create the resource.
required: false
group:
description:
- A reference to InstanceGroup resource.
required: false
max_connections:
description:
- The max number of simultaneous connections for the group. Can be used with either
CONNECTION or UTILIZATION balancing modes.
- For CONNECTION mode, either maxConnections or maxConnectionsPerInstance must be
set.
- This cannot be used for internal load balancing.
required: false
max_connections_per_instance:
description:
- The max number of simultaneous connections that a single backend instance can handle.
This is used to calculate the capacity of the group. Can be used in either CONNECTION
or UTILIZATION balancing modes.
- For CONNECTION mode, either maxConnections or maxConnectionsPerInstance must be
set.
- This cannot be used for internal load balancing.
required: false
max_rate:
description:
- The max requests per second (RPS) of the group.
- Can be used with either RATE or UTILIZATION balancing modes, but required if RATE
mode. For RATE mode, either maxRate or maxRatePerInstance must be set.
- This cannot be used for internal load balancing.
required: false
max_rate_per_instance:
description:
- The max requests per second (RPS) that a single backend instance can handle. This
is used to calculate the capacity of the group. Can be used in either balancing
mode. For RATE mode, either maxRate or maxRatePerInstance must be set.
- This cannot be used for internal load balancing.
required: false
max_utilization:
description:
- Used when balancingMode is UTILIZATION. This ratio defines the CPU utilization target
for the group. The default is 0.8. Valid range is [0.0, 1.0].
- This cannot be used for internal load balancing.
required: false
cdn_policy:
description:
- Cloud CDN configuration for this BackendService.
required: false
suboptions:
cache_key_policy:
description:
- The CacheKeyPolicy for this CdnPolicy.
required: false
suboptions:
include_host:
description:
- If true requests to different hosts will be cached separately.
required: false
type: bool
include_protocol:
description:
- If true, http and https requests will be cached separately.
required: false
type: bool
include_query_string:
description:
- If true, include query string parameters in the cache key according to query_string_whitelist
and query_string_blacklist. If neither is set, the entire query string will be included.
- If false, the query string will be excluded from the cache key entirely.
required: false
type: bool
query_string_blacklist:
description:
- Names of query string parameters to exclude in cache keys.
- All other parameters will be included. Either specify query_string_whitelist or
query_string_blacklist, not both.
- "'&' and '=' will be percent encoded and not treated as delimiters."
required: false
query_string_whitelist:
description:
- Names of query string parameters to include in cache keys.
- All other parameters will be excluded. Either specify query_string_whitelist or
query_string_blacklist, not both.
- "'&' and '=' will be percent encoded and not treated as delimiters."
required: false
connection_draining:
description:
- Settings for connection draining.
required: false
suboptions:
draining_timeout_sec:
description:
- Time for which instance will be drained (not accept new connections, but still work
to finish started).
required: false
description:
description:
- An optional description of this resource.
required: false
enable_cdn:
description:
- If true, enable Cloud CDN for this BackendService.
- When the load balancing scheme is INTERNAL, this field is not used.
required: false
type: bool
health_checks:
description:
- The list of URLs to the HttpHealthCheck or HttpsHealthCheck resource for health
checking this BackendService. Currently at most one health check can be specified,
and a health check is required.
- For internal load balancing, a URL to a HealthCheck resource must be specified instead.
required: false
name:
description:
- Name of the resource. Provided by the client when the resource is created. The name
must be 1-63 characters long, and comply with RFC1035. Specifically, the name must
be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following characters
must be a dash, lowercase letter, or digit, except the last character, which cannot
be a dash.
required: false
port_name:
description:
- Name of backend port. The same name should appear in the instance groups referenced
by this service. Required when the load balancing scheme is EXTERNAL.
- When the load balancing scheme is INTERNAL, this field is not used.
required: false
protocol:
description:
- The protocol this BackendService uses to communicate with backends.
- Possible values are HTTP, HTTPS, TCP, and SSL. The default is HTTP.
- For internal load balancing, the possible values are TCP and UDP, and the default
is TCP.
required: false
choices: ['HTTP', 'HTTPS', 'TCP', 'SSL']
region:
description:
- A reference to Region resource.
required: false
session_affinity:
description:
- Type of session affinity to use. The default is NONE.
- When the load balancing scheme is EXTERNAL, can be NONE, CLIENT_IP, or GENERATED_COOKIE.
- When the load balancing scheme is INTERNAL, can be NONE, CLIENT_IP, CLIENT_IP_PROTO,
or CLIENT_IP_PORT_PROTO.
- When the protocol is UDP, this field is not used.
required: false
choices: ['NONE', 'CLIENT_IP', 'GENERATED_COOKIE', 'CLIENT_IP_PROTO', 'CLIENT_IP_PORT_PROTO']
timeout_sec:
description:
- How many seconds to wait for the backend before considering it a failed request.
Default is 30 seconds. Valid range is [1, 86400].
required: false
aliases: [timeout_seconds]
extends_documentation_fragment: gcp
'''
EXAMPLES = '''
- name: create a instance group
gcp_compute_instance_group:
name: 'instancegroup-backendservice'
zone: 'us-central1-a'
project: "{{ gcp_project }}"
auth_kind: "{{ gcp_cred_kind }}"
service_account_file: "{{ gcp_cred_file }}"
scopes:
- https://www.googleapis.com/auth/compute
state: present
register: instancegroup
- name: create a http health check
gcp_compute_http_health_check:
name: 'httphealthcheck-backendservice'
healthy_threshold: 10
port: 8080
timeout_sec: 2
unhealthy_threshold: 5
project: "{{ gcp_project }}"
auth_kind: "{{ gcp_cred_kind }}"
service_account_file: "{{ gcp_cred_file }}"
scopes:
- https://www.googleapis.com/auth/compute
state: present
register: healthcheck
- name: create a backend service
gcp_compute_backend_service:
name: testObject
backends:
- group: "{{ instancegroup }}"
health_checks:
- "{{ healthcheck.selfLink }}"
enable_cdn: true
project: testProject
auth_kind: service_account
service_account_file: /tmp/auth.pem
scopes:
- https://www.googleapis.com/auth/compute
state: present
'''
RETURN = '''
affinity_cookie_ttl_sec:
description:
- Lifetime of cookies in seconds if session_affinity is GENERATED_COOKIE. If set to
0, the cookie is non-persistent and lasts only until the end of the browser session
(or equivalent). The maximum allowed value for TTL is one day.
- When the load balancing scheme is INTERNAL, this field is not used.
returned: success
type: int
backends:
description:
- The list of backends that serve this BackendService.
returned: success
type: complex
contains:
balancing_mode:
description:
- Specifies the balancing mode for this backend.
- For global HTTP(S) or TCP/SSL load balancing, the default is UTILIZATION. Valid
values are UTILIZATION, RATE (for HTTP(S)) and CONNECTION (for TCP/SSL).
- This cannot be used for internal load balancing.
returned: success
type: str
capacity_scaler:
description:
- A multiplier applied to the group's maximum servicing capacity (based on UTILIZATION,
RATE or CONNECTION).
- Default value is 1, which means the group will serve up to 100% of its configured
capacity (depending on balancingMode). A setting of 0 means the group is completely
drained, offering 0% of its available Capacity. Valid range is [0.0,1.0].
- This cannot be used for internal load balancing.
returned: success
type: str
description:
description:
- An optional description of this resource.
- Provide this property when you create the resource.
returned: success
type: str
group:
description:
- A reference to InstanceGroup resource.
returned: success
type: dict
max_connections:
description:
- The max number of simultaneous connections for the group. Can be used with either
CONNECTION or UTILIZATION balancing modes.
- For CONNECTION mode, either maxConnections or maxConnectionsPerInstance must be
set.
- This cannot be used for internal load balancing.
returned: success
type: int
max_connections_per_instance:
description:
- The max number of simultaneous connections that a single backend instance can handle.
This is used to calculate the capacity of the group. Can be used in either CONNECTION
or UTILIZATION balancing modes.
- For CONNECTION mode, either maxConnections or maxConnectionsPerInstance must be
set.
- This cannot be used for internal load balancing.
returned: success
type: int
max_rate:
description:
- The max requests per second (RPS) of the group.
- Can be used with either RATE or UTILIZATION balancing modes, but required if RATE
mode. For RATE mode, either maxRate or maxRatePerInstance must be set.
- This cannot be used for internal load balancing.
returned: success
type: int
max_rate_per_instance:
description:
- The max requests per second (RPS) that a single backend instance can handle. This
is used to calculate the capacity of the group. Can be used in either balancing
mode. For RATE mode, either maxRate or maxRatePerInstance must be set.
- This cannot be used for internal load balancing.
returned: success
type: str
max_utilization:
description:
- Used when balancingMode is UTILIZATION. This ratio defines the CPU utilization target
for the group. The default is 0.8. Valid range is [0.0, 1.0].
- This cannot be used for internal load balancing.
returned: success
type: str
cdn_policy:
description:
- Cloud CDN configuration for this BackendService.
returned: success
type: complex
contains:
cache_key_policy:
description:
- The CacheKeyPolicy for this CdnPolicy.
returned: success
type: complex
contains:
include_host:
description:
- If true requests to different hosts will be cached separately.
returned: success
type: bool
include_protocol:
description:
- If true, http and https requests will be cached separately.
returned: success
type: bool
include_query_string:
description:
- If true, include query string parameters in the cache key according to query_string_whitelist
and query_string_blacklist. If neither is set, the entire query string will be included.
- If false, the query string will be excluded from the cache key entirely.
returned: success
type: bool
query_string_blacklist:
description:
- Names of query string parameters to exclude in cache keys.
- All other parameters will be included. Either specify query_string_whitelist or
query_string_blacklist, not both.
- "'&' and '=' will be percent encoded and not treated as delimiters."
returned: success
type: list
query_string_whitelist:
description:
- Names of query string parameters to include in cache keys.
- All other parameters will be excluded. Either specify query_string_whitelist or
query_string_blacklist, not both.
- "'&' and '=' will be percent encoded and not treated as delimiters."
returned: success
type: list
connection_draining:
description:
- Settings for connection draining.
returned: success
type: complex
contains:
draining_timeout_sec:
description:
- Time for which instance will be drained (not accept new connections, but still work
to finish started).
returned: success
type: int
creation_timestamp:
description:
- Creation timestamp in RFC3339 text format.
returned: success
type: str
description:
description:
- An optional description of this resource.
returned: success
type: str
enable_cdn:
description:
- If true, enable Cloud CDN for this BackendService.
- When the load balancing scheme is INTERNAL, this field is not used.
returned: success
type: bool
health_checks:
description:
- The list of URLs to the HttpHealthCheck or HttpsHealthCheck resource for health
checking this BackendService. Currently at most one health check can be specified,
and a health check is required.
- For internal load balancing, a URL to a HealthCheck resource must be specified instead.
returned: success
type: list
id:
description:
- The unique identifier for the resource.
returned: success
type: int
name:
description:
- Name of the resource. Provided by the client when the resource is created. The name
must be 1-63 characters long, and comply with RFC1035. Specifically, the name must
be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following characters
must be a dash, lowercase letter, or digit, except the last character, which cannot
be a dash.
returned: success
type: str
port_name:
description:
- Name of backend port. The same name should appear in the instance groups referenced
by this service. Required when the load balancing scheme is EXTERNAL.
- When the load balancing scheme is INTERNAL, this field is not used.
returned: success
type: str
protocol:
description:
- The protocol this BackendService uses to communicate with backends.
- Possible values are HTTP, HTTPS, TCP, and SSL. The default is HTTP.
- For internal load balancing, the possible values are TCP and UDP, and the default
is TCP.
returned: success
type: str
region:
description:
- A reference to Region resource.
returned: success
type: str
session_affinity:
description:
- Type of session affinity to use. The default is NONE.
- When the load balancing scheme is EXTERNAL, can be NONE, CLIENT_IP, or GENERATED_COOKIE.
- When the load balancing scheme is INTERNAL, can be NONE, CLIENT_IP, CLIENT_IP_PROTO,
or CLIENT_IP_PORT_PROTO.
- When the protocol is UDP, this field is not used.
returned: success
type: str
timeout_sec:
description:
- How many seconds to wait for the backend before considering it a failed request.
Default is 30 seconds. Valid range is [1, 86400].
returned: success
type: int
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, remove_nones_from_dict, replace_resource_dict
import json
import re
import time
################################################################################
# Main
################################################################################
def main():
"""Main function"""
module = GcpModule(
argument_spec=dict(
state=dict(default='present', choices=['present', 'absent'], type='str'),
affinity_cookie_ttl_sec=dict(type='int'),
backends=dict(type='list', elements='dict', options=dict(
balancing_mode=dict(type='str', choices=['UTILIZATION', 'RATE', 'CONNECTION']),
capacity_scaler=dict(type='str'),
description=dict(type='str'),
group=dict(type='dict'),
max_connections=dict(type='int'),
max_connections_per_instance=dict(type='int'),
max_rate=dict(type='int'),
max_rate_per_instance=dict(type='str'),
max_utilization=dict(type='str')
)),
cdn_policy=dict(type='dict', options=dict(
cache_key_policy=dict(type='dict', options=dict(
include_host=dict(type='bool'),
include_protocol=dict(type='bool'),
include_query_string=dict(type='bool'),
query_string_blacklist=dict(type='list', elements='str'),
query_string_whitelist=dict(type='list', elements='str')
))
)),
connection_draining=dict(type='dict', options=dict(
draining_timeout_sec=dict(type='int')
)),
description=dict(type='str'),
enable_cdn=dict(type='bool'),
health_checks=dict(type='list', elements='str'),
name=dict(type='str'),
port_name=dict(type='str'),
protocol=dict(type='str', choices=['HTTP', 'HTTPS', 'TCP', 'SSL']),
region=dict(type='str'),
session_affinity=dict(type='str', choices=['NONE', 'CLIENT_IP', 'GENERATED_COOKIE', 'CLIENT_IP_PROTO', 'CLIENT_IP_PORT_PROTO']),
timeout_sec=dict(type='int', aliases=['timeout_seconds'])
)
)
state = module.params['state']
kind = 'compute#backendService'
fetch = fetch_resource(module, self_link(module), kind)
changed = False
if fetch:
if state == 'present':
if is_different(module, fetch):
fetch = update(module, self_link(module), kind, fetch)
changed = True
else:
delete(module, self_link(module), kind, fetch)
fetch = {}
changed = True
else:
if state == 'present':
fetch = create(module, collection(module), kind)
changed = True
else:
fetch = {}
fetch.update({'changed': changed})
module.exit_json(**fetch)
def create(module, link, kind):
auth = GcpSession(module, 'compute')
return wait_for_operation(module, auth.post(link, resource_to_request(module)))
def update(module, link, kind, fetch):
auth = GcpSession(module, 'compute')
return wait_for_operation(module, auth.put(link, resource_to_request(module)))
def delete(module, link, kind, fetch):
auth = GcpSession(module, 'compute')
return wait_for_operation(module, auth.delete(link))
def resource_to_request(module):
request = {
u'kind': 'compute#backendService',
u'affinityCookieTtlSec': module.params.get('affinity_cookie_ttl_sec'),
u'backends': BackendServiceBackendArray(module.params.get('backends', []), module).to_request(),
u'cdnPolicy': BackeServiCdnPolic(module.params.get('cdn_policy', {}), module).to_request(),
u'connectionDraining': BackeServiConneDrain(module.params.get('connection_draining', {}), module).to_request(),
u'description': module.params.get('description'),
u'enableCDN': module.params.get('enable_cdn'),
u'healthChecks': module.params.get('health_checks'),
u'name': module.params.get('name'),
u'portName': module.params.get('port_name'),
u'protocol': module.params.get('protocol'),
u'region': region_selflink(module.params.get('region'), module.params),
u'sessionAffinity': module.params.get('session_affinity'),
u'timeoutSec': module.params.get('timeout_sec')
}
return_vals = {}
for k, v in request.items():
if v:
return_vals[k] = v
return return_vals
def fetch_resource(module, link, kind):
auth = GcpSession(module, 'compute')
return return_if_object(module, auth.get(link), kind)
def self_link(module):
return "https://www.googleapis.com/compute/v1/projects/{project}/global/backendServices/{name}".format(**module.params)
def collection(module):
return "https://www.googleapis.com/compute/v1/projects/{project}/global/backendServices".format(**module.params)
def return_if_object(module, response, kind):
# If not found, return nothing.
if response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
module.fail_json(msg="Invalid JSON response with error: %s" % inst)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
if result['kind'] != kind:
module.fail_json(msg="Incorrect result: {kind}".format(**result))
return result
def is_different(module, response):
request = resource_to_request(module)
response = response_to_hash(module, response)
# Remove all output-only from response.
response_vals = {}
for k, v in response.items():
if k in request:
response_vals[k] = v
request_vals = {}
for k, v in request.items():
if k in response:
request_vals[k] = v
return GcpRequest(request_vals) != GcpRequest(response_vals)
# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response):
return {
u'affinityCookieTtlSec': response.get(u'affinityCookieTtlSec'),
u'backends': BackendServiceBackendArray(response.get(u'backends', []), module).from_response(),
u'cdnPolicy': BackeServiCdnPolic(response.get(u'cdnPolicy', {}), module).from_response(),
u'connectionDraining': BackeServiConneDrain(response.get(u'connectionDraining', {}), module).from_response(),
u'creationTimestamp': response.get(u'creationTimestamp'),
u'description': response.get(u'description'),
u'enableCDN': response.get(u'enableCDN'),
u'healthChecks': response.get(u'healthChecks'),
u'id': response.get(u'id'),
u'name': response.get(u'name'),
u'portName': response.get(u'portName'),
u'protocol': response.get(u'protocol'),
u'region': response.get(u'region'),
u'sessionAffinity': response.get(u'sessionAffinity'),
u'timeoutSec': response.get(u'timeoutSec')
}
def region_selflink(name, params):
if name is None:
return
url = r"https://www.googleapis.com/compute/v1/projects/.*/regions/[a-z1-9\-]*"
if not re.match(url, name):
name = "https://www.googleapis.com/compute/v1/projects/{project}/regions/%s".format(**params) % name
return name
def async_op_url(module, extra_data=None):
if extra_data is None:
extra_data = {}
url = "https://www.googleapis.com/compute/v1/projects/{project}/global/operations/{op_id}"
combined = extra_data.copy()
combined.update(module.params)
return url.format(**combined)
def wait_for_operation(module, response):
op_result = return_if_object(module, response, 'compute#operation')
if op_result is None:
return None
status = navigate_hash(op_result, ['status'])
wait_done = wait_for_completion(status, op_result, module)
return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#backendService')
def wait_for_completion(status, op_result, module):
op_id = navigate_hash(op_result, ['name'])
op_uri = async_op_url(module, {'op_id': op_id})
while status != 'DONE':
        raise_if_errors(op_result, ['error', 'errors'], module)
time.sleep(1.0)
if status not in ['PENDING', 'RUNNING', 'DONE']:
module.fail_json(msg="Invalid result %s" % status)
op_result = fetch_resource(module, op_uri, 'compute#operation')
status = navigate_hash(op_result, ['status'])
return op_result
def raise_if_errors(response, err_path, module):
errors = navigate_hash(response, err_path)
if errors is not None:
module.fail_json(msg=errors)
class BackendServiceBackendArray(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = []
def to_request(self):
items = []
for item in self.request:
items.append(self._request_for_item(item))
return items
def from_response(self):
items = []
for item in self.request:
items.append(self._response_from_item(item))
return items
def _request_for_item(self, item):
return remove_nones_from_dict({
u'balancingMode': item.get('balancing_mode'),
u'capacityScaler': item.get('capacity_scaler'),
u'description': item.get('description'),
u'group': replace_resource_dict(item.get(u'group', {}), 'selfLink'),
u'maxConnections': item.get('max_connections'),
u'maxConnectionsPerInstance': item.get('max_connections_per_instance'),
u'maxRate': item.get('max_rate'),
u'maxRatePerInstance': item.get('max_rate_per_instance'),
u'maxUtilization': item.get('max_utilization')
})
def _response_from_item(self, item):
return remove_nones_from_dict({
u'balancingMode': item.get(u'balancingMode'),
u'capacityScaler': item.get(u'capacityScaler'),
u'description': item.get(u'description'),
u'group': item.get(u'group'),
u'maxConnections': item.get(u'maxConnections'),
u'maxConnectionsPerInstance': item.get(u'maxConnectionsPerInstance'),
u'maxRate': item.get(u'maxRate'),
u'maxRatePerInstance': item.get(u'maxRatePerInstance'),
u'maxUtilization': item.get(u'maxUtilization')
})
class BackeServiCdnPolic(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict({
u'cacheKeyPolicy': BackServCachKeyPoli(self.request.get('cache_key_policy', {}), self.module).to_request()
})
def from_response(self):
return remove_nones_from_dict({
u'cacheKeyPolicy': BackServCachKeyPoli(self.request.get(u'cacheKeyPolicy', {}), self.module).from_response()
})
class BackServCachKeyPoli(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict({
u'includeHost': self.request.get('include_host'),
u'includeProtocol': self.request.get('include_protocol'),
u'includeQueryString': self.request.get('include_query_string'),
u'queryStringBlacklist': self.request.get('query_string_blacklist'),
u'queryStringWhitelist': self.request.get('query_string_whitelist')
})
def from_response(self):
return remove_nones_from_dict({
u'includeHost': self.request.get(u'includeHost'),
u'includeProtocol': self.request.get(u'includeProtocol'),
u'includeQueryString': self.request.get(u'includeQueryString'),
u'queryStringBlacklist': self.request.get(u'queryStringBlacklist'),
u'queryStringWhitelist': self.request.get(u'queryStringWhitelist')
})
class BackeServiConneDrain(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict({
u'drainingTimeoutSec': self.request.get('draining_timeout_sec')
})
def from_response(self):
return remove_nones_from_dict({
u'drainingTimeoutSec': self.request.get(u'drainingTimeoutSec')
})
if __name__ == '__main__':
main()
| gpl-3.0 | 7,372,240,479,186,199,000 | 41.984813 | 140 | 0.570186 | false |
ring00/bbl-ucore | related_info/ostep/ostep10-lottery.py | 54 | 3990 | #! /usr/bin/env python
import sys
from optparse import OptionParser
import random
parser = OptionParser()
parser.add_option('-s', '--seed', default=0, help='the random seed', action='store', type='int', dest='seed')
parser.add_option('-j', '--jobs', default=3, help='number of jobs in the system', action='store', type='int', dest='jobs')
parser.add_option('-l', '--jlist', default='', help='instead of random jobs, provide a comma-separated list of run times and ticket values (e.g., 10:100,20:100 would have two jobs with run-times of 10 and 20, each with 100 tickets)', action='store', type='string', dest='jlist')
parser.add_option('-m', '--maxlen', default=10, help='max length of job', action='store', type='int', dest='maxlen')
parser.add_option('-T', '--maxticket', default=100, help='maximum ticket value, if randomly assigned', action='store', type='int', dest='maxticket')
parser.add_option('-q', '--quantum', default=1, help='length of time slice', action='store', type='int', dest='quantum')
parser.add_option('-c', '--compute', help='compute answers for me', action='store_true', default=False, dest='solve')
(options, args) = parser.parse_args()
random.seed(options.seed)
print 'ARG jlist', options.jlist
print 'ARG jobs', options.jobs
print 'ARG maxlen', options.maxlen
print 'ARG maxticket', options.maxticket
print 'ARG quantum', options.quantum
print 'ARG seed', options.seed
print ''
print 'Here is the job list, with the run time of each job: '
import operator
tickTotal = 0
runTotal = 0
joblist = []
if options.jlist == '':
for jobnum in range(0,options.jobs):
runtime = int(options.maxlen * random.random())
tickets = int(options.maxticket * random.random())
runTotal += runtime
tickTotal += tickets
joblist.append([jobnum, runtime, tickets])
print ' Job %d ( length = %d, tickets = %d )' % (jobnum, runtime, tickets)
else:
jobnum = 0
for entry in options.jlist.split(','):
(runtime, tickets) = entry.split(':')
joblist.append([jobnum, int(runtime), int(tickets)])
runTotal += int(runtime)
tickTotal += int(tickets)
jobnum += 1
for job in joblist:
print ' Job %d ( length = %d, tickets = %d )' % (job[0], job[1], job[2])
print '\n'
if options.solve == False:
print 'Here is the set of random numbers you will need (at most):'
for i in range(runTotal):
r = int(random.random() * 1000001)
print 'Random', r
if options.solve == True:
print '** Solutions **\n'
jobs = len(joblist)
clock = 0
for i in range(runTotal):
r = int(random.random() * 1000001)
winner = int(r % tickTotal)
current = 0
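        # Walk the job list, accumulating ticket counts until we pass the
        # winning ticket; that job owns the winning range (lottery scheduling).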
for (job, runtime, tickets) in joblist:
current += tickets
if current > winner:
(wjob, wrun, wtix) = (job, runtime, tickets)
break
print 'Random', r, '-> Winning ticket %d (of %d) -> Run %d' % (winner, tickTotal, wjob)
# print 'Winning ticket %d (of %d) -> Run %d' % (winner, tickTotal, wjob)
print ' Jobs:',
for (job, runtime, tickets) in joblist:
if wjob == job:
wstr = '*'
else:
wstr = ' '
if runtime > 0:
tstr = tickets
else:
tstr = '---'
print ' (%s job:%d timeleft:%d tix:%s ) ' % (wstr, job, runtime, tstr),
print ''
# now do the accounting
if wrun >= options.quantum:
wrun -= options.quantum
else:
wrun = 0
clock += options.quantum
# job completed!
if wrun == 0:
print '--> JOB %d DONE at time %d' % (wjob, clock)
tickTotal -= wtix
wtix = 0
jobs -= 1
# update job list
joblist[wjob] = (wjob, wrun, wtix)
if jobs == 0:
print ''
break
| gpl-2.0 | 2,357,221,937,666,445,300 | 32.529412 | 279 | 0.570677 | false |
orangeduck/PyAutoC | Python27/Lib/ctypes/test/test_array_in_pointer.py | 117 | 1729 | import unittest
from ctypes import *
from binascii import hexlify
import re
def dump(obj):
# helper function to dump memory contents in hex, with a hyphen
# between the bytes.
h = hexlify(memoryview(obj))
return re.sub(r"(..)", r"\1-", h)[:-1]
class Value(Structure):
_fields_ = [("val", c_byte)]
class Container(Structure):
_fields_ = [("pvalues", POINTER(Value))]
class Test(unittest.TestCase):
def test(self):
# create an array of 4 values
val_array = (Value * 4)()
# create a container, which holds a pointer to the pvalues array.
c = Container()
c.pvalues = val_array
# memory contains 4 NUL bytes now, that's correct
self.assertEqual("00-00-00-00", dump(val_array))
# set the values of the array through the pointer:
for i in range(4):
c.pvalues[i].val = i + 1
values = [c.pvalues[i].val for i in range(4)]
        # These are the expected results: here's the bug!
self.assertEqual(
(values, dump(val_array)),
([1, 2, 3, 4], "01-02-03-04")
)
def test_2(self):
val_array = (Value * 4)()
# memory contains 4 NUL bytes now, that's correct
self.assertEqual("00-00-00-00", dump(val_array))
ptr = cast(val_array, POINTER(Value))
# set the values of the array through the pointer:
for i in range(4):
ptr[i].val = i + 1
values = [ptr[i].val for i in range(4)]
        # These are the expected results: here's the bug!
self.assertEqual(
(values, dump(val_array)),
([1, 2, 3, 4], "01-02-03-04")
)
if __name__ == "__main__":
unittest.main()
| bsd-2-clause | 420,706,099,086,015,360 | 26.015625 | 73 | 0.561018 | false |
riteshshrv/django | django/contrib/gis/geos/prototypes/geom.py | 288 | 4069 | from ctypes import POINTER, c_char_p, c_int, c_size_t, c_ubyte
from django.contrib.gis.geos.libgeos import CS_PTR, GEOM_PTR, GEOSFuncFactory
from django.contrib.gis.geos.prototypes.errcheck import (
check_geom, check_minus_one, check_sized_string, check_string, check_zero,
)
# This is the return type used by binary output (WKB, HEX) routines.
c_uchar_p = POINTER(c_ubyte)
# We create a simple subclass of c_char_p here because when the response
# type is set to c_char_p, you get a _Python_ string and there's no way
# to access the string's address inside the error checking function.
# In other words, you can't free the memory allocated inside GEOS. Previously,
# the return type would just be omitted and the integer address would be
# used -- but this allows us to be specific in the function definition and
# keeps the reference so it may be free'd.
class geos_char_p(c_char_p):
pass
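# Illustrative sketch (not part of the original module), showing the ctypes
# behavior the comment above relies on. 'some_geos_func' is a hypothetical
# GEOS routine returning a char*:
#
#     some_geos_func.restype = c_char_p
#     s = some_geos_func(...)      # auto-converted to a Python string; the
#                                  # returned address is lost, so the
#                                  # GEOS-allocated buffer cannot be freed
#
#     some_geos_func.restype = geos_char_p
#     p = some_geos_func(...)      # keeps the pointer object; the errcheck
#                                  # hook (see prototypes/errcheck.py) can read
#                                  # p.value and then free the GEOS buffer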
# ### ctypes factory classes ###
class BinConstructor(GEOSFuncFactory):
"Generates a prototype for binary construction (HEX, WKB) GEOS routines."
argtypes = [c_char_p, c_size_t]
restype = GEOM_PTR
errcheck = staticmethod(check_geom)
# HEX & WKB output
class BinOutput(GEOSFuncFactory):
"Generates a prototype for the routines that return a sized string."
argtypes = [GEOM_PTR, POINTER(c_size_t)]
restype = c_uchar_p
errcheck = staticmethod(check_sized_string)
class GeomOutput(GEOSFuncFactory):
"For GEOS routines that return a geometry."
restype = GEOM_PTR
errcheck = staticmethod(check_geom)
def get_func(self, argtypes):
self.argtypes = argtypes
return super(GeomOutput, self).get_func()
class IntFromGeom(GEOSFuncFactory):
"Argument is a geometry, return type is an integer."
argtypes = [GEOM_PTR]
restype = c_int
def get_func(self, zero=False):
if zero:
self.errcheck = check_zero
else:
self.errcheck = check_minus_one
return super(IntFromGeom, self).get_func()
class StringFromGeom(GEOSFuncFactory):
"Argument is a Geometry, return type is a string."
argtypes = [GEOM_PTR]
restype = geos_char_p
errcheck = staticmethod(check_string)
# ### ctypes prototypes ###
# Deprecated creation routines from WKB, HEX, WKT
from_hex = BinConstructor('GEOSGeomFromHEX_buf')
from_wkb = BinConstructor('GEOSGeomFromWKB_buf')
from_wkt = GeomOutput('GEOSGeomFromWKT', [c_char_p])
# Deprecated output routines
to_hex = BinOutput('GEOSGeomToHEX_buf')
to_wkb = BinOutput('GEOSGeomToWKB_buf')
to_wkt = StringFromGeom('GEOSGeomToWKT')
# The GEOS geometry type, typeid, num_coordinates and number of geometries
geos_normalize = IntFromGeom('GEOSNormalize')
geos_type = StringFromGeom('GEOSGeomType')
geos_typeid = IntFromGeom('GEOSGeomTypeId')
get_dims = IntFromGeom('GEOSGeom_getDimensions', zero=True)
get_num_coords = IntFromGeom('GEOSGetNumCoordinates')
get_num_geoms = IntFromGeom('GEOSGetNumGeometries')
# Geometry creation factories
create_point = GeomOutput('GEOSGeom_createPoint', [CS_PTR])
create_linestring = GeomOutput('GEOSGeom_createLineString', [CS_PTR])
create_linearring = GeomOutput('GEOSGeom_createLinearRing', [CS_PTR])
# Polygon and collection creation routines are special and will not
# have their argument types defined.
create_polygon = GeomOutput('GEOSGeom_createPolygon', None)
create_collection = GeomOutput('GEOSGeom_createCollection', None)
# Ring routines
get_extring = GeomOutput('GEOSGetExteriorRing', [GEOM_PTR])
get_intring = GeomOutput('GEOSGetInteriorRingN', [GEOM_PTR, c_int])
get_nrings = IntFromGeom('GEOSGetNumInteriorRings')
# Collection Routines
get_geomn = GeomOutput('GEOSGetGeometryN', [GEOM_PTR, c_int])
# Cloning
geom_clone = GEOSFuncFactory('GEOSGeom_clone', argtypes=[GEOM_PTR], restype=GEOM_PTR)
# Destruction routine.
destroy_geom = GEOSFuncFactory('GEOSGeom_destroy', argtypes=[GEOM_PTR])
# SRID routines
geos_get_srid = GEOSFuncFactory('GEOSGetSRID', argtypes=[GEOM_PTR], restype=c_int)
geos_set_srid = GEOSFuncFactory('GEOSSetSRID', argtypes=[GEOM_PTR, c_int])
| bsd-3-clause | 5,216,675,870,689,809,000 | 34.382609 | 85 | 0.736299 | false |
sbalde/edxplatform | openedx/core/djangoapps/content/course_overviews/migrations/0006_add_version_and_timestamp.py | 56 | 4600 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'CourseOverview.created'
db.add_column('course_overviews_courseoverview', 'created',
self.gf('model_utils.fields.AutoCreatedField')(default=datetime.datetime.now),
keep_default=False)
# Adding field 'CourseOverview.modified'
db.add_column('course_overviews_courseoverview', 'modified',
self.gf('model_utils.fields.AutoLastModifiedField')(default=datetime.datetime.now),
keep_default=False)
# Adding field 'CourseOverview.version'
db.add_column('course_overviews_courseoverview', 'version',
self.gf('django.db.models.fields.IntegerField')(default=0),
keep_default=False)
def backwards(self, orm):
# Deleting field 'CourseOverview.created'
db.delete_column('course_overviews_courseoverview', 'created')
# Deleting field 'CourseOverview.modified'
db.delete_column('course_overviews_courseoverview', 'modified')
# Deleting field 'CourseOverview.version'
db.delete_column('course_overviews_courseoverview', 'version')
models = {
'course_overviews.courseoverview': {
'Meta': {'object_name': 'CourseOverview'},
'_location': ('xmodule_django.models.UsageKeyField', [], {'max_length': '255'}),
'_pre_requisite_courses_json': ('django.db.models.fields.TextField', [], {}),
'advertised_start': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'cert_html_view_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'cert_name_long': ('django.db.models.fields.TextField', [], {}),
'cert_name_short': ('django.db.models.fields.TextField', [], {}),
'certificates_display_behavior': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'certificates_show_before_end': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'course_image_url': ('django.db.models.fields.TextField', [], {}),
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
'days_early_for_beta': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'display_name': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'display_number_with_default': ('django.db.models.fields.TextField', [], {}),
'display_org_with_default': ('django.db.models.fields.TextField', [], {}),
'end': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'end_of_course_survey_url': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'enrollment_domain': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'enrollment_end': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'enrollment_start': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'facebook_url': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'has_any_active_web_certificate': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'primary_key': 'True', 'db_index': 'True'}),
'invitation_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'lowest_passing_grade': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '2'}),
'max_student_enrollments_allowed': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'mobile_available': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'social_sharing_url': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'start': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'version': ('django.db.models.fields.IntegerField', [], {}),
'visible_to_staff_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
}
}
complete_apps = ['course_overviews'] | agpl-3.0 | 7,028,308,691,445,154,000 | 60.346667 | 141 | 0.596522 | false |
dd00/commandergenius | project/jni/python/src/Lib/test/test_uu.py | 61 | 5316 | """
Tests for uu module.
Nick Mathewson
"""
import unittest
from test import test_support
import sys, os, uu, cStringIO
import uu
plaintext = "The smooth-scaled python crept over the sleeping dog\n"
encodedtext = """\
M5&AE('-M;V]T:\"US8V%L960@<'ET:&]N(&-R97!T(&]V97(@=&AE('-L965P
(:6YG(&1O9PH """
encodedtextwrapped = "begin %03o %s\n" + encodedtext.replace("%", "%%") + "\n \nend\n"
class UUTest(unittest.TestCase):
def test_encode(self):
inp = cStringIO.StringIO(plaintext)
out = cStringIO.StringIO()
uu.encode(inp, out, "t1")
self.assertEqual(out.getvalue(), encodedtextwrapped % (0666, "t1"))
inp = cStringIO.StringIO(plaintext)
out = cStringIO.StringIO()
uu.encode(inp, out, "t1", 0644)
self.assertEqual(out.getvalue(), encodedtextwrapped % (0644, "t1"))
def test_decode(self):
inp = cStringIO.StringIO(encodedtextwrapped % (0666, "t1"))
out = cStringIO.StringIO()
uu.decode(inp, out)
self.assertEqual(out.getvalue(), plaintext)
inp = cStringIO.StringIO(
"UUencoded files may contain many lines,\n" +
"even some that have 'begin' in them.\n" +
encodedtextwrapped % (0666, "t1")
)
out = cStringIO.StringIO()
uu.decode(inp, out)
self.assertEqual(out.getvalue(), plaintext)
def test_truncatedinput(self):
inp = cStringIO.StringIO("begin 644 t1\n" + encodedtext)
out = cStringIO.StringIO()
try:
uu.decode(inp, out)
self.fail("No exception thrown")
except uu.Error, e:
self.assertEqual(str(e), "Truncated input file")
def test_missingbegin(self):
inp = cStringIO.StringIO("")
out = cStringIO.StringIO()
try:
uu.decode(inp, out)
self.fail("No exception thrown")
except uu.Error, e:
self.assertEqual(str(e), "No valid begin line found in input file")
class UUStdIOTest(unittest.TestCase):
def setUp(self):
self.stdin = sys.stdin
self.stdout = sys.stdout
def tearDown(self):
sys.stdin = self.stdin
sys.stdout = self.stdout
def test_encode(self):
sys.stdin = cStringIO.StringIO(plaintext)
sys.stdout = cStringIO.StringIO()
uu.encode("-", "-", "t1", 0666)
self.assertEqual(
sys.stdout.getvalue(),
encodedtextwrapped % (0666, "t1")
)
def test_decode(self):
sys.stdin = cStringIO.StringIO(encodedtextwrapped % (0666, "t1"))
sys.stdout = cStringIO.StringIO()
uu.decode("-", "-")
self.assertEqual(sys.stdout.getvalue(), plaintext)
class UUFileTest(unittest.TestCase):
def _kill(self, f):
# close and remove file
try:
f.close()
except (SystemExit, KeyboardInterrupt):
raise
except:
pass
try:
os.unlink(f.name)
except (SystemExit, KeyboardInterrupt):
raise
except:
pass
def setUp(self):
self.tmpin = test_support.TESTFN + "i"
self.tmpout = test_support.TESTFN + "o"
def tearDown(self):
del self.tmpin
del self.tmpout
def test_encode(self):
fin = fout = None
try:
test_support.unlink(self.tmpin)
fin = open(self.tmpin, 'wb')
fin.write(plaintext)
fin.close()
fin = open(self.tmpin, 'rb')
fout = open(self.tmpout, 'w')
uu.encode(fin, fout, self.tmpin, mode=0644)
fin.close()
fout.close()
fout = open(self.tmpout, 'r')
s = fout.read()
fout.close()
self.assertEqual(s, encodedtextwrapped % (0644, self.tmpin))
# in_file and out_file as filenames
uu.encode(self.tmpin, self.tmpout, self.tmpin, mode=0644)
fout = open(self.tmpout, 'r')
s = fout.read()
fout.close()
self.assertEqual(s, encodedtextwrapped % (0644, self.tmpin))
finally:
self._kill(fin)
self._kill(fout)
def test_decode(self):
f = None
try:
test_support.unlink(self.tmpin)
f = open(self.tmpin, 'w')
f.write(encodedtextwrapped % (0644, self.tmpout))
f.close()
f = open(self.tmpin, 'r')
uu.decode(f)
f.close()
f = open(self.tmpout, 'r')
s = f.read()
f.close()
self.assertEqual(s, plaintext)
# XXX is there an xp way to verify the mode?
finally:
self._kill(f)
def test_decodetwice(self):
# Verify that decode() will refuse to overwrite an existing file
f = None
try:
f = cStringIO.StringIO(encodedtextwrapped % (0644, self.tmpout))
f = open(self.tmpin, 'r')
uu.decode(f)
f.close()
f = open(self.tmpin, 'r')
self.assertRaises(uu.Error, uu.decode, f)
f.close()
finally:
self._kill(f)
def test_main():
test_support.run_unittest(UUTest, UUStdIOTest, UUFileTest)
if __name__=="__main__":
test_main()
| lgpl-2.1 | -594,843,575,768,431,400 | 27.891304 | 86 | 0.545523 | false |
abtink/openthread | tests/toranj/test-034-poor-link-parent-child-attach.py | 9 | 3302 | #!/usr/bin/env python3
#
# Copyright (c) 2019, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import wpan
# -----------------------------------------------------------------------------------------------------------------------
# Test description:
#
# This test covers a situation where a single parent exists in the network with poor link quality, ensuring that
# the child can still attach to the parent.
test_name = __file__[:-3] if __file__.endswith('.py') else __file__
print('-' * 120)
print('Starting \'{}\''.format(test_name))
# -----------------------------------------------------------------------------------------------------------------------
# Creating `wpan.Nodes` instances
speedup = 4
wpan.Node.set_time_speedup_factor(speedup)
parent = wpan.Node()
child = wpan.Node()
# -----------------------------------------------------------------------------------------------------------------------
# Init all nodes
wpan.Node.init_all_nodes()
# -----------------------------------------------------------------------------------------------------------------------
# Test implementation
parent.form("network")
# Create a poor link between child and parent using MAC fixed RSSI filter
parent.set(wpan.WPAN_MAC_FILTER_FIXED_RSSI, '-99')
parent.add(wpan.WPAN_MAC_FILTER_ENTRIES, child.get(wpan.WPAN_EXT_ADDRESS)[1:-1])
child.set(wpan.WPAN_MAC_FILTER_FIXED_RSSI, '-99')
child.add(wpan.WPAN_MAC_FILTER_ENTRIES, parent.get(wpan.WPAN_EXT_ADDRESS)[1:-1])
# Ensure the child can still attach to the single low-link-quality parent
child.join_node(parent, node_type=wpan.JOIN_TYPE_END_DEVICE)
# -----------------------------------------------------------------------------------------------------------------------
# Test finished
wpan.Node.finalize_all_nodes()
print('\'{}\' passed.'.format(test_name))
| bsd-3-clause | -8,807,192,311,029,769,000 | 42.447368 | 121 | 0.612962 | false |
jotes/ansible | lib/ansible/runner/return_data.py | 133 | 2102 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from ansible import utils
class ReturnData(object):
''' internal return class for runner execute methods, not part of public API signature '''
__slots__ = [ 'result', 'comm_ok', 'host', 'diff' ]
def __init__(self, conn=None, host=None, result=None,
comm_ok=True, diff=dict()):
# which host is this ReturnData about?
if conn is not None:
self.host = conn.host
delegate = getattr(conn, 'delegate', None)
if delegate is not None:
self.host = delegate
else:
self.host = host
self.result = result
self.comm_ok = comm_ok
# if these values are set and used with --diff we can show
# changes made to particular files
self.diff = diff
if type(self.result) in [ str, unicode ]:
self.result = utils.parse_json(self.result, from_remote=True, no_exceptions=True)
if self.host is None:
raise Exception("host not set")
if type(self.result) != dict:
raise Exception("dictionary result expected")
def communicated_ok(self):
return self.comm_ok
def is_successful(self):
return self.comm_ok and (self.result.get('failed', False) == False) and ('failed_when_result' in self.result and [not self.result['failed_when_result']] or [self.result.get('rc',0) == 0])[0]
| gpl-3.0 | -4,510,780,908,862,235,000 | 35.241379 | 198 | 0.650333 | false |
brototyp/CouchPotato | library/sqlalchemy/dialects/mssql/mxodbc.py | 18 | 3274 | """
Support for MS-SQL via mxODBC.
mxODBC is available at:
http://www.egenix.com/
This was tested with mxODBC 3.1.2 and the SQL Server Native
Client connected to MSSQL 2005 and 2008 Express Editions.
Connecting
~~~~~~~~~~
Connection is via DSN::
mssql+mxodbc://<username>:<password>@<dsnname>
Execution Modes
~~~~~~~~~~~~~~~
mxODBC features two styles of statement execution, using the
``cursor.execute()`` and ``cursor.executedirect()`` methods (the second being
an extension to the DBAPI specification). The former makes use of a particular
API call specific to the SQL Server Native Client ODBC driver known as
SQLDescribeParam, while the latter does not.
mxODBC apparently only makes repeated use of a single prepared statement
when SQLDescribeParam is used. The advantage to prepared statement reuse is
one of performance. The disadvantage is that SQLDescribeParam has a limited
set of scenarios in which bind parameters are understood, including that they
cannot be placed within the argument lists of function calls, anywhere outside
the FROM, or even within subqueries within the FROM clause - making the usage
of bind parameters within SELECT statements impossible for all but the most
simplistic statements.
For this reason, the mxODBC dialect uses the "native" mode by default only for
INSERT, UPDATE, and DELETE statements, and uses the escaped string mode for
all other statements.
This behavior can be controlled via
:meth:`~sqlalchemy.sql.expression.Executable.execution_options` using the
``native_odbc_execute`` flag with a value of ``True`` or ``False``, where a
value of ``True`` will unconditionally use native bind parameters and a value
of ``False`` will unconditionally use string-escaped parameters.
"""
import re
import sys
from sqlalchemy import types as sqltypes
from sqlalchemy import util
from sqlalchemy.connectors.mxodbc import MxODBCConnector
from sqlalchemy.dialects.mssql.pyodbc import MSExecutionContext_pyodbc
from sqlalchemy.dialects.mssql.base import (MSExecutionContext, MSDialect,
MSSQLCompiler,
MSSQLStrictCompiler,
_MSDateTime, _MSDate, TIME)
class MSExecutionContext_mxodbc(MSExecutionContext_pyodbc):
"""
The pyodbc execution context is useful for enabling
SELECT SCOPE_IDENTITY in cases where OUTPUT clause
does not work (tables with insert triggers).
"""
#todo - investigate whether the pyodbc execution context
# is really only being used in cases where OUTPUT
# won't work.
class MSDialect_mxodbc(MxODBCConnector, MSDialect):
# TODO: may want to use this only if FreeTDS is not in use,
# since FreeTDS doesn't seem to use native binds.
statement_compiler = MSSQLStrictCompiler
execution_ctx_cls = MSExecutionContext_mxodbc
colspecs = {
#sqltypes.Numeric : _MSNumeric,
sqltypes.DateTime : _MSDateTime,
sqltypes.Date : _MSDate,
sqltypes.Time : TIME,
}
def __init__(self, description_encoding='latin-1', **params):
super(MSDialect_mxodbc, self).__init__(**params)
self.description_encoding = description_encoding
dialect = MSDialect_mxodbc
| gpl-3.0 | 2,943,022,538,837,989,000 | 34.978022 | 78 | 0.721747 | false |
jsirois/pex | pex/vendor/_vendored/pip/pip/_vendor/chardet/universaldetector.py | 244 | 12485 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
"""
Module containing the UniversalDetector detector class, which is the primary
class a user of ``chardet`` should use.
:author: Mark Pilgrim (initial port to Python)
:author: Shy Shalom (original C code)
:author: Dan Blanchard (major refactoring for 3.0)
:author: Ian Cordasco
"""
import codecs
import logging
import re
from .charsetgroupprober import CharSetGroupProber
from .enums import InputState, LanguageFilter, ProbingState
from .escprober import EscCharSetProber
from .latin1prober import Latin1Prober
from .mbcsgroupprober import MBCSGroupProber
from .sbcsgroupprober import SBCSGroupProber
class UniversalDetector(object):
"""
The ``UniversalDetector`` class underlies the ``chardet.detect`` function
and coordinates all of the different charset probers.
To get a ``dict`` containing an encoding and its confidence, you can simply
run:
.. code::
u = UniversalDetector()
u.feed(some_bytes)
u.close()
detected = u.result
"""
MINIMUM_THRESHOLD = 0.20
HIGH_BYTE_DETECTOR = re.compile(b'[\x80-\xFF]')
ESC_DETECTOR = re.compile(b'(\033|~{)')
WIN_BYTE_DETECTOR = re.compile(b'[\x80-\x9F]')
ISO_WIN_MAP = {'iso-8859-1': 'Windows-1252',
'iso-8859-2': 'Windows-1250',
'iso-8859-5': 'Windows-1251',
'iso-8859-6': 'Windows-1256',
'iso-8859-7': 'Windows-1253',
'iso-8859-8': 'Windows-1255',
'iso-8859-9': 'Windows-1254',
'iso-8859-13': 'Windows-1257'}
def __init__(self, lang_filter=LanguageFilter.ALL):
self._esc_charset_prober = None
self._charset_probers = []
self.result = None
self.done = None
self._got_data = None
self._input_state = None
self._last_char = None
self.lang_filter = lang_filter
self.logger = logging.getLogger(__name__)
self._has_win_bytes = None
self.reset()
def reset(self):
"""
Reset the UniversalDetector and all of its probers back to their
initial states. This is called by ``__init__``, so you only need to
call this directly in between analyses of different documents.
"""
self.result = {'encoding': None, 'confidence': 0.0, 'language': None}
self.done = False
self._got_data = False
self._has_win_bytes = False
self._input_state = InputState.PURE_ASCII
self._last_char = b''
if self._esc_charset_prober:
self._esc_charset_prober.reset()
for prober in self._charset_probers:
prober.reset()
def feed(self, byte_str):
"""
Takes a chunk of a document and feeds it through all of the relevant
charset probers.
After calling ``feed``, you can check the value of the ``done``
attribute to see if you need to continue feeding the
``UniversalDetector`` more data, or if it has made a prediction
(in the ``result`` attribute).
.. note::
You should always call ``close`` when you're done feeding in your
document if ``done`` is not already ``True``.
"""
if self.done:
return
if not len(byte_str):
return
if not isinstance(byte_str, bytearray):
byte_str = bytearray(byte_str)
# First check for known BOMs, since these are guaranteed to be correct
if not self._got_data:
# If the data starts with BOM, we know it is UTF
if byte_str.startswith(codecs.BOM_UTF8):
# EF BB BF UTF-8 with BOM
self.result = {'encoding': "UTF-8-SIG",
'confidence': 1.0,
'language': ''}
elif byte_str.startswith((codecs.BOM_UTF32_LE,
codecs.BOM_UTF32_BE)):
# FF FE 00 00 UTF-32, little-endian BOM
# 00 00 FE FF UTF-32, big-endian BOM
self.result = {'encoding': "UTF-32",
'confidence': 1.0,
'language': ''}
elif byte_str.startswith(b'\xFE\xFF\x00\x00'):
# FE FF 00 00 UCS-4, unusual octet order BOM (3412)
self.result = {'encoding': "X-ISO-10646-UCS-4-3412",
'confidence': 1.0,
'language': ''}
elif byte_str.startswith(b'\x00\x00\xFF\xFE'):
# 00 00 FF FE UCS-4, unusual octet order BOM (2143)
self.result = {'encoding': "X-ISO-10646-UCS-4-2143",
'confidence': 1.0,
'language': ''}
elif byte_str.startswith((codecs.BOM_LE, codecs.BOM_BE)):
# FF FE UTF-16, little endian BOM
# FE FF UTF-16, big endian BOM
self.result = {'encoding': "UTF-16",
'confidence': 1.0,
'language': ''}
self._got_data = True
if self.result['encoding'] is not None:
self.done = True
return
        # If none of those matched and we've only seen ASCII so far, check
# for high bytes and escape sequences
if self._input_state == InputState.PURE_ASCII:
if self.HIGH_BYTE_DETECTOR.search(byte_str):
self._input_state = InputState.HIGH_BYTE
elif self._input_state == InputState.PURE_ASCII and \
self.ESC_DETECTOR.search(self._last_char + byte_str):
self._input_state = InputState.ESC_ASCII
self._last_char = byte_str[-1:]
# If we've seen escape sequences, use the EscCharSetProber, which
# uses a simple state machine to check for known escape sequences in
# HZ and ISO-2022 encodings, since those are the only encodings that
# use such sequences.
if self._input_state == InputState.ESC_ASCII:
if not self._esc_charset_prober:
self._esc_charset_prober = EscCharSetProber(self.lang_filter)
if self._esc_charset_prober.feed(byte_str) == ProbingState.FOUND_IT:
self.result = {'encoding':
self._esc_charset_prober.charset_name,
'confidence':
self._esc_charset_prober.get_confidence(),
'language':
self._esc_charset_prober.language}
self.done = True
# If we've seen high bytes (i.e., those with values greater than 127),
# we need to do more complicated checks using all our multi-byte and
# single-byte probers that are left. The single-byte probers
# use character bigram distributions to determine the encoding, whereas
# the multi-byte probers use a combination of character unigram and
# bigram distributions.
elif self._input_state == InputState.HIGH_BYTE:
if not self._charset_probers:
self._charset_probers = [MBCSGroupProber(self.lang_filter)]
# If we're checking non-CJK encodings, use single-byte prober
if self.lang_filter & LanguageFilter.NON_CJK:
self._charset_probers.append(SBCSGroupProber())
self._charset_probers.append(Latin1Prober())
for prober in self._charset_probers:
if prober.feed(byte_str) == ProbingState.FOUND_IT:
self.result = {'encoding': prober.charset_name,
'confidence': prober.get_confidence(),
'language': prober.language}
self.done = True
break
if self.WIN_BYTE_DETECTOR.search(byte_str):
self._has_win_bytes = True
def close(self):
"""
Stop analyzing the current document and come up with a final
prediction.
:returns: The ``result`` attribute, a ``dict`` with the keys
`encoding`, `confidence`, and `language`.
"""
# Don't bother with checks if we're already done
if self.done:
return self.result
self.done = True
if not self._got_data:
self.logger.debug('no data received!')
# Default to ASCII if it is all we've seen so far
elif self._input_state == InputState.PURE_ASCII:
self.result = {'encoding': 'ascii',
'confidence': 1.0,
'language': ''}
# If we have seen non-ASCII, return the best that met MINIMUM_THRESHOLD
elif self._input_state == InputState.HIGH_BYTE:
prober_confidence = None
max_prober_confidence = 0.0
max_prober = None
for prober in self._charset_probers:
if not prober:
continue
prober_confidence = prober.get_confidence()
if prober_confidence > max_prober_confidence:
max_prober_confidence = prober_confidence
max_prober = prober
if max_prober and (max_prober_confidence > self.MINIMUM_THRESHOLD):
charset_name = max_prober.charset_name
lower_charset_name = max_prober.charset_name.lower()
confidence = max_prober.get_confidence()
# Use Windows encoding name instead of ISO-8859 if we saw any
# extra Windows-specific bytes
if lower_charset_name.startswith('iso-8859'):
if self._has_win_bytes:
charset_name = self.ISO_WIN_MAP.get(lower_charset_name,
charset_name)
self.result = {'encoding': charset_name,
'confidence': confidence,
'language': max_prober.language}
# Log all prober confidences if none met MINIMUM_THRESHOLD
if self.logger.getEffectiveLevel() == logging.DEBUG:
if self.result['encoding'] is None:
self.logger.debug('no probers hit minimum threshold')
for group_prober in self._charset_probers:
if not group_prober:
continue
if isinstance(group_prober, CharSetGroupProber):
for prober in group_prober.probers:
self.logger.debug('%s %s confidence = %s',
prober.charset_name,
prober.language,
prober.get_confidence())
else:
self.logger.debug('%s %s confidence = %s',
prober.charset_name,
prober.language,
prober.get_confidence())
return self.result
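# --- Hedged usage sketch (not part of the vendored chardet module) ---
# A minimal driver for the feed()/close() cycle described in the class
# docstring; the byte strings below are illustrative sample data only.
if __name__ == '__main__':
    detector = UniversalDetector()
    for chunk in (b'hello ', b'w\xc3\xb6rld'):
        detector.feed(chunk)
        if detector.done:
            break
    detector.close()
    print(detector.result)  # e.g. {'encoding': 'utf-8', 'confidence': ..., 'language': ''}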
| apache-2.0 | 5,384,070,764,618,037,000 | 42.653846 | 80 | 0.547938 | false |
erkanay/django | tests/null_fk_ordering/tests.py | 44 | 2014 | from __future__ import unicode_literals
from django.test import TestCase
from .models import Author, Article, SystemInfo, Forum, Post, Comment
class NullFkOrderingTests(TestCase):
def test_ordering_across_null_fk(self):
"""
Regression test for #7512
ordering across nullable Foreign Keys shouldn't exclude results
"""
author_1 = Author.objects.create(name='Tom Jones')
author_2 = Author.objects.create(name='Bob Smith')
Article.objects.create(title='No author on this article')
Article.objects.create(author=author_1, title='This article written by Tom Jones')
Article.objects.create(author=author_2, title='This article written by Bob Smith')
# We can't compare results directly (since different databases sort NULLs to
# different ends of the ordering), but we can check that all results are
# returned.
self.assertTrue(len(list(Article.objects.all())) == 3)
s = SystemInfo.objects.create(system_name='System Info')
f = Forum.objects.create(system_info=s, forum_name='First forum')
p = Post.objects.create(forum=f, title='First Post')
Comment.objects.create(post=p, comment_text='My first comment')
Comment.objects.create(comment_text='My second comment')
s2 = SystemInfo.objects.create(system_name='More System Info')
f2 = Forum.objects.create(system_info=s2, forum_name='Second forum')
p2 = Post.objects.create(forum=f2, title='Second Post')
Comment.objects.create(comment_text='Another first comment')
Comment.objects.create(post=p2, comment_text='Another second comment')
# We have to test this carefully. Some databases sort NULL values before
# everything else, some sort them afterwards. So we extract the ordered list
# and check the length. Before the fix, this list was too short (some values
# were omitted).
self.assertTrue(len(list(Comment.objects.all())) == 4)
| bsd-3-clause | 6,266,639,399,453,080,000 | 46.952381 | 90 | 0.682721 | false |
radhika-raghavendran/mbed-os5.1-onsemi | tools/host_tests/wait_us_auto.py | 122 | 2871 | """
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from time import time
class WaitusTest():
""" This test is reading single characters from stdio
and measures time between their occurrences.
"""
TICK_LOOP_COUNTER = 13
TICK_LOOP_SUCCESSFUL_COUNTS = 10
DEVIATION = 0.10 # +/-10%
def test(self, selftest):
test_result = True
# First character to start test (to know after reset when test starts)
if selftest.mbed.set_serial_timeout(None) is None:
return selftest.RESULT_IO_SERIAL
c = selftest.mbed.serial_read(1)
if c is None:
return selftest.RESULT_IO_SERIAL
if c == '$': # target will printout TargetID e.g.: $$$$1040e649d5c09a09a3f6bc568adef61375c6
#Read additional 39 bytes of TargetID
if selftest.mbed.serial_read(39) is None:
return selftest.RESULT_IO_SERIAL
c = selftest.mbed.serial_read(1) # Re-read first 'tick'
if c is None:
return selftest.RESULT_IO_SERIAL
start_serial_pool = time()
start = time()
success_counter = 0
for i in range(0, self.TICK_LOOP_COUNTER):
c = selftest.mbed.serial_read(1)
if c is None:
return selftest.RESULT_IO_SERIAL
delta = time() - start
deviation = abs(delta - 1)
# Round values
delta = round(delta, 2)
deviation = round(deviation, 2)
# Check if time measurements are in given range
deviation_ok = True if delta > 0 and deviation <= self.DEVIATION else False
success_counter = success_counter+1 if deviation_ok else 0
msg = "OK" if deviation_ok else "FAIL"
selftest.notify("%s in %.2f sec (%.2f) [%s]"% (c, delta, deviation, msg))
start = time()
if success_counter >= self.TICK_LOOP_SUCCESSFUL_COUNTS:
break
measurement_time = time() - start_serial_pool
selftest.notify("Consecutive OK timer reads: %d"% success_counter)
selftest.notify("Completed in %.2f sec" % (measurement_time))
test_result = True if success_counter >= self.TICK_LOOP_SUCCESSFUL_COUNTS else False
return selftest.RESULT_SUCCESS if test_result else selftest.RESULT_FAILURE
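# --- Illustrative check of the tolerance rule used above (not part of the test) ---
# DEVIATION is +/-10%, so a tick measured at 1.08 s passes while one at 1.25 s
# fails; the sample deltas are made up.
if __name__ == '__main__':
    for delta in (1.08, 1.25):
        deviation = abs(delta - 1)
        print("%.2f -> %s" % (delta, deviation <= WaitusTest.DEVIATION))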
| apache-2.0 | 319,921,671,935,296,450 | 40.608696 | 99 | 0.634274 | false |
xdevelsistemas/taiga-back-community | taiga/base/api/utils/formatting.py | 2 | 4697 | # -*- coding: utf-8 -*-
# Copyright (C) 2014-2016 Andrey Antukh <[email protected]>
# Copyright (C) 2014-2016 Jesús Espino <[email protected]>
# Copyright (C) 2014-2016 David Barragán <[email protected]>
# Copyright (C) 2014-2016 Alejandro Alonso <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# The code is partially taken (and modified) from django rest framework
# that is licensed under the following terms:
#
# Copyright (c) 2011-2014, Tom Christie
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Utility functions to return a formatted name and description for a given view.
"""
from django.utils.html import escape
from django.utils.safestring import mark_safe
from taiga.base.api.settings import api_settings
from textwrap import dedent
import re
# Markdown is optional
try:
import markdown
def apply_markdown(text):
"""
Simple wrapper around :func:`markdown.markdown` to set the base level
of '#' style headers to <h2>.
"""
extensions = ["headerid(level=2)"]
safe_mode = False
md = markdown.Markdown(extensions=extensions, safe_mode=safe_mode)
return md.convert(text)
except ImportError:
apply_markdown = None
def remove_trailing_string(content, trailing):
"""
Strip trailing component `trailing` from `content` if it exists.
Used when generating names from view classes.
"""
if content.endswith(trailing) and content != trailing:
return content[:-len(trailing)]
return content
def dedent(content):
"""
Remove leading indent from a block of text.
Used when generating descriptions from docstrings.
Note that python's `textwrap.dedent` doesn't quite cut it,
as it fails to dedent multiline docstrings that include
unindented text on the initial line.
"""
whitespace_counts = [len(line) - len(line.lstrip(" "))
for line in content.splitlines()[1:] if line.lstrip()]
# unindent the content if needed
if whitespace_counts:
whitespace_pattern = "^" + (" " * min(whitespace_counts))
content = re.sub(re.compile(whitespace_pattern, re.MULTILINE), "", content)
return content.strip()
def camelcase_to_spaces(content):
"""
Translate 'CamelCaseNames' to 'Camel Case Names'.
Used when generating names from view classes.
"""
camelcase_boundry = "(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))"
content = re.sub(camelcase_boundry, " \\1", content).strip()
return " ".join(content.split("_")).title()
def markup_description(description):
"""
Apply HTML markup to the given description.
"""
if apply_markdown:
description = apply_markdown(description)
else:
description = escape(description).replace("\n", "<br />")
return mark_safe(description)
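# --- Illustrative self-check (not part of the original module) ---
# Hedged examples for the helpers above; run as a script only, since simply
# importing this module already requires the Django/Taiga dependencies.
if __name__ == "__main__":
    assert remove_trailing_string("UserViewSet", "ViewSet") == "User"
    assert camelcase_to_spaces("CamelCaseNames") == "Camel Case Names"
    print(camelcase_to_spaces("remove_trailing_string"))  # Remove Trailing String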
| agpl-3.0 | -7,965,320,073,523,837,000 | 38.125 | 83 | 0.717785 | false |
wangyou/XX-Net | code/default/gae_proxy/server/lib/google/appengine/api/backendinfo.py | 14 | 6551 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A library for working with BackendInfoExternal records, describing backends
configured for an application. Supports loading the records from backend.yaml.
"""
import os
import yaml
from yaml import representer
if os.environ.get('APPENGINE_RUNTIME') == 'python27':
from google.appengine.api import validation
from google.appengine.api import yaml_builder
from google.appengine.api import yaml_listener
from google.appengine.api import yaml_object
else:
from google.appengine.api import validation
from google.appengine.api import yaml_builder
from google.appengine.api import yaml_listener
from google.appengine.api import yaml_object
NAME_REGEX = r'(?!-)[a-z\d\-]{1,100}'
FILE_REGEX = r'(?!\^).*(?!\$).{1,256}'
CLASS_REGEX = r'^[bB](1|2|4|8|4_1G)$'
OPTIONS_REGEX = r'^[a-z, ]*$'
STATE_REGEX = r'^(START|STOP|DISABLED)$'
BACKENDS = 'backends'
NAME = 'name'
CLASS = 'class'
INSTANCES = 'instances'
OPTIONS = 'options'
PUBLIC = 'public'
DYNAMIC = 'dynamic'
FAILFAST = 'failfast'
MAX_CONCURRENT_REQUESTS = 'max_concurrent_requests'
START = 'start'
VALID_OPTIONS = frozenset([PUBLIC, DYNAMIC, FAILFAST])
STATE = 'state'
class BadConfig(Exception):
"""An invalid configuration was provided."""
class ListWithoutSort(list):
def sort(self):
pass
class SortedDict(dict):
def __init__(self, keys, data):
super(SortedDict, self).__init__()
self.keys = keys
self.update(data)
def items(self):
result = ListWithoutSort()
for key in self.keys:
if type(self.get(key)) != type(None):
result.append((key, self.get(key)))
return result
representer.SafeRepresenter.add_representer(
SortedDict, representer.SafeRepresenter.represent_dict)
class BackendEntry(validation.Validated):
"""A backend entry describes a single backend."""
ATTRIBUTES = {
NAME: NAME_REGEX,
CLASS: validation.Optional(CLASS_REGEX),
INSTANCES: validation.Optional(validation.TYPE_INT),
MAX_CONCURRENT_REQUESTS: validation.Optional(validation.TYPE_INT),
OPTIONS: validation.Optional(OPTIONS_REGEX),
PUBLIC: validation.Optional(validation.TYPE_BOOL),
DYNAMIC: validation.Optional(validation.TYPE_BOOL),
FAILFAST: validation.Optional(validation.TYPE_BOOL),
START: validation.Optional(FILE_REGEX),
STATE: validation.Optional(STATE_REGEX),
}
def __init__(self, *args, **kwargs):
super(BackendEntry, self).__init__(*args, **kwargs)
self.Init()
def Init(self):
if self.public:
raise BadConfig("Illegal field: 'public'")
if self.dynamic:
raise BadConfig("Illegal field: 'dynamic'")
if self.failfast:
raise BadConfig("Illegal field: 'failfast'")
self.ParseOptions()
return self
def set_class(self, Class):
"""Setter for 'class', since an attribute reference is an error."""
self.Set(CLASS, Class)
def get_class(self):
"""Accessor for 'class', since an attribute reference is an error."""
return self.Get(CLASS)
def ToDict(self):
"""Returns a sorted dictionary representing the backend entry."""
self.ParseOptions().WriteOptions()
result = super(BackendEntry, self).ToDict()
return SortedDict([NAME,
CLASS,
INSTANCES,
START,
OPTIONS,
MAX_CONCURRENT_REQUESTS,
STATE],
result)
def ParseOptions(self):
"""Parses the 'options' field and sets appropriate fields."""
if self.options:
options = [option.strip() for option in self.options.split(',')]
else:
options = []
for option in options:
if option not in VALID_OPTIONS:
        raise BadConfig('Unrecognized option: %s' % option)
self.public = PUBLIC in options
self.dynamic = DYNAMIC in options
self.failfast = FAILFAST in options
return self
def WriteOptions(self):
"""Writes the 'options' field based on other settings."""
options = []
if self.public:
options.append('public')
if self.dynamic:
options.append('dynamic')
if self.failfast:
options.append('failfast')
if options:
self.options = ', '.join(options)
else:
self.options = None
return self
def LoadBackendEntry(backend_entry):
"""Parses a BackendEntry object from a string.
Args:
backend_entry: a backend entry, as a string
Returns:
A BackendEntry object.
"""
builder = yaml_object.ObjectBuilder(BackendEntry)
handler = yaml_builder.BuilderHandler(builder)
listener = yaml_listener.EventListener(handler)
listener.Parse(backend_entry)
entries = handler.GetResults()
if len(entries) < 1:
raise BadConfig('Empty backend configuration.')
if len(entries) > 1:
raise BadConfig('Multiple backend entries were found in configuration.')
return entries[0].Init()
class BackendInfoExternal(validation.Validated):
"""BackendInfoExternal describes all backend entries for an application."""
ATTRIBUTES = {
BACKENDS: validation.Optional(validation.Repeated(BackendEntry)),
}
def LoadBackendInfo(backend_info, open_fn=None):
"""Parses a BackendInfoExternal object from a string.
Args:
backend_info: a backends stanza (list of backends) as a string
open_fn: Function for opening files. Unused.
Returns:
A BackendInfoExternal object.
"""
builder = yaml_object.ObjectBuilder(BackendInfoExternal)
handler = yaml_builder.BuilderHandler(builder)
listener = yaml_listener.EventListener(handler)
listener.Parse(backend_info)
backend_info = handler.GetResults()
if len(backend_info) < 1:
return BackendInfoExternal(backends=[])
if len(backend_info) > 1:
raise BadConfig("Only one 'backends' clause is allowed.")
info = backend_info[0]
if not info.backends:
return BackendInfoExternal(backends=[])
for backend in info.backends:
backend.Init()
return info
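# --- Hedged example (not part of the original App Engine module) ---
# Parses a tiny backends.yaml snippet; the backend definition below is invented
# for illustration and running it assumes the App Engine SDK imports above work.
if __name__ == '__main__':
    _SAMPLE = """backends:
- name: worker
  class: B2
  instances: 3
  options: dynamic, failfast
"""
    for entry in LoadBackendInfo(_SAMPLE).backends:
        print('%s: instances=%s dynamic=%s failfast=%s'
              % (entry.name, entry.instances, entry.dynamic, entry.failfast))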
| bsd-2-clause | 6,294,244,531,255,962,000 | 26.410042 | 78 | 0.684934 | false |
fichter/grpc | src/python/grpcio_test/grpc_test/framework/interfaces/face/_blocking_invocation_inline_service.py | 3 | 10601 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Test code for the Face layer of RPC Framework."""
import abc
import unittest
# test_interfaces is referenced from specification in this module.
from grpc.framework.interfaces.face import face
from grpc_test.framework.common import test_constants
from grpc_test.framework.common import test_control
from grpc_test.framework.common import test_coverage
from grpc_test.framework.interfaces.face import _digest
from grpc_test.framework.interfaces.face import _stock_service
from grpc_test.framework.interfaces.face import test_interfaces # pylint: disable=unused-import
class TestCase(test_coverage.Coverage, unittest.TestCase):
"""A test of the Face layer of RPC Framework.
Concrete subclasses must have an "implementation" attribute of type
test_interfaces.Implementation and an "invoker_constructor" attribute of type
_invocation.InvokerConstructor.
"""
__metaclass__ = abc.ABCMeta
NAME = 'BlockingInvocationInlineServiceTest'
def setUp(self):
"""See unittest.TestCase.setUp for full specification.
Overriding implementations must call this implementation.
"""
self._control = test_control.PauseFailControl()
self._digest = _digest.digest(
_stock_service.STOCK_TEST_SERVICE, self._control, None)
generic_stub, dynamic_stubs, self._memo = self.implementation.instantiate(
self._digest.methods, self._digest.inline_method_implementations, None)
self._invoker = self.invoker_constructor.construct_invoker(
generic_stub, dynamic_stubs, self._digest.methods)
def tearDown(self):
"""See unittest.TestCase.tearDown for full specification.
Overriding implementations must call this implementation.
"""
self.implementation.destantiate(self._memo)
def testSuccessfulUnaryRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (
self._digest.unary_unary_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
request = test_messages.request()
response = self._invoker.blocking(group, method)(
request, test_constants.LONG_TIMEOUT)
test_messages.verify(request, response, self)
def testSuccessfulUnaryRequestStreamResponse(self):
for (group, method), test_messages_sequence in (
self._digest.unary_stream_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
request = test_messages.request()
response_iterator = self._invoker.blocking(group, method)(
request, test_constants.LONG_TIMEOUT)
responses = list(response_iterator)
test_messages.verify(request, responses, self)
def testSuccessfulStreamRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (
self._digest.stream_unary_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
response = self._invoker.blocking(group, method)(
iter(requests), test_constants.LONG_TIMEOUT)
test_messages.verify(requests, response, self)
def testSuccessfulStreamRequestStreamResponse(self):
for (group, method), test_messages_sequence in (
self._digest.stream_stream_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
response_iterator = self._invoker.blocking(group, method)(
iter(requests), test_constants.LONG_TIMEOUT)
responses = list(response_iterator)
test_messages.verify(requests, responses, self)
def testSequentialInvocations(self):
for (group, method), test_messages_sequence in (
self._digest.unary_unary_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
first_request = test_messages.request()
second_request = test_messages.request()
first_response = self._invoker.blocking(group, method)(
first_request, test_constants.LONG_TIMEOUT)
test_messages.verify(first_request, first_response, self)
second_response = self._invoker.blocking(group, method)(
second_request, test_constants.LONG_TIMEOUT)
test_messages.verify(second_request, second_response, self)
@unittest.skip('Parallel invocations impossible with blocking control flow!')
def testParallelInvocations(self):
raise NotImplementedError()
@unittest.skip('Parallel invocations impossible with blocking control flow!')
def testWaitingForSomeButNotAllParallelInvocations(self):
raise NotImplementedError()
@unittest.skip('Cancellation impossible with blocking control flow!')
def testCancelledUnaryRequestUnaryResponse(self):
raise NotImplementedError()
@unittest.skip('Cancellation impossible with blocking control flow!')
def testCancelledUnaryRequestStreamResponse(self):
raise NotImplementedError()
@unittest.skip('Cancellation impossible with blocking control flow!')
def testCancelledStreamRequestUnaryResponse(self):
raise NotImplementedError()
@unittest.skip('Cancellation impossible with blocking control flow!')
def testCancelledStreamRequestStreamResponse(self):
raise NotImplementedError()
def testExpiredUnaryRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (
self._digest.unary_unary_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
request = test_messages.request()
with self._control.pause(), self.assertRaises(
face.ExpirationError):
self._invoker.blocking(group, method)(
request, test_constants.SHORT_TIMEOUT)
def testExpiredUnaryRequestStreamResponse(self):
for (group, method), test_messages_sequence in (
self._digest.unary_stream_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
request = test_messages.request()
with self._control.pause(), self.assertRaises(
face.ExpirationError):
response_iterator = self._invoker.blocking(group, method)(
request, test_constants.SHORT_TIMEOUT)
list(response_iterator)
def testExpiredStreamRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (
self._digest.stream_unary_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
with self._control.pause(), self.assertRaises(
face.ExpirationError):
self._invoker.blocking(group, method)(
iter(requests), test_constants.SHORT_TIMEOUT)
def testExpiredStreamRequestStreamResponse(self):
for (group, method), test_messages_sequence in (
self._digest.stream_stream_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
with self._control.pause(), self.assertRaises(
face.ExpirationError):
response_iterator = self._invoker.blocking(group, method)(
iter(requests), test_constants.SHORT_TIMEOUT)
list(response_iterator)
def testFailedUnaryRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (
self._digest.unary_unary_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
request = test_messages.request()
with self._control.fail(), self.assertRaises(face.RemoteError):
self._invoker.blocking(group, method)(
request, test_constants.LONG_TIMEOUT)
def testFailedUnaryRequestStreamResponse(self):
for (group, method), test_messages_sequence in (
self._digest.unary_stream_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
request = test_messages.request()
with self._control.fail(), self.assertRaises(face.RemoteError):
response_iterator = self._invoker.blocking(group, method)(
request, test_constants.LONG_TIMEOUT)
list(response_iterator)
def testFailedStreamRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (
self._digest.stream_unary_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
with self._control.fail(), self.assertRaises(face.RemoteError):
self._invoker.blocking(group, method)(
iter(requests), test_constants.LONG_TIMEOUT)
def testFailedStreamRequestStreamResponse(self):
for (group, method), test_messages_sequence in (
self._digest.stream_stream_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
with self._control.fail(), self.assertRaises(face.RemoteError):
response_iterator = self._invoker.blocking(group, method)(
iter(requests), test_constants.LONG_TIMEOUT)
list(response_iterator)
| bsd-3-clause | -4,736,542,116,059,037,000 | 41.404 | 96 | 0.72163 | false |
mfazliazran/raft | analysis/resultsclasses/AnalysisResults.py | 11 | 6849 | #
# Author: Justin Engler
#
# Copyright (c) 2011 RAFT Team
#
# This file is part of RAFT.
#
# RAFT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RAFT is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with RAFT. If not, see <http://www.gnu.org/licenses/>.
from .ResultSet import ResultSet
from .SingleResult import SingleResult
from PyQt4.QtGui import *
from PyQt4.QtCore import Qt
class AnalysisResults(object):
"""Contains all results found for a given analysis."""
def __init__(self , resultfactory=None):
"""Results that span across multiple pages"""
self.overall={}
"""Results that apply to a single page"""
self.pages={}
"""pages scanned with no results"""
self.nofindings={}
"""counts of total results within each grouping"""
self.resultcounts={'Overall':0,'Page':0,'No':0}
self.desc=None
self.friendlyname=None
self.analyzerclass=None
self.resultfactory=resultfactory
#############################Standard Functions
#############################Functions often used when writing an analyzer
def addPageResult(self, pageid, url, type, desc, data, span=None, severity=None, certainty=None, highlightdata=None):
"""Adds a new per-page standard result to the given pageid for this analysis"""
self.addCustomPageResult(pageid,
SingleResult(type, desc, data, span, severity, certainty, highlightdata=highlightdata),url)
def addOverallResult(self, type, desc, data, span=None, severity=None, certainty=None, context=None, highlightdata=None):
"""Adds a new overall result to this analysis"""
self.addCustomOverallResult(SingleResult(type, desc, data, span, severity, certainty, highlightdata=highlightdata),context)
#############################Special Functions
#############################You shouldn't need to call these unless you're doing something crazy.
def addCustomPageResult(self,pageid,result,url):
if pageid not in self.pages:
self.pages[pageid]=ResultSet(pageid,False,url)
self.pages[pageid].addResult(result)
def addCustomOverallResult(self,result,context):
"""Adds an arbitrary result object to the overall results."""
if context not in self.overall:
self.overall[context]=ResultSet(None,True,context)
self.overall[context].addResult(result)
def setAnalyzerInfo(self, newdesc,newfriendlyname, newanalyzerclass):
self.desc=newdesc
self.friendlyname=newfriendlyname
self.analyzerclass=newanalyzerclass
def toHTML(self):
"""returns an HTML representation of the entire analysis"""
finaloutput=self.generalInfoToHTML()
if len(self.overall) > 0:
finaloutput+='<h2>Overall Results</h2>'
for k in list(self.overall.keys()):
finaloutput+=self.overall[k].toHTML()
if len(self.pages)>0:
finaloutput+='<h2>Results for each page analyzed</h2>'
for k in list(self.pages.keys()):
finaloutput+=self.pages[k].toHTML()
return finaloutput
def generalInfoToHTML(self):
"""Returns an HTML 'header' string describing the test performed"""
outstring="""<h1>%s</h1>
<p>(%s)</p>
<p>%s</p>
"""%(self.friendlyname,self.analyzerclass,self.desc)
return outstring
def generateTreeItem(self,parentnode):
tempitem=QTreeWidgetItem(parentnode)
tempitem.setText(0,str(self.friendlyname))
tempitem.setText(1,"".join((str(self.numresults),' results')))
tempitem.setFlags(Qt.ItemIsEnabled|Qt.ItemIsSelectable)
tempitem.customdata=self
return tempitem
def generateTreeChildren(self,db,cursor,parentnode):
if self.resultfactory is not None:
factoryitems=self.resultfactory.createItems(self, self.instanceid,db,cursor)
self.resultcounts, self.overall, self.pages, self.nofindings = factoryitems
else:
#If this tree item came from the db, and we haven't populated it yet, populate it.
if self.dbgenerated and not self.dbretrieved:
resultsets=db.analysis_get_resultsets_per_instance(cursor,self.instanceid)
for resultset in resultsets:
numresults=resultset[5]
if resultset[2]:
store=self.overall
storekey=resultset[3]
self.resultcounts['Overall']+=numresults
tempRS=ResultSet(storekey,None,True)
elif numresults>0:
store=self.pages
storekey=resultset[1]
self.resultcounts['Page']+=numresults
tempRS=ResultSet(resultset[3],storekey,False)
else:
store=self.nofindings
storekey=resultset[1]
tempRS=ResultSet(resultset[3],storekey,False)
tempRS.dbgenerated=True
tempRS.dbretrieved=False
tempRS.resultsetid=resultset[0]
tempRS.numresults=resultset[5]
store[storekey]=tempRS
self.dbretrieved=True
#Now that the tree is populated, make the nodes
childnodes=list()
for name,store in (('Overall',self.overall),('Page',self.pages), ('No',self.nofindings)):
storelen=len(store)
if storelen>0:
tempitem=QTreeWidgetItem(parentnode)
tempitem.setText(0,'%s Results'%name)
tempitem.setText(1,'%s results in %s set%s'%(self.resultcounts[name],str(storelen),'s' if storelen>1 else ''))
tempitem.setFlags(Qt.ItemIsEnabled|Qt.ItemIsSelectable)
childnodes.append(tempitem)
for k in store:
store[k].generateTreeItem(tempitem)
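# --- Hedged usage sketch (not part of the original RAFT module) ---
# Shows how an analyzer might record findings; the page id, URL and finding
# text are invented, and running this standalone still needs PyQt4 because of
# the imports at the top of the module.
if __name__ == '__main__':
    results = AnalysisResults()
    results.setAnalyzerInfo('Example analyzer description', 'Example Analyzer', 'ExampleAnalyzer')
    results.addOverallResult('Info', 'Server banner observed', 'Apache/2.2.22')
    results.addPageResult(1, 'http://example.com/', 'Info', 'HTML comment found', '<!-- TODO -->')
    print(results.generalInfoToHTML())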
| gpl-3.0 | 2,790,215,453,586,914,000 | 41.018405 | 135 | 0.585779 | false |
dennybaa/st2 | st2common/st2common/models/system/actionchain.py | 8 | 5983 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
import string
from st2common.util import schema as util_schema
from st2common.models.api.notification import NotificationSubSchemaAPI
class Node(object):
schema = {
"title": "Node",
"description": "Node of an ActionChain.",
"type": "object",
"properties": {
"name": {
"description": "The name of this node.",
"type": "string",
"required": True
},
"ref": {
"type": "string",
"description": "Ref of the action to be executed.",
"required": True
},
"params": {
"type": "object",
"description": ("Parameter for the execution (old name, here for backward "
"compatibility reasons)."),
"default": {}
},
"parameters": {
"type": "object",
"description": "Parameter for the execution.",
"default": {}
},
"on-success": {
"type": "string",
"description": "Name of the node to invoke on successful completion of action"
" executed for this node.",
"default": ""
},
"on-failure": {
"type": "string",
"description": "Name of the node to invoke on failure of action executed for this"
" node.",
"default": ""
},
"publish": {
"description": "The variables to publish from the result. Should be of the form"
" name.foo. o1: {{node_name.foo}} will result in creation of a"
" variable o1 which is now available for reference through"
" remainder of the chain as a global variable.",
"type": "object",
"patternProperties": {
"^\w+$": {}
}
},
"notify": {
"description": "Notification settings for action.",
"type": "object",
"properties": {
"on-complete": NotificationSubSchemaAPI,
"on-failure": NotificationSubSchemaAPI,
"on-success": NotificationSubSchemaAPI
},
"additionalProperties": False
}
},
"additionalProperties": False
}
def __init__(self, **kw):
for prop in six.iterkeys(self.schema.get('properties', [])):
value = kw.get(prop, None)
# having '-' in the property name lead to challenges in referencing the property.
# At hindsight the schema property should've been on_success rather than on-success.
prop = string.replace(prop, '-', '_')
setattr(self, prop, value)
def validate(self):
params = getattr(self, 'params', {})
parameters = getattr(self, 'parameters', {})
if params and parameters:
msg = ('Either "params" or "parameters" attribute needs to be provided, but not '
'both')
raise ValueError(msg)
return self
def get_parameters(self):
# Note: "params" is old deprecated attribute which will be removed in a future release
params = getattr(self, 'params', {})
parameters = getattr(self, 'parameters', {})
return parameters or params
def __repr__(self):
return ('<Node name=%s, ref=%s, on-success=%s, on-failure=%s>' %
(self.name, self.ref, self.on_success, self.on_failure))
class ActionChain(object):
schema = {
"title": "ActionChain",
"description": "A chain of sequentially executed actions.",
"type": "object",
"properties": {
"chain": {
"description": "The chain.",
"type": "array",
"items": [Node.schema],
"required": True
},
"default": {
"type": "string",
"description": "name of the action to be executed."
},
"vars": {
"description": "",
"type": "object",
"patternProperties": {
"^\w+$": {}
}
}
},
"additionalProperties": False
}
def __init__(self, **kw):
util_schema.validate(instance=kw, schema=self.schema, cls=util_schema.CustomValidator,
use_default=False, allow_default_none=True)
for prop in six.iterkeys(self.schema.get('properties', [])):
value = kw.get(prop, None)
# special handling for chain property to create the Node object
if prop == 'chain':
nodes = []
for node in value:
ac_node = Node(**node)
ac_node.validate()
nodes.append(ac_node)
value = nodes
setattr(self, prop, value)
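# --- Hedged usage sketch (not part of the original st2 module) ---
# Builds the ActionChain model from an already-parsed chain definition; the
# action refs and parameters below are illustrative only.
if __name__ == '__main__':
    definition = {
        'chain': [
            {'name': 'ping', 'ref': 'core.local',
             'parameters': {'cmd': 'ping -c 1 localhost'},
             'on-success': 'done'},
            {'name': 'done', 'ref': 'core.noop'},
        ],
        'default': 'ping',
    }
    chain = ActionChain(**definition)
    print([(node.name, node.get_parameters()) for node in chain.chain])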
| apache-2.0 | 3,221,834,506,381,522,400 | 36.39375 | 98 | 0.508608 | false |
peter-jang/ansible-modules-core | network/junos/junos_facts.py | 19 | 4038 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = """
---
module: junos_facts
version_added: "2.1"
author: "Peter Sprygada (@privateip)"
short_description: Collect facts from remote device running Junos
description:
- Collects fact information from a remote device running the Junos
operating system. By default, the module will collect basic fact
information from the device to be included with the hostvars.
Additional fact information can be collected based on the
configured set of arguments.
extends_documentation_fragment: junos
options:
config:
description:
- The C(config) argument instructs the fact module to collect
the configuration from the remote device. The configuration
is then included in return facts. By default, the configuration
is returned as text. The C(config_format) can be used to return
different Junos configuration formats.
required: false
default: null
config_format:
description:
- The C(config_format) argument is used to specify the desired
format of the configuration file. Devices support three
configuration file formats. By default, the configuration
from the device is returned as text. The other options include
        set and xml. If the xml option is chosen, the configuration file
is returned as both xml and json.
required: false
default: text
choices: ['xml', 'text', 'set']
requirements:
- junos-eznc
notes:
- This module requires the netconf system service be enabled on
the remote device being managed
"""
EXAMPLES = """
# the required set of connection arguments have been purposely left off
# the examples for brevity
- name: collect default set of facts
junos_facts:
- name: collect default set of facts and configuration
junos_facts:
config: yes
- name: collect default set of facts and configuration in set format
junos_facts:
config: yes
config_format: set
- name: collect default set of facts and configuration in XML and JSON format
junos_facts:
config: yes
config_format: xml
"""
RETURN = """
ansible_facts:
  description: Returns the facts collected from the device
returned: always
type: dict
"""
def main():
""" Main entry point for AnsibleModule
"""
spec = dict(
config=dict(type='bool'),
config_format=dict(default='text', choices=['xml', 'set', 'text']),
transport=dict(default='netconf', choices=['netconf'])
)
module = get_module(argument_spec=spec,
supports_check_mode=True)
result = dict(changed=False)
facts = module.get_facts()
if '2RE' in facts:
facts['has_2RE'] = facts['2RE']
del facts['2RE']
facts['version_info'] = dict(facts['version_info'])
if module.params['config'] is True:
config_format = module.params['config_format']
resp_config = module.get_config( config_format=config_format)
if config_format in ['text', 'set']:
facts['config'] = resp_config
elif config_format == "xml":
facts['config'] = xml_to_string(resp_config)
facts['config_json'] = xml_to_json(resp_config)
result['ansible_facts'] = facts
module.exit_json(**result)
from ansible.module_utils.basic import *
from ansible.module_utils.junos import *
if __name__ == '__main__':
main()
| gpl-3.0 | 4,739,418,881,217,886,000 | 30.546875 | 77 | 0.688212 | false |
381426068/MissionPlanner | Lib/encodings/unicode_internal.py | 103 | 1241 | """ Python 'unicode-internal' Codec
Written by Marc-Andre Lemburg ([email protected]).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import codecs
### Codec APIs
class Codec(codecs.Codec):
# Note: Binding these as C functions will result in the class not
# converting them to methods. This is intended.
encode = codecs.unicode_internal_encode
decode = codecs.unicode_internal_decode
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.unicode_internal_encode(input, self.errors)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.unicode_internal_decode(input, self.errors)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='unicode-internal',
encode=Codec.encode,
decode=Codec.decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamwriter=StreamWriter,
streamreader=StreamReader,
)
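# --- Illustrative round-trip (not part of the original codec module) ---
# Codec.encode/decode are the raw codecs.unicode_internal_* functions bound
# on the class, so they can be called directly; the sample text is arbitrary.
if __name__ == '__main__':
    data, _length = Codec.encode(u'abc')
    text, _length = Codec.decode(data)
    print(repr(text))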
| gpl-3.0 | 7,888,333,760,609,532,000 | 25.577778 | 69 | 0.695407 | false |
lnielsen/invenio | invenio/legacy/bibformat/templates.py | 3 | 75703 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2014 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""HTML Templates for BibFormat administration"""
__revision__ = "$Id$"
# non Invenio imports
import cgi
from flask import url_for
# Invenio imports
from invenio.base.i18n import gettext_set_language
from invenio.config import CFG_SITE_URL, CFG_SITE_SECURE_URL
from invenio.base.i18n import language_list_long
MAX_MAPPINGS = 100 #show max this number of mappings on one page
class Template(object):
"""Templating class, refer to bibformat.py for examples of call"""
def tmpl_admin_index(self, ln, warnings, is_admin):
"""
Returns the main BibFormat admin page.
@param ln: language
@param warnings: a list of warnings to display at top of page. None if no warning
@param is_admin: indicate if user is authorized to use BibFormat
@return: main BibFormat admin page
"""
_ = gettext_set_language(ln) # load the right message language
out = ''
if warnings:
out += '''
<table width="66%%" class="errorbox" style="margin-left: auto; margin-right: auto;">
<tr>
<th class="errorboxheader">
%(warnings)s
</th>
</tr>
</table>
''' % {'warnings': '<br/>'.join(warnings)}
out += '''
<p>
This is where you can edit the formatting styles available for the records. '''
if not is_admin:
out += '''You need to
<a href="%(siteurl)s/youraccount/login?referer=%(siteurl)s/admin/bibformat/bibformatadmin.py">login</a> to enter.
''' % {'siteurl': CFG_SITE_URL}
out += '''
</p>
<dl>
<dt><a href="%(siteurl)s/admin/bibformat/bibformatadmin.py/format_templates_manage?ln=%(ln)s">Manage Format Templates</a></dt>
<dd>Define how to format a record.</dd>
</dl>
<dl>
<dt><a href="%(siteurl)s/admin/bibformat/bibformatadmin.py/output_formats_manage?ln=%(ln)s">Manage Output Formats</a></dt>
<dd>Define which template is applied to which record for a given output.</dd>
</dl>
<br/>
<dl>
<dt><a href="%(siteurl)s/admin/bibformat/bibformatadmin.py/format_elements_doc?ln=%(ln)s">Format Elements Documentation</a></dt>
<dd>Documentation of the format elements to be used inside format templates.</dd>
</dl>
<dl>
<dt><a href="%(siteurl)s/help/admin/bibformat-admin-guide">BibFormat Admin Guide</a></dt>
<dd>Documentation about BibFormat administration</dd>
</dl>
'''% {'siteurl': CFG_SITE_URL, 'ln': ln}
return out
def tmpl_admin_format_template_show_attributes(self, ln, name, description, filename, editable,
all_templates=[], new=False):
"""
Returns a page to change format template name and description
If template is new, offer a way to create a duplicate from an
existing template
@param ln: language
@param name: the name of the format
@param description: the description of the format
@param filename: the filename of the template
@param editable: True if we let user edit, else False
@param all_templates: a list of tuples (filename, name) of all other templates
@param new: if True, the format template has just been added (is new)
@return: editor for 'format'
"""
_ = gettext_set_language(ln) # load the right message language
out = ""
out += '''
<table class="admin_wvar" cellspacing="0">
<tr><th colspan="4" class="adminheaderleft">%(menu)s</th></tr>
<tr>
<td>0. <small><a href="format_templates_manage?ln=%(ln)s">%(close_editor)s</a></small> </td>
<td>1. <small><a href="format_template_show?ln=%(ln)s&bft=%(filename)s">%(template_editor)s</a></small> </td>
<td>2. <small>%(modify_template_attributes)s</small> </td>
<td>3. <small><a href="format_template_show_dependencies?ln=%(ln)s&bft=%(filename)s">%(check_dependencies)s</a></small> </td>
</tr>
</table><br/>
''' % {'ln': ln,
'menu': _("Menu"),
'filename': filename,
'close_editor': _("Close Editor"),
'modify_template_attributes': _("Modify Template Attributes"),
'template_editor': _("Template Editor"),
'check_dependencies': _("Check Dependencies")
}
disabled = ""
readonly = ""
if not editable:
disabled = 'disabled="disabled"'
readonly = 'readonly="readonly"'
out += '''
<form action="format_template_update_attributes?ln=%(ln)s&bft=%(filename)s" method="POST">
''' % {'ln': ln,
'filename': filename}
if new:
#Offer the possibility to make a duplicate of existing format template code
out += '''
<table><tr>
<th class="adminheaderleft">Make a copy of format template: [<a href="%(siteurl)s/help/admin/bibformat-admin-guide#addFormatTemplate">?</a>]</th>
</tr>
<tr>
<td><select tabindex="1" name="duplicate" id="duplicate" %(readonly)s>
<option value="">None (Blank Page)</option>
<option value="" disabled="disabled">-------------</option>
''' % {'siteurl': CFG_SITE_URL,
'readonly': readonly}
for o_filename, o_name in all_templates:
out += '''<option value="%(template_filename)s">%(template_name)s</option>''' % {'template_name': o_name,
'template_filename': o_filename}
out += ''' </select>
</td></tr></table>'''
out += '''
<table><tr>
<th colspan="2" class="adminheaderleft">%(name)s attributes [<a href="%(siteurl)s/help/admin/bibformat-admin-guide#attrsFormatTemplate">?</a>]</th>
</tr>
<tr>
<td class="admintdright">
<input type="hidden" name="key" value="%(name)s"/>
<label for="name">%(name_label)s</label>: </td>
<td><input tabindex="2" name="name" type="text" id="name" size="25" value="%(name)s" %(readonly)s/>
<input type="hidden" value="%(filename)s"/>
</td>
</tr>
''' % {'name': name,
'filename': filename,
'readonly': readonly,
'name_label': _("Name"),
'siteurl': CFG_SITE_URL
}
out += '''
<tr>
<td class="admintdright" valign="top"><label for="description">%(description_label)s</label>: </td>
<td><textarea tabindex="3" name="description" id="description" rows="4" cols="25" %(readonly)s>%(description)s</textarea> </td>
</tr>
<tr>
<td> </td>
<td align="right"><input tabindex="6" class="adminbutton" type="submit" value="%(update_format_attributes)s" %(disabled)s/></td>
</tr>
</table></form>
''' % {"description": description,
'disabled': disabled,
'readonly': readonly,
'description_label': _("Description"),
'update_format_attributes': _("Update Format Attributes"),
}
return out
def tmpl_admin_format_template_show_dependencies(self, ln, name, filename, output_formats, format_elements, tags):
"""
Shows the dependencies (on elements) of the given format.
@param ln: language
@param name: the name of the template
@param filename: the filename of the template
@param format_elements: the elements (and list of tags in each element) this template depends on
@param output_formats: the output format that depend on this template
@param tags: the tags that are called by format elements this template depends on.
@return: HTML markup
"""
_ = gettext_set_language(ln) # load the right message language
out = '''
<table class="admin_wvar" cellspacing="0">
<tr><th colspan="4" class="adminheaderleft">%(menu)s</th></tr>
<tr>
<td>0. <small><a href="format_templates_manage?ln=%(ln)s">%(close_editor)s</a> </small></td>
<td>1. <small><a href="format_template_show?ln=%(ln)s&bft=%(filename)s">%(template_editor)s</a></small> </td>
<td>2. <small><a href="format_template_show_attributes?ln=%(ln)s&bft=%(filename)s">%(modify_template_attributes)s</a></small> </td>
<td>3. <small>%(check_dependencies)s</small> </td>
</tr>
</table>
<table width="90%%" class="admin_wvar" cellspacing="0"><tr>
<th class="adminheaderleft">Output Formats that use %(name)s</th>
<th class="adminheaderleft">Format Elements used by %(name)s*</th>
<th class="adminheaderleft">All Tags Called*</th>
</tr>
<tr>
<td valign="top"> <br/>
''' % {'ln': ln,
'filename': filename,
'menu': _("Menu"),
'close_editor': _("Close Editor"),
'modify_template_attributes': _("Modify Template Attributes"),
'template_editor': _("Template Editor"),
'check_dependencies': _("Check Dependencies"),
'name': name}
#Print output formats
if len(output_formats) == 0:
out += '<p align="center"><i>No output format uses this format template.</i></p>'
for output_format in output_formats:
name = output_format['names']['generic']
filename = output_format['filename']
out += ''' <a href="output_format_show?ln=%(ln)s&bfo=%(filename)s">%(name)s</a>''' % {'filename': filename,
'name': name,
'ln': ln}
if len(output_format['tags']) > 0:
out += "("+", ".join(output_format['tags'])+")"
out += "<br/>"
#Print format elements (and tags)
out += '</td><td valign="top"> <br/>'
if len(format_elements) == 0:
out += '<p align="center"><i>This format template uses no format element.</i></p>'
for format_element in format_elements:
name = format_element['name']
out += ''' <a href="format_elements_doc?ln=%(ln)s#%(anchor)s">%(name)s</a>''' % {'name': "bfe_"+name.lower(),
'anchor': name.upper(),
'ln': ln}
if len(format_element['tags']) > 0:
out += "("+", ".join(format_element['tags'])+")"
out += "<br/>"
#Print tags
out += '</td><td valign="top"> <br/>'
if len(tags) == 0:
out += '<p align="center"><i>This format template uses no tag.</i></p>'
for tag in tags:
out += '''%(tag)s<br/>''' % {'tag': tag}
out += '''
</td>
</tr>
</table>
<b>*Note</b>: Some tags linked with this format template might not be shown. Check manually.
'''
return out
def tmpl_admin_format_template_show(self, ln, code, filename,
ln_for_preview, pattern_for_preview,
editable, content_type_for_preview,
content_types):
"""
Returns the editor for format templates. Edit format with given X{name}
@param ln: language
@param name: the format to edit
@param description: the description of the format template
@param code: the code of the template of the editor
@param filename: the filename of the template
@param ln_for_preview: the language for the preview (for bfo)
@param pattern_for_preview: the search pattern to be used for the preview (for bfo)
@param editable: True if we let user edit, else False
@param content_type_for_preview: content-type to use for preview
@param content_types: list of available content-types
@return: editor for 'format'
"""
_ = gettext_set_language(ln) # load the right message language
out = ""
# If xsl, hide some options in the menu
nb_menu_options = 4
if filename.endswith('.xsl'):
nb_menu_options = 2
out += '''
<style type="text/css">
<!--
.ed_button {
font-size: x-small;
}
-->
</style>
<script src="%(quicktags)s" type="text/javascript"></script>
<script type="text/javascript">
/* Ask user confirmation before leaving page */
var user_must_confirm_before_leaving_page = false;
window.onbeforeunload = confirmExit;
function confirmExit() {
if (user_must_confirm_before_leaving_page)
return "%(leave_editor_message)s";
}
function getByID( id ) {
if (document.getElementById)
var returnVar = document.getElementById(id);
else if (document.all)
var returnVar = document.all[id];
else if (document.layers)
var returnVar = document.layers[id];
return returnVar;
}
window.onresize= resizeViews;
window.onload= prepareLayout;
function prepareLayout(){
resizeViews();
}
function resizeViews(){
var myWidth = 0, myHeight = 0;
if( typeof( window.innerWidth ) == 'number' ) {
//Non-IE
myWidth = window.innerWidth;
myHeight = window.innerHeight;
} else if( document.documentElement && ( document.documentElement.clientWidth || document.documentElement.clientHeight ) ) {
//IE 6+ in 'standards compliant mode'
myWidth = document.documentElement.clientWidth;
myHeight = document.documentElement.clientHeight;
} else if( document.body && ( document.body.clientWidth || document.body.clientHeight ) ) {
//IE 4 compatible
myWidth = document.body.clientWidth;
myHeight = document.body.clientHeight;
}
if (myHeight <= 400) {
getByID("code").style.height=10;
getByID("previewiframe").style.height=10;
} else{
getByID("code").style.height=((myHeight-400)/2);
getByID("previewiframe").style.height=((myHeight-400)/2);
}
getByID("previewiframe").style.height=200;
// Resize documentation
var height = document.documentElement.clientHeight;
height -= getByID('shortDocFrame').offsetTop
//height -= 20;
getByID('shortDocFrame').style.height = height +"px";
}
</script>
<table class="admin_wvar" cellspacing="0">
<tr><th colspan="%(nb_menu_options)s" class="adminheaderleft">%(menu)s</th></tr>
<tr>
<td>0. <small><a href="format_templates_manage?ln=%(ln)s">%(close_editor)s</a></small> </td>
<td>1. <small>%(template_editor)s</small> </td>
''' % {'ln': ln,
'menu': _("Menu"),
'close_editor': _("Close Editor"),
'template_editor': _("Template Editor"),
'nb_menu_options': nb_menu_options,
'siteurl': CFG_SITE_SECURE_URL or CFG_SITE_URL,
'leave_editor_message': _('Your modifications will not be saved.').replace('"', '\\"'),
'quicktags': url_for('formatter.static',
filename='js/formatter/quicktags.js'),
}
if not filename.endswith('.xsl'):
out +='''<td>2. <small><a href="format_template_show_attributes?ln=%(ln)s&bft=%(filename)s">%(modify_template_attributes)s</a></small> </td>
<td>3. <small><a href="format_template_show_dependencies?ln=%(ln)s&bft=%(filename)s">%(check_dependencies)s</a></small> </td>
''' % {'ln': ln,
'filename': filename,
'modify_template_attributes': _("Modify Template Attributes"),
'check_dependencies': _("Check Dependencies"),
}
out +='''
</tr>
</table>
<script type="text/javascript">
function toggle_doc_visibility(){
var doc = document.getElementById('docTable');
var link = document.getElementById('docLink');
if (doc.style.display=='none'){
doc.style.display = '';
link.innerHTML = "%(label_hide_doc)s"
} else {
doc.style.display = 'none';
link.innerHTML = "%(label_show_doc)s"
}
}
</script>
''' % {'label_show_doc': _("Show Documentation"),
'label_hide_doc': _("Hide Documentation"),
}
disabled = ""
readonly = ""
toolbar = """<script type="text/javascript">edToolbar('%s/admin/bibformat/bibformatadmin.py/format_elements_doc?ln=%s');</script>""" % (CFG_SITE_URL, ln)
if not editable:
disabled = 'disabled="disabled"'
readonly = 'readonly="readonly"'
toolbar = ''
#First column: template code and preview
out += '''
<table width="90%%" cellspacing="5">
<tr>
<td valign="top">
<form action="format_template_show_preview_or_save?ln=%(ln)s&bft=%(filename)s" method="POST" target="previewiframe">
<table width="100%%" id="mainTable"><tr>
<th class="adminheaderleft"><div style="float:left;">Format template code</div>
<div style="float:right;">
<a id="docLink" href="#" onclick="toggle_doc_visibility()">%(label_hide_doc)s</a>
</div>
</th>
</tr>
<tr><td colspan="2" id="codetd">
%(toolbar)s
<textarea name="code" id="code" rows="25" %(readonly)s
style="width:100%%" onchange="user_must_confirm_before_leaving_page=true;">%(code)s</textarea>
<script type="text/javascript">var edCanvas = document.getElementById('code');</script>
</td></tr>
<tr><td align="right" valign="top">
<input type="submit" class="adminbutton" name="save_action" value="Save Changes" onclick="user_must_confirm_before_leaving_page=false;" %(disabled)s/>
</td>
</tr>
</table>
<table width="100%%">
<tr><th class="adminheaderleft">
Preview
</th>
</tr>
<tr><td align="right" valign="top" style="font-size: small;">
<nobr>
<label for="content_type_for_preview">Content-type (MIME):</label> <select id="content_type_for_preview" name="content_type_for_preview" style="font-size: x-small;">
''' % {'ln': ln,
'filename': filename,
'label_hide_doc': _("Hide Documentation"),
'code': code,
'readonly': readonly,
'disabled': disabled,
'toolbar': toolbar}
for content_type in content_types:
if content_type == content_type_for_preview:
out += '''<option value="%(content_type)s" selected="selected">%(content_type)s</option>''' % {'content_type': content_type}
else:
out += '''<option value="%(content_type)s">%(content_type)s</option>''' % {'content_type': content_type}
out += '''
</select></nobr>
<nobr><label for="ln_for_preview">Language:</label> <select id="ln_for_preview" name="ln_for_preview" style="font-size: x-small;">
'''
for lang in language_list_long():
if lang[0] == ln_for_preview:
out += '''<option value="%(ln)s" selected="selected">%(language)s</option>''' % {'ln': lang[0],
'language': lang[1]}
else:
out += '''<option value="%(ln)s">%(language)s</option>''' % {'ln': lang[0], 'language': lang[1]}
out += '''
</select></nobr>
<nobr><label for="pattern_for_preview">Search Pattern: </label><input type="text" value="%(pattern_for_preview)s" size="8" name="pattern_for_preview" id="pattern_for_preview" style="font-size: x-small;"/></nobr>
<input type="submit" class="adminbutton" name="preview_action" value="Reload Preview"/>
</td>
</tr>
<tr><td>
<iframe src ="%(siteurl)s/admin/bibformat/bibformatadmin.py/format_template_show_preview_or_save?ln=%(ln)s&ln_for_preview=%(ln_for_preview)s&pattern_for_preview=%(pattern_for_preview)s&bft=%(filename)s" name="previewiframe" id="previewiframe" width="100%%" height="400"></iframe>
</td></tr>
</table>
</form>
</td>
''' % {'ln': ln,
'siteurl': CFG_SITE_URL, 'filename': filename,
'ln_for_preview': ln_for_preview,
'pattern_for_preview': pattern_for_preview
}
#Second column Print documentation
out += '''
<td valign="top" id="docTable">
<table width="100%%"><tr>
<th class="adminheaderleft">Elements Documentation</th>
</tr>
</table>
<table width="100%%"><tr>
<td class="admintdright">
<form action="format_template_show_short_doc?ln=%(ln)s" method="POST" target="shortDocFrame">
<nobr><label for="search_doc_pattern">Search for: </label><input type="text" size="15" name="search_doc_pattern" id="search_doc_pattern" value=""/> <input type="submit" class="adminbutton" name="search_in_doc" value="Search" /></nobr>
</form>
</td>
</tr>
</table>
<iframe name="shortDocFrame" id="shortDocFrame" src ="%(siteurl)s/admin/bibformat/bibformatadmin.py/format_template_show_short_doc?ln=%(ln)s" height="90%%" width="98%%"></iframe>
</td>
</tr>
</table>
''' % {'siteurl': CFG_SITE_URL, 'ln': ln}
return out
def tmpl_admin_format_template_show_short_doc(self, format_elements):
"""
Prints the format element documentation in a condensed way to display
inside format template editor.
This page is different from others: it is displayed inside a <iframe>
tag in template tmpl_admin_format_template_show.
@param ln: language
@param format_elements: a list of format elements structures as returned by get_format_elements
@return: HTML markup
"""
out = '''
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html>
<head>
<title>BibFormat Short Documentation of Format Elements</title>
<link rel="stylesheet" href="%(siteurl)s/img/invenio.css">
<script src="%(quicktags)s" type="text/javascript"></script>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
</head>
<body>
<script type="text/javascript">
function toggle_visibility(element, show, r,g,b){
var children = element.childNodes
var child
for(x=0; x<children.length; x++){
if (children[x].id == 'params'){
child = children[x]
}
}
if (show=='show'){
element.style.background='rgb(201, 218, 255)'
element.style.cursor='pointer'
child.style.display=''
} else {
element.style.background="rgb("+r+","+g+","+b+")"
child.style.display='none'
}
}
///// FROM JS QuickTags ///////
// Copyright (c) 2002-2005 Alex King
// http://www.alexking.org/
//
// Licensed under the LGPL license
// http://www.gnu.org/copyleft/lesser.html
function insertAtCursor(myField, myValue) {
//IE support
if (document.selection) {
myField.focus();
sel = document.selection.createRange();
sel.text = myValue;
}
//MOZILLA/NETSCAPE support
else if (myField.selectionStart || myField.selectionStart == '0') {
var startPos = myField.selectionStart;
var endPos = myField.selectionEnd;
myField.value = myField.value.substring(0, startPos)
+ myValue
+ myField.value.substring(endPos, myField.value.length);
} else {
myField.value += myValue;
}
}
///// END FROM JS QuickTags /////
function insert_my_code_into_container(code){
var codeArea = parent.document.getElementById("code");
if (codeArea.readOnly == false){
//var clean_code = code.replace(=#,'="');
//clean_code = clean_code.replace(# ,'" ');
insertAtCursor(codeArea, code);
}
}
</script>
''' % {
'siteurl': CFG_SITE_SECURE_URL or CFG_SITE_URL,
'quicktags': url_for('formatter.static',
filename='js/formatter/quicktags.js')
}
if len(format_elements) == 0:
out += '''
<em>No format elements found</em>
'''
else:
line = 0
#Print elements doc
for format_element in format_elements:
format_attributes = format_element['attrs']
row_content = ""
name = format_attributes['name']
description = format_attributes['description']
params = [x['name'] + '=\u0022'+str(x['default'])+'\u0022' for x in format_attributes['params']]
builtin_params = [x['name'] + '=\u0022'+str(x['default'])+'\u0022' for x in format_attributes['builtin_params']]
code = "<BFE_" + name + ' ' + ' '.join(builtin_params)+ ' ' + ' '.join(params) +"/>"
if line % 2:
row_content += '''<div onmouseover="toggle_visibility(this, 'show', 235, 247, 255);"
onmouseout="toggle_visibility(this, 'hide', 235, 247, 255);"
style="background-color: rgb(235, 247, 255);"
onclick="insert_my_code_into_container('%s')"
><hr/>''' % code
else:
row_content += '''<div onmouseover="toggle_visibility(this, 'show', 255, 255, 255);"
onmouseout="toggle_visibility(this, 'hide', 255, 255, 255);"
onclick="insert_my_code_into_container('%s')"
>''' % code
row_content += '''
<code> <b><BFE_%(name)s/></b><br/></code>
<small>%(description)s.</small>
<div id="params" style="display:none;">
<ul>
''' % {'name': name, 'description': description}
for param in format_attributes['params']:
row_content += '''
<li><small><b>%(name)s</b>: %(description)s</small></li>
''' % {'name': param['name'],
'description': param['description']}
for param in format_attributes['builtin_params']:
row_content += '''
<li><small><b>%(name)s</b>: %(description)s</small></li>
''' % {'name': param['name'],
'description': param['description']}
row_content += '</ul></div>'
if line % 2:
row_content += '''<hr/></div>'''
else:
row_content += '</div>'
line += 1
out += row_content
out += '''</body></html>'''
return out
def tmpl_admin_format_templates_management(self, ln, formats):
"""
Returns the management console for formats. Includes list of formats and
associated administration tools.
@param ln: language
@param formats: a list of dictionaries with formats attributes
@return: format management console as html
"""
_ = gettext_set_language(ln) # load the right message language
#top of the page and table header
out = '''
<table class="admin_wvar" cellspacing="0">
<tr><th colspan="4" class="adminheaderleft">%(menu)s</th></tr>
<tr>
<td>0. <small>%(manage_format_templates)s</small> </td>
<td>1. <small><a href="output_formats_manage?ln=%(ln)s">%(manage_output_formats)s</a> </td>
<td>2. <small><a href="format_elements_doc?ln=%(ln)s">%(format_elements_documentation)s</a></small> </td>
</tr>
</table>
<p>From here you can create, edit or delete format templates.
Have a look at the <a href="format_elements_doc?ln=%(ln)s">format elements documentation</a> to
learn which elements you can use in your templates.</p>
<table class="admin_wvar" width="95%%" cellspacing="0">
<tr>
<th class="adminheaderleft" > </th>
<th class="adminheaderleft" >%(name)s</th>
<th class="adminheaderleft" >%(description)s</th>
<th class="adminheaderleft" >%(status)s</th>
<th class="adminheaderleft" >%(last_modification_date)s</th>
<th class="adminheadercenter" >%(action)s [<a href="%(siteurl)s/help/admin/bibformat-admin-guide#formatTemplates">?</a>]</th>
</tr>
''' % {'name': _("Name"),
'description': _("Description"),
'menu': _("Menu"),
'status': _("Status"),
'last_modification_date': _("Last Modification Date"),
'action': _("Action"),
'ln': ln,
'manage_output_formats': _("Manage Output Formats"),
'manage_format_templates': _("Manage Format Templates"),
'format_elements_documentation': _("Format Elements Documentation"),
'siteurl': CFG_SITE_URL}
#table content: formats names, description and buttons
if len(formats) == 0:
out += '''<tr>
<td colspan="6" class="admintd" align="center"><em>No format</em></td>
</tr>'''
else:
line = 0
for attrs in formats:
filename = attrs['filename']
if filename == "":
filename = " "
name = attrs['name']
if name == "":
name = " "
description = attrs['description']
if description == "":
description = " "
last_mod_date = attrs['last_mod_date']
status = attrs['status']
disabled = ""
if not attrs['editable']:
disabled = 'disabled="disabled"'
style = 'style="vertical-align: middle;'
if line % 2:
style = 'style="vertical-align: middle;background-color: rgb(235, 247, 255);'
line += 1
row_content = '''<tr>
<td class="admintdright" %(style)s"> </td>
<td class="admintdleft" %(style)s white-space: nowrap;"><a href="format_template_show?bft=%(filename)s&ln=%(ln)s">%(name)s</a></td>
<td class="admintdleft" %(style)s" >%(description)s</td>
<td class="admintdleft" %(style)s white-space: nowrap;" >%(status)s</td>
<td class="admintdleft" %(style)s white-space: nowrap;" >%(last_mod_date)s</td>
<td class="admintd" %(style)s white-space: nowrap;">
<form method="post" action="format_template_delete?ln=%(ln)s&bft=%(filename)s">
<input class="adminbutton" type="submit" value="%(delete)s" %(disabled)s/>
</form>
</td>
</tr>
''' % {'filename': filename,
'name': name,
'description': description,
'ln': ln,
'style': style,
'disabled': disabled,
'last_mod_date': last_mod_date,
'status': status,
'delete': _("Delete")
}
out += row_content
#table footer, buttons and bottom of the page
out += '''
<tr>
<td align="left" colspan="3">
<form action="format_templates_manage?ln=%(ln)s">
<input type="hidden" name="checking" value="1"></input>
<input class="adminbutton" type="submit" value="%(extensive_checking)s"/>
</form>
</td>
<td align="right" colspan="3">
<form action="format_template_add?ln=%(ln)s">
<input class="adminbutton" type="submit" value="%(add_format_template)s"/>
</form>
</td>
</tr>
</table>
''' % {'ln': ln,
'add_format_template': _("Add New Format Template"),
'extensive_checking': _("Check Format Templates Extensively")}
return out
def tmpl_admin_output_formats_management(self, ln, output_formats):
"""
Returns the main management console for formats. Includes list of formats and
associated administration tools.
@param ln: language
@param output_formats: a list of output formats
@return: main management console as html
"""
_ = gettext_set_language(ln) # load the right message language
#top of the page and table header
out = '''
<table class="admin_wvar" cellspacing="0">
<tr><th colspan="4" class="adminheaderleft">%(menu)s</th></tr>
<tr>
<td>0. <small><a href="format_templates_manage?ln=%(ln)s">%(manage_format_templates)s</a></small> </td>
<td>1. <small>%(manage_output_formats)s</small> </td>
<td>2. <small><a href="format_elements_doc?ln=%(ln)s">%(format_elements_documentation)s</a></small> </td>
</tr>
</table>
<p>From here you can add, edit or delete output formats available for collections. Output formats define which template to use. <br/>To edit templates go to the <a href="format_templates_manage?ln=%(ln)s">template administration page</a>.</p>
<table class="admin_wvar" width="95%%" cellspacing="0">
<tr>
<th class="adminheaderleft" > </th>
<th class="adminheaderleft" ><a href="output_formats_manage?ln=%(ln)s&sortby=code">%(code)s</a></th>
<th class="adminheaderleft" ><a href="output_formats_manage?ln=%(ln)s&sortby=name">%(name)s</a></th>
<th class="adminheaderleft" >%(description)s</th>
<th class="adminheaderleft" >%(status)s</th>
<th class="adminheaderleft" >%(last_modification_date)s</th>
<th class="adminheadercenter" >%(action)s [<a href="%(siteurl)s/help/admin/bibformat-admin-guide#outputFormats">?</a>]</th>
</tr>
''' % {'code': _("Code"),
'name': _("Name"),
'description': _("Description"),
'status': _("Status"),
'last_modification_date': _("Last Modification Date"),
'action': _("Action"),
'ln': ln,
'manage_output_formats': _("Manage Output Formats"),
'manage_format_templates': _("Manage Format Templates"),
'format_elements_documentation': _("Format Elements Documentation"),
'menu': _("Menu"),
'siteurl': CFG_SITE_URL}
#table content: formats names, description and buttons
if len(output_formats) == 0:
out += '''<tr>
<td colspan="5" class="admintd" align="center"><em>No format</em></td>
</tr>'''
else:
line = 0
for output_format in output_formats:
format_attributes = output_format['attrs']
name = format_attributes['names']['generic']
if name == "":
name = " "
description = format_attributes['description']
if description == "":
description = " "
code = format_attributes['code']
if code == "":
code = " "
last_mod_date = output_format['last_mod_date']
status = output_format['status']
disabled = ""
if not output_format['editable']:
disabled = 'disabled="disabled"'
style = "vertical-align: middle;"
if line % 2:
style = 'vertical-align: middle; background-color: rgb(235, 247, 255);'
line += 1
row_content = '''<tr>
<td class="admintdright" style="%(style)s"> </td>
<td class="admintdleft" style="white-space: nowrap; %(style)s">
<a href="output_format_show?bfo=%(code)s">%(code)s</a>
</td>
<td class="admintdleft" style="white-space: nowrap; %(style)s">
<a href="output_format_show?bfo=%(code)s">%(name)s</a>
</td>
<td class="admintdleft"style="%(style)s" >
%(description)s
</td>
<td class="admintd" style="white-space: nowrap; %(style)s" >%(status)s</td>
<td class="admintdleft" style="white-space: nowrap;%(style)s" >%(last_mod_date)s</td>
<td class="admintd" style="white-space: nowrap; %(style)s">
<form method="POST" action="output_format_delete?ln=%(ln)s&bfo=%(code)s">
<input class="adminbutton" type="submit" value="Delete" %(disabled)s />
</form>
</td>
</tr>
''' % {'style': style,
'code': code,
'description': description,
'name': name,
'ln': ln,
'disabled': disabled,
'last_mod_date': last_mod_date,
'status': status}
out += row_content
#table footer, buttons and bottom of the page
out += '''
<tr>
<td align="right" colspan="7">
<form method="GET" action="output_format_add?ln=%(ln)s">
<input class="adminbutton" type="submit" value="%(add_output_format)s"/>
</form>
</td>
</tr>
</table>
''' % {'ln': ln,
'add_output_format': _("Add New Output Format")}
return out
def tmpl_admin_output_format_show(self, ln, code, rules, default,
format_templates, editable):
"""
Returns the content of an output format
rules is an ordered list of dict (sorted by evaluation order),
with keys 'field', 'value' and 'template'
IMPORTANT: we display rules evaluation index starting at 1 in
interface, but we start internally at 0
@param ln: language
@param code: the code of the output to show
@param name: the name of this output format
@param rules: the list of rules for this output format
@param default: the default format template of the output format
@param format_templates: the list of format_templates
@param editable: True if we let user edit, else False
@return: the management console for this output format
"""
_ = gettext_set_language(ln)
out = '''
<table class="admin_wvar" cellspacing="0">
<tr><th colspan="4" class="adminheaderleft">%(menu)s</th></tr>
<tr>
<td>0. <small><a href="output_formats_manage?ln=%(ln)s">%(close_output_format)s</a></small> </td>
<td>1. <small>%(rules)s</small> </td>
<td>2. <small><a href="output_format_show_attributes?ln=%(ln)s&bfo=%(code)s">%(modify_output_format_attributes)s</a></small> </td>
<td>3. <small><a href="output_format_show_dependencies?ln=%(ln)s&bfo=%(code)s">%(check_dependencies)s</a></small> </td>
</tr>
</table>
<p>Define here the rules that specify which template to use for a given record.</p>
''' % {'code': code,
'ln': ln,
'menu': _("menu"),
'close_output_format': _("Close Output Format"),
'rules': _("Rules"),
'modify_output_format_attributes': _("Modify Output Format Attributes"),
'check_dependencies': _("Check Dependencies")
}
out += '''
<form name="rules" action="output_format_show?ln=%(ln)s&bfo=%(code)s" method="post">
<table>
<tr>
<td>
''' % {'ln': ln, 'code': code}
disabled = ""
readonly = ""
if not editable:
disabled = 'disabled="disabled"'
readonly = 'readonly="readonly"'
if len(rules) == 0:
out += '''<p align="center"><em>No special rule</em></p>'''
line = 1
for rule in rules:
out += '''
<table align="center" class="admin_wvar" cellspacing="0">
<tr>
'''
out += '''
<td rowspan="2" class="adminheader" style="vertical-align: middle;">'''
if line > 1:
out += '''
<input type="image" src="%(siteurl)s/img/smallup.gif" alt="Increase priority of rule %(row)s" name="+ %(row)s" value="+ %(row)s" %(disabled)s/></div>
''' % {'siteurl': CFG_SITE_URL, 'row': line, 'disabled': disabled}
out += '''<div>%(row)s</div>''' % {'row': line}
if line < len(rules):
out += '''
<input type="image" src="%(siteurl)s/img/smalldown.gif" alt="Decrease priority of rule %(row)s" name="- %(row)s" value="- %(row)s" %(disabled)s/>
''' % {'siteurl': CFG_SITE_URL,
'row': line,
'disabled': disabled}
out += '''</td>
<td class="adminheaderleft"> </td>
'''
out += '''
<td class="adminheaderleft" style="white-space: nowrap;">
Use template <select name="r_tpl" %(disabled)s>''' % {'disabled': disabled}
for template in format_templates:
attrs = format_templates[template]['attrs']
attrs['template'] = template
if template.endswith('.xsl') and not \
attrs['name'].endswith(' (XSL)'):
attrs['name'] += ' (XSL)'
if template != rule['template']:
out += '''<option value="%(template)s">%(name)s</option>''' % attrs
else:
out += '''<option value="%(template)s" selected="selected">%(name)s</option>''' % attrs
if rule['template'] not in format_templates and rule['template'] != "":
#case where a non-existing format template is used in the output format
#we need to add it as an option
out += '''<option value="%s" selected="selected">%s</option>''' % (rule['template'],
rule['template'])
out += '''</select> if field
<input type="text" name="r_fld" value="%(field)s" size="10" %(readonly)s/> is equal to <input type="text" value="%(value)s" name="r_val" %(readonly)s/>
</td>
<td class="adminheaderright" style="vertical-align: middle;">
[<a href="%(siteurl)s/help/admin/bibformat-admin-guide#rulesOutputFormat">?</a>]
</td>
</tr>
''' % {'siteurl': CFG_SITE_URL,
'field': rule['field'],
'value': rule['value'],
'readonly': readonly}
out += '''
<tr>
<td colspan ="3" class="adminheaderright" style="vertical-align: middle; white-space: nowrap;">
<input type="submit" class="adminbutton" name="r_upd" value="%(remove_rule_label)s %(row)s" %(disabled)s/>
</td>
</tr>
</table>
''' % {'remove_rule_label': _("Remove Rule"),
'row': line,
'disabled': disabled}
line += 1
out += '''
<table width="100%" align="center" class="admin_wvar" cellspacing="0">
<tr>
'''
out += '''
<td width="30" class="adminheaderleft"> </td>
<td class="adminheaderleft">By default use <select id="default" name="default" %(disabled)s>''' % {'disabled': disabled}
for template in format_templates:
attrs = format_templates[template]['attrs']
attrs['template'] = template
if template.endswith('.xsl') and not \
attrs['name'].endswith(' (XSL)'):
attrs['name'] += ' (XSL)'
if template != default:
out += '''<option value="%(template)s">%(name)s</option>''' % attrs
else:
out += '''<option value="%(template)s" selected="selected">%(name)s</option>''' % attrs
if default not in format_templates and default != "":
#case where a non-existing format template is used in the output format
#we need to add it as an option (only if it is not an empty string)
out += '''<option value="%s" selected="selected">%s</option>''' % (default, default)
out += '''</select></td>
</tr>
</table>
<div align="right">
<input tabindex="6" class="adminbutton" type="submit" name="r_upd" value="%(add_new_rule_label)s" %(disabled)s/>
<input tabindex="7" class="adminbutton" type="submit" name="r_upd" value="%(save_changes_label)s" %(disabled)s/>
</div>
</td>
</tr>
</table>
</form>
''' % {'add_new_rule_label': _("Add New Rule"),
'save_changes_label': _("Save Changes"),
'disabled': disabled
}
return out
def tmpl_admin_output_format_show_attributes(self, ln,
name,
description,
content_type,
code,
names_trans,
editable,
visible):
"""
Returns a page to change output format name and description
names_trans is an ordered list of dicts with keys 'lang' and 'trans'
@param ln: language
@param name: the name of the format
@param description: the description of the format
@param code: the code of the format
@param content_type: the (MIME) content type of the output format
@param names_trans: the translations in the same order as the languages from get_languages()
@param editable: True if we let user edit, else False
@param visible: True if output format should be shown in list of available output formats
@return: editor for output format attributes
"""
_ = gettext_set_language(ln) # load the right message language
out = ""
out += '''
<table class="admin_wvar" cellspacing="0">
<tr><th colspan="4" class="adminheaderleft">%(menu)s</th></tr>
<tr>
<td>0. <small><a href="output_formats_manage?ln=%(ln)s">%(close_output_format)s</a></small> </td>
<td>1. <small><a href="output_format_show?ln=%(ln)s&bfo=%(code)s">%(rules)s</a></small> </td>
<td>2. <small>%(modify_output_format_attributes)s</small> </td>
<td>3. <small><a href="output_format_show_dependencies?ln=%(ln)s&bfo=%(code)s">%(check_dependencies)s</a></small> </td>
</tr>
</table><br/>
''' % {'ln': ln,
'code': code,
'close_output_format': _("Close Output Format"),
'rules': _("Rules"),
'modify_output_format_attributes': _("Modify Output Format Attributes"),
'check_dependencies': _("Check Dependencies"),
'menu': _("Menu")
}
disabled = ""
readonly = ""
if not editable:
disabled = 'disabled="disabled"'
readonly = 'readonly="readonly"'
out += '''
<form action="output_format_update_attributes?ln=%(ln)s&bfo=%(code)s" method="POST">
<table class="admin_wvar" cellspacing="0">
<tr>
<th colspan="2" class="adminheaderleft">
Output Format Attributes [<a href="%(siteurl)s/help/admin/bibformat-admin-guide#attrsOutputFormat">?</a>]</th>
</tr>
<tr>
<td class="admintdright"><label for="outputFormatCode">Code</label>: </td>
<td><input tabindex="0" name="code" type="text" id="outputFormatCode" maxlength="6" size="6" value="%(code)s" %(readonly)s/></td>
</tr>
<tr>
<td class="admintdright">Visibility: </td>
<td><input tabindex="1" name="visibility" type="checkbox" id="outputFormatVisibility" %(visibility)s %(disabled)s value="1" /><small><label for="outputFormatVisibility">Show in list of available output formats (on public pages)</label></small></td>
</tr>
<td class="admintdright"><label for="outputFormatContentType">Content type</label>: </td>
<td><input tabindex="2" name="content_type" type="text" id="outputFormatContentType" size="25" value="%(content_type)s" %(readonly)s/> <small>Mime content-type. Specifies how the browser should handle this output.</small></td>
<tr>
<td class="admintdright"><label for="outputFormatName">Name</label>: </td>
<td><input tabindex="3" name="name" type="text" id="outputFormatName" size="25" value="%(name)s" %(readonly)s/></td>
</tr>
''' % {'name': name,
'ln': ln,
'code': code,
'content_type': content_type,
'readonly': readonly,
'siteurl': CFG_SITE_URL,
'visibility': visible == 1 and 'checked="checked"' or '',
'disabled': disabled}
#Add translated names
i = 3
for name_trans in names_trans:
i += 1
out += '''
<tr>
<td class="admintdright"><label for="outputFormatName%(i)s">%(lang)s Name</label>: </td>
<td><input tabindex="%(i)s" name="names_trans" type="text" id="outputFormatName%(i)s" size="25" value="%(name)s" %(readonly)s/></td>
</tr>''' % {'name': name_trans['trans'],
'lang': name_trans['lang'],
'i': i,
'readonly': readonly}
#Description and end of page
out += '''
<tr>
<td class="admintdright" valign="top"><label for="outputFormatDescription">Description</label>: </td>
<td><textarea tabindex="%(tabindexdesc)s" name="description" id="outputFormatDescription" rows="4" cols="25" %(readonly)s>%(description)s</textarea> </td>
</tr>
<tr>
<td colspan="2" align="right"><input tabindex="%(tabindexbutton)s" class="adminbutton" type="submit" value="Update Output Format Attributes" %(disabled)s/></td>
</tr>
</table>
</form>
''' % {'description': description,
'tabindexdesc': i + 1,
'tabindexbutton': i + 2,
'readonly': readonly,
'disabled': disabled}
return out
def tmpl_admin_output_format_show_dependencies(self, ln, name, code, format_templates):
"""
Shows the dependencies of the given format.
@param ln: language
@param name: the name of the output format
@param code: the code of the output format
@param format_templates: format templates that depend on this format (and also elements and tags)
@return: HTML markup
"""
_ = gettext_set_language(ln) # load the right message language
out = '''
<table class="admin_wvar">
<tr><th colspan="4" class="adminheaderleft" cellspacing="0">%(menu)s</th></tr>
<tr>
<td>0. <small><a href="output_formats_manage?ln=%(ln)s">%(close_output_format)s</a></small> </td>
<td>1. <small><a href="output_format_show?ln=%(ln)s&bfo=%(code)s">%(rules)s</a></small> </td>
<td>2. <small><a href="output_format_show_attributes?ln=%(ln)s&bfo=%(code)s">%(modify_output_format_attributes)s</a></small> </td>
<td>3. <small>%(check_dependencies)s</small> </td>
</tr>
</table><br/>
<table width="90%%" class="admin_wvar" cellspacing="0"><tr>
<th class="adminheaderleft">Format Templates that use %(name)s</th>
<th class="adminheaderleft">Format Elements used by %(name)s</th>
<th class="adminheaderleft">Tags Called*</th>
</tr>
''' % {'name': name,
'code': code,
'ln': ln,
'close_output_format': _("Close Output Format"),
'rules': _("Rules"),
'modify_output_format_attributes': _("Modify Output Format Attributes"),
'check_dependencies': _("Check Dependencies"),
'menu': _("Menu")
}
if len(format_templates) == 0:
out += '''<tr><td colspan="3"><p align="center">
<i>This output format uses no format template.</i></p></td></tr>'''
for format_template in format_templates:
name = format_template['name']
filename = format_template['filename']
out += '''<tr><td><a href="format_template_show?bft=%(filename)s&ln=%(ln)s">%(name)s</a></td>
<td> </td><td> </td></tr>''' % {'filename': filename,
'name': name,
'ln': ln}
for format_element in format_template['elements']:
name = format_element['name']
filename = format_element['filename']
out += '''<tr><td> </td>
<td><a href="format_elements_doc?ln=%(ln)s#%(anchor)s">%(name)s</a></td>
<td> </td></tr>''' % {'anchor': name.upper(),
'name': name,
'ln': ln}
for tag in format_element['tags']:
out += '''<tr><td> </td><td> </td>
<td>%(tag)s</td></tr>''' % {'tag': tag}
out += '''
</table>
<b>*Note</b>: Some tags linked with this format template might not be shown. Check manually.
'''
return out
def tmpl_admin_format_elements_documentation(self, ln, format_elements):
"""
Returns the main management console for format elements. Includes list of formats elements and
associated administration tools.
@param ln: language
@param format_elements: a list of dictionaries with formats elements attributes
@return: main management console as html
"""
_ = gettext_set_language(ln) # load the right message language
#top of the page and table header
out = '''
<table class="admin_wvar" cellspacing="0">
<tr><th colspan="4" class="adminheaderleft">%(menu)s</th></tr>
<tr>
<td>0. <small><a href="format_templates_manage?ln=%(ln)s">%(manage_format_templates)s</a></small> </td>
<td>1. <small><a href="output_formats_manage?ln=%(ln)s">%(manage_output_formats)s</a></small> </td>
<td>2. <small>%(format_elements_documentation)s</small> </td>
</tr>
</table>
<p>Here you can read the APIs of the format elements, the elementary bricks for formats.</p>
''' % {'ln': ln,
'menu': _("Menu"),
'manage_output_formats': _("Manage Output Formats"),
'manage_format_templates': _("Manage Format Templates"),
'format_elements_documentation': _("Format Elements Documentation"),
}
#table content: formats names, description and actions
if len(format_elements) == 0:
out += '''
<em>No format elements found</em>
'''
else:
#Print summary of elements (name + decription)
out += '''<h2>Summary table of elements</h2>'''
out += '''<table width="90%">'''
for format_element in format_elements:
format_attributes = format_element['attrs']
out += '''
<tr>
<td>
<code><a href="#%(name)s"><BFE_%(name)s/></a></code>
</td>
<td>
%(description)s
</td>
</tr>
''' % format_attributes
out += "</table>"
#Print details of elements
out += '''<h2>Details of elements</h2>'''
for format_element in format_elements:
format_attributes = format_element['attrs']
element_name = format_attributes['name']
out += self.tmpl_admin_print_format_element_documentation(ln, element_name, format_attributes)
#table footer, buttons and bottom of the page
out += '''
<table align="center" width="95%">
</table>'''
return out
def tmpl_admin_print_format_element_documentation(self, ln, name, attributes, print_see_also=True):
"""
Prints the formatted documentation of a single element. Used in the main documentation of elements and
in the creation of the Dreamweaver floater.
@param ln: language
@param name: the name of the element
@param attributes: the attributes of the element, as returned by get_format_element_attrs_from_*
@param print_see_also: if True, prints links to other sections related to element
@return: HTML markup
"""
params_names = ""
for param in attributes['params']:
params_names += "<b>"+param['name'] +'</b>="..." '
out = '''
<a name="%(name)s"></a><h3>%(name)s</h3>
<b><BFE_%(name)s</b> %(params_names)s<b>/></b><br/><br/>
<em>%(description)s.</em><br/><br/>
<b>Parameters:</b><br/>
''' % {'params_names': params_names,
'name': name,
'description': attributes['description']}
for param in attributes['params']:
out += '''
<code>%(name)s</code> - %(description)s. ''' % param
if param['default'] != "":
default = cgi.escape(str(param['default']))
if default.strip() == "":
default = " "
out += '''
Default value is «<code>%s</code>»
''' % default
out += '<br/>'
for param in attributes['builtin_params']:
out += '''
<code>%(name)s</code> - %(description)s. ''' % param
if param['default'] != "":
default = cgi.escape(str(param['default']))
if default.strip() == "":
default = " "
out += '''
Default value is «<code>%s</code>»
''' % default
out += '<br/>'
if print_see_also:
out += '''<br/>
<b>See also:</b><br/>'''
for element in attributes['seealso']:
element_name = element.split('.')[0].upper()
out += '''
<a href="#%(name)s">Element <em>%(name)s</em></a><br/>''' % {'name': element_name}
out += '''
<a href ="format_element_show_dependencies?ln=%(ln)s&bfe=%(bfe)s">Dependencies of this element</a><br/>
<a href ="validate_format?ln=%(ln)s&bfe=%(bfe)s">The correctness of this element</a><br/>
<a href ="format_element_test?ln=%(ln)s&bfe=%(bfe)s">Test this element</a><br/>
''' % {'ln': ln, 'bfe': name}
return out
def tmpl_admin_format_element_show_dependencies(self, ln, name, format_templates, tags):
"""
Shows the dependencies of the given format element
@param ln: language
@param name: the name of the element
@param format_templates: format templates that depend on this element
@param tags: the tags that are called by this format element
@return: HTML markup
"""
out = '''
<p>Go back to <a href="format_elements_doc?ln=%(ln)s#%(name)s">documentation</a></p>
''' % {'ln': ln, 'name': name.upper()}
out += ''' <table width="90%" class="admin_wvar" cellspacing="0"><tr>'''
out += '''
<th class="adminheaderleft">Format Templates that use %(name)s</th>
<th class="adminheaderleft">Tags Called*</th>
</tr>
<tr>
<td> <br/>''' % {"name": name}
#Print format elements (and tags)
if len(format_templates) == 0:
out += '''<p align="center">
<i>This format element is not used in any format template.</i></p>'''
for format_template in format_templates:
name = format_template['name']
filename = format_template['filename']
out += '''<a href="format_template_show?ln=%(ln)s&bft=%(filename)s">%(name)s</a><br/>''' % {'filename': filename,
'name': name,
'ln': ln}
#Print tags
out += "</td><td> <br/>"
if len(tags) == 0:
out += '''<p align="center">
<i>This format element uses no tag.</i></p>'''
for tag in tags:
out += '''%(tag)s<br/>''' % {'tag': tag}
out += '''
</td>
</tr>
</table>
<b>*Note</b>: Some tags linked with this format template might not be shown. Check manually.
'''
return out
def tmpl_admin_format_element_test(self, ln, bfe, description, param_names, param_values, param_descriptions, result):
"""
Prints a page where the user can test the given format element with his own parameters.
@param ln: language
@param bfe: the format element name
@param description: a description of the element
@param param_names: a list of parameters names/labels
@param param_values: a list of values for parameters
@param param_descriptions: a list of description for parameters
@param result: the result of the evaluation
@return: HTML markup
"""
out = '''
<p>Go back to <a href="format_elements_doc?ln=%(ln)s#%(name)s">documentation</a></p>
''' % {'ln': ln, 'name': bfe.upper()}
out += '''
<h3><BFE_%(bfe)s /></h3>
<p>%(description)s</p>
<table width="100%%"><tr><td>
<form method="post" action="format_element_test?ln=%(ln)s&bfe=%(bfe)s">
<table>
''' % {'bfe': bfe, 'ln': ln, 'description': description}
for i in range(len(param_names)):
out += '''
<tr>
<td class="admintdright">%(name)s</td>
<td class="admintdright"><input type="text" name="param_values" value="%(value)s"/></td>
<td class="admintdleft">%(description)s </td>
</tr>
''' % {'name': cgi.escape(param_names[i]),
'value': cgi.escape(param_values[i], quote=True),
'description': param_descriptions[i]}
out += '''
<tr><td colspan="2" class="admintdright"><input type="submit" class="adminbutton" value="Test!"/></td>
<td> </td>
</tr>
</table>
</form>
<fieldset style="display:inline;margin-left:auto;margin-right:auto;">
<legend>Result:</legend>%(result)s</fieldset>
''' % {'result': result}
out += '''
</td></tr><tr><td>
'''
#out += self.tmpl_admin_print_format_element_documentation(ln, bfe, attributes, False)
out += '''</td></tr></table>'''
return out
def tmpl_admin_add_format_element(self, ln):
"""
Shows how to add a format element (mainly doc)
@param ln: language
@return: HTML markup
"""
_ = gettext_set_language(ln) # load the right message language
out = '''
<p>To add a new basic element (one that only fetches the value of a field, without special post-processing), go to the <a href="%(siteurl)s/admin/bibindex/bibindexadmin.py/field">BibEdit "Manage Logical Fields"</a> page and add a name for a field. Make sure that the name is unique and corresponds well to the field. For example, to add an element that fetches the value of field 245__%%, add a new logical field with name "title" and field "245__%%". Then, in your template, call BFE_TITLE to print the title.</p>
<p>To add a new complex element (e.g. special formatting of the field, a condition on the value, etc.) you must go to the lib/python/invenio/bibformat_elements directory of your Invenio installation and add a new format element file. Read the documentation for more information.</p>
''' % {'siteurl': CFG_SITE_URL}
return out
def tmpl_dreamweaver_floater(self, ln, format_elements):
"""
Returns the content of the BibFormat palette for Dreamweaver. This
'floater' will let users of Dreamweaver to insert Format elements
into their code right from the floater.
@param ln: language
@param format_elements: an ordered list of format elements structures as returned by get_format_elements
@return: HTML markup (according to Dreamweaver specs)
"""
names_list = [] # list of element names such as ['Authors', 'Title']
codes_list = [] # list of element code such as ['<BFE_AUTHORS limit="" separator="," />', '<BFE_TITLE />']
docs_list = [] # list of HTML doc for each element
for format_element in format_elements:
format_attributes = format_element['attrs']
name = format_attributes['name']
#description = format_attributes['description']
params = [x['name'] + '="'+str(x['default'])+'"' for x in format_attributes['params']]
builtin_params = [x['name'] + '="'+str(x['default'])+'"' for x in format_attributes['builtin_params']]
code = ("<BFE_" + name + ' ' + ' '.join(builtin_params)+ ' ' + ' '.join(params) +"/>").replace("'", r"\'")
doc = self.tmpl_admin_print_format_element_documentation(ln, name, format_attributes, print_see_also=False).replace("'", r"\'")
names_list.append(name)
codes_list.append(code)
docs_list.append(doc)
out = '''
<!DOCTYPE HTML SYSTEM "-//Macromedia//DWExtension layout-engine5.0//floater">
<html>
<head>
<!-- This file is to be used as floating panel for Dreamweaver.
To install, drag and drop inside /Configuration/Floaters of your Dreamweaver
application directory. You also have to enable a menu to open the floater:
Edit file Menu.xml located inside /Configuration/Menus of your Dreamweaver
application directory and copy-paste the following line in the menu you want
(typically inside tag 'menu' with attribute id = 'DWMenu_Window_Others'):
<menuitem name="BibFormat Elements" enabled="true" command="dw.toggleFloater('BibFormat_floater.html')" checked="dw.getFloaterVisibility('BibFormat_floater.html')" />
-->
<title>BibFormat Elements</title>
<script language="JavaScript">
var docs = new Array(%(docs)s);
var codes = new Array(%(codes)s);
function selectionChanged(){
// get the selected node
var theDOM = dw.getDocumentDOM();
var theNode = theDOM.getSelectedNode();
// check if node is a BibFormat Element
if (theNode.nodeType == Node.COMMENT_NODE && theNode.data.length >= 5 && theNode.data.toLowerCase().substring(0,5) == "<bfe_"){
var names = document.elementsList.options;
for (i=0;i<names.length; i++){
if (names[i].text.toLowerCase() == theNode.data.split(' ')[0].toLowerCase() ||
names[i].text.toLowerCase() == theNode.data.split(' ')[0].toLowerCase().substring(5,theNode.data.length)){
document.elementsList.selectedIndex = i;
selectElement(document.elementsList);
return;
}
}
}
}
function isAvailableInCodeView(){
return true;
}
function selectElement(elementsList){
document.infoBFE.innerHTML = docs[elementsList.selectedIndex];
}
function insertElement(){
// insert selection into code
var element_code = codes[document.elementsList.selectedIndex];
// get the DOM
var theDOM = dw.getDocumentDOM();
var theDocEl = theDOM.documentElement;
var theWholeDoc = theDocEl.outerHTML;
// Get the offsets of the selection
var theSel = theDOM.getSelection();
theDocEl.outerHTML = theWholeDoc.substring(0,theSel[0]) + element_code + theWholeDoc.substring(theSel[1]);
}
</script>
</head>
<body>
<table width="100%%" border="0" cellspacing="0" cellpadding="3">
<tr>
<td valign="top">
<select name="elementsList" id="elementsList" size="15" onChange="selectElement(this)">
%(names)s
</select><br/>
<input type="submit" name="Submit" value="Insert" onClick="insertElement()">
</td>
<td valign="top" width="100%%">
<div id="infoBFE">
<center>No Format Element selected. Select one from the list on the left.</center>
</div>
</td>
</tr>
</table>
</body>
</html>
''' % {'docs': ', '.join(["'"+x+"'" for x in docs_list]).replace('\n', '\\n'),
'codes': ', '.join(["'"+x+"'" for x in codes_list]).replace('\n', '\\n'),
'names': '\n'.join(['<option value="'+x+'">'+x+'</option>' for x in names_list])}
return out
def tmpl_admin_validate_format(self, ln, errors):
"""
Prints the errors of the validation of a format (might be any
kind of format)
@param ln: language
@param errors: a list of tuples (error code, string error message)
@return: HTML markup
"""
_ = gettext_set_language(ln) # load the right message language
out = ""
if len(errors) == 0:
out += '''<span style="color: rgb(0, 255, 0);" >%s.</span>''' % _('No problem found with format')
elif len(errors) == 1:
out += '''<span style="color: rgb(255, 0, 0);" >%s:</span><br/>''' % _('An error has been found')
else:
out += '''<span style="color: rgb(255, 0, 0);" >%s:</span><br/>''' % _('The following errors have been found')
for error in errors:
out += error + "<br/>"
return out
def tmpl_admin_dialog_box(self, url, title, message, options):
"""
Prints a dialog box with given title, message and options
@param url: the url of the page that must process the result of the dialog box
@param ln: language
@param title: the title of the dialog box
@param message: a formatted message to display inside dialog box
@param options: a list of string options to display as button to the user
@return: HTML markup
"""
out = ""
out += '''
<div style="text-align:center;">
<fieldset style="display:inline;margin-left:auto;margin-right:auto;">
<legend>%(title)s:</legend>
<p>%(message)s</p>
<form method="post" action="%(url)s">
''' % {'title': title,
'message': message,
'url': url}
for option in options:
out += '''<input type="submit" class="adminbutton" name="chosen_option" value="%(value)s" /> ''' % {'value': option}
out += '''</form></fieldset></div>'''
return out
| gpl-2.0 | 206,509,986,830,123,460 | 42.911253 | 507 | 0.525475 | false |
claudio-idra/subterfuge | dbconfigure.py | 22 | 1470 | #####################################################################
#This file is used to fully reset the Subterfuge database
#It should only be necessary due to significant development changes
#Usage MUST be as follows:
#rm db && rm base_db
#./manage.py syncdb
#python dbconfigure.py
#This will rebuild the Database from scratch
#####################################################################
import os
from django.conf import settings
settings.configure(DATABASE_ENGINE="sqlite3",
DATABASE_HOST="",
DATABASE_NAME="db",
DATABASE_USER="",
DATABASE_PASSWORD="")
from django.db import models
from main.models import *
from modules.models import *
#Create Settings Data Space
table = setup(autoconf = "no")
table.save()
#Build Default Settings
print "Setting Database Default Configuration..."
setup.objects.update(autoconf = "yes")
setup.objects.update(ploadrate = "3")
setup.objects.update(injectrate = "6")
setup.objects.update(arprate = "8")
setup.objects.update(smartarp = "yes")
#Build Netview Module
print "Configuring Database Space for Modules..."
print "Building HTTP Code Injection Module"
newmod = installed(name = "httpcodeinjection")
newmod.save()
print "Building Tunnel Block Module"
newmod = installed(name = "tunnelblock")
newmod.save()
print "Building Denial of Service Module"
newmod = installed(name = "dos")
newmod.save()
| gpl-3.0 | -8,627,531,776,754,089,000 | 29 | 69 | 0.633333 | false |
emrecamasuvi/appengineTmp | lib/flask/wrappers.py | 773 | 6709 | # -*- coding: utf-8 -*-
"""
flask.wrappers
~~~~~~~~~~~~~~
Implements the WSGI wrappers (request and response).
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from werkzeug.wrappers import Request as RequestBase, Response as ResponseBase
from werkzeug.exceptions import BadRequest
from .debughelpers import attach_enctype_error_multidict
from . import json
from .globals import _request_ctx_stack
_missing = object()
def _get_data(req, cache):
getter = getattr(req, 'get_data', None)
if getter is not None:
return getter(cache=cache)
return req.data
class Request(RequestBase):
"""The request object used by default in Flask. Remembers the
matched endpoint and view arguments.
It is what ends up as :class:`~flask.request`. If you want to replace
the request object used you can subclass this and set
:attr:`~flask.Flask.request_class` to your subclass.
The request object is a :class:`~werkzeug.wrappers.Request` subclass and
provides all of the attributes Werkzeug defines plus a few Flask
specific ones.
"""
#: the internal URL rule that matched the request. This can be
#: useful to inspect which methods are allowed for the URL from
#: a before/after handler (``request.url_rule.methods``) etc.
#:
#: .. versionadded:: 0.6
url_rule = None
#: a dict of view arguments that matched the request. If an exception
#: happened when matching, this will be `None`.
view_args = None
#: if matching the URL failed, this is the exception that will be
#: raised / was raised as part of the request handling. This is
#: usually a :exc:`~werkzeug.exceptions.NotFound` exception or
#: something similar.
routing_exception = None
# switched by the request context until 1.0 to opt in deprecated
# module functionality
_is_old_module = False
@property
def max_content_length(self):
"""Read-only view of the `MAX_CONTENT_LENGTH` config key."""
ctx = _request_ctx_stack.top
if ctx is not None:
return ctx.app.config['MAX_CONTENT_LENGTH']
@property
def endpoint(self):
"""The endpoint that matched the request. This in combination with
:attr:`view_args` can be used to reconstruct the same or a
modified URL. If an exception happened when matching, this will
be `None`.
"""
if self.url_rule is not None:
return self.url_rule.endpoint
@property
def module(self):
"""The name of the current module if the request was dispatched
to an actual module. This is deprecated functionality, use blueprints
instead.
"""
from warnings import warn
warn(DeprecationWarning('modules were deprecated in favor of '
'blueprints. Use request.blueprint '
'instead.'), stacklevel=2)
if self._is_old_module:
return self.blueprint
@property
def blueprint(self):
"""The name of the current blueprint"""
if self.url_rule and '.' in self.url_rule.endpoint:
return self.url_rule.endpoint.rsplit('.', 1)[0]
@property
def json(self):
"""If the mimetype is `application/json` this will contain the
parsed JSON data. Otherwise this will be `None`.
The :meth:`get_json` method should be used instead.
"""
# XXX: deprecate property
return self.get_json()
def get_json(self, force=False, silent=False, cache=True):
"""Parses the incoming JSON request data and returns it. If
parsing fails the :meth:`on_json_loading_failed` method on the
request object will be invoked. By default this function will
only load the json data if the mimetype is ``application/json``
but this can be overridden by the `force` parameter.
:param force: if set to `True` the mimetype is ignored.
:param silent: if set to `True` this method will fail silently
and return `None`.
:param cache: if set to `True` the parsed JSON data is remembered
on the request.
"""
rv = getattr(self, '_cached_json', _missing)
if rv is not _missing:
return rv
if self.mimetype != 'application/json' and not force:
return None
# We accept a request charset against the specification as
# certain clients have been using this in the past. This
# fits our general approach of being nice in what we accept
# and strict in what we send out.
request_charset = self.mimetype_params.get('charset')
try:
data = _get_data(self, cache)
if request_charset is not None:
rv = json.loads(data, encoding=request_charset)
else:
rv = json.loads(data)
except ValueError as e:
if silent:
rv = None
else:
rv = self.on_json_loading_failed(e)
if cache:
self._cached_json = rv
return rv
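# A minimal usage sketch of get_json() (not part of this class; `app` and the
# `/echo` route are hypothetical):
#
#   from flask import Flask, request, jsonify
#   app = Flask(__name__)
#
#   @app.route('/echo', methods=['POST'])
#   def echo():
#       payload = request.get_json(silent=True)  # None if parsing fails
#       if payload is None:
#           return 'expected application/json', 400
#       return jsonify(received=payload)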
def on_json_loading_failed(self, e):
"""Called if decoding of the JSON data failed. The return value of
this method is used by :meth:`get_json` when an error occurred. The
default implementation just raises a :class:`BadRequest` exception.
.. versionchanged:: 0.10
Removed buggy previous behavior of generating a random JSON
response. If you want that behavior back you can trivially
add it by subclassing.
.. versionadded:: 0.8
"""
raise BadRequest()
def _load_form_data(self):
RequestBase._load_form_data(self)
# in debug mode we're replacing the files multidict with an ad-hoc
# subclass that raises a different error for key errors.
ctx = _request_ctx_stack.top
if ctx is not None and ctx.app.debug and \
self.mimetype != 'multipart/form-data' and not self.files:
attach_enctype_error_multidict(self)
class Response(ResponseBase):
"""The response object that is used by default in Flask. Works like the
response object from Werkzeug but is set to have an HTML mimetype by
default. Quite often you don't have to create this object yourself because
:meth:`~flask.Flask.make_response` will take care of that for you.
If you want to replace the response object used you can subclass this and
set :attr:`~flask.Flask.response_class` to your subclass.
"""
default_mimetype = 'text/html'
| apache-2.0 | -4,530,983,491,010,492,000 | 35.461957 | 79 | 0.631987 | false |
execunix/vinos | external/bsd/wpa/dist/wpa_supplicant/examples/p2p/p2p_find.py | 29 | 4714 | #!/usr/bin/python
# Tests p2p_find
# Will list all devices found/lost within a time frame (timeout)
# Then the program will exit
######### MAY NEED TO RUN AS SUDO #############
import dbus
import sys, os
import time
import gobject
import threading
import getopt
from dbus.mainloop.glib import DBusGMainLoop
def usage():
print "Usage:"
print " %s -i <interface_name> [-t <timeout>] \ " \
% sys.argv[0]
print " [-w <wpas_dbus_interface>]"
print "Options:"
print " -i = interface name"
print " -t = timeout = 0s (infinite)"
print " -w = wpas dbus interface = fi.w1.wpa_supplicant1"
print "Example:"
print " %s -i wlan0 -t 10" % sys.argv[0]
# Required Signals
def deviceFound(devicepath):
print "Device found: %s" % (devicepath)
def deviceLost(devicepath):
print "Device lost: %s" % (devicepath)
class P2P_Find (threading.Thread):
# Needed Variables
global bus
global wpas_object
global interface_object
global p2p_interface
global interface_name
global wpas
global wpas_dbus_interface
global timeout
global path
# Dbus Paths
global wpas_dbus_opath
global wpas_dbus_interfaces_opath
global wpas_dbus_interfaces_interface
global wpas_dbus_interfaces_p2pdevice
# Constructor
def __init__(self,interface_name,wpas_dbus_interface,timeout):
# Initializes variables and threads
self.timeout = int(timeout)
self.interface_name = interface_name
self.wpas_dbus_interface = wpas_dbus_interface
# Initializes thread and daemon allows for ctrl-c kill
threading.Thread.__init__(self)
self.daemon = True
# Generating interface/object paths
self.wpas_dbus_opath = "/" + \
self.wpas_dbus_interface.replace(".","/")
self.wpas_wpas_dbus_interfaces_opath = self.wpas_dbus_opath + \
"/Interfaces"
self.wpas_dbus_interfaces_interface = \
self.wpas_dbus_interface + ".Interface"
self.wpas_dbus_interfaces_p2pdevice = \
self.wpas_dbus_interfaces_interface \
+ ".P2PDevice"
# Getting interfaces and objects
DBusGMainLoop(set_as_default=True)
self.bus = dbus.SystemBus()
self.wpas_object = self.bus.get_object(
self.wpas_dbus_interface,
self.wpas_dbus_opath)
self.wpas = dbus.Interface(self.wpas_object,
self.wpas_dbus_interface)
# Try to see if supplicant knows about interface
# If not, throw an exception
try:
self.path = self.wpas.GetInterface(
self.interface_name)
except dbus.DBusException, exc:
error = 'Error:\n Interface ' + self.interface_name \
+ ' was not found'
print error
usage()
os._exit(0)
self.interface_object = self.bus.get_object(
self.wpas_dbus_interface, self.path)
self.p2p_interface = dbus.Interface(self.interface_object,
self.wpas_dbus_interfaces_p2pdevice)
#Adds listeners for find and lost
self.bus.add_signal_receiver(deviceFound,
dbus_interface=self.wpas_dbus_interfaces_p2pdevice,
signal_name="DeviceFound")
self.bus.add_signal_receiver(deviceLost,
dbus_interface=self.wpas_dbus_interfaces_p2pdevice,
signal_name="DeviceLost")
# Sets up p2p_find
P2PFindDict = dbus.Dictionary(
{'Timeout':int(self.timeout)})
self.p2p_interface.Find(P2PFindDict)
# Run p2p_find
def run(self):
# Allows other threads to keep working while MainLoop runs
# Required for timeout implementation
gobject.MainLoop().get_context().iteration(True)
gobject.threads_init()
gobject.MainLoop().run()
if __name__ == "__main__":
# Defaults for optional inputs
timeout = 0
wpas_dbus_interface = 'fi.w1.wpa_supplicant1'
# interface_name is required
interface_name = None
# Using getopts to handle options
try:
options, args = getopt.getopt(sys.argv[1:],"hi:t:w:")
except getopt.GetoptError:
usage()
quit()
	# If there's a switch, override the default option
for key, value in options:
# Help
if (key == "-h"):
usage()
quit()
# Interface Name
elif (key == "-i"):
interface_name = value
# Timeout
elif (key == "-t"):
if ( int(value) >= 0):
timeout = value
else:
print "Error:\n Timeout cannot be negative"
usage()
quit()
# Dbus interface
elif (key == "-w"):
wpas_dbus_interface = value
else:
assert False, "unhandled option"
# Interface name is required and was not given
if (interface_name == None):
print "Error:\n interface_name is required"
usage()
quit()
# Constructor
try:
p2p_find_test = P2P_Find(interface_name, wpas_dbus_interface, timeout)
except:
print "Error:\n Invalid wpas_dbus_interface"
usage()
quit()
# Start P2P_Find
p2p_find_test.start()
try:
# If timeout is 0, then run forever
if (timeout == 0):
while(True):
pass
# Else sleep for (timeout)
else:
time.sleep(p2p_find_test.timeout)
except:
pass
quit()
| apache-2.0 | -3,100,834,918,593,524,700 | 23.552083 | 72 | 0.690709 | false |
moandcompany/luigi | examples/terasort.py | 59 | 3445 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
import luigi
import luigi.contrib.hadoop_jar
import luigi.contrib.hdfs
logger = logging.getLogger('luigi-interface')
def hadoop_examples_jar():
config = luigi.configuration.get_config()
examples_jar = config.get('hadoop', 'examples-jar')
if not examples_jar:
logger.error("You must specify hadoop:examples-jar in luigi.cfg")
raise
if not os.path.exists(examples_jar):
logger.error("Can't find example jar: " + examples_jar)
raise
return examples_jar
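# A hypothetical luigi.cfg entry satisfying the check above (the jar path is an
# assumption, not taken from this repository):
#
#   [hadoop]
#   examples-jar: /usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar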
DEFAULT_TERASORT_IN = '/tmp/terasort-in'
DEFAULT_TERASORT_OUT = '/tmp/terasort-out'
class TeraGen(luigi.contrib.hadoop_jar.HadoopJarJobTask):
"""
Runs TeraGen, by default with 1TB of data (10B records)
"""
records = luigi.Parameter(default="10000000000",
description="Number of records, each record is 100 Bytes")
terasort_in = luigi.Parameter(default=DEFAULT_TERASORT_IN,
description="directory to store terasort input into.")
def output(self):
"""
Returns the target output for this task.
In this case, a successful execution of this task will create a file in HDFS.
:return: the target output for this task.
:rtype: object (:py:class:`~luigi.target.Target`)
"""
return luigi.contrib.hdfs.HdfsTarget(self.terasort_in)
def jar(self):
return hadoop_examples_jar()
def main(self):
return "teragen"
def args(self):
        # First arg is 10B -- each record is 100 bytes
return [self.records, self.output()]
class TeraSort(luigi.contrib.hadoop_jar.HadoopJarJobTask):
"""
    Runs TeraSort on the output of TeraGen.
"""
terasort_in = luigi.Parameter(default=DEFAULT_TERASORT_IN,
description="directory to store terasort input into.")
terasort_out = luigi.Parameter(default=DEFAULT_TERASORT_OUT,
description="directory to store terasort output into.")
def requires(self):
"""
This task's dependencies:
* :py:class:`~.TeraGen`
:return: object (:py:class:`luigi.task.Task`)
"""
return TeraGen(terasort_in=self.terasort_in)
def output(self):
"""
Returns the target output for this task.
In this case, a successful execution of this task will create a file in HDFS.
:return: the target output for this task.
:rtype: object (:py:class:`~luigi.target.Target`)
"""
return luigi.contrib.hdfs.HdfsTarget(self.terasort_out)
def jar(self):
return hadoop_examples_jar()
def main(self):
return "terasort"
def args(self):
return [self.input(), self.output()]
if __name__ == '__main__':
luigi.run()
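# Illustrative command line only (module name and scheduler flag are assumptions):
#
#   python terasort.py TeraSort --local-scheduler
#
# TeraSort will schedule TeraGen first via requires() above.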
| apache-2.0 | 3,561,607,410,017,211,000 | 28.698276 | 90 | 0.643251 | false |
intgr/django-cms | cms/migrations/0045_auto__add_field_page_application_urls.py | 15 | 16500 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
try:
from django.contrib.auth import get_user_model
except ImportError: # django < 1.5
from django.contrib.auth.models import User
else:
User = get_user_model()
user_orm_label = '%s.%s' % (User._meta.app_label, User._meta.object_name)
user_model_label = '%s.%s' % (User._meta.app_label, User._meta.module_name)
user_ptr_name = '%s_ptr' % User._meta.object_name.lower()
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Page.application_urls'
db.add_column(u'cms_page', 'application_urls',
self.gf('django.db.models.fields.CharField')(db_index=True, max_length=200, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Page.application_urls'
db.delete_column(u'cms_page', 'application_urls')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
user_model_label: {
'Meta': {'object_name': User.__name__, 'db_table': "'%s'" % User._meta.db_table},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.globalpagepermission': {
'Meta': {'object_name': 'GlobalPagePermission'},
'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_recover_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'})
},
'cms.page': {
'Meta': {'ordering': "('tree_id', 'lft')", 'object_name': 'Page'},
'application_urls': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'limit_visibility_in_menu': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}),
'placeholders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}),
'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'revision_id': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'template': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.pagemoderatorstate': {
'Meta': {'ordering': "('page', 'action', '-created')", 'object_name': 'PageModeratorState'},
'action': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '1000', 'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label, 'null': 'True'})
},
'cms.pagepermission': {
'Meta': {'object_name': 'PagePermission'},
'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'grant_on': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'})
},
'cms.pageuser': {
'Meta': {'object_name': 'PageUser', '_ormbases': [user_orm_label]},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_users'", 'to': "orm['%s']" % user_orm_label}),
u'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['%s']" % user_orm_label, 'unique': 'True', 'primary_key': 'True'})
},
'cms.pageusergroup': {
'Meta': {'object_name': 'PageUserGroup', '_ormbases': [u'auth.Group']},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_usergroups'", 'to': "orm['%s']" % user_orm_label}),
u'group_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.Group']", 'unique': 'True', 'primary_key': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
'cms.title': {
'Meta': {'unique_together': "(('language', 'page'),)", 'object_name': 'Title'},
'application_urls': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'has_url_overwrite': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'menu_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'meta_description': ('django.db.models.fields.TextField', [], {'max_length': '155', 'null': 'True', 'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'title_set'", 'to': "orm['cms.Page']"}),
'page_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'redirect': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'cms.usersettings': {
'Meta': {'object_name': 'UserSettings'},
'clipboard': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['cms'] | bsd-3-clause | 2,625,375,170,202,780,700 | 80.285714 | 187 | 0.555455 | false |
izak/xhtml2pdf | demo/tgpisa/tgpisa/commands.py | 155 | 1630 | # -*- coding: utf-8 -*-
"""This module contains functions called from console script entry points."""
import os
import sys
from os.path import dirname, exists, join
import pkg_resources
pkg_resources.require("TurboGears")
import turbogears
import cherrypy
cherrypy.lowercase_api = True
class ConfigurationError(Exception):
pass
def start():
"""Start the CherryPy application server."""
setupdir = dirname(dirname(__file__))
curdir = os.getcwd()
# First look on the command line for a desired config file,
# if it's not on the command line, then look for 'setup.py'
    # in the project directory (the parent of this package). If there, load configuration
# from a file called 'dev.cfg'. If it's not there, the project
# is probably installed and we'll look first for a file called
# 'prod.cfg' in the current directory and then for a default
# config file called 'default.cfg' packaged in the egg.
if len(sys.argv) > 1:
configfile = sys.argv[1]
elif exists(join(setupdir, "setup.py")):
configfile = join(setupdir, "dev.cfg")
elif exists(join(curdir, "prod.cfg")):
configfile = join(curdir, "prod.cfg")
else:
try:
configfile = pkg_resources.resource_filename(
pkg_resources.Requirement.parse("tgpisa"),
"config/default.cfg")
except pkg_resources.DistributionNotFound:
raise ConfigurationError("Could not find default configuration.")
turbogears.update_config(configfile=configfile,
modulename="tgpisa.config")
from tgpisa.controllers import Root
turbogears.start_server(Root())
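# The console-script wiring for start() is assumed to look roughly like this in
# the project's setup.py (illustrative only, not taken from this repository):
#
#   entry_points = {'console_scripts': ['start-tgpisa = tgpisa.commands:start']}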
| apache-2.0 | -2,788,601,249,287,152,000 | 30.346154 | 77 | 0.676687 | false |
anksp21/Community-Zenpacks | ZenPacks.community.zenAppProfiler/ZenPacks/community/zenAppProfiler/__init__.py | 2 | 5384 | import Globals
import os.path
skinsDir = os.path.join(os.path.dirname(__file__), 'skins')
from Products.CMFCore.DirectoryView import registerDirectory
if os.path.isdir(skinsDir):
registerDirectory(skinsDir, globals())
import transaction
from Products.ZenModel.ZenossInfo import ZenossInfo
from Products.ZenModel.ZenPack import ZenPackBase
from Products.ZenModel.ZenMenu import ZenMenu
class ZenPack(ZenPackBase):
""" ZenPack loader
"""
profilerTab = { 'id' : 'profileorganizer'
, 'name' : 'Profiles'
, 'action' : 'Profiles/viewProfileOrganizer'
, 'permissions' : ( "Manage DMD", )
}
def addProfilerTab(self,app):
dmdloc = self.dmd
finfo = dmdloc.factory_type_information
actions = list(finfo[0]['actions'])
for i in range(len(actions)):
if (self.profilerTab['id'] in actions[i].values()):
return
actions.append(self.profilerTab)
finfo[0]['actions'] = tuple(actions)
dmdloc.factory_type_information = finfo
transaction.commit()
def rmvProfilerTab(self,app):
dmdloc = self.dmd
finfo = dmdloc.factory_type_information
actions = list(finfo[0]['actions'])
for i in range(len(actions)):
if (self.profilerTab['id'] in actions[i].values()):
actions.remove(self.profilerTab)
finfo[0]['actions'] = tuple(actions)
dmdloc.factory_type_information = finfo
transaction.commit()
def installMenus(self,app):
dmdloc = self.dmd
self.removeMenus(dmdloc)
modulemenu = ZenMenu('ModuleMenu')
dmdloc.zenMenus._setObject(modulemenu.id, modulemenu)
modulemenu = dmdloc.zenMenus._getOb(modulemenu.id)
modulemenu.manage_addZenMenuItem('addModule',
action='dialog_addModule', # page template that is called
description='Add Ruleset',
ordering=4.0,
isdialog=True)
modulemenu.manage_addZenMenuItem('removeModule',
action='dialog_removeModule', # page template that is called
description='Remove Ruleset',
ordering=3.0,
isdialog=True)
modulemenu.manage_addZenMenuItem('runAllMembershipRules',
action='dialog_runAllMembershipRules', # page template that is called
description='Build All Memberships',
ordering=2.0,
isdialog=True)
modulemenu = ZenMenu('RuleDefinitions')
dmdloc.zenMenus._setObject(modulemenu.id, modulemenu)
modulemenu = dmdloc.zenMenus._getOb(modulemenu.id)
modulemenu.manage_addZenMenuItem('addRule',
action='dialog_addRule', # page template that is called
description='Add Rule',
ordering=4.0,
isdialog=True)
modulemenu.manage_addZenMenuItem('removeRule',
action='dialog_removeRule', # page template that is called
description='Remove Rule',
ordering=3.0,
isdialog=True)
modulemenu = ZenMenu('RuleModule')
dmdloc.zenMenus._setObject(modulemenu.id, modulemenu)
modulemenu = dmdloc.zenMenus._getOb(modulemenu.id)
modulemenu.manage_addZenMenuItem('runAllMembershipRules',
action='dialog_runModuleMembershipRules', # page template that is called
description='Build Memberships',
ordering=2.0,
isdialog=True)
modulemenu.manage_addZenMenuItem('buildAlerts',
action='dialog_buildModuleAlerts', # page template that is called
description='Build Alerts',
ordering=1.0,
isdialog=True)
def removeMenus(self, dmd):
try:
self.dmd.zenMenus._delObject('ModuleMenu')
except AttributeError:
pass
try:
self.dmd.zenMenus._delObject('RuleDefinitions')
except AttributeError:
pass
try:
self.dmd.zenMenus._delObject('RuleModule')
except AttributeError:
pass
def install(self, app):
ZenPackBase.install(self, app)
self.addProfilerTab(app)
self.installMenus(app.zport.dmd)
def upgrade(self, app):
ZenPackBase.upgrade(self, app)
self.addProfilerTab(app)
self.installMenus(app.zport.dmd)
def remove(self, app, junk):
self.rmvProfilerTab(app)
self.dmd._delObject('Profiles')
self.removeMenus(self.zport.dmd)
#ZenPackBase.remove(self, app, junk)
#ZenPackBase.remove(self.app, leaveObjects)
| gpl-2.0 | -3,753,552,154,679,509,000 | 40.736434 | 108 | 0.534547 | false |
eduNEXT/edx-platform | lms/djangoapps/certificates/generation.py | 2 | 6695 | """
Course certificate generation
These methods generate course certificates (they create a new course certificate if it does not yet exist, or update the
existing cert if it does already exist).
For now, these methods deal primarily with allowlist certificates, and are part of the V2 certificates revamp.
These methods should be called from tasks.
"""
import logging
from uuid import uuid4
from common.djangoapps.student.models import CourseEnrollment, UserProfile
from lms.djangoapps.certificates.data import CertificateStatuses
from lms.djangoapps.certificates.models import GeneratedCertificate
from lms.djangoapps.certificates.queue import XQueueCertInterface
from lms.djangoapps.certificates.utils import (
emit_certificate_event,
has_html_certificates_enabled
)
from lms.djangoapps.grades.api import CourseGradeFactory
from lms.djangoapps.instructor.access import list_with_level
from openedx.core.djangoapps.content.course_overviews.api import get_course_overview_or_none
log = logging.getLogger(__name__)
def generate_course_certificate(user, course_key, status, generation_mode):
"""
Generate a course certificate for this user, in this course run. If the certificate has a passing status, also emit
a certificate event.
Note that the certificate could be either an allowlist certificate or a "regular" course certificate; the content
will be the same either way.
Args:
user: user for whom to generate a certificate
course_key: course run key for which to generate a certificate
status: certificate status (value from the CertificateStatuses model)
generation_mode: Used when emitting an events. Options are "self" (implying the user generated the cert
themself) and "batch" for everything else.
"""
cert = _generate_certificate(user, course_key, status)
if CertificateStatuses.is_passing_status(cert.status):
# Emit a certificate event
event_data = {
'user_id': user.id,
'course_id': str(course_key),
'certificate_id': cert.verify_uuid,
'enrollment_mode': cert.mode,
'generation_mode': generation_mode
}
emit_certificate_event(event_name='created', user=user, course_id=course_key, event_data=event_data)
elif CertificateStatuses.unverified == cert.status:
cert.mark_unverified(source='certificate_generation')
return cert
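# Illustrative call only; the argument values below are assumptions, not taken
# from this module's callers:
#
#   cert = generate_course_certificate(user, course_key,
#                                      CertificateStatuses.downloadable,
#                                      generation_mode='batch')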
def _generate_certificate(user, course_key, status):
"""
Generate a certificate for this user, in this course run.
"""
# Retrieve the existing certificate for the learner if it exists
existing_certificate = GeneratedCertificate.certificate_for_student(user, course_key)
profile = UserProfile.objects.get(user=user)
profile_name = profile.name
course_grade = CourseGradeFactory().read(user, course_key=course_key)
enrollment_mode, __ = CourseEnrollment.enrollment_mode_for_user(user, course_key)
# Retain the `verify_uuid` from an existing certificate if possible, this will make it possible for the learner to
# keep the existing URL to their certificate
if existing_certificate and existing_certificate.verify_uuid:
uuid = existing_certificate.verify_uuid
else:
uuid = uuid4().hex
cert, created = GeneratedCertificate.objects.update_or_create(
user=user,
course_id=course_key,
defaults={
'user': user,
'course_id': course_key,
'mode': enrollment_mode,
'name': profile_name,
'status': status,
'grade': course_grade.percent,
'download_url': '',
'key': '',
'verify_uuid': uuid,
'error_reason': ''
}
)
if created:
created_msg = 'Certificate was created.'
else:
created_msg = 'Certificate already existed and was updated.'
log.info(f'Generated certificate with status {cert.status} for {user.id} : {course_key}. {created_msg}')
return cert
def generate_user_certificates(student, course_key, insecure=False, generation_mode='batch', forced_grade=None):
"""
It will add the add-cert request into the xqueue.
A new record will be created to track the certificate
generation task. If an error occurs while adding the certificate
to the queue, the task will have status 'error'. It also emits
`edx.certificate.created` event for analytics.
This method has not yet been updated (it predates the certificates revamp). If modifying this method,
see also generate_user_certificates() in generation_handler.py (which is very similar but is not called from a
celery task). In the future these methods will be unified.
Args:
student (User)
course_key (CourseKey)
Keyword Arguments:
insecure - (Boolean)
generation_mode - who has requested certificate generation. Its value should `batch`
in case of django command and `self` if student initiated the request.
forced_grade - a string indicating to replace grade parameter. if present grading
will be skipped.
"""
beta_testers_queryset = list_with_level(course_key, 'beta')
if beta_testers_queryset.filter(username=student.username):
log.info(f"Canceling Certificate Generation task for user {student.id} : {course_key}. User is a Beta Tester.")
return
xqueue = XQueueCertInterface()
if insecure:
xqueue.use_https = False
course_overview = get_course_overview_or_none(course_key)
if not course_overview:
log.info(f"Canceling Certificate Generation task for user {student.id} : {course_key} due to a missing course"
f"overview.")
return
generate_pdf = not has_html_certificates_enabled(course_overview)
cert = xqueue.add_cert(
student,
course_key,
generate_pdf=generate_pdf,
forced_grade=forced_grade
)
log.info(f"Queued Certificate Generation task for {student.id} : {course_key}")
# If cert_status is not present in certificate valid_statuses (for example unverified) then
# add_cert returns None and raises AttributeError while accessing cert attributes.
if cert is None:
return
if CertificateStatuses.is_passing_status(cert.status):
emit_certificate_event('created', student, course_key, course_overview, {
'user_id': student.id,
'course_id': str(course_key),
'certificate_id': cert.verify_uuid,
'enrollment_mode': cert.mode,
'generation_mode': generation_mode
})
return cert.status
| agpl-3.0 | -6,237,152,878,046,844,000 | 37.924419 | 120 | 0.688275 | false |
ar7z1/ansible | lib/ansible/modules/identity/opendj/opendj_backendprop.py | 77 | 6939 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Werner Dijkerman ([email protected])
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: opendj_backendprop
short_description: Will update the backend configuration of OpenDJ via the dsconfig set-backend-prop command.
description:
- This module will update settings for OpenDJ with the command set-backend-prop.
    - It will first check via the get-backend-prop command whether the configuration needs to be applied.
version_added: "2.2"
author:
- Werner Dijkerman (@dj-wasabi)
options:
opendj_bindir:
description:
- The path to the bin directory of OpenDJ.
required: false
default: /opt/opendj/bin
hostname:
description:
- The hostname of the OpenDJ server.
required: true
port:
description:
- The Admin port on which the OpenDJ instance is available.
required: true
username:
description:
- The username to connect to.
required: false
default: cn=Directory Manager
password:
description:
- The password for the cn=Directory Manager user.
- Either password or passwordfile is needed.
required: false
passwordfile:
description:
- Location to the password file which holds the password for the cn=Directory Manager user.
- Either password or passwordfile is needed.
required: false
backend:
description:
- The name of the backend on which the property needs to be updated.
required: true
name:
description:
- The configuration setting to update.
required: true
value:
description:
- The value for the configuration item.
required: true
state:
description:
- If configuration needs to be added/updated
required: false
default: "present"
'''
EXAMPLES = '''
- name: "Add or update OpenDJ backend properties"
action: opendj_backendprop
hostname=localhost
port=4444
username="cn=Directory Manager"
password=password
backend=userRoot
name=index-entry-limit
value=5000
'''
RETURN = '''
'''
from ansible.module_utils.basic import AnsibleModule
class BackendProp(object):
def __init__(self, module):
self._module = module
def get_property(self, opendj_bindir, hostname, port, username, password_method, backend_name):
my_command = [
opendj_bindir + '/dsconfig',
'get-backend-prop',
'-h', hostname,
'--port', str(port),
'--bindDN', username,
'--backend-name', backend_name,
'-n', '-X', '-s'
] + password_method
rc, stdout, stderr = self._module.run_command(my_command)
if rc == 0:
return stdout
else:
self._module.fail_json(msg="Error message: " + str(stderr))
def set_property(self, opendj_bindir, hostname, port, username, password_method, backend_name, name, value):
my_command = [
opendj_bindir + '/dsconfig',
'set-backend-prop',
'-h', hostname,
'--port', str(port),
'--bindDN', username,
'--backend-name', backend_name,
'--set', name + ":" + value,
'-n', '-X'
] + password_method
rc, stdout, stderr = self._module.run_command(my_command)
if rc == 0:
return True
else:
self._module.fail_json(msg="Error message: " + stderr)
def validate_data(self, data=None, name=None, value=None):
for config_line in data.split('\n'):
if config_line:
split_line = config_line.split()
if split_line[0] == name:
if split_line[1] == value:
return True
return False
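    # validate_data() parses the script-friendly ("-s") output of
    # "dsconfig get-backend-prop": one property per line, name and value
    # whitespace-separated. An illustrative (not captured) sample line:
    #
    #   index-entry-limit    4000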
def main():
module = AnsibleModule(
argument_spec=dict(
opendj_bindir=dict(default="/opt/opendj/bin", type="path"),
hostname=dict(required=True),
port=dict(required=True),
username=dict(default="cn=Directory Manager", required=False),
password=dict(required=False, no_log=True),
passwordfile=dict(required=False, type="path"),
backend=dict(required=True),
name=dict(required=True),
value=dict(required=True),
state=dict(default="present"),
),
supports_check_mode=True,
mutually_exclusive=[['password', 'passwordfile']],
required_one_of=[['password', 'passwordfile']]
)
opendj_bindir = module.params['opendj_bindir']
hostname = module.params['hostname']
port = module.params['port']
username = module.params['username']
password = module.params['password']
passwordfile = module.params['passwordfile']
backend_name = module.params['backend']
name = module.params['name']
value = module.params['value']
state = module.params['state']
if module.params["password"] is not None:
password_method = ['-w', password]
elif module.params["passwordfile"] is not None:
password_method = ['-j', passwordfile]
opendj = BackendProp(module)
validate = opendj.get_property(opendj_bindir=opendj_bindir,
hostname=hostname,
port=port,
username=username,
password_method=password_method,
backend_name=backend_name)
if validate:
if not opendj.validate_data(data=validate, name=name, value=value):
if module.check_mode:
module.exit_json(changed=True)
if opendj.set_property(opendj_bindir=opendj_bindir,
hostname=hostname,
port=port,
username=username,
password_method=password_method,
backend_name=backend_name,
name=name,
value=value):
module.exit_json(changed=True)
else:
module.exit_json(changed=False)
else:
module.exit_json(changed=False)
else:
module.exit_json(changed=False)
if __name__ == '__main__':
main()
| gpl-3.0 | 7,237,501,349,423,861,000 | 33.014706 | 112 | 0.557141 | false |
koushikcgit/xen | tools/python/xen/xm/opts.py | 43 | 17981 | #============================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
# Copyright (C) 2004, 2005 Mike Wray <[email protected]>
# Copyright (C) 2005 XenSource Ltd.
#============================================================================
"""Object-oriented command-line option support.
"""
import getopt
import os
import os.path
import sys
import types
def _line_wrap(text, width = 70):
lines = []
current_line = ''
words = text.strip().split()
while words:
word = words.pop(0)
if len(current_line) + len(word) + 1 < width:
current_line += word + ' '
else:
lines.append(current_line.strip())
current_line = word + ' '
if current_line:
lines.append(current_line.strip())
return lines
def wrap(text, width = 70):
""" Really basic textwrap. Useful because textwrap is not available
for Python 2.2, and textwrap.wrap ignores newlines in Python 2.3+.
"""
if len(text) < width:
return [text]
lines = []
for line in text.split('\n'):
lines += _line_wrap(line, width)
return lines
class OptionError(Exception):
def _get_message(self):
return self.__message
def _set_message(self, value):
self.__message = value
message = property(_get_message, _set_message)
"""Denotes an error in option parsing."""
def __init__(self, message, usage = ''):
self.message = message
self.usage = usage
Exception.__init__(self, message)
def __str__(self):
return self.message
class XMLFileError(Exception):
"""Thrown is input is an XML File"""
def __init__(self, XMLFile):
self.XMLFile = XMLFile
def __str__(self):
return "XMLFileError: %s" % self.XMLFile
def getFile(self):
return self.XMLFile
class Opt:
"""An individual option.
"""
def __init__(self, opts, name, short=None, long=None,
val=None, fn=None, use=None, default=None):
"""Create an option.
opts parent options object
name name of the field it controls
short short (1-char) command line switch (optional)
long long command-line switch. Defaults to option name.
val string used to print option args in help.
If val is not specified the option has no arg.
fn function to call when the option is specified.
use usage (help) string
default default value if not specified on command-line
"""
self.opts = opts
self.name = name
self.short = short
if long is None:
long = name
self.long = long
self.val = val
self.use = use
self.default = default
self.optkeys = []
if self.short:
self.optkeys.append('-' + self.short)
if self.long:
self.optkeys.append('--' + self.long)
self.fn = fn
self.specified_opt = None
self.specified_val = None
self.value = None
self.set(default)
def reset(self):
self.specified_opt = None
self.specified_val = None
self.value = None
self.set(self.default)
def __repr__(self):
return self.name + '=' + str(self.specified_val)
def __str__(self):
""" Formats the option into:
'-k, --key description'
"""
PARAM_WIDTH = 20
if self.val:
keys = ', '.join(['%s=%s' % (k, self.val) for k in self.optkeys])
else:
keys = ', '.join(self.optkeys)
desc = wrap(self.use, 55)
if len(keys) > PARAM_WIDTH:
desc = [''] + desc
wrapped = ('\n' + ' ' * (PARAM_WIDTH + 1)).join(desc)
return keys.ljust(PARAM_WIDTH + 1) + wrapped
def set(self, value):
"""Set the option value.
"""
self.opts.setopt(self.name, value)
def get(self):
"""Get the option value.
"""
return self.opts.getopt(self.name)
def append(self, value):
"""Append a value to the option value.
"""
v = self.get() or []
v.append(value)
self.set(v)
def short_opt(self):
"""Short option spec.
"""
if self.short:
if self.val:
return self.short + ':'
else:
return self.short
else:
return None
def long_opt(self):
"""Long option spec.
"""
if self.long:
if self.val:
return self.long + '='
else:
return self.long
else:
return None
def format(self, str, start=' ', out=sys.stdout):
"""Print a string, with consistent indentation at the start of lines.
"""
lines = str.split('\n')
for l in lines:
l = l.strip()
if start:
out.write(start)
out.write(l)
out.write('\n')
def show(self, out=sys.stdout):
sep = ' '
for x in self.optkeys:
out.write(sep)
out.write(x)
sep = ', '
if self.val:
out.write(' ')
out.write(self.val)
out.write('\n')
if self.use:
self.format(self.use, out=out);
if self.val:
self.format('Default ' + str(self.default or 'None'), out=out)
def specify(self, k, v):
"""Specify the option. Called when the option is set
from the command line.
k option switch used
v optional value given (if any)
"""
if k in self.optkeys:
if self.val is None and v:
self.opts.err("Option '%s' does not take a value" % k)
self.specified_opt = k
self.specified_val = v
if self.fn:
self.fn(self, k, v)
return 1
else:
return 0
def specified(self):
"""Test whether the option has been specified: set
from the command line.
"""
return self.specified_opt
class OptVar(Opt):
"""An individual option variable.
"""
def __init__(self, opts, name,
val=None, fn=None, use=None, default=None):
"""Create an option.
opts parent options object
name name of the field it controls
val string used to print option args in help.
If val is not specified the option has no arg.
fn function to call when the option is specified.
use usage (help) string
default default value if not specified on command-line
"""
if val is None:
val = name.upper()
Opt.__init__(self, opts, name, val=val, fn=fn, use=use, default=default)
self.optkeys = []
self.optkeys.append(self.long)
def short_opt(self):
return None
def long_opt(self):
return None
def show(self, out=sys.stdout):
print >>out, ' %s=%s' % (self.optkeys[0], self.val)
if self.use:
self.format(self.use, out=out);
if self.val:
self.format('Default ' + str(self.default or 'None'), out=out)
class OptVals:
"""Class to hold option values.
"""
def __init__(self):
self.quiet = False
class Opts:
"""Container for options.
"""
imports = ["import sys",
"import os",
"import os.path",
"from xen.util.ip import *",
]
def __init__(self, use=None):
"""Options constructor.
use usage string
"""
self.use = use
# List of options.
self.options = []
# Options indexed by name.
self.options_map = {}
# Command-line arguments.
self.argv = []
# Option values.
self.vals = OptVals()
# Variables for default scripts.
self.vars = {}
# Option to use for bare words.
self.default_opt = None
def reset(self):
self.vals = OptVals()
self.vars = {}
for opt in self.options:
opt.reset()
def __repr__(self):
return '\n'.join(map(str, self.options))
def __str__(self):
options = [s for s in self.options if s.optkeys[0][0] == '-']
output = ''
if options:
output += '\nOptions:\n\n'
output += '\n'.join([str(o) for o in options])
output += '\n'
return output
def val_usage(self):
optvals = [s for s in self.options if s.optkeys[0][0] != '-']
output = ''
if optvals:
output += '\nValues:\n\n'
output += '\n'.join([str(o) for o in optvals])
output += '\n'
return output
def opt(self, name, **args):
"""Add an option.
name option name
**args keyword params for option constructor
"""
x = Opt(self, name, **args)
self.options.append(x)
self.options_map[name] = x
return x
def default(self, name):
self.default_opt = name
def getdefault(self, val):
if self.default_opt is None:
return 0
opt = self.option(self.default_opt)
return opt.set(val)
def var(self, name, **args):
x = OptVar(self, name, **args)
self.options.append(x)
self.options_map[name] = x
return x
def setvar(self, var, val):
"""Set a default script variable.
"""
self.vars[var] = val
def getvar(self, var):
"""Get a default script variable.
"""
return self.vars.get(var)
def option(self, name):
"""Get an option (object).
"""
return self.options_map.get(name)
def setopt(self, name, val):
"""Set an option value.
An option can also be set using 'opts.vals.name = val'.
"""
setattr(self.vals, name, val)
def getopt(self, name):
"""Get an option value.
An option value can also be got using 'opts.vals.name'.
"""
return getattr(self.vals, name)
def specified(self, name):
"""Test if an option has been specified.
"""
opt = self.option(name)
return opt and opt.specified()
def err(self, msg):
"""Print an error to stderr and exit.
"""
print >>sys.stderr, "Error:", msg
sys.exit(1)
def info(self, msg):
"""Print a message to stdout (unless quiet is set).
"""
if self.vals.quiet: return
print msg
def warn(self, msg):
"""Print a warning to stdout.
"""
print >>sys.stderr, "Warning:", msg
def parse(self, argv):
"""Parse arguments argv using the options.
return remaining arguments
"""
self.argv = argv
# hack to work around lack of gnu getopts parsing in python 2.2
args = argv[1:]
xargs = []
while args:
# let getopt parse whatever it feels like -- if anything
try:
(xvals, args) = getopt.getopt(args[0:],
self.short_opts(),
self.long_opts())
except getopt.GetoptError, err:
raise OptionError(str(err), self.use)
#self.err(str(err))
for (k, v) in xvals:
for opt in self.options:
if opt.specify(k, v): break
else:
raise OptionError('Unknown option: %s' % k, self.use)
if not args:
break
# then process the 1st arg
(arg,args) = (args[0], args[1:])
isvar = 0
if '=' in arg:
(k, v) = arg.split('=', 1)
for opt in self.options:
if opt.specify(k, v):
isvar = 1
break
elif self.getdefault(arg):
isvar = 1
if not isvar:
xargs.append(arg)
return xargs
def short_opts(self):
"""Get short options specifier for getopt.
"""
l = []
for x in self.options:
y = x.short_opt()
if not y: continue
l.append(y)
return ''.join(l)
def long_opts(self):
"""Get long options specifier for getopt.
"""
l = []
for x in self.options:
y = x.long_opt()
if not y: continue
l.append(y)
return l
def usage(self):
print 'Usage: ', self.argv[0], self.use or 'OPTIONS'
print
if self.options:
for opt in self.options:
opt.show()
print
print
def var_usage(self):
if self.vars:
print 'The config file defines the following variables:'
for var in self.vars:
var.show()
print
print
def config_usage(self):
if self.imports:
            print 'The following are automatically imported:'
for x in self.imports:
print ' ', x
print
self.var_usage()
def load_defconfig(self, help=0):
"""Load a defconfig script. Assumes these options set:
'path' search path
'defconfig' script name
"""
for x in [ '' ] + self.vals.path.split(':'):
if x:
p = os.path.join(x, self.vals.defconfig)
else:
p = self.vals.defconfig
if not p.startswith('/'):
p = os.path.join(os.path.curdir, p)
if os.path.exists(p):
self.info('Using config file "%s".' % p)
f = open(p)
is_xml = (f.read(1) == '<')
f.close()
if is_xml:
raise XMLFileError(p)
self.load(p, help)
break
else:
raise OptionError('Unable to open config file: %s' % \
self.vals.defconfig,
self.use)
def load(self, defconfig, help):
"""Load a defconfig file. Local variables in the file
are used to set options with the same names.
Variables are not used to set options that are already specified.
"""
# Create global and local dicts for the file.
# Initialize locals to the vars.
# Use exec to do the standard imports and
# define variables we are passing to the script.
globs = {}
locs = {}
locs.update(self.vars)
cmd = '\n'.join(self.imports +
[ "from xen.xm.help import Vars",
"xm_file = '%s'" % defconfig,
"xm_help = %d" % help,
"xm_vars = Vars(xm_file, xm_help, locals())"
])
exec cmd in globs, locs
try:
execfile(defconfig, globs, locs)
except SyntaxError,e:
raise SyntaxError, \
"Errors were found at line %d while processing %s:\n\t%s"\
%(e.lineno,defconfig,e.text)
except:
if not help: raise
if help:
self.config_usage()
return
# Extract the values set by the script and set the corresponding
# options, if not set on the command line.
vtypes = [ types.StringType,
types.ListType,
types.IntType,
types.FloatType
]
for (k, v) in locs.items():
if self.specified(k): continue
if not(type(v) in vtypes): continue
self.setopt(k, v)
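# A defconfig file is ordinary Python; a hypothetical example whose top-level
# variables become option values through load() above:
#
#   name   = "example-domain"
#   memory = 256
#   kernel = "/boot/vmlinuz-xen"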
def set_true(opt, k, v):
"""Set an option true."""
opt.set(1)
def set_false(opt, k, v):
"""Set an option false."""
opt.set(0)
def set_bool(opt, k, v):
"""Set a boolean option.
"""
if v in ('yes', 'y'):
opt.set(1)
elif v in ('no', 'n'):
opt.set(0)
else:
opt.opts.err('Invalid value:' +v)
def set_value(opt, k, v):
"""Set an option to a value."""
opt.set(v)
def set_int(opt, k, v):
"""Set an option to an integer value."""
try:
v = int(v)
except:
opt.opts.err('Invalid value: ' + str(v))
opt.set(v)
def set_long(opt, k, v):
"""Set an option to a long integer value."""
try:
v = long(v)
except:
opt.opts.err('Invalid value: ' + str(v))
opt.set(v)
def set_float(opt, k, v):
"""Set an option to a float value."""
try:
v = float(v)
except:
opt.opts.err('Invalid value: ' + str(v))
opt.set(v)
def append_value(opt, k, v):
"""Append a value to a list option."""
opt.append(v)
def set_var(opt, k, v):
"""Set a default script variable.
"""
(var, val) = v.strip().split('=', 1)
opt.opts.setvar(var.strip(), val.strip())
| gpl-2.0 | 4,655,203,752,074,545,000 | 27.677831 | 80 | 0.49975 | false |
kimegitee/python-koans | python2/libs/colorama/ansitowin32.py | 287 | 6621 | # Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
import re
import sys
from .ansi import AnsiFore, AnsiBack, AnsiStyle, Style
from .winterm import WinTerm, WinColor, WinStyle
from .win32 import windll
if windll is not None:
winterm = WinTerm()
def is_a_tty(stream):
return hasattr(stream, 'isatty') and stream.isatty()
class StreamWrapper(object):
'''
Wraps a stream (such as stdout), acting as a transparent proxy for all
attribute access apart from method 'write()', which is delegated to our
Converter instance.
'''
def __init__(self, wrapped, converter):
# double-underscore everything to prevent clashes with names of
# attributes on the wrapped stream object.
self.__wrapped = wrapped
self.__convertor = converter
def __getattr__(self, name):
return getattr(self.__wrapped, name)
def write(self, text):
self.__convertor.write(text)
class AnsiToWin32(object):
'''
Implements a 'write()' method which, on Windows, will strip ANSI character
sequences from the text, and if outputting to a tty, will convert them into
win32 function calls.
'''
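    # A minimal wrapping sketch; colorama.init() normally performs the
    # equivalent for you (stated here as an assumption, not from this file):
    #
    #     wrapper = AnsiToWin32(sys.stdout)
    #     if wrapper.should_wrap():
    #         sys.stdout = wrapper.stream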
ANSI_RE = re.compile('\033\[((?:\d|;)*)([a-zA-Z])')
def __init__(self, wrapped, convert=None, strip=None, autoreset=False):
# The wrapped stream (normally sys.stdout or sys.stderr)
self.wrapped = wrapped
# should we reset colors to defaults after every .write()
self.autoreset = autoreset
# create the proxy wrapping our output stream
self.stream = StreamWrapper(wrapped, self)
on_windows = sys.platform.startswith('win')
# should we strip ANSI sequences from our output?
if strip is None:
strip = on_windows
self.strip = strip
# should we should convert ANSI sequences into win32 calls?
if convert is None:
convert = on_windows and is_a_tty(wrapped)
self.convert = convert
# dict of ansi codes to win32 functions and parameters
self.win32_calls = self.get_win32_calls()
# are we wrapping stderr?
self.on_stderr = self.wrapped is sys.stderr
def should_wrap(self):
'''
True if this class is actually needed. If false, then the output
stream will not be affected, nor will win32 calls be issued, so
wrapping stdout is not actually required. This will generally be
False on non-Windows platforms, unless optional functionality like
autoreset has been requested using kwargs to init()
'''
return self.convert or self.strip or self.autoreset
def get_win32_calls(self):
if self.convert and winterm:
return {
AnsiStyle.RESET_ALL: (winterm.reset_all, ),
AnsiStyle.BRIGHT: (winterm.style, WinStyle.BRIGHT),
AnsiStyle.DIM: (winterm.style, WinStyle.NORMAL),
AnsiStyle.NORMAL: (winterm.style, WinStyle.NORMAL),
AnsiFore.BLACK: (winterm.fore, WinColor.BLACK),
AnsiFore.RED: (winterm.fore, WinColor.RED),
AnsiFore.GREEN: (winterm.fore, WinColor.GREEN),
AnsiFore.YELLOW: (winterm.fore, WinColor.YELLOW),
AnsiFore.BLUE: (winterm.fore, WinColor.BLUE),
AnsiFore.MAGENTA: (winterm.fore, WinColor.MAGENTA),
AnsiFore.CYAN: (winterm.fore, WinColor.CYAN),
AnsiFore.WHITE: (winterm.fore, WinColor.GREY),
AnsiFore.RESET: (winterm.fore, ),
AnsiBack.BLACK: (winterm.back, WinColor.BLACK),
AnsiBack.RED: (winterm.back, WinColor.RED),
AnsiBack.GREEN: (winterm.back, WinColor.GREEN),
AnsiBack.YELLOW: (winterm.back, WinColor.YELLOW),
AnsiBack.BLUE: (winterm.back, WinColor.BLUE),
AnsiBack.MAGENTA: (winterm.back, WinColor.MAGENTA),
AnsiBack.CYAN: (winterm.back, WinColor.CYAN),
AnsiBack.WHITE: (winterm.back, WinColor.GREY),
AnsiBack.RESET: (winterm.back, ),
}
def write(self, text):
if self.strip or self.convert:
self.write_and_convert(text)
else:
self.wrapped.write(text)
self.wrapped.flush()
if self.autoreset:
self.reset_all()
def reset_all(self):
if self.convert:
self.call_win32('m', (0,))
elif is_a_tty(self.wrapped):
self.wrapped.write(Style.RESET_ALL)
def write_and_convert(self, text):
'''
Write the given text to our wrapped stream, stripping any ANSI
sequences from the text, and optionally converting them into win32
calls.
'''
cursor = 0
for match in self.ANSI_RE.finditer(text):
start, end = match.span()
self.write_plain_text(text, cursor, start)
self.convert_ansi(*match.groups())
cursor = end
self.write_plain_text(text, cursor, len(text))
def write_plain_text(self, text, start, end):
if start < end:
self.wrapped.write(text[start:end])
self.wrapped.flush()
def convert_ansi(self, paramstring, command):
if self.convert:
params = self.extract_params(paramstring)
self.call_win32(command, params)
def extract_params(self, paramstring):
def split(paramstring):
for p in paramstring.split(';'):
if p != '':
yield int(p)
return tuple(split(paramstring))
def call_win32(self, command, params):
if params == []:
params = [0]
if command == 'm':
for param in params:
if param in self.win32_calls:
func_args = self.win32_calls[param]
func = func_args[0]
args = func_args[1:]
kwargs = dict(on_stderr=self.on_stderr)
func(*args, **kwargs)
elif command in ('H', 'f'): # set cursor position
func = winterm.set_cursor_position
func(params, on_stderr=self.on_stderr)
elif command in ('J'):
func = winterm.erase_data
func(params, on_stderr=self.on_stderr)
elif command == 'A':
if params == () or params == None:
num_rows = 1
else:
num_rows = params[0]
func = winterm.cursor_up
func(num_rows, on_stderr=self.on_stderr)
| mit | -9,201,103,425,506,319,000 | 34.031746 | 79 | 0.584655 | false |
ltilve/chromium | gin/fingerprint/fingerprint_v8_snapshot.py | 64 | 2442 | #!/usr/bin/env python
#
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Fingerprints the V8 snapshot blob files.
Constructs a SHA256 fingerprint of the V8 natives and snapshot blob files and
creates a .cc file which includes these fingerprints as variables.
"""
import hashlib
import optparse
import os
import sys
_HEADER = """// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file was generated by fingerprint_v8_snapshot.py.
namespace gin {
"""
_FOOTER = """
} // namespace gin
"""
def FingerprintFile(file_path):
input_file = open(file_path, 'rb')
sha256 = hashlib.sha256()
while True:
block = input_file.read(sha256.block_size)
if not block:
break
sha256.update(block)
return sha256.digest()
def WriteFingerprint(output_file, variable_name, fingerprint):
output_file.write('\nextern const unsigned char %s[] = { ' % variable_name)
for byte in fingerprint:
output_file.write(str(ord(byte)) + ', ')
output_file.write('};\n')
def WriteOutputFile(natives_fingerprint,
snapshot_fingerprint,
output_file_path):
output_dir_path = os.path.dirname(output_file_path)
if not os.path.exists(output_dir_path):
os.makedirs(output_dir_path)
output_file = open(output_file_path, 'w')
output_file.write(_HEADER)
WriteFingerprint(output_file, 'g_natives_fingerprint', natives_fingerprint)
output_file.write('\n')
WriteFingerprint(output_file, 'g_snapshot_fingerprint', snapshot_fingerprint)
output_file.write(_FOOTER)
def main():
parser = optparse.OptionParser()
parser.add_option('--snapshot_file',
help='The input V8 snapshot blob file path.')
parser.add_option('--natives_file',
help='The input V8 natives blob file path.')
parser.add_option('--output_file',
                    help='The path for the output cc file which will be written.')
options, _ = parser.parse_args()
natives_fingerprint = FingerprintFile(options.natives_file)
snapshot_fingerprint = FingerprintFile(options.snapshot_file)
WriteOutputFile(
natives_fingerprint, snapshot_fingerprint, options.output_file)
return 0
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause | 1,358,686,302,995,075,800 | 27.395349 | 80 | 0.692465 | false |
shiblon/pytour | static/js/pypyjs/pypy-nojit.js-0.3.1/lib/modules/test/test_coding.py | 121 | 1212 |
import test.test_support, unittest
import os
class CodingTest(unittest.TestCase):
def test_bad_coding(self):
module_name = 'bad_coding'
self.verify_bad_module(module_name)
def test_bad_coding2(self):
module_name = 'bad_coding2'
self.verify_bad_module(module_name)
def verify_bad_module(self, module_name):
self.assertRaises(SyntaxError, __import__, 'test.' + module_name)
path = os.path.dirname(__file__)
filename = os.path.join(path, module_name + '.py')
with open(filename) as fp:
text = fp.read()
self.assertRaises(SyntaxError, compile, text, filename, 'exec')
def test_error_from_string(self):
# See http://bugs.python.org/issue6289
input = u"# coding: ascii\n\N{SNOWMAN}".encode('utf-8')
with self.assertRaises(SyntaxError) as c:
compile(input, "<string>", "exec")
expected = "'ascii' codec can't decode byte 0xe2 in position 16: " \
"ordinal not in range(128)"
self.assertTrue(c.exception.args[0].startswith(expected))
def test_main():
test.test_support.run_unittest(CodingTest)
if __name__ == "__main__":
test_main()
| apache-2.0 | -2,760,748,927,685,283,300 | 31.756757 | 76 | 0.616337 | false |
cetic/ansible | lib/ansible/modules/network/cloudengine/ce_snmp_location.py | 39 | 6774 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.0'}
DOCUMENTATION = '''
---
module: ce_snmp_location
version_added: "2.4"
short_description: Manages SNMP location configuration on HUAWEI CloudEngine switches.
description:
- Manages SNMP location configurations on HUAWEI CloudEngine switches.
author:
- wangdezhuang (@CloudEngine-Ansible)
options:
location:
description:
- Location information.
required: true
default: null
state:
description:
- Manage the state of the resource.
required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- name: CloudEngine snmp location test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: "Config SNMP location"
ce_snmp_location:
state: present
location: nanjing China
provider: "{{ cli }}"
- name: "Remove SNMP location"
ce_snmp_location:
state: absent
location: nanjing China
provider: "{{ cli }}"
'''
RETURN = '''
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"location": "nanjing China",
"state": "present"}
existing:
description: k/v pairs of existing aaa server
returned: always
type: dict
sample: {}
end_state:
description: k/v pairs of aaa params after module execution
returned: always
type: dict
sample: {"location": "nanjing China"}
updates:
description: command sent to the device
returned: always
type: list
sample: ["snmp-agent sys-info location nanjing China"]
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ce import get_config, load_config, ce_argument_spec
class SnmpLocation(object):
""" Manages SNMP location configuration """
def __init__(self, **kwargs):
""" Class init """
# module
argument_spec = kwargs["argument_spec"]
self.spec = argument_spec
self.module = AnsibleModule(argument_spec=self.spec, supports_check_mode=True)
# config
self.cur_cfg = dict()
# module args
self.state = self.module.params['state']
self.location = self.module.params['location']
# state
self.changed = False
self.updates_cmd = list()
self.results = dict()
self.proposed = dict()
self.existing = dict()
self.end_state = dict()
def check_args(self):
""" Check invalid args """
if self.location:
if len(self.location) > 255 or len(self.location) < 1:
self.module.fail_json(
msg='Error: The len of location %s is out of [1 - 255].' % self.location)
else:
self.module.fail_json(
msg='Error: The len of location is 0.')
def get_proposed(self):
""" Get proposed state """
self.proposed["state"] = self.state
if self.location:
self.proposed["location"] = self.location
def get_existing(self):
""" Get existing state """
tmp_cfg = self.cli_get_config()
if tmp_cfg:
temp_data = tmp_cfg.split(r"location ")
self.cur_cfg["location"] = temp_data[1]
self.existing["location"] = temp_data[1]
def get_end_state(self):
""" Get end state """
tmp_cfg = self.cli_get_config()
if tmp_cfg:
temp_data = tmp_cfg.split(r"location ")
self.end_state["location"] = temp_data[1]
def cli_load_config(self, commands):
""" Load config by cli """
if not self.module.check_mode:
load_config(self.module, commands)
def cli_get_config(self):
""" Get config by cli """
regular = "| include snmp | include location"
flags = list()
flags.append(regular)
tmp_cfg = get_config(self.module, flags)
return tmp_cfg
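    # Illustrative note (not from the original module): when a location such as
    # "nanjing China" is configured (see the EXAMPLES block above), cli_get_config()
    # is expected to return a line like "snmp-agent sys-info location nanjing China",
    # which get_existing()/get_end_state() then split on "location ".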
def set_config(self):
""" Set configure by cli """
cmd = "snmp-agent sys-info location %s" % self.location
self.updates_cmd.append(cmd)
cmds = list()
cmds.append(cmd)
self.cli_load_config(cmds)
self.changed = True
def undo_config(self):
""" Undo configure by cli """
cmd = "undo snmp-agent sys-info location"
self.updates_cmd.append(cmd)
cmds = list()
cmds.append(cmd)
self.cli_load_config(cmds)
self.changed = True
def work(self):
""" Main work function """
self.check_args()
self.get_proposed()
self.get_existing()
if self.state == "present":
if "location" in self.cur_cfg.keys() and self.location == self.cur_cfg["location"]:
pass
else:
self.set_config()
else:
if "location" in self.cur_cfg.keys() and self.location == self.cur_cfg["location"]:
self.undo_config()
self.get_end_state()
self.results['changed'] = self.changed
self.results['proposed'] = self.proposed
self.results['existing'] = self.existing
self.results['end_state'] = self.end_state
self.results['updates'] = self.updates_cmd
self.module.exit_json(**self.results)
def main():
""" Module main """
argument_spec = dict(
state=dict(choices=['present', 'absent'], default='present'),
location=dict(type='str', required=True)
)
argument_spec.update(ce_argument_spec)
module = SnmpLocation(argument_spec=argument_spec)
module.work()
if __name__ == '__main__':
main()
| gpl-3.0 | -8,808,908,371,850,308,000 | 25.880952 | 95 | 0.596693 | false |
Tatsh/Clementine | dist/codesign.py | 10 | 1320 | #!/usr/bin/python2
# Emulates the behaviour of codesign --deep which is missing on OS X < 10.9
import os
import re
import subprocess
import sys
def SignPath(path, developer_id, deep=True):
args = [
'codesign',
'--preserve-metadata=identifier,entitlements,resource-rules,requirements',
'-s', developer_id,
'-fv', path
]
if deep:
args.append('--deep')
subprocess.check_call(args)
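# Illustrative example (bundle path and signing identity are hypothetical):
# SignPath('Clementine.app/Contents/Frameworks/Foo.framework', 'Developer ID Application: Example')
# runs roughly:
#   codesign --preserve-metadata=identifier,entitlements,resource-rules,requirements \
#     -s 'Developer ID Application: Example' -fv <path> --deep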
def main():
if len(sys.argv) != 3:
print 'Usage: %s <developer id> <app bundle>' % sys.argv[0]
sys.exit(1)
developer_id = sys.argv[1]
app_bundle = sys.argv[2]
for root, dirs, files in os.walk(app_bundle):
for dir in dirs:
if re.search(r'\.framework$', dir):
SignPath(os.path.join(root, dir), developer_id)
for file in files:
if re.search(r'\.(dylib|so)$', file):
SignPath(os.path.join(root, file), developer_id)
elif re.match(r'(clementine-spotifyblob|clementine-tagreader|gst-plugin-scanner)', file):
SignPath(os.path.join(root, file), developer_id)
SignPath(app_bundle, developer_id, deep=False)
# Verify the signatures are valid.
subprocess.check_call([
'codesign', '--verify', '--verbose=4', app_bundle])
subprocess.check_call([
'spctl', '--assess', '--verbose=4', app_bundle])
if __name__ == '__main__':
main()
| gpl-3.0 | -8,318,172,586,712,644,000 | 25.938776 | 95 | 0.639394 | false |
aozima/rt-thread | bsp/taihu/rtconfig.py | 52 | 1285 | import os
# toolchains options
ARCH='ppc'
CPU='ppc405'
CROSS_TOOL='gcc'
TextBase = '0x00000000'
PLATFORM = 'gcc'
EXEC_PATH = 'C:/Program Files/CodeSourcery/Sourcery G++ Lite/bin'
BUILD = 'debug'
if os.getenv('RTT_EXEC_PATH'):
EXEC_PATH = os.getenv('RTT_EXEC_PATH')
if PLATFORM == 'gcc':
# toolchains
PREFIX = 'powerpc-eabi-'
CC = PREFIX + 'gcc'
CXX = PREFIX + 'g++'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
LINK = PREFIX + 'gcc'
TARGET_EXT = 'elf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
DEVICE = ' -mcpu=405 -mno-multiple -mno-string -mno-update -fno-exceptions -fno-builtin -msoft-float'
CFLAGS = DEVICE + ' -D__KERNEL__'
AFLAGS = '-D__ASSEMBLY__ -fno-exceptions -fno-builtin -mregnames -c -Wall -Xassembler -m405 -msoft-float -ffunction-sections'
LFLAGS = DEVICE + ' -Wl,--gc-sections,--cref,-Map=rtthread.map -T taihu.lds' + ' -Ttext=' + TextBase
CPATH = ''
LPATH = ''
if BUILD == 'debug':
CFLAGS += ' -O0 -gdwarf-2'
AFLAGS += ' -gdwarf-2'
else:
CFLAGS += ' -O2'
DASM_ACTION = OBJDUMP + ' -d rtthread-taihu.elf > rtt.asm\n'
POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n' # + DASM_ACTION
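    # For reference (derived from the toolchain variables above, not part of the
    # original file), POST_ACTION expands to roughly:
    #   powerpc-eabi-objcopy -O binary $TARGET rtthread.bin
    #   powerpc-eabi-size $TARGET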
| gpl-2.0 | 1,832,310,962,624,093,000 | 26.934783 | 131 | 0.595331 | false |
blefaudeux/Pinta | pinta/model/model_rnn.py | 1 | 2323 | import logging
from typing import List
import numpy as np
import torch
import torch.nn as nn
from pinta.model.model_base import NN
LOG = logging.getLogger("ConvRNN")
class ConvRNN(NN):
"""
Combination of a convolutional front end and an RNN (GRU) layer below
>> see https://gist.github.com/spro/c87cc706625b8a54e604fb1024106556
"""
def __init__(
self,
logdir: str,
input_size: int,
hidden_size: int,
kernel_sizes: List[int],
n_gru_layers: int,
output_size: int,
filename=None,
tuning_input_size: int = -1,
):
super().__init__(logdir)
# ----
# Define the model
self.input_size = input_size
self.hidden_size = hidden_size
self.output_size = output_size
self.gru_layers = n_gru_layers
# Conv front end
self.conv1 = nn.Conv1d(input_size, hidden_size, kernel_size=kernel_sizes[0])
self.conv2 = nn.Conv1d(hidden_size, hidden_size, kernel_size=kernel_sizes[1])
self.relu = nn.ReLU()
# GRU / LSTM layers
# Requires [batch, seq, inputs]
self.gru = nn.GRU(
hidden_size, hidden_size, n_gru_layers, dropout=0.01, batch_first=True
)
# Ends with a fully connected layer
self.out = nn.Linear(hidden_size, self.output_size)
# Load from trained NN if required
if filename is not None:
self._valid = self.load(filename)
if self._valid:
return
LOG.warning("Could not load the specified net, computing it from scratch")
def forward(self, inputs, hidden=None):
# Run through Conv1d and Pool1d layers
r1 = self.relu(self.conv1(inputs))
r2 = self.relu(self.conv2(r1))
# GRU/LSTM layer expects [batch, seq, inputs]
r2 = r2.transpose(1, 2)
output_rnn, hidden_out = self.gru(r2, hidden)
output = self.out(output_rnn[:, -1, :].squeeze())
return output, hidden_out
def get_layer_weights(self):
return self.conv1.weight
def _get_conv_out(self, shape):
# Useful to compute the shape out of the conv blocks
# (including eventual padding..)
        # The model defines conv1/conv2 rather than a single `conv` attribute,
        # so chain them for the dummy forward pass.
        o = self.conv2(self.conv1(torch.zeros(1, *shape)))
return int(np.prod(o.size()))
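# Minimal usage sketch (all sizes and the logdir are hypothetical, not part of the
# original module):
#   model = ConvRNN(logdir="/tmp/logs", input_size=8, hidden_size=32,
#                   kernel_sizes=[3, 3], n_gru_layers=2, output_size=4)
#   out, hidden = model(torch.zeros(16, 8, 64))  # inputs are [batch, channels, sequence]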
| gpl-3.0 | -6,373,979,256,973,989,000 | 28.0375 | 86 | 0.591477 | false |
mmalyska/eve-wspace | evewspace/SiteTracker/migrations/0003_auto__add_systemweight.py | 17 | 15303 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'SystemWeight'
db.create_table('SiteTracker_systemweight', (
('system', self.gf('django.db.models.fields.related.OneToOneField')(related_name='st_weight', unique=True, primary_key=True, to=orm['Map.System'])),
('weight', self.gf('django.db.models.fields.FloatField')()),
))
db.send_create_signal('SiteTracker', ['SystemWeight'])
def backwards(self, orm):
# Deleting model 'SystemWeight'
db.delete_table('SiteTracker_systemweight')
models = {
'Map.system': {
'Meta': {'object_name': 'System', '_ormbases': ['core.SystemData']},
'first_visited': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'info': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'last_visited': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'lastscanned': ('django.db.models.fields.DateTimeField', [], {}),
'npckills': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'occupied': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'podkills': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'shipkills': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'sysclass': ('django.db.models.fields.IntegerField', [], {}),
'systemdata_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.SystemData']", 'unique': 'True', 'primary_key': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'SiteTracker.claim': {
'Meta': {'object_name': 'Claim'},
'bonus': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'period': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'claims'", 'to': "orm['SiteTracker.ClaimPeriod']"}),
'shareclaimed': ('django.db.models.fields.FloatField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'claims'", 'to': "orm['auth.User']"})
},
'SiteTracker.claimperiod': {
'Meta': {'object_name': 'ClaimPeriod'},
'closetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'endtime': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'loothauledby': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'loothauled'", 'null': 'True', 'to': "orm['auth.User']"}),
'lootsoldby': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'lootsold'", 'null': 'True', 'to': "orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'starttime': ('django.db.models.fields.DateTimeField', [], {})
},
'SiteTracker.fleet': {
'Meta': {'object_name': 'Fleet'},
'current_boss': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'currently_bossing'", 'to': "orm['auth.User']"}),
'ended': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'initial_boss': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bossfleets'", 'to': "orm['auth.User']"}),
'roles_needed': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'fleets_need'", 'symmetrical': 'False', 'to': "orm['SiteTracker.SiteRole']"}),
'started': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'system': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stfleets'", 'to': "orm['Map.System']"})
},
'SiteTracker.payoutentry': {
'Meta': {'object_name': 'PayoutEntry'},
'claim': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'payout'", 'to': "orm['SiteTracker.Claim']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'iskshare': ('django.db.models.fields.BigIntegerField', [], {}),
'report': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'entries'", 'to': "orm['SiteTracker.PayoutReport']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'payouts'", 'to': "orm['auth.User']"})
},
'SiteTracker.payoutreport': {
'Meta': {'object_name': 'PayoutReport'},
'createdby': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'payoutreports'", 'to': "orm['auth.User']"}),
'datepaid': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'grossprofit': ('django.db.models.fields.BigIntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'period': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reports'", 'to': "orm['SiteTracker.ClaimPeriod']"})
},
'SiteTracker.siterecord': {
'Meta': {'object_name': 'SiteRecord'},
'boss': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sitescredited'", 'to': "orm['auth.User']"}),
'fleet': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sites'", 'to': "orm['SiteTracker.Fleet']"}),
'fleetsize': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'raw_points': ('django.db.models.fields.IntegerField', [], {}),
'site_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sitesrun'", 'to': "orm['SiteTracker.SiteType']"}),
'system': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sitescompleted'", 'to': "orm['Map.System']"}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'weighted_points': ('django.db.models.fields.IntegerField', [], {})
},
'SiteTracker.siterole': {
'Meta': {'object_name': 'SiteRole'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'long_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'short_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'})
},
'SiteTracker.sitetype': {
'Meta': {'object_name': 'SiteType'},
'defunct': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'longname': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'shortname': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '8'})
},
'SiteTracker.siteweight': {
'Meta': {'object_name': 'SiteWeight'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'raw_points': ('django.db.models.fields.IntegerField', [], {}),
'site_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'weights'", 'to': "orm['SiteTracker.SiteType']"}),
'sysclass': ('django.db.models.fields.IntegerField', [], {})
},
'SiteTracker.systemweight': {
'Meta': {'object_name': 'SystemWeight'},
'system': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'st_weight'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['Map.System']"}),
'weight': ('django.db.models.fields.FloatField', [], {})
},
'SiteTracker.userlog': {
'Meta': {'object_name': 'UserLog'},
'fleet': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'members'", 'to': "orm['SiteTracker.Fleet']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jointime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'leavetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sitetrackerlogs'", 'to': "orm['auth.User']"})
},
'SiteTracker.usersite': {
'Meta': {'object_name': 'UserSite'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pending': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'members'", 'to': "orm['SiteTracker.SiteRecord']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sites'", 'to': "orm['auth.User']"})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'core.constellation': {
'Meta': {'object_name': 'Constellation', 'db_table': "'mapConstellations'", 'managed': 'False'},
'id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True', 'db_column': "'constellationID'"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_column': "'constellationName'"}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'constellations'", 'db_column': "'regionID'", 'to': "orm['core.Region']"}),
'x': ('django.db.models.fields.FloatField', [], {}),
'y': ('django.db.models.fields.FloatField', [], {}),
'z': ('django.db.models.fields.FloatField', [], {})
},
'core.region': {
'Meta': {'object_name': 'Region', 'db_table': "'mapRegions'", 'managed': 'False'},
'id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True', 'db_column': "'regionID'"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_column': "'regionName'"}),
'x': ('django.db.models.fields.FloatField', [], {}),
'y': ('django.db.models.fields.FloatField', [], {}),
'z': ('django.db.models.fields.FloatField', [], {})
},
'core.systemdata': {
'Meta': {'object_name': 'SystemData', 'db_table': "'mapSolarSystems'", 'managed': 'False'},
'constellation': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'systems'", 'db_column': "'constellationID'", 'to': "orm['core.Constellation']"}),
'id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True', 'db_column': "'solarSystemID'"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_column': "'solarSystemName'"}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'systems'", 'db_column': "'regionID'", 'to': "orm['core.Region']"}),
'security': ('django.db.models.fields.FloatField', [], {}),
'x': ('django.db.models.fields.FloatField', [], {}),
'y': ('django.db.models.fields.FloatField', [], {}),
'z': ('django.db.models.fields.FloatField', [], {})
}
}
complete_apps = ['SiteTracker'] | gpl-3.0 | -3,545,544,864,596,970,500 | 74.762376 | 182 | 0.550284 | false |
tealover/nova | nova/network/api.py | 17 | 23539 | # Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import strutils
from nova import exception
from nova.i18n import _LI
from nova.network import base_api
from nova.network import floating_ips
from nova.network import model as network_model
from nova.network import rpcapi as network_rpcapi
from nova import objects
from nova.objects import base as obj_base
from nova import policy
from nova import utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def wrap_check_policy(func):
"""Check policy corresponding to the wrapped methods prior to execution."""
@functools.wraps(func)
def wrapped(self, context, *args, **kwargs):
action = func.__name__
if not self.skip_policy_check:
check_policy(context, action)
return func(self, context, *args, **kwargs)
return wrapped
def check_policy(context, action):
target = {
'project_id': context.project_id,
'user_id': context.user_id,
}
_action = 'network:%s' % action
policy.enforce(context, _action, target)
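# Illustrative note (not part of the original module): wrap_check_policy uses the
# wrapped method's name as the action, so e.g. a call to API.get_floating_ip()
# enforces the "network:get_floating_ip" policy rule against the request context.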
class API(base_api.NetworkAPI):
"""API for doing networking via the nova-network network manager.
This is a pluggable module - other implementations do networking via
other services (such as Neutron).
"""
def __init__(self, **kwargs):
self.network_rpcapi = network_rpcapi.NetworkAPI()
helper = utils.ExceptionHelper
# NOTE(vish): this local version of floating_manager has to convert
# ClientExceptions back since they aren't going over rpc.
self.floating_manager = helper(floating_ips.LocalManager())
super(API, self).__init__(**kwargs)
@wrap_check_policy
def get_all(self, context):
"""Get all the networks.
If it is an admin user then api will return all the
networks. If it is a normal user and nova Flat or FlatDHCP
networking is being used then api will return all
networks. Otherwise api will only return the networks which
belong to the user's project.
"""
if "nova.network.manager.Flat" in CONF.network_manager:
project_only = "allow_none"
else:
project_only = True
try:
return objects.NetworkList.get_all(context,
project_only=project_only)
except exception.NoNetworksFound:
return []
@wrap_check_policy
def get(self, context, network_uuid):
return objects.Network.get_by_uuid(context, network_uuid)
@wrap_check_policy
def create(self, context, **kwargs):
return self.network_rpcapi.create_networks(context, **kwargs)
@wrap_check_policy
def delete(self, context, network_uuid):
network = self.get(context, network_uuid)
if network.project_id is not None:
raise exception.NetworkInUse(network_id=network_uuid)
return self.network_rpcapi.delete_network(context, network_uuid, None)
@wrap_check_policy
def disassociate(self, context, network_uuid):
network = self.get(context, network_uuid)
objects.Network.disassociate(context, network.id,
host=True, project=True)
@wrap_check_policy
def get_fixed_ip(self, context, id):
return objects.FixedIP.get_by_id(context, id)
@wrap_check_policy
def get_fixed_ip_by_address(self, context, address):
return objects.FixedIP.get_by_address(context, address)
@wrap_check_policy
def get_floating_ip(self, context, id):
if not strutils.is_int_like(id):
raise exception.InvalidID(id=id)
return objects.FloatingIP.get_by_id(context, id)
@wrap_check_policy
def get_floating_ip_pools(self, context):
return objects.FloatingIP.get_pool_names(context)
@wrap_check_policy
def get_floating_ip_by_address(self, context, address):
return objects.FloatingIP.get_by_address(context, address)
@wrap_check_policy
def get_floating_ips_by_project(self, context):
return objects.FloatingIPList.get_by_project(context,
context.project_id)
@wrap_check_policy
def get_instance_id_by_floating_address(self, context, address):
fixed_ip = objects.FixedIP.get_by_floating_address(context, address)
if fixed_ip is None:
return None
else:
return fixed_ip.instance_uuid
@wrap_check_policy
def get_vifs_by_instance(self, context, instance):
vifs = objects.VirtualInterfaceList.get_by_instance_uuid(context,
instance.uuid)
for vif in vifs:
if vif.network_id is not None:
network = objects.Network.get_by_id(context, vif.network_id,
project_only='allow_none')
vif.net_uuid = network.uuid
return vifs
@wrap_check_policy
def get_vif_by_mac_address(self, context, mac_address):
vif = objects.VirtualInterface.get_by_address(context,
mac_address)
if vif.network_id is not None:
network = objects.Network.get_by_id(context, vif.network_id,
project_only='allow_none')
vif.net_uuid = network.uuid
return vif
@wrap_check_policy
def allocate_floating_ip(self, context, pool=None):
"""Adds (allocates) a floating ip to a project from a pool."""
return self.floating_manager.allocate_floating_ip(context,
context.project_id, False, pool)
@wrap_check_policy
def release_floating_ip(self, context, address,
affect_auto_assigned=False):
"""Removes (deallocates) a floating ip with address from a project."""
return self.floating_manager.deallocate_floating_ip(context, address,
affect_auto_assigned)
def disassociate_and_release_floating_ip(self, context, instance,
floating_ip):
"""Removes (deallocates) and deletes the floating ip.
This api call was added to allow this to be done in one operation
if using neutron.
"""
address = floating_ip['address']
if floating_ip.get('fixed_ip_id'):
try:
self.disassociate_floating_ip(context, instance, address)
except exception.FloatingIpNotAssociated:
msg = ("Floating ip %s has already been disassociated, "
"perhaps by another concurrent action.") % address
LOG.debug(msg)
# release ip from project
return self.release_floating_ip(context, address)
@wrap_check_policy
@base_api.refresh_cache
def associate_floating_ip(self, context, instance,
floating_address, fixed_address,
affect_auto_assigned=False):
"""Associates a floating ip with a fixed ip.
Ensures floating ip is allocated to the project in context.
Does not verify ownership of the fixed ip. Caller is assumed to have
checked that the instance is properly owned.
"""
orig_instance_uuid = self.floating_manager.associate_floating_ip(
context, floating_address, fixed_address, affect_auto_assigned)
if orig_instance_uuid:
msg_dict = dict(address=floating_address,
instance_id=orig_instance_uuid)
LOG.info(_LI('re-assign floating IP %(address)s from '
'instance %(instance_id)s'), msg_dict)
orig_instance = objects.Instance.get_by_uuid(context,
orig_instance_uuid)
# purge cached nw info for the original instance
base_api.update_instance_cache_with_nw_info(self, context,
orig_instance)
@wrap_check_policy
@base_api.refresh_cache
def disassociate_floating_ip(self, context, instance, address,
affect_auto_assigned=False):
"""Disassociates a floating ip from fixed ip it is associated with."""
return self.floating_manager.disassociate_floating_ip(context, address,
affect_auto_assigned)
@wrap_check_policy
@base_api.refresh_cache
def allocate_for_instance(self, context, instance, vpn,
requested_networks, macs=None,
security_groups=None,
dhcp_options=None):
"""Allocates all network structures for an instance.
:param context: The request context.
:param instance: nova.objects.instance.Instance object.
        :param vpn: A boolean; if True, indicates a vpn to access the instance.
:param requested_networks: A dictionary of requested_networks,
Optional value containing network_id, fixed_ip, and port_id.
:param macs: None or a set of MAC addresses that the instance
should use. macs is supplied by the hypervisor driver (contrast
with requested_networks which is user supplied).
:param security_groups: None or security groups to allocate for
instance.
:param dhcp_options: None or a set of key/value pairs that should
determine the DHCP BOOTP response, eg. for PXE booting an instance
configured with the baremetal hypervisor. It is expected that these
are already formatted for the neutron v2 api.
See nova/virt/driver.py:dhcp_options_for_instance for an example.
:returns: network info as from get_instance_nw_info() below
"""
# NOTE(vish): We can't do the floating ip allocation here because
# this is called from compute.manager which shouldn't
# have db access so we do it on the other side of the
# rpc.
flavor = instance.get_flavor()
args = {}
args['vpn'] = vpn
args['requested_networks'] = requested_networks
args['instance_id'] = instance.uuid
args['project_id'] = instance.project_id
args['host'] = instance.host
args['rxtx_factor'] = flavor['rxtx_factor']
args['macs'] = macs
args['dhcp_options'] = dhcp_options
nw_info = self.network_rpcapi.allocate_for_instance(context, **args)
return network_model.NetworkInfo.hydrate(nw_info)
@wrap_check_policy
def deallocate_for_instance(self, context, instance,
requested_networks=None):
"""Deallocates all network structures related to instance."""
# NOTE(vish): We can't do the floating ip deallocation here because
# this is called from compute.manager which shouldn't
# have db access so we do it on the other side of the
# rpc.
if not isinstance(instance, obj_base.NovaObject):
instance = objects.Instance._from_db_object(context,
objects.Instance(), instance)
self.network_rpcapi.deallocate_for_instance(context, instance=instance,
requested_networks=requested_networks)
# NOTE(danms): Here for neutron compatibility
def allocate_port_for_instance(self, context, instance, port_id,
network_id=None, requested_ip=None):
raise NotImplementedError()
# NOTE(danms): Here for neutron compatibility
def deallocate_port_for_instance(self, context, instance, port_id):
raise NotImplementedError()
# NOTE(danms): Here for neutron compatibility
def list_ports(self, *args, **kwargs):
raise NotImplementedError()
# NOTE(danms): Here for neutron compatibility
def show_port(self, *args, **kwargs):
raise NotImplementedError()
@wrap_check_policy
@base_api.refresh_cache
def add_fixed_ip_to_instance(self, context, instance, network_id):
"""Adds a fixed ip to instance from specified network."""
flavor = instance.get_flavor()
args = {'instance_id': instance.uuid,
'rxtx_factor': flavor['rxtx_factor'],
'host': instance.host,
'network_id': network_id}
nw_info = self.network_rpcapi.add_fixed_ip_to_instance(
context, **args)
return network_model.NetworkInfo.hydrate(nw_info)
@wrap_check_policy
@base_api.refresh_cache
def remove_fixed_ip_from_instance(self, context, instance, address):
"""Removes a fixed ip from instance from specified network."""
flavor = instance.get_flavor()
args = {'instance_id': instance.uuid,
'rxtx_factor': flavor['rxtx_factor'],
'host': instance.host,
'address': address}
nw_info = self.network_rpcapi.remove_fixed_ip_from_instance(
context, **args)
return network_model.NetworkInfo.hydrate(nw_info)
@wrap_check_policy
def add_network_to_project(self, context, project_id, network_uuid=None):
"""Force adds another network to a project."""
self.network_rpcapi.add_network_to_project(context, project_id,
network_uuid)
@wrap_check_policy
def associate(self, context, network_uuid, host=base_api.SENTINEL,
project=base_api.SENTINEL):
"""Associate or disassociate host or project to network."""
network = self.get(context, network_uuid)
if host is not base_api.SENTINEL:
if host is None:
objects.Network.disassociate(context, network.id,
host=True, project=False)
else:
network.host = host
network.save()
if project is not base_api.SENTINEL:
if project is None:
objects.Network.disassociate(context, network.id,
host=False, project=True)
else:
objects.Network.associate(context, project,
network_id=network.id, force=True)
@wrap_check_policy
def get_instance_nw_info(self, context, instance, **kwargs):
"""Returns all network info related to an instance."""
result = self._get_instance_nw_info(context, instance)
# NOTE(comstud): Don't update API cell with new info_cache every
# time we pull network info for an instance. The periodic healing
# of info_cache causes too many cells messages. Healing the API
# will happen separately.
base_api.update_instance_cache_with_nw_info(self, context, instance,
result, update_cells=False)
return result
def _get_instance_nw_info(self, context, instance):
"""Returns all network info related to an instance."""
flavor = instance.get_flavor()
args = {'instance_id': instance.uuid,
'rxtx_factor': flavor['rxtx_factor'],
'host': instance.host,
'project_id': instance.project_id}
nw_info = self.network_rpcapi.get_instance_nw_info(context, **args)
return network_model.NetworkInfo.hydrate(nw_info)
@wrap_check_policy
def validate_networks(self, context, requested_networks, num_instances):
"""validate the networks passed at the time of creating
the server.
Return the number of instances that can be successfully allocated
with the requested network configuration.
"""
if requested_networks:
self.network_rpcapi.validate_networks(context,
requested_networks)
# Neutron validation checks and returns how many of num_instances
# instances can be supported by the quota. For Nova network
# this is part of the subsequent quota check, so we just return
# the requested number in this case.
return num_instances
def create_pci_requests_for_sriov_ports(self, context,
pci_requests,
requested_networks):
"""Check requested networks for any SR-IOV port request.
Create a PCI request object for each SR-IOV port, and add it to the
pci_requests object that contains a list of PCI request object.
"""
# This is NOOP for Nova network since it doesn't support SR-IOV.
pass
@wrap_check_policy
def get_dns_domains(self, context):
"""Returns a list of available dns domains.
These can be used to create DNS entries for floating ips.
"""
return self.network_rpcapi.get_dns_domains(context)
@wrap_check_policy
def add_dns_entry(self, context, address, name, dns_type, domain):
"""Create specified DNS entry for address."""
args = {'address': address,
'name': name,
'dns_type': dns_type,
'domain': domain}
return self.network_rpcapi.add_dns_entry(context, **args)
@wrap_check_policy
def modify_dns_entry(self, context, name, address, domain):
"""Create specified DNS entry for address."""
args = {'address': address,
'name': name,
'domain': domain}
return self.network_rpcapi.modify_dns_entry(context, **args)
@wrap_check_policy
def delete_dns_entry(self, context, name, domain):
"""Delete the specified dns entry."""
args = {'name': name, 'domain': domain}
return self.network_rpcapi.delete_dns_entry(context, **args)
@wrap_check_policy
def delete_dns_domain(self, context, domain):
"""Delete the specified dns domain."""
return self.network_rpcapi.delete_dns_domain(context, domain=domain)
@wrap_check_policy
def get_dns_entries_by_address(self, context, address, domain):
"""Get entries for address and domain."""
args = {'address': address, 'domain': domain}
return self.network_rpcapi.get_dns_entries_by_address(context, **args)
@wrap_check_policy
def get_dns_entries_by_name(self, context, name, domain):
"""Get entries for name and domain."""
args = {'name': name, 'domain': domain}
return self.network_rpcapi.get_dns_entries_by_name(context, **args)
@wrap_check_policy
def create_private_dns_domain(self, context, domain, availability_zone):
"""Create a private DNS domain with nova availability zone."""
args = {'domain': domain, 'av_zone': availability_zone}
return self.network_rpcapi.create_private_dns_domain(context, **args)
@wrap_check_policy
def create_public_dns_domain(self, context, domain, project=None):
"""Create a public DNS domain with optional nova project."""
args = {'domain': domain, 'project': project}
return self.network_rpcapi.create_public_dns_domain(context, **args)
@wrap_check_policy
def setup_networks_on_host(self, context, instance, host=None,
teardown=False):
"""Setup or teardown the network structures on hosts related to
instance.
"""
host = host or instance.host
# NOTE(tr3buchet): host is passed in cases where we need to setup
# or teardown the networks on a host which has been migrated to/from
        # and instance.host is not yet or is no longer equal to that host
args = {'instance_id': instance.id,
'host': host,
'teardown': teardown}
self.network_rpcapi.setup_networks_on_host(context, **args)
def _get_multi_addresses(self, context, instance):
try:
fixed_ips = objects.FixedIPList.get_by_instance_uuid(
context, instance.uuid)
except exception.FixedIpNotFoundForInstance:
return False, []
addresses = []
for fixed in fixed_ips:
for floating in fixed.floating_ips:
addresses.append(floating.address)
return fixed_ips[0].network.multi_host, addresses
@wrap_check_policy
def migrate_instance_start(self, context, instance, migration):
"""Start to migrate the network of an instance."""
flavor = instance.get_flavor()
args = dict(
instance_uuid=instance.uuid,
rxtx_factor=flavor['rxtx_factor'],
project_id=instance.project_id,
source_compute=migration['source_compute'],
dest_compute=migration['dest_compute'],
floating_addresses=None,
)
multi_host, addresses = self._get_multi_addresses(context, instance)
if multi_host:
args['floating_addresses'] = addresses
args['host'] = migration['source_compute']
self.network_rpcapi.migrate_instance_start(context, **args)
@wrap_check_policy
def migrate_instance_finish(self, context, instance, migration):
"""Finish migrating the network of an instance."""
flavor = instance.get_flavor()
args = dict(
instance_uuid=instance.uuid,
rxtx_factor=flavor['rxtx_factor'],
project_id=instance.project_id,
source_compute=migration['source_compute'],
dest_compute=migration['dest_compute'],
floating_addresses=None,
)
multi_host, addresses = self._get_multi_addresses(context, instance)
if multi_host:
args['floating_addresses'] = addresses
args['host'] = migration['dest_compute']
self.network_rpcapi.migrate_instance_finish(context, **args)
def setup_instance_network_on_host(self, context, instance, host):
"""Setup network for specified instance on host."""
self.migrate_instance_finish(context, instance,
{'source_compute': None,
'dest_compute': host})
def cleanup_instance_network_on_host(self, context, instance, host):
"""Cleanup network for specified instance on host."""
self.migrate_instance_start(context, instance,
{'source_compute': host,
'dest_compute': None})
| apache-2.0 | -4,378,524,625,044,181,500 | 41.184588 | 79 | 0.609797 | false |
crakensio/django_training | lib/python2.7/site-packages/django/contrib/sessions/models.py | 173 | 1997 | from django.db import models
from django.utils.translation import ugettext_lazy as _
class SessionManager(models.Manager):
def encode(self, session_dict):
"""
Returns the given session dictionary serialized and encoded as a string.
"""
return SessionStore().encode(session_dict)
def save(self, session_key, session_dict, expire_date):
s = self.model(session_key, self.encode(session_dict), expire_date)
if session_dict:
s.save()
else:
s.delete() # Clear sessions with no data.
return s
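    # Illustrative usage (key, data and expiry below are hypothetical):
    #   Session.objects.save('2b1189a188b44ad18c35e113ac6ceead',
    #                        {'cart': [1, 2, 3]},
    #                        expire_date)
    # encodes the dict and saves the row, or deletes it when the dict is empty.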
class Session(models.Model):
"""
Django provides full support for anonymous sessions. The session
framework lets you store and retrieve arbitrary data on a
per-site-visitor basis. It stores data on the server side and
abstracts the sending and receiving of cookies. Cookies contain a
session ID -- not the data itself.
The Django sessions framework is entirely cookie-based. It does
not fall back to putting session IDs in URLs. This is an intentional
design decision. Not only does that behavior make URLs ugly, it makes
your site vulnerable to session-ID theft via the "Referer" header.
For complete documentation on using Sessions in your code, consult
the sessions documentation that is shipped with Django (also available
on the Django Web site).
"""
session_key = models.CharField(_('session key'), max_length=40,
primary_key=True)
session_data = models.TextField(_('session data'))
expire_date = models.DateTimeField(_('expire date'), db_index=True)
objects = SessionManager()
class Meta:
db_table = 'django_session'
verbose_name = _('session')
verbose_name_plural = _('sessions')
def get_decoded(self):
return SessionStore().decode(self.session_data)
# At bottom to avoid circular import
from django.contrib.sessions.backends.db import SessionStore
| cc0-1.0 | 3,697,134,009,628,584,000 | 35.981481 | 80 | 0.68002 | false |
ArneBab/pypyjs | website/demo/home/rfk/repos/pypy/lib-python/2.7/plat-mac/lib-scriptpackages/StdSuites/__init__.py | 73 | 12854 | """
Package generated from /Volumes/Sap/System Folder/Extensions/AppleScript
Resource aeut resid 0 Standard Event Suites for English
"""
from warnings import warnpy3k
warnpy3k("In 3.x, the StdSuites package is removed.", stacklevel=2)
import aetools
Error = aetools.Error
import Text_Suite
import AppleScript_Suite
import Standard_Suite
import Macintosh_Connectivity_Clas
import QuickDraw_Graphics_Suite
import QuickDraw_Graphics_Suppleme
import Required_Suite
import Table_Suite
import Type_Names_Suite
_code_to_module = {
'TEXT' : Text_Suite,
'ascr' : AppleScript_Suite,
'core' : Standard_Suite,
'macc' : Macintosh_Connectivity_Clas,
'qdrw' : QuickDraw_Graphics_Suite,
'qdsp' : QuickDraw_Graphics_Suppleme,
'reqd' : Required_Suite,
'tbls' : Table_Suite,
'tpnm' : Type_Names_Suite,
}
_code_to_fullname = {
'TEXT' : ('StdSuites.Text_Suite', 'Text_Suite'),
'ascr' : ('StdSuites.AppleScript_Suite', 'AppleScript_Suite'),
'core' : ('StdSuites.Standard_Suite', 'Standard_Suite'),
'macc' : ('StdSuites.Macintosh_Connectivity_Clas', 'Macintosh_Connectivity_Clas'),
'qdrw' : ('StdSuites.QuickDraw_Graphics_Suite', 'QuickDraw_Graphics_Suite'),
'qdsp' : ('StdSuites.QuickDraw_Graphics_Suppleme', 'QuickDraw_Graphics_Suppleme'),
'reqd' : ('StdSuites.Required_Suite', 'Required_Suite'),
'tbls' : ('StdSuites.Table_Suite', 'Table_Suite'),
'tpnm' : ('StdSuites.Type_Names_Suite', 'Type_Names_Suite'),
}
from Text_Suite import *
from AppleScript_Suite import *
from Standard_Suite import *
from Macintosh_Connectivity_Clas import *
from QuickDraw_Graphics_Suite import *
from QuickDraw_Graphics_Suppleme import *
from Required_Suite import *
from Table_Suite import *
from Type_Names_Suite import *
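# Note (added for clarity; not in the generated original): getbaseclasses lazily
# merges each suite class's _privpropdict/_privelemdict with those of its
# _superclassnames, so every class ends up with complete _propdict/_elemdict
# lookup tables for aetools.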
def getbaseclasses(v):
if not getattr(v, '_propdict', None):
v._propdict = {}
v._elemdict = {}
for superclassname in getattr(v, '_superclassnames', []):
superclass = eval(superclassname)
getbaseclasses(superclass)
v._propdict.update(getattr(superclass, '_propdict', {}))
v._elemdict.update(getattr(superclass, '_elemdict', {}))
v._propdict.update(getattr(v, '_privpropdict', {}))
v._elemdict.update(getattr(v, '_privelemdict', {}))
import StdSuites
#
# Set property and element dictionaries now that all classes have been defined
#
getbaseclasses(graphic_group)
getbaseclasses(oval)
getbaseclasses(graphic_text)
getbaseclasses(graphic_shape)
getbaseclasses(drawing_area)
getbaseclasses(graphic_line)
getbaseclasses(polygon)
getbaseclasses(pixel)
getbaseclasses(rounded_rectangle)
getbaseclasses(graphic_object)
getbaseclasses(arc)
getbaseclasses(pixel_map)
getbaseclasses(rectangle)
getbaseclasses(selection_2d_object)
getbaseclasses(application)
getbaseclasses(document)
getbaseclasses(window)
getbaseclasses(file)
getbaseclasses(alias)
getbaseclasses(insertion_point)
getbaseclasses(character)
getbaseclasses(paragraph)
getbaseclasses(word)
getbaseclasses(text_flow)
getbaseclasses(text_style_info)
getbaseclasses(line)
getbaseclasses(text)
getbaseclasses(AppleTalk_address)
getbaseclasses(address_specification)
getbaseclasses(Token_Ring_address)
getbaseclasses(FireWire_address)
getbaseclasses(bus_slot)
getbaseclasses(SCSI_address)
getbaseclasses(ADB_address)
getbaseclasses(USB_address)
getbaseclasses(device_specification)
getbaseclasses(LocalTalk_address)
getbaseclasses(IP_address)
getbaseclasses(Ethernet_address)
getbaseclasses(graphic_group)
getbaseclasses(drawing_area)
getbaseclasses(cell)
getbaseclasses(column)
getbaseclasses(table)
getbaseclasses(row)
getbaseclasses(small_integer)
getbaseclasses(system_dictionary)
getbaseclasses(color_table)
getbaseclasses(fixed_point)
getbaseclasses(plain_text)
getbaseclasses(type_element_info)
getbaseclasses(machine_location)
getbaseclasses(PostScript_picture)
getbaseclasses(type_suite_info)
getbaseclasses(menu_item)
getbaseclasses(pixel_map_record)
getbaseclasses(small_real)
getbaseclasses(null)
getbaseclasses(rotation)
getbaseclasses(fixed)
getbaseclasses(long_point)
getbaseclasses(target_id)
getbaseclasses(type_property_info)
getbaseclasses(type_parameter_info)
getbaseclasses(long_fixed_point)
getbaseclasses(bounding_rectangle)
getbaseclasses(TIFF_picture)
getbaseclasses(long_fixed)
getbaseclasses(location_reference)
getbaseclasses(version)
getbaseclasses(RGB16_color)
getbaseclasses(double_integer)
getbaseclasses(type_event_info)
getbaseclasses(point)
getbaseclasses(application_dictionary)
getbaseclasses(unsigned_integer)
getbaseclasses(menu)
getbaseclasses(fixed_rectangle)
getbaseclasses(long_fixed_rectangle)
getbaseclasses(type_class_info)
getbaseclasses(RGB96_color)
getbaseclasses(dash_style)
getbaseclasses(scrap_styles)
getbaseclasses(extended_real)
getbaseclasses(long_rectangle)
getbaseclasses(May)
getbaseclasses(string)
getbaseclasses(miles)
getbaseclasses(number_or_date)
getbaseclasses(October)
getbaseclasses(event)
getbaseclasses(Pascal_string)
getbaseclasses(zone)
getbaseclasses(picture)
getbaseclasses(list_or_string)
getbaseclasses(number)
getbaseclasses(Tuesday)
getbaseclasses(version)
getbaseclasses(December)
getbaseclasses(square_kilometres)
getbaseclasses(reference)
getbaseclasses(vector)
getbaseclasses(weekday)
getbaseclasses(Sunday)
getbaseclasses(international_text)
getbaseclasses(seconds)
getbaseclasses(RGB_color)
getbaseclasses(kilometres)
getbaseclasses(styled_Unicode_text)
getbaseclasses(missing_value)
getbaseclasses(metres)
getbaseclasses(number_or_string)
getbaseclasses(list)
getbaseclasses(linked_list)
getbaseclasses(real)
getbaseclasses(encoded_string)
getbaseclasses(list_or_record)
getbaseclasses(Monday)
getbaseclasses(September)
getbaseclasses(anything)
getbaseclasses(property)
getbaseclasses(reference_form)
getbaseclasses(item)
getbaseclasses(grams)
getbaseclasses(record)
getbaseclasses(empty_ae_name_)
getbaseclasses(constant)
getbaseclasses(square_miles)
getbaseclasses(data)
getbaseclasses(Unicode_text)
getbaseclasses(yards)
getbaseclasses(cubic_yards)
getbaseclasses(pounds)
getbaseclasses(cubic_centimetres)
getbaseclasses(text)
getbaseclasses(July)
getbaseclasses(cubic_metres)
getbaseclasses(styled_text)
getbaseclasses(number_2c__date_or_text)
getbaseclasses(feet)
getbaseclasses(February)
getbaseclasses(degrees_Celsius)
getbaseclasses(keystroke)
getbaseclasses(integer)
getbaseclasses(degrees_Fahrenheit)
getbaseclasses(list_2c__record_or_text)
getbaseclasses(date)
getbaseclasses(degrees_Kelvin)
getbaseclasses(centimetres)
getbaseclasses(writing_code)
getbaseclasses(alias_or_string)
getbaseclasses(writing_code_info)
getbaseclasses(text_item)
getbaseclasses(machine)
getbaseclasses(type_class)
getbaseclasses(preposition)
getbaseclasses(Wednesday)
getbaseclasses(upper_case)
getbaseclasses(March)
getbaseclasses(square_feet)
getbaseclasses(November)
getbaseclasses(quarts)
getbaseclasses(alias)
getbaseclasses(January)
getbaseclasses(month)
getbaseclasses(June)
getbaseclasses(August)
getbaseclasses(styled_Clipboard_text)
getbaseclasses(gallons)
getbaseclasses(cubic_inches)
getbaseclasses(Friday)
getbaseclasses(sound)
getbaseclasses(class_)
getbaseclasses(kilograms)
getbaseclasses(script)
getbaseclasses(litres)
getbaseclasses(boolean)
getbaseclasses(square_metres)
getbaseclasses(inches)
getbaseclasses(character)
getbaseclasses(April)
getbaseclasses(ounces)
getbaseclasses(app)
getbaseclasses(handler)
getbaseclasses(C_string)
getbaseclasses(Thursday)
getbaseclasses(square_yards)
getbaseclasses(cubic_feet)
getbaseclasses(Saturday)
getbaseclasses(file_specification)
#
# Indices of types declared in this module
#
_classdeclarations = {
'cpic' : graphic_group,
'covl' : oval,
'cgtx' : graphic_text,
'cgsh' : graphic_shape,
'cdrw' : drawing_area,
'glin' : graphic_line,
'cpgn' : polygon,
'cpxl' : pixel,
'crrc' : rounded_rectangle,
'cgob' : graphic_object,
'carc' : arc,
'cpix' : pixel_map,
'crec' : rectangle,
'csel' : selection_2d_object,
'capp' : application,
'docu' : document,
'cwin' : window,
'file' : file,
'alis' : alias,
'cins' : insertion_point,
'cha ' : character,
'cpar' : paragraph,
'cwor' : word,
'cflo' : text_flow,
'tsty' : text_style_info,
'clin' : line,
'ctxt' : text,
'cat ' : AppleTalk_address,
'cadr' : address_specification,
'ctok' : Token_Ring_address,
'cfw ' : FireWire_address,
'cbus' : bus_slot,
'cscs' : SCSI_address,
'cadb' : ADB_address,
'cusb' : USB_address,
'cdev' : device_specification,
'clt ' : LocalTalk_address,
'cip ' : IP_address,
'cen ' : Ethernet_address,
'cpic' : graphic_group,
'cdrw' : drawing_area,
'ccel' : cell,
'ccol' : column,
'ctbl' : table,
'crow' : row,
'shor' : small_integer,
'aeut' : system_dictionary,
'clrt' : color_table,
'fpnt' : fixed_point,
'TEXT' : plain_text,
'elin' : type_element_info,
'mLoc' : machine_location,
'EPS ' : PostScript_picture,
'suin' : type_suite_info,
'cmen' : menu_item,
'tpmm' : pixel_map_record,
'sing' : small_real,
'null' : null,
'trot' : rotation,
'fixd' : fixed,
'lpnt' : long_point,
'targ' : target_id,
'pinf' : type_property_info,
'pmin' : type_parameter_info,
'lfpt' : long_fixed_point,
'qdrt' : bounding_rectangle,
'TIFF' : TIFF_picture,
'lfxd' : long_fixed,
'insl' : location_reference,
'vers' : version,
'tr16' : RGB16_color,
'comp' : double_integer,
'evin' : type_event_info,
'QDpt' : point,
'aete' : application_dictionary,
'magn' : unsigned_integer,
'cmnu' : menu,
'frct' : fixed_rectangle,
'lfrc' : long_fixed_rectangle,
'gcli' : type_class_info,
'tr96' : RGB96_color,
'tdas' : dash_style,
'styl' : scrap_styles,
'exte' : extended_real,
'lrct' : long_rectangle,
'may ' : May,
'TEXT' : string,
'mile' : miles,
'nd ' : number_or_date,
'oct ' : October,
'evnt' : event,
'pstr' : Pascal_string,
'zone' : zone,
'PICT' : picture,
'ls ' : list_or_string,
'nmbr' : number,
'tue ' : Tuesday,
'vers' : version,
'dec ' : December,
'sqkm' : square_kilometres,
'obj ' : reference,
'vect' : vector,
'wkdy' : weekday,
'sun ' : Sunday,
'itxt' : international_text,
'scnd' : seconds,
'cRGB' : RGB_color,
'kmtr' : kilometres,
'sutx' : styled_Unicode_text,
'msng' : missing_value,
'metr' : metres,
'ns ' : number_or_string,
'list' : list,
'llst' : linked_list,
'doub' : real,
'encs' : encoded_string,
'lr ' : list_or_record,
'mon ' : Monday,
'sep ' : September,
'****' : anything,
'prop' : property,
'kfrm' : reference_form,
'cobj' : item,
'gram' : grams,
'reco' : record,
'undf' : empty_ae_name_,
'enum' : constant,
'sqmi' : square_miles,
'rdat' : data,
'utxt' : Unicode_text,
'yard' : yards,
'cyrd' : cubic_yards,
'lbs ' : pounds,
'ccmt' : cubic_centimetres,
'ctxt' : text,
'jul ' : July,
'cmet' : cubic_metres,
'STXT' : styled_text,
'nds ' : number_2c__date_or_text,
'feet' : feet,
'feb ' : February,
'degc' : degrees_Celsius,
'kprs' : keystroke,
'long' : integer,
'degf' : degrees_Fahrenheit,
'lrs ' : list_2c__record_or_text,
'ldt ' : date,
'degk' : degrees_Kelvin,
'cmtr' : centimetres,
'psct' : writing_code,
'sf ' : alias_or_string,
'citl' : writing_code_info,
'citm' : text_item,
'mach' : machine,
'type' : type_class,
'prep' : preposition,
'wed ' : Wednesday,
'case' : upper_case,
'mar ' : March,
'sqft' : square_feet,
'nov ' : November,
'qrts' : quarts,
'alis' : alias,
'jan ' : January,
'mnth' : month,
'jun ' : June,
'aug ' : August,
'styl' : styled_Clipboard_text,
'galn' : gallons,
'cuin' : cubic_inches,
'fri ' : Friday,
'snd ' : sound,
'pcls' : class_,
'kgrm' : kilograms,
'scpt' : script,
'litr' : litres,
'bool' : boolean,
'sqrm' : square_metres,
'inch' : inches,
'cha ' : character,
'apr ' : April,
'ozs ' : ounces,
'capp' : app,
'hand' : handler,
'cstr' : C_string,
'thu ' : Thursday,
'sqyd' : square_yards,
'cfet' : cubic_feet,
'sat ' : Saturday,
'fss ' : file_specification,
}
class StdSuites(Text_Suite_Events,
AppleScript_Suite_Events,
Standard_Suite_Events,
Macintosh_Connectivity_Clas_Events,
QuickDraw_Graphics_Suite_Events,
QuickDraw_Graphics_Suppleme_Events,
Required_Suite_Events,
Table_Suite_Events,
Type_Names_Suite_Events,
aetools.TalkTo):
_signature = 'ascr'
_moduleName = 'StdSuites'
| mit | -1,221,008,212,516,907,500 | 26.061053 | 86 | 0.699238 | false |
nirmeshk/oh-mainline | vendor/packages/gdata/src/gdata/tlslite/HandshakeSettings.py | 359 | 6364 | """Class for setting handshake parameters."""
from constants import CertificateType
from utils import cryptomath
from utils import cipherfactory
class HandshakeSettings:
"""This class encapsulates various parameters that can be used with
a TLS handshake.
@sort: minKeySize, maxKeySize, cipherNames, certificateTypes,
minVersion, maxVersion
@type minKeySize: int
@ivar minKeySize: The minimum bit length for asymmetric keys.
If the other party tries to use SRP, RSA, or Diffie-Hellman
parameters smaller than this length, an alert will be
signalled. The default is 1023.
@type maxKeySize: int
@ivar maxKeySize: The maximum bit length for asymmetric keys.
If the other party tries to use SRP, RSA, or Diffie-Hellman
parameters larger than this length, an alert will be signalled.
The default is 8193.
@type cipherNames: list
@ivar cipherNames: The allowed ciphers, in order of preference.
The allowed values in this list are 'aes256', 'aes128', '3des', and
'rc4'. If these settings are used with a client handshake, they
determine the order of the ciphersuites offered in the ClientHello
message.
If these settings are used with a server handshake, the server will
choose whichever ciphersuite matches the earliest entry in this
list.
NOTE: If '3des' is used in this list, but TLS Lite can't find an
add-on library that supports 3DES, then '3des' will be silently
removed.
The default value is ['aes256', 'aes128', '3des', 'rc4'].
@type certificateTypes: list
@ivar certificateTypes: The allowed certificate types, in order of
preference.
The allowed values in this list are 'x509' and 'cryptoID'. This
list is only used with a client handshake. The client will
advertise to the server which certificate types are supported, and
will check that the server uses one of the appropriate types.
NOTE: If 'cryptoID' is used in this list, but cryptoIDlib is not
installed, then 'cryptoID' will be silently removed.
@type minVersion: tuple
@ivar minVersion: The minimum allowed SSL/TLS version.
This variable can be set to (3,0) for SSL 3.0, (3,1) for
TLS 1.0, or (3,2) for TLS 1.1. If the other party wishes to
use a lower version, a protocol_version alert will be signalled.
The default is (3,0).
@type maxVersion: tuple
@ivar maxVersion: The maximum allowed SSL/TLS version.
This variable can be set to (3,0) for SSL 3.0, (3,1) for
TLS 1.0, or (3,2) for TLS 1.1. If the other party wishes to
use a higher version, a protocol_version alert will be signalled.
The default is (3,2). (WARNING: Some servers may (improperly)
reject clients which offer support for TLS 1.1. In this case,
try lowering maxVersion to (3,1)).
"""
def __init__(self):
self.minKeySize = 1023
self.maxKeySize = 8193
self.cipherNames = ["aes256", "aes128", "3des", "rc4"]
self.cipherImplementations = ["cryptlib", "openssl", "pycrypto",
"python"]
self.certificateTypes = ["x509", "cryptoID"]
self.minVersion = (3,0)
self.maxVersion = (3,2)
#Filters out options that are not supported
def _filter(self):
other = HandshakeSettings()
other.minKeySize = self.minKeySize
other.maxKeySize = self.maxKeySize
other.cipherNames = self.cipherNames
other.cipherImplementations = self.cipherImplementations
other.certificateTypes = self.certificateTypes
other.minVersion = self.minVersion
other.maxVersion = self.maxVersion
if not cipherfactory.tripleDESPresent:
other.cipherNames = [e for e in self.cipherNames if e != "3des"]
if len(other.cipherNames)==0:
raise ValueError("No supported ciphers")
try:
import cryptoIDlib
except ImportError:
other.certificateTypes = [e for e in self.certificateTypes \
if e != "cryptoID"]
if len(other.certificateTypes)==0:
raise ValueError("No supported certificate types")
if not cryptomath.cryptlibpyLoaded:
other.cipherImplementations = [e for e in \
self.cipherImplementations if e != "cryptlib"]
if not cryptomath.m2cryptoLoaded:
other.cipherImplementations = [e for e in \
other.cipherImplementations if e != "openssl"]
if not cryptomath.pycryptoLoaded:
other.cipherImplementations = [e for e in \
other.cipherImplementations if e != "pycrypto"]
if len(other.cipherImplementations)==0:
raise ValueError("No supported cipher implementations")
if other.minKeySize<512:
raise ValueError("minKeySize too small")
if other.minKeySize>16384:
raise ValueError("minKeySize too large")
if other.maxKeySize<512:
raise ValueError("maxKeySize too small")
if other.maxKeySize>16384:
raise ValueError("maxKeySize too large")
for s in other.cipherNames:
if s not in ("aes256", "aes128", "rc4", "3des"):
raise ValueError("Unknown cipher name: '%s'" % s)
for s in other.cipherImplementations:
if s not in ("cryptlib", "openssl", "python", "pycrypto"):
raise ValueError("Unknown cipher implementation: '%s'" % s)
for s in other.certificateTypes:
if s not in ("x509", "cryptoID"):
raise ValueError("Unknown certificate type: '%s'" % s)
if other.minVersion > other.maxVersion:
raise ValueError("Versions set incorrectly")
if not other.minVersion in ((3,0), (3,1), (3,2)):
raise ValueError("minVersion set incorrectly")
if not other.maxVersion in ((3,0), (3,1), (3,2)):
raise ValueError("maxVersion set incorrectly")
return other
def _getCertificateTypes(self):
l = []
for ct in self.certificateTypes:
if ct == "x509":
l.append(CertificateType.x509)
elif ct == "cryptoID":
l.append(CertificateType.cryptoID)
else:
raise AssertionError()
return l
| agpl-3.0 | 3,388,806,609,643,869,000 | 39.025157 | 76 | 0.641892 | false |
nyasara/azuremono-docker | IronPython-2.7.4/Lib/ctypes/_endian.py | 51 | 2101 | ######################################################################
# This file should be kept compatible with Python 2.3, see PEP 291. #
######################################################################
import sys
from ctypes import *
_array_type = type(c_int * 3)
def _other_endian(typ):
"""Return the type with the 'other' byte order. Simple types like
c_int and so on already have __ctype_be__ and __ctype_le__
    attributes which contain the types; for more complicated types,
    only arrays are supported.
"""
try:
return getattr(typ, _OTHER_ENDIAN)
except AttributeError:
if type(typ) == _array_type:
return _other_endian(typ._type_) * typ._length_
raise TypeError("This type does not support other endian: %s" % typ)
class _swapped_meta(type(Structure)):
def __setattr__(self, attrname, value):
if attrname == "_fields_":
fields = []
for desc in value:
name = desc[0]
typ = desc[1]
rest = desc[2:]
fields.append((name, _other_endian(typ)) + rest)
value = fields
super(_swapped_meta, self).__setattr__(attrname, value)
################################################################
# Note: The Structure metaclass checks for the *presence* (not the
# value!) of a _swappedbytes_ attribute to determine the bit order in
# structures containing bit fields.
if sys.byteorder == "little":
_OTHER_ENDIAN = "__ctype_be__"
LittleEndianStructure = Structure
class BigEndianStructure(Structure):
"""Structure with big endian byte order"""
__metaclass__ = _swapped_meta
_swappedbytes_ = None
elif sys.byteorder == "big":
_OTHER_ENDIAN = "__ctype_le__"
BigEndianStructure = Structure
class LittleEndianStructure(Structure):
"""Structure with little endian byte order"""
__metaclass__ = _swapped_meta
_swappedbytes_ = None
else:
raise RuntimeError("Invalid byteorder")
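# Usage sketch (illustrative names): a structure whose fields are stored
# big-endian regardless of the host byte order. When _fields_ is assigned,
# the _swapped_meta metaclass above replaces each field type via
# _other_endian.
#
#     class Header(BigEndianStructure):
#         _fields_ = [("magic", c_uint32),
#                     ("length", c_uint16)]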
| mit | -5,636,234,720,495,956,000 | 33.016667 | 76 | 0.540219 | false |
duramato/SickRage | tornado/template.py | 142 | 31156 | #!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""A simple template system that compiles templates to Python code.
Basic usage looks like::
t = template.Template("<html>{{ myvalue }}</html>")
print t.generate(myvalue="XXX")
`Loader` is a class that loads templates from a root directory and caches
the compiled templates::
loader = template.Loader("/home/btaylor")
print loader.load("test.html").generate(myvalue="XXX")
We compile all templates to raw Python. Error reporting is currently limited:
errors are raised from the generated Python code, which can be hard to map
back to the original template source. Syntax for the templates::
### base.html
<html>
<head>
<title>{% block title %}Default title{% end %}</title>
</head>
<body>
<ul>
{% for student in students %}
{% block student %}
<li>{{ escape(student.name) }}</li>
{% end %}
{% end %}
</ul>
</body>
</html>
### bold.html
{% extends "base.html" %}
{% block title %}A bolder title{% end %}
{% block student %}
<li><span style="bold">{{ escape(student.name) }}</span></li>
{% end %}
Unlike most other template systems, we do not put any restrictions on the
expressions you can include in your statements. ``if`` and ``for`` blocks get
translated exactly into Python, so you can do complex expressions like::
{% for student in [p for p in people if p.student and p.age > 23] %}
<li>{{ escape(student.name) }}</li>
{% end %}
Translating directly to Python means you can apply functions to expressions
easily, like the ``escape()`` function in the examples above. You can pass
functions in to your template just like any other variable
(In a `.RequestHandler`, override `.RequestHandler.get_template_namespace`)::
### Python code
def add(x, y):
return x + y
template.execute(add=add)
### The template
{{ add(1, 2) }}
We provide the functions `escape() <.xhtml_escape>`, `.url_escape()`,
`.json_encode()`, and `.squeeze()` to all templates by default.
Typical applications do not create `Template` or `Loader` instances by
hand, but instead use the `~.RequestHandler.render` and
`~.RequestHandler.render_string` methods of
`tornado.web.RequestHandler`, which load templates automatically based
on the ``template_path`` `.Application` setting.
Variable names beginning with ``_tt_`` are reserved by the template
system and should not be used by application code.
Syntax Reference
----------------
Template expressions are surrounded by double curly braces: ``{{ ... }}``.
The contents may be any python expression, which will be escaped according
to the current autoescape setting and inserted into the output. Other
template directives use ``{% %}``. These tags may be escaped as ``{{!``
and ``{%!`` if you need to include a literal ``{{`` or ``{%`` in the output.
To comment out a section so that it is omitted from the output, surround it
with ``{# ... #}``.
``{% apply *function* %}...{% end %}``
Applies a function to the output of all template code between ``apply``
and ``end``::
{% apply linkify %}{{name}} said: {{message}}{% end %}
Note that as an implementation detail apply blocks are implemented
as nested functions and thus may interact strangely with variables
set via ``{% set %}``, or the use of ``{% break %}`` or ``{% continue %}``
within loops.
``{% autoescape *function* %}``
Sets the autoescape mode for the current file. This does not affect
other files, even those referenced by ``{% include %}``. Note that
autoescaping can also be configured globally, at the `.Application`
or `Loader`.::
{% autoescape xhtml_escape %}
{% autoescape None %}
``{% block *name* %}...{% end %}``
Indicates a named, replaceable block for use with ``{% extends %}``.
Blocks in the parent template will be replaced with the contents of
the same-named block in a child template.::
<!-- base.html -->
<title>{% block title %}Default title{% end %}</title>
<!-- mypage.html -->
{% extends "base.html" %}
{% block title %}My page title{% end %}
``{% comment ... %}``
A comment which will be removed from the template output. Note that
there is no ``{% end %}`` tag; the comment goes from the word ``comment``
to the closing ``%}`` tag.
``{% extends *filename* %}``
Inherit from another template. Templates that use ``extends`` should
contain one or more ``block`` tags to replace content from the parent
template. Anything in the child template not contained in a ``block``
tag will be ignored. For an example, see the ``{% block %}`` tag.
``{% for *var* in *expr* %}...{% end %}``
Same as the python ``for`` statement. ``{% break %}`` and
``{% continue %}`` may be used inside the loop.
``{% from *x* import *y* %}``
Same as the python ``import`` statement.
``{% if *condition* %}...{% elif *condition* %}...{% else %}...{% end %}``
Conditional statement - outputs the first section whose condition is
true. (The ``elif`` and ``else`` sections are optional)
``{% import *module* %}``
Same as the python ``import`` statement.
``{% include *filename* %}``
Includes another template file. The included file can see all the local
variables as if it were copied directly to the point of the ``include``
directive (the ``{% autoescape %}`` directive is an exception).
Alternately, ``{% module Template(filename, **kwargs) %}`` may be used
to include another template with an isolated namespace.
``{% module *expr* %}``
Renders a `~tornado.web.UIModule`. The output of the ``UIModule`` is
not escaped::
{% module Template("foo.html", arg=42) %}
``UIModules`` are a feature of the `tornado.web.RequestHandler`
class (and specifically its ``render`` method) and will not work
when the template system is used on its own in other contexts.
``{% raw *expr* %}``
Outputs the result of the given expression without autoescaping.
``{% set *x* = *y* %}``
Sets a local variable.
``{% try %}...{% except %}...{% else %}...{% finally %}...{% end %}``
Same as the python ``try`` statement.
``{% while *condition* %}... {% end %}``
Same as the python ``while`` statement. ``{% break %}`` and
``{% continue %}`` may be used inside the loop.
"""
from __future__ import absolute_import, division, print_function, with_statement
import datetime
import linecache
import os.path
import posixpath
import re
import threading
from tornado import escape
from tornado.log import app_log
from tornado.util import ObjectDict, exec_in, unicode_type
try:
from cStringIO import StringIO # py2
except ImportError:
from io import StringIO # py3
_DEFAULT_AUTOESCAPE = "xhtml_escape"
_UNSET = object()
class Template(object):
"""A compiled template.
We compile into Python from the given template_string. You can generate
the template from variables with generate().
"""
# note that the constructor's signature is not extracted with
# autodoc because _UNSET looks like garbage. When changing
# this signature update website/sphinx/template.rst too.
def __init__(self, template_string, name="<string>", loader=None,
compress_whitespace=None, autoescape=_UNSET):
self.name = name
if compress_whitespace is None:
compress_whitespace = name.endswith(".html") or \
name.endswith(".js")
if autoescape is not _UNSET:
self.autoescape = autoescape
elif loader:
self.autoescape = loader.autoescape
else:
self.autoescape = _DEFAULT_AUTOESCAPE
self.namespace = loader.namespace if loader else {}
reader = _TemplateReader(name, escape.native_str(template_string))
self.file = _File(self, _parse(reader, self))
self.code = self._generate_python(loader, compress_whitespace)
self.loader = loader
try:
# Under python2.5, the fake filename used here must match
# the module name used in __name__ below.
# The dont_inherit flag prevents template.py's future imports
# from being applied to the generated code.
self.compiled = compile(
escape.to_unicode(self.code),
"%s.generated.py" % self.name.replace('.', '_'),
"exec", dont_inherit=True)
except Exception:
formatted_code = _format_code(self.code).rstrip()
app_log.error("%s code:\n%s", self.name, formatted_code)
raise
def generate(self, **kwargs):
"""Generate this template with the given arguments."""
namespace = {
"escape": escape.xhtml_escape,
"xhtml_escape": escape.xhtml_escape,
"url_escape": escape.url_escape,
"json_encode": escape.json_encode,
"squeeze": escape.squeeze,
"linkify": escape.linkify,
"datetime": datetime,
"_tt_utf8": escape.utf8, # for internal use
"_tt_string_types": (unicode_type, bytes),
# __name__ and __loader__ allow the traceback mechanism to find
# the generated source code.
"__name__": self.name.replace('.', '_'),
"__loader__": ObjectDict(get_source=lambda name: self.code),
}
namespace.update(self.namespace)
namespace.update(kwargs)
exec_in(self.compiled, namespace)
execute = namespace["_tt_execute"]
# Clear the traceback module's cache of source data now that
# we've generated a new template (mainly for this module's
# unittests, where different tests reuse the same name).
linecache.clearcache()
return execute()
def _generate_python(self, loader, compress_whitespace):
buffer = StringIO()
try:
# named_blocks maps from names to _NamedBlock objects
named_blocks = {}
ancestors = self._get_ancestors(loader)
ancestors.reverse()
for ancestor in ancestors:
ancestor.find_named_blocks(loader, named_blocks)
writer = _CodeWriter(buffer, named_blocks, loader, ancestors[0].template,
compress_whitespace)
ancestors[0].generate(writer)
return buffer.getvalue()
finally:
buffer.close()
def _get_ancestors(self, loader):
ancestors = [self.file]
for chunk in self.file.body.chunks:
if isinstance(chunk, _ExtendsBlock):
if not loader:
raise ParseError("{% extends %} block found, but no "
"template loader")
template = loader.load(chunk.name, self.name)
ancestors.extend(template._get_ancestors(loader))
return ancestors
class BaseLoader(object):
"""Base class for template loaders.
You must use a template loader to use template constructs like
``{% extends %}`` and ``{% include %}``. The loader caches all
templates after they are loaded the first time.
"""
def __init__(self, autoescape=_DEFAULT_AUTOESCAPE, namespace=None):
"""``autoescape`` must be either None or a string naming a function
in the template namespace, such as "xhtml_escape".
"""
self.autoescape = autoescape
self.namespace = namespace or {}
self.templates = {}
# self.lock protects self.templates. It's a reentrant lock
# because templates may load other templates via `include` or
# `extends`. Note that thanks to the GIL this code would be safe
# even without the lock, but could lead to wasted work as multiple
# threads tried to compile the same template simultaneously.
self.lock = threading.RLock()
def reset(self):
"""Resets the cache of compiled templates."""
with self.lock:
self.templates = {}
def resolve_path(self, name, parent_path=None):
"""Converts a possibly-relative path to absolute (used internally)."""
raise NotImplementedError()
def load(self, name, parent_path=None):
"""Loads a template."""
name = self.resolve_path(name, parent_path=parent_path)
with self.lock:
if name not in self.templates:
self.templates[name] = self._create_template(name)
return self.templates[name]
def _create_template(self, name):
raise NotImplementedError()
class Loader(BaseLoader):
"""A template loader that loads from a single root directory.
"""
def __init__(self, root_directory, **kwargs):
super(Loader, self).__init__(**kwargs)
self.root = os.path.abspath(root_directory)
def resolve_path(self, name, parent_path=None):
if parent_path and not parent_path.startswith("<") and \
not parent_path.startswith("/") and \
not name.startswith("/"):
current_path = os.path.join(self.root, parent_path)
file_dir = os.path.dirname(os.path.abspath(current_path))
relative_path = os.path.abspath(os.path.join(file_dir, name))
if relative_path.startswith(self.root):
name = relative_path[len(self.root) + 1:]
return name
def _create_template(self, name):
path = os.path.join(self.root, name)
with open(path, "rb") as f:
template = Template(f.read(), name=name, loader=self)
return template
class DictLoader(BaseLoader):
"""A template loader that loads from a dictionary."""
def __init__(self, dict, **kwargs):
super(DictLoader, self).__init__(**kwargs)
self.dict = dict
def resolve_path(self, name, parent_path=None):
if parent_path and not parent_path.startswith("<") and \
not parent_path.startswith("/") and \
not name.startswith("/"):
file_dir = posixpath.dirname(parent_path)
name = posixpath.normpath(posixpath.join(file_dir, name))
return name
def _create_template(self, name):
return Template(self.dict[name], name=name, loader=self)
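# Sketch of template inheritance resolved through a DictLoader (file names
# here are illustrative); {% extends %} and {% include %} are looked up as
# keys of the dictionary:
#
#     loader = DictLoader({
#         "base.html": "<title>{% block title %}Default{% end %}</title>",
#         "page.html": '{% extends "base.html" %}'
#                      '{% block title %}My page{% end %}',
#     })
#     loader.load("page.html").generate()   # -> b'<title>My page</title>'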
class _Node(object):
def each_child(self):
return ()
def generate(self, writer):
raise NotImplementedError()
def find_named_blocks(self, loader, named_blocks):
for child in self.each_child():
child.find_named_blocks(loader, named_blocks)
class _File(_Node):
def __init__(self, template, body):
self.template = template
self.body = body
self.line = 0
def generate(self, writer):
writer.write_line("def _tt_execute():", self.line)
with writer.indent():
writer.write_line("_tt_buffer = []", self.line)
writer.write_line("_tt_append = _tt_buffer.append", self.line)
self.body.generate(writer)
writer.write_line("return _tt_utf8('').join(_tt_buffer)", self.line)
def each_child(self):
return (self.body,)
class _ChunkList(_Node):
def __init__(self, chunks):
self.chunks = chunks
def generate(self, writer):
for chunk in self.chunks:
chunk.generate(writer)
def each_child(self):
return self.chunks
class _NamedBlock(_Node):
def __init__(self, name, body, template, line):
self.name = name
self.body = body
self.template = template
self.line = line
def each_child(self):
return (self.body,)
def generate(self, writer):
block = writer.named_blocks[self.name]
with writer.include(block.template, self.line):
block.body.generate(writer)
def find_named_blocks(self, loader, named_blocks):
named_blocks[self.name] = self
_Node.find_named_blocks(self, loader, named_blocks)
class _ExtendsBlock(_Node):
def __init__(self, name):
self.name = name
class _IncludeBlock(_Node):
def __init__(self, name, reader, line):
self.name = name
self.template_name = reader.name
self.line = line
def find_named_blocks(self, loader, named_blocks):
included = loader.load(self.name, self.template_name)
included.file.find_named_blocks(loader, named_blocks)
def generate(self, writer):
included = writer.loader.load(self.name, self.template_name)
with writer.include(included, self.line):
included.file.body.generate(writer)
class _ApplyBlock(_Node):
def __init__(self, method, line, body=None):
self.method = method
self.line = line
self.body = body
def each_child(self):
return (self.body,)
def generate(self, writer):
method_name = "_tt_apply%d" % writer.apply_counter
writer.apply_counter += 1
writer.write_line("def %s():" % method_name, self.line)
with writer.indent():
writer.write_line("_tt_buffer = []", self.line)
writer.write_line("_tt_append = _tt_buffer.append", self.line)
self.body.generate(writer)
writer.write_line("return _tt_utf8('').join(_tt_buffer)", self.line)
writer.write_line("_tt_append(_tt_utf8(%s(%s())))" % (
self.method, method_name), self.line)
class _ControlBlock(_Node):
def __init__(self, statement, line, body=None):
self.statement = statement
self.line = line
self.body = body
def each_child(self):
return (self.body,)
def generate(self, writer):
writer.write_line("%s:" % self.statement, self.line)
with writer.indent():
self.body.generate(writer)
# Just in case the body was empty
writer.write_line("pass", self.line)
class _IntermediateControlBlock(_Node):
def __init__(self, statement, line):
self.statement = statement
self.line = line
def generate(self, writer):
# In case the previous block was empty
writer.write_line("pass", self.line)
writer.write_line("%s:" % self.statement, self.line, writer.indent_size() - 1)
class _Statement(_Node):
def __init__(self, statement, line):
self.statement = statement
self.line = line
def generate(self, writer):
writer.write_line(self.statement, self.line)
class _Expression(_Node):
def __init__(self, expression, line, raw=False):
self.expression = expression
self.line = line
self.raw = raw
def generate(self, writer):
writer.write_line("_tt_tmp = %s" % self.expression, self.line)
writer.write_line("if isinstance(_tt_tmp, _tt_string_types):"
" _tt_tmp = _tt_utf8(_tt_tmp)", self.line)
writer.write_line("else: _tt_tmp = _tt_utf8(str(_tt_tmp))", self.line)
if not self.raw and writer.current_template.autoescape is not None:
# In python3 functions like xhtml_escape return unicode,
# so we have to convert to utf8 again.
writer.write_line("_tt_tmp = _tt_utf8(%s(_tt_tmp))" %
writer.current_template.autoescape, self.line)
writer.write_line("_tt_append(_tt_tmp)", self.line)
class _Module(_Expression):
def __init__(self, expression, line):
super(_Module, self).__init__("_tt_modules." + expression, line,
raw=True)
class _Text(_Node):
def __init__(self, value, line):
self.value = value
self.line = line
def generate(self, writer):
value = self.value
# Compress lots of white space to a single character. If the whitespace
# breaks a line, have it continue to break a line, but just with a
# single \n character
if writer.compress_whitespace and "<pre>" not in value:
value = re.sub(r"([\t ]+)", " ", value)
value = re.sub(r"(\s*\n\s*)", "\n", value)
if value:
writer.write_line('_tt_append(%r)' % escape.utf8(value), self.line)
class ParseError(Exception):
"""Raised for template syntax errors."""
pass
class _CodeWriter(object):
def __init__(self, file, named_blocks, loader, current_template,
compress_whitespace):
self.file = file
self.named_blocks = named_blocks
self.loader = loader
self.current_template = current_template
self.compress_whitespace = compress_whitespace
self.apply_counter = 0
self.include_stack = []
self._indent = 0
def indent_size(self):
return self._indent
def indent(self):
class Indenter(object):
def __enter__(_):
self._indent += 1
return self
def __exit__(_, *args):
assert self._indent > 0
self._indent -= 1
return Indenter()
def include(self, template, line):
self.include_stack.append((self.current_template, line))
self.current_template = template
class IncludeTemplate(object):
def __enter__(_):
return self
def __exit__(_, *args):
self.current_template = self.include_stack.pop()[0]
return IncludeTemplate()
def write_line(self, line, line_number, indent=None):
if indent is None:
indent = self._indent
line_comment = ' # %s:%d' % (self.current_template.name, line_number)
if self.include_stack:
ancestors = ["%s:%d" % (tmpl.name, lineno)
for (tmpl, lineno) in self.include_stack]
line_comment += ' (via %s)' % ', '.join(reversed(ancestors))
print(" " * indent + line + line_comment, file=self.file)
class _TemplateReader(object):
def __init__(self, name, text):
self.name = name
self.text = text
self.line = 1
self.pos = 0
def find(self, needle, start=0, end=None):
assert start >= 0, start
pos = self.pos
start += pos
if end is None:
index = self.text.find(needle, start)
else:
end += pos
assert end >= start
index = self.text.find(needle, start, end)
if index != -1:
index -= pos
return index
def consume(self, count=None):
if count is None:
count = len(self.text) - self.pos
newpos = self.pos + count
self.line += self.text.count("\n", self.pos, newpos)
s = self.text[self.pos:newpos]
self.pos = newpos
return s
def remaining(self):
return len(self.text) - self.pos
def __len__(self):
return self.remaining()
def __getitem__(self, key):
if type(key) is slice:
size = len(self)
start, stop, step = key.indices(size)
if start is None:
start = self.pos
else:
start += self.pos
if stop is not None:
stop += self.pos
return self.text[slice(start, stop, step)]
elif key < 0:
return self.text[key]
else:
return self.text[self.pos + key]
def __str__(self):
return self.text[self.pos:]
def _format_code(code):
lines = code.splitlines()
format = "%%%dd %%s\n" % len(repr(len(lines) + 1))
return "".join([format % (i + 1, line) for (i, line) in enumerate(lines)])
def _parse(reader, template, in_block=None, in_loop=None):
body = _ChunkList([])
while True:
# Find next template directive
curly = 0
while True:
curly = reader.find("{", curly)
if curly == -1 or curly + 1 == reader.remaining():
# EOF
if in_block:
raise ParseError("Missing {%% end %%} block for %s" %
in_block)
body.chunks.append(_Text(reader.consume(), reader.line))
return body
# If the first curly brace is not the start of a special token,
# start searching from the character after it
if reader[curly + 1] not in ("{", "%", "#"):
curly += 1
continue
# When there are more than 2 curlies in a row, use the
# innermost ones. This is useful when generating languages
            # like LaTeX where curlies are also meaningful
if (curly + 2 < reader.remaining() and
reader[curly + 1] == '{' and reader[curly + 2] == '{'):
curly += 1
continue
break
# Append any text before the special token
if curly > 0:
cons = reader.consume(curly)
body.chunks.append(_Text(cons, reader.line))
start_brace = reader.consume(2)
line = reader.line
# Template directives may be escaped as "{{!" or "{%!".
# In this case output the braces and consume the "!".
# This is especially useful in conjunction with jquery templates,
# which also use double braces.
if reader.remaining() and reader[0] == "!":
reader.consume(1)
body.chunks.append(_Text(start_brace, line))
continue
# Comment
if start_brace == "{#":
end = reader.find("#}")
if end == -1:
raise ParseError("Missing end expression #} on line %d" % line)
contents = reader.consume(end).strip()
reader.consume(2)
continue
# Expression
if start_brace == "{{":
end = reader.find("}}")
if end == -1:
raise ParseError("Missing end expression }} on line %d" % line)
contents = reader.consume(end).strip()
reader.consume(2)
if not contents:
raise ParseError("Empty expression on line %d" % line)
body.chunks.append(_Expression(contents, line))
continue
# Block
assert start_brace == "{%", start_brace
end = reader.find("%}")
if end == -1:
raise ParseError("Missing end block %%} on line %d" % line)
contents = reader.consume(end).strip()
reader.consume(2)
if not contents:
raise ParseError("Empty block tag ({%% %%}) on line %d" % line)
operator, space, suffix = contents.partition(" ")
suffix = suffix.strip()
# Intermediate ("else", "elif", etc) blocks
intermediate_blocks = {
"else": set(["if", "for", "while", "try"]),
"elif": set(["if"]),
"except": set(["try"]),
"finally": set(["try"]),
}
allowed_parents = intermediate_blocks.get(operator)
if allowed_parents is not None:
if not in_block:
raise ParseError("%s outside %s block" %
(operator, allowed_parents))
if in_block not in allowed_parents:
raise ParseError("%s block cannot be attached to %s block" % (operator, in_block))
body.chunks.append(_IntermediateControlBlock(contents, line))
continue
# End tag
elif operator == "end":
if not in_block:
raise ParseError("Extra {%% end %%} block on line %d" % line)
return body
elif operator in ("extends", "include", "set", "import", "from",
"comment", "autoescape", "raw", "module"):
if operator == "comment":
continue
if operator == "extends":
suffix = suffix.strip('"').strip("'")
if not suffix:
raise ParseError("extends missing file path on line %d" % line)
block = _ExtendsBlock(suffix)
elif operator in ("import", "from"):
if not suffix:
raise ParseError("import missing statement on line %d" % line)
block = _Statement(contents, line)
elif operator == "include":
suffix = suffix.strip('"').strip("'")
if not suffix:
raise ParseError("include missing file path on line %d" % line)
block = _IncludeBlock(suffix, reader, line)
elif operator == "set":
if not suffix:
raise ParseError("set missing statement on line %d" % line)
block = _Statement(suffix, line)
elif operator == "autoescape":
fn = suffix.strip()
if fn == "None":
fn = None
template.autoescape = fn
continue
elif operator == "raw":
block = _Expression(suffix, line, raw=True)
elif operator == "module":
block = _Module(suffix, line)
body.chunks.append(block)
continue
elif operator in ("apply", "block", "try", "if", "for", "while"):
# parse inner body recursively
if operator in ("for", "while"):
block_body = _parse(reader, template, operator, operator)
elif operator == "apply":
# apply creates a nested function so syntactically it's not
# in the loop.
block_body = _parse(reader, template, operator, None)
else:
block_body = _parse(reader, template, operator, in_loop)
if operator == "apply":
if not suffix:
raise ParseError("apply missing method name on line %d" % line)
block = _ApplyBlock(suffix, line, block_body)
elif operator == "block":
if not suffix:
raise ParseError("block missing name on line %d" % line)
block = _NamedBlock(suffix, block_body, template, line)
else:
block = _ControlBlock(contents, line, block_body)
body.chunks.append(block)
continue
elif operator in ("break", "continue"):
if not in_loop:
raise ParseError("%s outside %s block" % (operator, set(["for", "while"])))
body.chunks.append(_Statement(contents, line))
continue
else:
raise ParseError("unknown operator: %r" % operator)
| gpl-3.0 | 879,205,442,838,889,100 | 35.018497 | 98 | 0.577064 | false |
SDSG-Invenio/invenio | invenio/modules/formatter/format_elements/bfe_arxiv_link.py | 13 | 1776 | # This file is part of Invenio.
# Copyright (C) 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibFormat element - Links to arXiv"""
from cgi import escape
from invenio.base.i18n import gettext_set_language
def format_element(bfo, tag="037__", target="_blank"):
"""
Extracts the arXiv preprint information and
presents it as a direct link towards arXiv.org
"""
_ = gettext_set_language(bfo.lang)
potential_arxiv_ids = bfo.fields(tag)
arxiv_id = ""
for potential_arxiv_id in potential_arxiv_ids:
if potential_arxiv_id.get('9') == 'arXiv' and potential_arxiv_id.get('a', '').startswith('arXiv:'):
arxiv_id = potential_arxiv_id['a'][len('arXiv:'):]
return '<a href="http://arxiv.org/abs/%s" target="%s" alt="%s">%s</a>' % (
escape(arxiv_id, True),
escape(target, True),
escape(_("This article on arXiv.org"), True),
escape(arxiv_id))
return ""
def escape_values(bfo):
"""
Called by BibFormat in order to check if output of this element
should be escaped.
"""
return 0
| gpl-2.0 | -7,887,750,564,828,021,000 | 36.787234 | 107 | 0.664414 | false |
alexandrul-ci/robotframework | src/robot/running/namespace.py | 2 | 18101 | # Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import copy
from itertools import chain
from robot.errors import DataError
from robot.libraries import STDLIBS
from robot.output import LOGGER, Message
from robot.parsing.settings import Library, Variables, Resource
from robot.utils import (eq, find_file, is_string, OrderedDict, printable_name,
seq2str2, RecommendationFinder)
from .usererrorhandler import UserErrorHandler
from .userkeyword import UserLibrary
from .importer import Importer, ImportCache
from .runkwregister import RUN_KW_REGISTER
IMPORTER = Importer()
class Namespace(object):
_default_libraries = ('BuiltIn', 'Reserved', 'Easter')
_library_import_by_path_endings = ('.py', '.java', '.class', '/', os.sep)
def __init__(self, variables, suite, resource):
LOGGER.info("Initializing namespace for test suite '%s'" % suite.longname)
self.variables = variables
self._imports = resource.imports
self._kw_store = KeywordStore(resource)
self._imported_variable_files = ImportCache()
self._suite_name = suite.longname
self._running_test = False
@property
def libraries(self):
return self._kw_store.libraries.values()
def handle_imports(self):
self._import_default_libraries()
self._handle_imports(self._imports)
def _import_default_libraries(self):
for name in self._default_libraries:
self.import_library(name, notify=name == 'BuiltIn')
def _handle_imports(self, import_settings):
for item in import_settings:
try:
if not item.name:
raise DataError('%s setting requires a name' % item.type)
self._import(item)
except DataError as err:
item.report_invalid_syntax(err.message)
def _import(self, import_setting):
action = {'Library': self._import_library,
'Resource': self._import_resource,
'Variables': self._import_variables}[import_setting.type]
action(import_setting)
def import_resource(self, name, overwrite=True):
self._import_resource(Resource(None, name), overwrite=overwrite)
def _import_resource(self, import_setting, overwrite=False):
path = self._resolve_name(import_setting)
self._validate_not_importing_init_file(path)
if overwrite or path not in self._kw_store.resources:
resource = IMPORTER.import_resource(path)
self.variables.set_from_variable_table(resource.variables, overwrite)
user_library = UserLibrary(resource)
self._kw_store.resources[path] = user_library
self._handle_imports(resource.imports)
LOGGER.imported("Resource", user_library.name,
importer=import_setting.source,
source=path)
else:
LOGGER.info("Resource file '%s' already imported by suite '%s'"
% (path, self._suite_name))
def _validate_not_importing_init_file(self, path):
name = os.path.splitext(os.path.basename(path))[0]
if name.lower() == '__init__':
raise DataError("Initialization file '%s' cannot be imported as "
"a resource file." % path)
def import_variables(self, name, args, overwrite=False):
self._import_variables(Variables(None, name, args), overwrite)
def _import_variables(self, import_setting, overwrite=False):
path = self._resolve_name(import_setting)
args = self._resolve_args(import_setting)
if overwrite or (path, args) not in self._imported_variable_files:
self._imported_variable_files.add((path, args))
self.variables.set_from_file(path, args, overwrite)
LOGGER.imported("Variables", os.path.basename(path),
args=list(args),
importer=import_setting.source,
source=path)
else:
msg = "Variable file '%s'" % path
if args:
msg += " with arguments %s" % seq2str2(args)
LOGGER.info("%s already imported by suite '%s'"
% (msg, self._suite_name))
def import_library(self, name, args=None, alias=None, notify=True):
self._import_library(Library(None, name, args=args, alias=alias),
notify=notify)
def _import_library(self, import_setting, notify=True):
name = self._resolve_name(import_setting)
lib = IMPORTER.import_library(name, import_setting.args,
import_setting.alias, self.variables)
if lib.name in self._kw_store.libraries:
LOGGER.info("Test library '%s' already imported by suite '%s'"
% (lib.name, self._suite_name))
return
if notify:
LOGGER.imported("Library", lib.name,
args=list(import_setting.args),
originalname=lib.orig_name,
importer=import_setting.source,
source=lib.source)
self._kw_store.libraries[lib.name] = lib
lib.start_suite()
if self._running_test:
lib.start_test()
def _resolve_name(self, import_setting):
name = import_setting.name
try:
name = self.variables.replace_string(name)
except DataError as err:
self._raise_replacing_vars_failed(import_setting, err)
return self._get_name(name, import_setting)
def _raise_replacing_vars_failed(self, import_setting, err):
raise DataError("Replacing variables from setting '%s' failed: %s"
% (import_setting.type, err.message))
def _get_name(self, name, import_setting):
if import_setting.type == 'Library' and not self._is_library_by_path(name):
if ' ' in name:
# TODO: Remove support for extra spaces in name in RF 3.1.
# https://github.com/robotframework/robotframework/issues/2264
warning = ("Importing library with extra spaces in name like "
"'%s' is deprecated. Remove spaces and use '%s' "
"instead." % (name, name.replace(' ', '')))
import_setting.report_invalid_syntax(warning, 'WARN')
name = name.replace(' ', '')
return name
return find_file(name, import_setting.directory,
file_type=import_setting.type)
def _is_library_by_path(self, path):
return path.lower().endswith(self._library_import_by_path_endings)
def _resolve_args(self, import_setting):
try:
return self.variables.replace_list(import_setting.args)
except DataError as err:
self._raise_replacing_vars_failed(import_setting, err)
def set_search_order(self, new_order):
old_order = self._kw_store.search_order
self._kw_store.search_order = new_order
return old_order
def start_test(self):
self._running_test = True
self.variables.start_test()
for lib in self.libraries:
lib.start_test()
def end_test(self):
self.variables.end_test()
for lib in self.libraries:
lib.end_test()
        self._running_test = False
def start_suite(self):
self.variables.start_suite()
def end_suite(self):
self.variables.end_suite()
for lib in self.libraries:
lib.end_suite()
def start_user_keyword(self):
self.variables.start_keyword()
def end_user_keyword(self):
self.variables.end_keyword()
def get_library_instance(self, libname):
return self._kw_store.get_library(libname).get_instance()
def get_library_instances(self):
return dict((name, lib.get_instance())
for name, lib in self._kw_store.libraries.items())
def reload_library(self, libname_or_instance):
library = self._kw_store.get_library(libname_or_instance)
library.reload()
return library
def get_runner(self, name):
try:
return self._kw_store.get_runner(name)
except DataError as err:
return UserErrorHandler(name, err.message)
class KeywordStore(object):
def __init__(self, resource):
self.user_keywords = UserLibrary(resource,
UserLibrary.TEST_CASE_FILE_TYPE)
self.libraries = OrderedDict()
self.resources = ImportCache()
self.search_order = ()
def get_library(self, name_or_instance):
if name_or_instance is None:
raise DataError("Library can not be None.")
if is_string(name_or_instance):
return self._get_lib_by_name(name_or_instance)
return self._get_lib_by_instance(name_or_instance)
def _get_lib_by_name(self, name):
if name in self.libraries:
return self.libraries[name]
matches = [lib for lib in self.libraries.values() if eq(lib.name, name)]
if len(matches) == 1:
return matches[0]
self._no_library_found(name, multiple=bool(matches))
def _no_library_found(self, name, multiple=False):
if multiple:
raise DataError("Multiple libraries matching '%s' found." % name)
raise DataError("No library '%s' found." % name)
def _get_lib_by_instance(self, instance):
for lib in self.libraries.values():
if lib.get_instance(create=False) is instance:
return lib
self._no_library_found(instance)
def get_runner(self, name):
runner = self._get_runner(name)
if runner is None:
self._raise_no_keyword_found(name)
return runner
def _raise_no_keyword_found(self, name):
msg = "No keyword with name '%s' found." % name
finder = KeywordRecommendationFinder(self.user_keywords,
self.libraries,
self.resources)
recommendations = finder.recommend_similar_keywords(name)
msg = finder.format_recommendations(msg, recommendations)
raise DataError(msg)
def _get_runner(self, name):
if not name:
raise DataError('Keyword name cannot be empty.')
if not is_string(name):
raise DataError('Keyword name must be a string.')
runner = self._get_runner_from_test_case_file(name)
if not runner and '.' in name:
runner = self._get_explicit_runner(name)
if not runner:
runner = self._get_implicit_runner(name)
if not runner:
runner = self._get_bdd_style_runner(name)
return runner
def _get_bdd_style_runner(self, name):
for prefix in ['given ', 'when ', 'then ', 'and ', 'but ']:
if name.lower().startswith(prefix):
runner = self._get_runner(name[len(prefix):])
if runner:
runner = copy.copy(runner)
runner.name = name
return runner
return None
def _get_implicit_runner(self, name):
runner = self._get_runner_from_resource_files(name)
if not runner:
runner = self._get_runner_from_libraries(name)
return runner
def _get_runner_from_test_case_file(self, name):
if name in self.user_keywords.handlers:
return self.user_keywords.handlers.create_runner(name)
def _get_runner_from_resource_files(self, name):
found = [lib.handlers.create_runner(name)
for lib in self.resources.values()
if name in lib.handlers]
if not found:
return None
if len(found) > 1:
found = self._get_runner_based_on_search_order(found)
if len(found) == 1:
return found[0]
self._raise_multiple_keywords_found(name, found)
def _get_runner_from_libraries(self, name):
found = [lib.handlers.create_runner(name) for lib in self.libraries.values()
if name in lib.handlers]
if not found:
return None
if len(found) > 1:
found = self._get_runner_based_on_search_order(found)
if len(found) == 2:
found = self._filter_stdlib_runner(*found)
if len(found) == 1:
return found[0]
self._raise_multiple_keywords_found(name, found)
def _get_runner_based_on_search_order(self, runners):
for libname in self.search_order:
for runner in runners:
if eq(libname, runner.libname):
return [runner]
return runners
def _filter_stdlib_runner(self, runner1, runner2):
stdlibs_without_remote = STDLIBS - set(['Remote'])
if runner1.library.orig_name in stdlibs_without_remote:
standard, custom = runner1, runner2
elif runner2.library.orig_name in stdlibs_without_remote:
standard, custom = runner2, runner1
else:
return [runner1, runner2]
if not RUN_KW_REGISTER.is_run_keyword(custom.library.orig_name, custom.name):
self._custom_and_standard_keyword_conflict_warning(custom, standard)
return [custom]
def _custom_and_standard_keyword_conflict_warning(self, custom, standard):
custom_with_name = standard_with_name = ''
if custom.library.name != custom.library.orig_name:
custom_with_name = " imported as '%s'" % custom.library.name
if standard.library.name != standard.library.orig_name:
standard_with_name = " imported as '%s'" % standard.library.name
warning = Message("Keyword '%s' found both from a custom test library "
"'%s'%s and a standard library '%s'%s. The custom "
"keyword is used. To select explicitly, and to get "
"rid of this warning, use either '%s' or '%s'."
% (standard.name,
custom.library.orig_name, custom_with_name,
standard.library.orig_name, standard_with_name,
custom.longname, standard.longname), level='WARN')
if custom.pre_run_messages:
custom.pre_run_messages.append(warning)
else:
custom.pre_run_messages = [warning]
def _get_explicit_runner(self, name):
found = []
for owner_name, kw_name in self._yield_owner_and_kw_names(name):
found.extend(self._find_keywords(owner_name, kw_name))
if len(found) > 1:
self._raise_multiple_keywords_found(name, found, implicit=False)
return found[0] if found else None
def _yield_owner_and_kw_names(self, full_name):
tokens = full_name.split('.')
for i in range(1, len(tokens)):
yield '.'.join(tokens[:i]), '.'.join(tokens[i:])
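    # For example, 'resource.sub.Keyword' yields ('resource', 'sub.Keyword')
    # and ('resource.sub', 'Keyword'): every split point is tried because the
    # owner (library or resource) name may itself contain dots.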
def _find_keywords(self, owner_name, name):
return [owner.handlers.create_runner(name)
for owner in chain(self.libraries.values(), self.resources.values())
if eq(owner.name, owner_name) and name in owner.handlers]
def _raise_multiple_keywords_found(self, name, found, implicit=True):
error = "Multiple keywords with name '%s' found" % name
if implicit:
error += ". Give the full name of the keyword you want to use"
names = sorted(runner.longname for runner in found)
raise DataError('\n '.join([error+':'] + names))
class KeywordRecommendationFinder(object):
def __init__(self, user_keywords, libraries, resources):
self.user_keywords = user_keywords
self.libraries = libraries
self.resources = resources
def recommend_similar_keywords(self, name):
"""Return keyword names similar to `name`."""
candidates = self._get_candidates('.' in name)
normalizer = lambda name: candidates.get(name, name).lower().replace(
'_', ' ')
finder = RecommendationFinder(normalizer)
return finder.find_recommendations(name, candidates)
@staticmethod
def format_recommendations(msg, recommendations):
return RecommendationFinder.format_recommendations(
msg, recommendations)
def _get_candidates(self, use_full_name):
names = {}
for owner, name in self._get_all_handler_names():
full_name = '%s.%s' % (owner, name) if owner else name
names[full_name] = full_name if use_full_name else name
return names
def _get_all_handler_names(self):
"""Return a list of `(library_name, handler_name)` tuples."""
handlers = [('', printable_name(handler.name, True))
for handler in self.user_keywords.handlers]
for library in chain(self.libraries.values(), self.resources.values()):
if library.name != 'Reserved':
handlers.extend(
((library.name or '',
printable_name(handler.name, code_style=True))
for handler in library.handlers))
# sort handlers to ensure consistent ordering between Jython and Python
return sorted(handlers)
| apache-2.0 | 655,373,593,165,000,000 | 40.138636 | 85 | 0.595768 | false |
zero323/spark | python/pyspark/sql/tests/test_serde.py | 23 | 6188 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import shutil
import tempfile
import time
from pyspark.sql import Row
from pyspark.sql.functions import lit
from pyspark.sql.types import StructType, StructField, DecimalType, BinaryType
from pyspark.testing.sqlutils import ReusedSQLTestCase, UTCOffsetTimezone
class SerdeTests(ReusedSQLTestCase):
def test_serialize_nested_array_and_map(self):
d = [Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")})]
rdd = self.sc.parallelize(d)
df = self.spark.createDataFrame(rdd)
row = df.head()
self.assertEqual(1, len(row.l))
self.assertEqual(1, row.l[0].a)
self.assertEqual("2", row.d["key"].d)
l = df.rdd.map(lambda x: x.l).first()
self.assertEqual(1, len(l))
self.assertEqual('s', l[0].b)
d = df.rdd.map(lambda x: x.d).first()
self.assertEqual(1, len(d))
self.assertEqual(1.0, d["key"].c)
row = df.rdd.map(lambda x: x.d["key"]).first()
self.assertEqual(1.0, row.c)
self.assertEqual("2", row.d)
def test_select_null_literal(self):
df = self.spark.sql("select null as col")
self.assertEqual(Row(col=None), df.first())
def test_struct_in_map(self):
d = [Row(m={Row(i=1): Row(s="")})]
df = self.sc.parallelize(d).toDF()
k, v = list(df.head().m.items())[0]
self.assertEqual(1, k.i)
self.assertEqual("", v.s)
def test_filter_with_datetime(self):
time = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000)
date = time.date()
row = Row(date=date, time=time)
df = self.spark.createDataFrame([row])
self.assertEqual(1, df.filter(df.date == date).count())
self.assertEqual(1, df.filter(df.time == time).count())
self.assertEqual(0, df.filter(df.date > date).count())
self.assertEqual(0, df.filter(df.time > time).count())
def test_filter_with_datetime_timezone(self):
dt1 = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000, tzinfo=UTCOffsetTimezone(0))
dt2 = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000, tzinfo=UTCOffsetTimezone(1))
row = Row(date=dt1)
df = self.spark.createDataFrame([row])
self.assertEqual(0, df.filter(df.date == dt2).count())
self.assertEqual(1, df.filter(df.date > dt2).count())
self.assertEqual(0, df.filter(df.date < dt2).count())
def test_time_with_timezone(self):
day = datetime.date.today()
now = datetime.datetime.now()
ts = time.mktime(now.timetuple())
# class in __main__ is not serializable
from pyspark.testing.sqlutils import UTCOffsetTimezone
utc = UTCOffsetTimezone()
utcnow = datetime.datetime.utcfromtimestamp(ts) # without microseconds
# add microseconds to utcnow (keeping year,month,day,hour,minute,second)
utcnow = datetime.datetime(*(utcnow.timetuple()[:6] + (now.microsecond, utc)))
df = self.spark.createDataFrame([(day, now, utcnow)])
day1, now1, utcnow1 = df.first()
self.assertEqual(day1, day)
self.assertEqual(now, now1)
self.assertEqual(now, utcnow1)
# regression test for SPARK-19561
def test_datetime_at_epoch(self):
epoch = datetime.datetime.fromtimestamp(0)
df = self.spark.createDataFrame([Row(date=epoch)])
first = df.select('date', lit(epoch).alias('lit_date')).first()
self.assertEqual(first['date'], epoch)
self.assertEqual(first['lit_date'], epoch)
def test_decimal(self):
from decimal import Decimal
schema = StructType([StructField("decimal", DecimalType(10, 5))])
df = self.spark.createDataFrame([(Decimal("3.14159"),)], schema)
row = df.select(df.decimal + 1).first()
self.assertEqual(row[0], Decimal("4.14159"))
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
df.write.parquet(tmpPath)
df2 = self.spark.read.parquet(tmpPath)
row = df2.first()
self.assertEqual(row[0], Decimal("3.14159"))
def test_BinaryType_serialization(self):
# Pyrolite version <= 4.9 could not serialize BinaryType with Python3 SPARK-17808
# The empty bytearray is test for SPARK-21534.
schema = StructType([StructField('mybytes', BinaryType())])
data = [[bytearray(b'here is my data')],
[bytearray(b'and here is some more')],
[bytearray(b'')]]
df = self.spark.createDataFrame(data, schema=schema)
df.collect()
def test_int_array_serialization(self):
# Note that this test seems dependent on parallelism.
data = self.spark.sparkContext.parallelize([[1, 2, 3, 4]] * 100, numSlices=12)
df = self.spark.createDataFrame(data, "array<integer>")
self.assertEqual(len(list(filter(lambda r: None in r.value, df.collect()))), 0)
def test_bytes_as_binary_type(self):
df = self.spark.createDataFrame([[b"abcd"]], "col binary")
self.assertEqual(df.first().col, bytearray(b'abcd'))
if __name__ == "__main__":
import unittest
from pyspark.sql.tests.test_serde import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 | 2,659,824,447,473,668,000 | 40.530201 | 89 | 0.641726 | false |
PriceChild/ansible | lib/ansible/modules/cloud/amazon/ec2_vpc_vgw.py | 69 | 20312 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'curated'}
DOCUMENTATION = '''
module: ec2_vpc_vgw
short_description: Create and delete AWS VPN Virtual Gateways.
description:
- Creates AWS VPN Virtual Gateways
- Deletes AWS VPN Virtual Gateways
- Attaches Virtual Gateways to VPCs
- Detaches Virtual Gateways from VPCs
version_added: "2.2"
requirements: [ boto3 ]
options:
state:
description:
- present to ensure resource is created.
- absent to remove resource
required: false
default: present
choices: [ "present", "absent"]
name:
description:
- name of the vgw to be created or deleted
required: false
type:
description:
- type of the virtual gateway to be created
required: false
choices: [ "ipsec.1" ]
vpn_gateway_id:
description:
- vpn gateway id of an existing virtual gateway
required: false
vpc_id:
description:
- the vpc-id of a vpc to attach or detach
required: false
wait_timeout:
description:
- number of seconds to wait for status during vpc attach and detach
required: false
default: 320
tags:
description:
- dictionary of resource tags
required: false
default: null
aliases: [ "resource_tags" ]
author: Nick Aslanidis (@naslanidis)
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
- name: Create a new vgw attached to a specific VPC
ec2_vpc_vgw:
state: present
region: ap-southeast-2
profile: personal
vpc_id: vpc-12345678
name: personal-testing
type: ipsec.1
register: created_vgw
- name: Create a new unattached vgw
ec2_vpc_vgw:
state: present
region: ap-southeast-2
profile: personal
name: personal-testing
type: ipsec.1
tags:
environment: production
owner: ABC
register: created_vgw
- name: Remove a new vgw using the name
ec2_vpc_vgw:
state: absent
region: ap-southeast-2
profile: personal
name: personal-testing
type: ipsec.1
register: deleted_vgw
- name: Remove a new vgw using the vpn_gateway_id
ec2_vpc_vgw:
state: absent
region: ap-southeast-2
profile: personal
vpn_gateway_id: vgw-3a9aa123
register: deleted_vgw
'''
RETURN = '''
result:
description: The result of the create, or delete action.
returned: success
type: dictionary
'''
try:
import json
import time
import botocore
import boto3
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
def get_vgw_info(vgws):
if not isinstance(vgws, list):
return
for vgw in vgws:
vgw_info = {
'id': vgw['VpnGatewayId'],
'type': vgw['Type'],
'state': vgw['State'],
'vpc_id': None,
'tags': dict()
}
for tag in vgw['Tags']:
vgw_info['tags'][tag['Key']] = tag['Value']
if len(vgw['VpcAttachments']) != 0 and vgw['VpcAttachments'][0]['State'] == 'attached':
vgw_info['vpc_id'] = vgw['VpcAttachments'][0]['VpcId']
return vgw_info
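# For illustration (using the sample IDs from the EXAMPLES block above): an
# attached gateway comes back from get_vgw_info() shaped roughly like
#   {'id': 'vgw-3a9aa123', 'type': 'ipsec.1', 'state': 'available',
#    'vpc_id': 'vpc-12345678', 'tags': {'Name': 'personal-testing'}}
# with vpc_id left as None when no VPC attachment is in the 'attached' state.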
def wait_for_status(client, module, vpn_gateway_id, status):
polling_increment_secs = 15
max_retries = (module.params.get('wait_timeout') / polling_increment_secs)
status_achieved = False
for x in range(0, max_retries):
try:
response = find_vgw(client, module, vpn_gateway_id)
if response[0]['VpcAttachments'][0]['State'] == status:
status_achieved = True
break
else:
time.sleep(polling_increment_secs)
except botocore.exceptions.ClientError:
e = get_exception()
module.fail_json(msg=str(e))
result = response
return status_achieved, result
def attach_vgw(client, module, vpn_gateway_id):
params = dict()
params['VpcId'] = module.params.get('vpc_id')
try:
response = client.attach_vpn_gateway(VpnGatewayId=vpn_gateway_id, VpcId=params['VpcId'])
except botocore.exceptions.ClientError:
e = get_exception()
module.fail_json(msg=str(e))
status_achieved, vgw = wait_for_status(client, module, [vpn_gateway_id], 'attached')
if not status_achieved:
module.fail_json(msg='Error waiting for vpc to attach to vgw - please check the AWS console')
result = response
return result
def detach_vgw(client, module, vpn_gateway_id, vpc_id=None):
params = dict()
params['VpcId'] = module.params.get('vpc_id')
if vpc_id:
try:
response = client.detach_vpn_gateway(VpnGatewayId=vpn_gateway_id, VpcId=vpc_id)
except botocore.exceptions.ClientError:
e = get_exception()
module.fail_json(msg=str(e))
else:
try:
response = client.detach_vpn_gateway(VpnGatewayId=vpn_gateway_id, VpcId=params['VpcId'])
except botocore.exceptions.ClientError:
e = get_exception()
module.fail_json(msg=str(e))
status_achieved, vgw = wait_for_status(client, module, [vpn_gateway_id], 'detached')
if not status_achieved:
module.fail_json(msg='Error waiting for vpc to detach from vgw - please check the AWS console')
result = response
return result
def create_vgw(client, module):
params = dict()
params['Type'] = module.params.get('type')
try:
response = client.create_vpn_gateway(Type=params['Type'])
except botocore.exceptions.ClientError:
e = get_exception()
module.fail_json(msg=str(e))
result = response
return result
def delete_vgw(client, module, vpn_gateway_id):
try:
response = client.delete_vpn_gateway(VpnGatewayId=vpn_gateway_id)
except botocore.exceptions.ClientError:
e = get_exception()
module.fail_json(msg=str(e))
#return the deleted VpnGatewayId as this is not included in the above response
result = vpn_gateway_id
return result
def create_tags(client, module, vpn_gateway_id):
params = dict()
try:
response = client.create_tags(Resources=[vpn_gateway_id],Tags=load_tags(module))
except botocore.exceptions.ClientError:
e = get_exception()
module.fail_json(msg=str(e))
result = response
return result
def delete_tags(client, module, vpn_gateway_id, tags_to_delete=None):
params = dict()
if tags_to_delete:
try:
response = client.delete_tags(Resources=[vpn_gateway_id], Tags=tags_to_delete)
except botocore.exceptions.ClientError:
e = get_exception()
module.fail_json(msg=str(e))
else:
try:
response = client.delete_tags(Resources=[vpn_gateway_id])
except botocore.exceptions.ClientError:
e = get_exception()
module.fail_json(msg=str(e))
result = response
return result
def load_tags(module):
tags = []
if module.params.get('tags'):
for name, value in module.params.get('tags').items():
tags.append({'Key': name, 'Value': str(value)})
tags.append({'Key': "Name", 'Value': module.params.get('name')})
else:
tags.append({'Key': "Name", 'Value': module.params.get('name')})
return tags
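# As an example: with module args tags={'environment': 'production'} and
# name='personal-testing', load_tags() returns the EC2-style tag list
#   [{'Key': 'environment', 'Value': 'production'},
#    {'Key': 'Name', 'Value': 'personal-testing'}]
# which is what create_tags() above passes straight to the boto3 client.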
def find_tags(client, module, resource_id=None):
if resource_id:
try:
response = client.describe_tags(Filters=[
{'Name': 'resource-id', 'Values': [resource_id]}
])
except botocore.exceptions.ClientError:
e = get_exception()
module.fail_json(msg=str(e))
result = response
return result
def check_tags(client, module, existing_vgw, vpn_gateway_id):
params = dict()
params['Tags'] = module.params.get('tags')
vgw = existing_vgw
changed = False
tags_list = {}
#format tags for comparison
for tags in existing_vgw[0]['Tags']:
if tags['Key'] != 'Name':
tags_list[tags['Key']] = tags['Value']
# if existing tags don't match the tags arg, delete existing and recreate with new list
if params['Tags'] is not None and tags_list != params['Tags']:
delete_tags(client, module, vpn_gateway_id)
create_tags(client, module, vpn_gateway_id)
vgw = find_vgw(client, module)
changed = True
#if no tag args are supplied, delete any existing tags with the exception of the name tag
if params['Tags'] is None and tags_list != {}:
tags_to_delete = []
for tags in existing_vgw[0]['Tags']:
if tags['Key'] != 'Name':
tags_to_delete.append(tags)
delete_tags(client, module, vpn_gateway_id, tags_to_delete)
vgw = find_vgw(client, module)
changed = True
return vgw, changed
def find_vpc(client, module):
params = dict()
params['vpc_id'] = module.params.get('vpc_id')
if params['vpc_id']:
try:
response = client.describe_vpcs(VpcIds=[params['vpc_id']])
except botocore.exceptions.ClientError:
e = get_exception()
module.fail_json(msg=str(e))
result = response
return result
def find_vgw(client, module, vpn_gateway_id=None):
params = dict()
params['Name'] = module.params.get('name')
params['Type'] = module.params.get('type')
params['State'] = module.params.get('state')
if params['State'] == 'present':
try:
response = client.describe_vpn_gateways(Filters=[
{'Name': 'type', 'Values': [params['Type']]},
{'Name': 'tag:Name', 'Values': [params['Name']]}
])
except botocore.exceptions.ClientError:
e = get_exception()
module.fail_json(msg=str(e))
else:
if vpn_gateway_id:
try:
response = client.describe_vpn_gateways(VpnGatewayIds=vpn_gateway_id)
except botocore.exceptions.ClientError:
e = get_exception()
module.fail_json(msg=str(e))
else:
try:
response = client.describe_vpn_gateways(Filters=[
{'Name': 'type', 'Values': [params['Type']]},
{'Name': 'tag:Name', 'Values': [params['Name']]}
])
except botocore.exceptions.ClientError:
e = get_exception()
module.fail_json(msg=str(e))
result = response['VpnGateways']
return result
def ensure_vgw_present(client, module):
# If an existing vgw name and type matches our args, then a match is considered to have been
# found and we will not create another vgw.
changed = False
params = dict()
result = dict()
params['Name'] = module.params.get('name')
params['VpcId'] = module.params.get('vpc_id')
params['Type'] = module.params.get('type')
params['Tags'] = module.params.get('tags')
params['VpnGatewayIds'] = module.params.get('vpn_gateway_id')
# Check that a name argument has been supplied.
if not module.params.get('name'):
        module.fail_json(msg='A name is required when a status of \'present\' is supplied')
# check if a gateway matching our module args already exists
existing_vgw = find_vgw(client, module)
if existing_vgw != [] and existing_vgw[0]['State'] != 'deleted':
vpn_gateway_id = existing_vgw[0]['VpnGatewayId']
vgw, changed = check_tags(client, module, existing_vgw, vpn_gateway_id)
# if a vpc_id was provided, check if it exists and if it's attached
if params['VpcId']:
# check that the vpc_id exists. If not, an exception is thrown
vpc = find_vpc(client, module)
current_vpc_attachments = existing_vgw[0]['VpcAttachments']
if current_vpc_attachments != [] and current_vpc_attachments[0]['State'] == 'attached':
if current_vpc_attachments[0]['VpcId'] == params['VpcId'] and current_vpc_attachments[0]['State'] == 'attached':
changed = False
else:
# detach the existing vpc from the virtual gateway
vpc_to_detach = current_vpc_attachments[0]['VpcId']
detach_vgw(client, module, vpn_gateway_id, vpc_to_detach)
time.sleep(5)
attached_vgw = attach_vgw(client, module, vpn_gateway_id)
vgw = find_vgw(client, module, [vpn_gateway_id])
changed = True
else:
# attach the vgw to the supplied vpc
attached_vgw = attach_vgw(client, module, vpn_gateway_id)
vgw = find_vgw(client, module, [vpn_gateway_id])
changed = True
# if params['VpcId'] is not provided, check the vgw is attached to a vpc. if so, detach it.
else:
existing_vgw = find_vgw(client, module, [vpn_gateway_id])
if existing_vgw[0]['VpcAttachments'] != []:
if existing_vgw[0]['VpcAttachments'][0]['State'] == 'attached':
# detach the vpc from the vgw
vpc_to_detach = existing_vgw[0]['VpcAttachments'][0]['VpcId']
detach_vgw(client, module, vpn_gateway_id, vpc_to_detach)
changed = True
vgw = find_vgw(client, module, [vpn_gateway_id])
else:
# create a new vgw
new_vgw = create_vgw(client, module)
changed = True
vpn_gateway_id = new_vgw['VpnGateway']['VpnGatewayId']
# tag the new virtual gateway
create_tags(client, module, vpn_gateway_id)
# return current state of the vgw
vgw = find_vgw(client, module, [vpn_gateway_id])
# if a vpc-id was supplied, attempt to attach it to the vgw
if params['VpcId']:
attached_vgw = attach_vgw(client, module, vpn_gateway_id)
changed = True
vgw = find_vgw(client, module, [vpn_gateway_id])
result = get_vgw_info(vgw)
return changed, result
def ensure_vgw_absent(client, module):
# If an existing vgw name and type matches our args, then a match is considered to have been
# found and we will take steps to delete it.
changed = False
params = dict()
result = dict()
params['Name'] = module.params.get('name')
params['VpcId'] = module.params.get('vpc_id')
params['Type'] = module.params.get('type')
params['Tags'] = module.params.get('tags')
params['VpnGatewayIds'] = module.params.get('vpn_gateway_id')
# check if a gateway matching our module args already exists
if params['VpnGatewayIds']:
existing_vgw_with_id = find_vgw(client, module, [params['VpnGatewayIds']])
if existing_vgw_with_id != [] and existing_vgw_with_id[0]['State'] != 'deleted':
existing_vgw = existing_vgw_with_id
if existing_vgw[0]['VpcAttachments'] != [] and existing_vgw[0]['VpcAttachments'][0]['State'] == 'attached':
if params['VpcId']:
if params['VpcId'] != existing_vgw[0]['VpcAttachments'][0]['VpcId']:
module.fail_json(msg='The vpc-id provided does not match the vpc-id currently attached - please check the AWS console')
else:
# detach the vpc from the vgw
detach_vgw(client, module, params['VpnGatewayIds'], params['VpcId'])
deleted_vgw = delete_vgw(client, module, params['VpnGatewayIds'])
changed = True
else:
# attempt to detach any attached vpcs
vpc_to_detach = existing_vgw[0]['VpcAttachments'][0]['VpcId']
detach_vgw(client, module, params['VpnGatewayIds'], vpc_to_detach)
deleted_vgw = delete_vgw(client, module, params['VpnGatewayIds'])
changed = True
else:
# no vpc's are attached so attempt to delete the vgw
deleted_vgw = delete_vgw(client, module, params['VpnGatewayIds'])
changed = True
else:
changed = False
deleted_vgw = "Nothing to do"
else:
#Check that a name and type argument has been supplied if no vgw-id
if not module.params.get('name') or not module.params.get('type'):
            module.fail_json(msg='A name and type are required when no vgw-id is supplied and the status is \'absent\'')
existing_vgw = find_vgw(client, module)
if existing_vgw != [] and existing_vgw[0]['State'] != 'deleted':
vpn_gateway_id = existing_vgw[0]['VpnGatewayId']
if existing_vgw[0]['VpcAttachments'] != [] and existing_vgw[0]['VpcAttachments'][0]['State'] == 'attached':
if params['VpcId']:
if params['VpcId'] != existing_vgw[0]['VpcAttachments'][0]['VpcId']:
module.fail_json(msg='The vpc-id provided does not match the vpc-id currently attached - please check the AWS console')
else:
# detach the vpc from the vgw
detach_vgw(client, module, vpn_gateway_id, params['VpcId'])
#now that the vpc has been detached, delete the vgw
deleted_vgw = delete_vgw(client, module, vpn_gateway_id)
changed = True
else:
# attempt to detach any attached vpcs
vpc_to_detach = existing_vgw[0]['VpcAttachments'][0]['VpcId']
detach_vgw(client, module, vpn_gateway_id, vpc_to_detach)
changed = True
#now that the vpc has been detached, delete the vgw
deleted_vgw = delete_vgw(client, module, vpn_gateway_id)
else:
# no vpc's are attached so attempt to delete the vgw
deleted_vgw = delete_vgw(client, module, vpn_gateway_id)
changed = True
else:
changed = False
deleted_vgw = None
result = deleted_vgw
return changed, result
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state=dict(default='present', choices=['present', 'absent']),
region=dict(required=True),
name=dict(),
vpn_gateway_id=dict(),
vpc_id=dict(),
wait_timeout=dict(type='int', default=320),
type=dict(default='ipsec.1', choices=['ipsec.1']),
tags=dict(default=None, required=False, type='dict', aliases=['resource_tags']),
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO3:
        module.fail_json(msg='json and boto3 are required.')
state = module.params.get('state').lower()
try:
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
client = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_kwargs)
except botocore.exceptions.NoCredentialsError:
e = get_exception()
module.fail_json(msg="Can't authorize connection - "+str(e))
if state == 'present':
(changed, results) = ensure_vgw_present(client, module)
else:
(changed, results) = ensure_vgw_absent(client, module)
module.exit_json(changed=changed, vgw=results)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
| gpl-3.0 | -8,742,478,343,534,994,000 | 32.684909 | 143 | 0.596101 | false |
alisidd/tensorflow | tensorflow/compiler/tests/adam_test.py | 79 | 7193 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Adam."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests.xla_test import XLATestCase
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import adam
def adam_update_numpy(param,
g_t,
t,
m,
v,
alpha=0.001,
beta1=0.9,
beta2=0.999,
epsilon=1e-8):
alpha_t = alpha * np.sqrt(1 - beta2**t) / (1 - beta1**t)
m_t = beta1 * m + (1 - beta1) * g_t
v_t = beta2 * v + (1 - beta2) * g_t * g_t
param_t = param - alpha_t * m_t / (np.sqrt(v_t) + epsilon)
return param_t, m_t, v_t
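# Quick sanity check on the update rule above (illustrative, not used by the
# tests): at t=1 with m = v = 0 the bias-corrected step reduces to
#   alpha * sqrt(1 - beta2) / (1 - beta1) * (1 - beta1) * g
#       / (sqrt((1 - beta2) * g * g) + epsilon)
#   ~= alpha * sign(g)
# so each parameter initially moves by roughly the learning rate (0.001 by
# default), regardless of the gradient's scale.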
class AdamOptimizerTest(XLATestCase):
def testBasic(self):
for dtype in self.float_types:
with self.test_session(), self.test_scope():
variable_scope.get_variable_scope().set_use_resource(True)
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype)
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
grads0 = array_ops.placeholder(dtype)
grads1 = array_ops.placeholder(dtype)
opt = adam.AdamOptimizer()
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
beta1_power, beta2_power = opt._get_beta_accumulators()
# Run 3 steps of Adam
for t in range(1, 4):
self.assertAllCloseAccordingToType(0.9**t, beta1_power.eval())
self.assertAllCloseAccordingToType(0.999**t, beta2_power.eval())
update.run(feed_dict={grads0: grads0_np, grads1: grads1_np})
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, var0.eval())
self.assertAllCloseAccordingToType(var1_np, var1.eval())
def testTensorLearningRate(self):
for dtype in self.float_types:
with self.test_session(), self.test_scope():
variable_scope.get_variable_scope().set_use_resource(True)
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype)
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
grads0 = array_ops.placeholder(dtype)
grads1 = array_ops.placeholder(dtype)
opt = adam.AdamOptimizer(constant_op.constant(0.001))
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
beta1_power, beta2_power = opt._get_beta_accumulators()
# Run 3 steps of Adam
for t in range(1, 4):
self.assertAllCloseAccordingToType(0.9**t, beta1_power.eval())
self.assertAllCloseAccordingToType(0.999**t, beta2_power.eval())
update.run(feed_dict={grads0: grads0_np, grads1: grads1_np})
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, var0.eval())
self.assertAllCloseAccordingToType(var1_np, var1.eval())
def testSharing(self):
for dtype in self.float_types:
with self.test_session(), self.test_scope():
variable_scope.get_variable_scope().set_use_resource(True)
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype)
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
grads0 = array_ops.placeholder(dtype)
grads1 = array_ops.placeholder(dtype)
opt = adam.AdamOptimizer()
update1 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
update2 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
beta1_power, beta2_power = opt._get_beta_accumulators()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
# Run 3 steps of intertwined Adam1 and Adam2.
for t in range(1, 4):
self.assertAllCloseAccordingToType(0.9**t, beta1_power.eval())
self.assertAllCloseAccordingToType(0.999**t, beta2_power.eval())
if t % 2 == 0:
update1.run(feed_dict={grads0: grads0_np, grads1: grads1_np})
else:
update2.run(feed_dict={grads0: grads0_np, grads1: grads1_np})
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, var0.eval())
self.assertAllCloseAccordingToType(var1_np, var1.eval())
if __name__ == "__main__":
test.main()
| apache-2.0 | -3,378,759,170,013,651,500 | 39.869318 | 80 | 0.622411 | false |
moto-timo/ironpython3 | Src/StdLib/Lib/test/test_pep277.py | 10 | 7007 | # Test the Unicode versions of normal file functions
# open, os.open, os.stat. os.listdir, os.rename, os.remove, os.mkdir, os.chdir, os.rmdir
import os
import sys
import unittest
import warnings
from unicodedata import normalize
from test import support
filenames = [
'1_abc',
'2_ascii',
'3_Gr\xfc\xdf-Gott',
'4_\u0393\u03b5\u03b9\u03ac-\u03c3\u03b1\u03c2',
'5_\u0417\u0434\u0440\u0430\u0432\u0441\u0442\u0432\u0443\u0439\u0442\u0435',
'6_\u306b\u307d\u3093',
'7_\u05d4\u05e9\u05e7\u05e6\u05e5\u05e1',
'8_\u66e8\u66e9\u66eb',
'9_\u66e8\u05e9\u3093\u0434\u0393\xdf',
    # Specific code points: fn, NFC(fn) and NFKC(fn) all different
'10_\u1fee\u1ffd',
]
# Mac OS X decomposes Unicode names, using Normal Form D.
# http://developer.apple.com/mac/library/qa/qa2001/qa1173.html
# "However, most volume formats do not follow the exact specification for
# these normal forms. For example, HFS Plus uses a variant of Normal Form D
# in which U+2000 through U+2FFF, U+F900 through U+FAFF, and U+2F800 through
# U+2FAFF are not decomposed."
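# For illustration: NFD decomposes a precomposed character such as '\u00e9'
# (LATIN SMALL LETTER E WITH ACUTE) into 'e\u0301' (an 'e' followed by a
# combining acute accent), which is the kind of renaming HFS Plus performs
# behind the program's back.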
if sys.platform != 'darwin':
filenames.extend([
        # Specific code points: NFC(fn), NFD(fn), NFKC(fn) and NFKD(fn) all different
'11_\u0385\u03d3\u03d4',
'12_\u00a8\u0301\u03d2\u0301\u03d2\u0308', # == NFD('\u0385\u03d3\u03d4')
'13_\u0020\u0308\u0301\u038e\u03ab', # == NFKC('\u0385\u03d3\u03d4')
'14_\u1e9b\u1fc1\u1fcd\u1fce\u1fcf\u1fdd\u1fde\u1fdf\u1fed',
        # Specific code points: fn, NFC(fn) and NFKC(fn) all different
'15_\u1fee\u1ffd\ufad1',
'16_\u2000\u2000\u2000A',
'17_\u2001\u2001\u2001A',
'18_\u2003\u2003\u2003A', # == NFC('\u2001\u2001\u2001A')
'19_\u0020\u0020\u0020A', # '\u0020' == ' ' == NFKC('\u2000') ==
# NFKC('\u2001') == NFKC('\u2003')
])
# Is it Unicode-friendly?
if not os.path.supports_unicode_filenames:
fsencoding = sys.getfilesystemencoding()
try:
for name in filenames:
name.encode(fsencoding)
except UnicodeEncodeError:
raise unittest.SkipTest("only NT+ and systems with "
"Unicode-friendly filesystem encoding")
class UnicodeFileTests(unittest.TestCase):
files = set(filenames)
normal_form = None
def setUp(self):
try:
os.mkdir(support.TESTFN)
except FileExistsError:
pass
self.addCleanup(support.rmtree, support.TESTFN)
files = set()
for name in self.files:
name = os.path.join(support.TESTFN, self.norm(name))
with open(name, 'wb') as f:
f.write((name+'\n').encode("utf-8"))
os.stat(name)
files.add(name)
self.files = files
def norm(self, s):
if self.normal_form:
return normalize(self.normal_form, s)
return s
def _apply_failure(self, fn, filename,
expected_exception=FileNotFoundError,
check_filename=True):
with self.assertRaises(expected_exception) as c:
fn(filename)
exc_filename = c.exception.filename
if check_filename:
self.assertEqual(exc_filename, filename, "Function '%s(%a) failed "
"with bad filename in the exception: %a" %
(fn.__name__, filename, exc_filename))
def test_failures(self):
# Pass non-existing Unicode filenames all over the place.
for name in self.files:
name = "not_" + name
self._apply_failure(open, name)
self._apply_failure(os.stat, name)
self._apply_failure(os.chdir, name)
self._apply_failure(os.rmdir, name)
self._apply_failure(os.remove, name)
self._apply_failure(os.listdir, name)
if sys.platform == 'win32':
# Windows is lunatic. Issue #13366.
_listdir_failure = NotADirectoryError, FileNotFoundError
else:
_listdir_failure = NotADirectoryError
def test_open(self):
for name in self.files:
f = open(name, 'wb')
f.write((name+'\n').encode("utf-8"))
f.close()
os.stat(name)
self._apply_failure(os.listdir, name, self._listdir_failure)
# Skip the test on darwin, because darwin does normalize the filename to
    # NFD (a variant of Unicode NFD form). Normalizing the filename to NFC, NFKC
    # or NFKD in Python is useless, because darwin will normalize it later and so
# open(), os.stat(), etc. don't raise any exception.
@unittest.skipIf(sys.platform == 'darwin', 'irrelevant test on Mac OS X')
def test_normalize(self):
files = set(self.files)
others = set()
for nf in set(['NFC', 'NFD', 'NFKC', 'NFKD']):
others |= set(normalize(nf, file) for file in files)
others -= files
for name in others:
self._apply_failure(open, name)
self._apply_failure(os.stat, name)
self._apply_failure(os.chdir, name)
self._apply_failure(os.rmdir, name)
self._apply_failure(os.remove, name)
self._apply_failure(os.listdir, name)
# Skip the test on darwin, because darwin uses a normalization different
# than Python NFD normalization: filenames are different even if we use
# Python NFD normalization.
@unittest.skipIf(sys.platform == 'darwin', 'irrelevant test on Mac OS X')
def test_listdir(self):
sf0 = set(self.files)
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
f1 = os.listdir(support.TESTFN.encode(sys.getfilesystemencoding()))
f2 = os.listdir(support.TESTFN)
sf2 = set(os.path.join(support.TESTFN, f) for f in f2)
self.assertEqual(sf0, sf2, "%a != %a" % (sf0, sf2))
self.assertEqual(len(f1), len(f2))
def test_rename(self):
for name in self.files:
os.rename(name, "tmp")
os.rename("tmp", name)
def test_directory(self):
dirname = os.path.join(support.TESTFN, 'Gr\xfc\xdf-\u66e8\u66e9\u66eb')
filename = '\xdf-\u66e8\u66e9\u66eb'
with support.temp_cwd(dirname):
with open(filename, 'wb') as f:
f.write((filename + '\n').encode("utf-8"))
os.access(filename,os.R_OK)
os.remove(filename)
class UnicodeNFCFileTests(UnicodeFileTests):
normal_form = 'NFC'
class UnicodeNFDFileTests(UnicodeFileTests):
normal_form = 'NFD'
class UnicodeNFKCFileTests(UnicodeFileTests):
normal_form = 'NFKC'
class UnicodeNFKDFileTests(UnicodeFileTests):
normal_form = 'NFKD'
def test_main():
support.run_unittest(
UnicodeFileTests,
UnicodeNFCFileTests,
UnicodeNFDFileTests,
UnicodeNFKCFileTests,
UnicodeNFKDFileTests,
)
if __name__ == "__main__":
test_main()
| apache-2.0 | -467,772,535,308,125,630 | 34.933333 | 88 | 0.605823 | false |
bzennn/blog_flask | python/lib/python3.5/site-packages/wheel/metadata.py | 62 | 11561 | """
Tools for converting old- to new-style metadata.
"""
import email.parser
import os.path
import re
import textwrap
from collections import namedtuple, OrderedDict
import pkg_resources
from . import __version__ as wheel_version
from .pkginfo import read_pkg_info
from .util import OrderedDefaultDict
METADATA_VERSION = "2.0"
PLURAL_FIELDS = {"classifier": "classifiers",
"provides_dist": "provides",
"provides_extra": "extras"}
SKIP_FIELDS = set()
CONTACT_FIELDS = (({"email": "author_email", "name": "author"},
"author"),
({"email": "maintainer_email", "name": "maintainer"},
"maintainer"))
# commonly filled out as "UNKNOWN" by distutils:
UNKNOWN_FIELDS = {"author", "author_email", "platform", "home_page", "license"}
# Wheel itself is probably the only program that uses non-extras markers
# in METADATA/PKG-INFO. Support its syntax with the extra at the end only.
EXTRA_RE = re.compile("""^(?P<package>.*?)(;\s*(?P<condition>.*?)(extra == '(?P<extra>.*?)')?)$""")
KEYWORDS_RE = re.compile("[\0-,]+")
MayRequiresKey = namedtuple('MayRequiresKey', ('condition', 'extra'))
def unique(iterable):
"""
Yield unique values in iterable, preserving order.
"""
seen = set()
for value in iterable:
if value not in seen:
seen.add(value)
yield value
def handle_requires(metadata, pkg_info, key):
"""
Place the runtime requirements from pkg_info into metadata.
"""
may_requires = OrderedDefaultDict(list)
for value in sorted(pkg_info.get_all(key)):
extra_match = EXTRA_RE.search(value)
if extra_match:
groupdict = extra_match.groupdict()
condition = groupdict['condition']
extra = groupdict['extra']
package = groupdict['package']
if condition.endswith(' and '):
condition = condition[:-5]
else:
condition, extra = None, None
package = value
key = MayRequiresKey(condition, extra)
may_requires[key].append(package)
if may_requires:
metadata['run_requires'] = []
def sort_key(item):
# Both condition and extra could be None, which can't be compared
# against strings in Python 3.
key, value = item
if key.condition is None:
return ''
return key.condition
for key, value in sorted(may_requires.items(), key=sort_key):
may_requirement = OrderedDict((('requires', value),))
if key.extra:
may_requirement['extra'] = key.extra
if key.condition:
may_requirement['environment'] = key.condition
metadata['run_requires'].append(may_requirement)
if 'extras' not in metadata:
metadata['extras'] = []
metadata['extras'].extend([key.extra for key in may_requires.keys() if key.extra])
def pkginfo_to_dict(path, distribution=None):
"""
Convert PKG-INFO to a prototype Metadata 2.0 (PEP 426) dict.
The description is included under the key ['description'] rather than
being written to a separate file.
path: path to PKG-INFO file
distribution: optional distutils Distribution()
"""
metadata = OrderedDefaultDict(
lambda: OrderedDefaultDict(lambda: OrderedDefaultDict(OrderedDict)))
metadata["generator"] = "bdist_wheel (" + wheel_version + ")"
try:
unicode
pkg_info = read_pkg_info(path)
except NameError:
with open(path, 'rb') as pkg_info_file:
pkg_info = email.parser.Parser().parsestr(pkg_info_file.read().decode('utf-8'))
description = None
if pkg_info['Summary']:
metadata['summary'] = pkginfo_unicode(pkg_info, 'Summary')
del pkg_info['Summary']
if pkg_info['Description']:
description = dedent_description(pkg_info)
del pkg_info['Description']
else:
payload = pkg_info.get_payload()
if isinstance(payload, bytes):
# Avoid a Python 2 Unicode error.
# We still suffer ? glyphs on Python 3.
payload = payload.decode('utf-8')
if payload:
description = payload
if description:
pkg_info['description'] = description
for key in sorted(unique(k.lower() for k in pkg_info.keys())):
low_key = key.replace('-', '_')
if low_key in SKIP_FIELDS:
continue
if low_key in UNKNOWN_FIELDS and pkg_info.get(key) == 'UNKNOWN':
continue
if low_key in sorted(PLURAL_FIELDS):
metadata[PLURAL_FIELDS[low_key]] = pkg_info.get_all(key)
elif low_key == "requires_dist":
handle_requires(metadata, pkg_info, key)
elif low_key == 'provides_extra':
if 'extras' not in metadata:
metadata['extras'] = []
metadata['extras'].extend(pkg_info.get_all(key))
elif low_key == 'home_page':
metadata['extensions']['python.details']['project_urls'] = {'Home': pkg_info[key]}
elif low_key == 'keywords':
metadata['keywords'] = KEYWORDS_RE.split(pkg_info[key])
else:
metadata[low_key] = pkg_info[key]
metadata['metadata_version'] = METADATA_VERSION
if 'extras' in metadata:
metadata['extras'] = sorted(set(metadata['extras']))
# include more information if distribution is available
if distribution:
for requires, attr in (('test_requires', 'tests_require'),):
try:
requirements = getattr(distribution, attr)
if isinstance(requirements, list):
new_requirements = sorted(convert_requirements(requirements))
metadata[requires] = [{'requires': new_requirements}]
except AttributeError:
pass
# handle contacts
contacts = []
for contact_type, role in CONTACT_FIELDS:
contact = OrderedDict()
for key in sorted(contact_type):
if contact_type[key] in metadata:
contact[key] = metadata.pop(contact_type[key])
if contact:
contact['role'] = role
contacts.append(contact)
if contacts:
metadata['extensions']['python.details']['contacts'] = contacts
# convert entry points to exports
try:
with open(os.path.join(os.path.dirname(path), "entry_points.txt"), "r") as ep_file:
ep_map = pkg_resources.EntryPoint.parse_map(ep_file.read())
exports = OrderedDict()
for group, items in sorted(ep_map.items()):
exports[group] = OrderedDict()
for item in sorted(map(str, items.values())):
name, export = item.split(' = ', 1)
exports[group][name] = export
if exports:
metadata['extensions']['python.exports'] = exports
except IOError:
pass
# copy console_scripts entry points to commands
if 'python.exports' in metadata['extensions']:
for (ep_script, wrap_script) in (('console_scripts', 'wrap_console'),
('gui_scripts', 'wrap_gui')):
if ep_script in metadata['extensions']['python.exports']:
metadata['extensions']['python.commands'][wrap_script] = \
metadata['extensions']['python.exports'][ep_script]
return metadata
def requires_to_requires_dist(requirement):
"""Compose the version predicates for requirement in PEP 345 fashion."""
requires_dist = []
for op, ver in requirement.specs:
requires_dist.append(op + ver)
if not requires_dist:
return ''
return " (%s)" % ','.join(sorted(requires_dist))
def convert_requirements(requirements):
"""Yield Requires-Dist: strings for parsed requirements strings."""
for req in requirements:
parsed_requirement = pkg_resources.Requirement.parse(req)
spec = requires_to_requires_dist(parsed_requirement)
extras = ",".join(parsed_requirement.extras)
if extras:
extras = "[%s]" % extras
yield (parsed_requirement.project_name + extras + spec)
def generate_requirements(extras_require):
"""
Convert requirements from a setup()-style dictionary to ('Requires-Dist', 'requirement')
and ('Provides-Extra', 'extra') tuples.
extras_require is a dictionary of {extra: [requirements]} as passed to setup(),
using the empty extra {'': [requirements]} to hold install_requires.
"""
for extra, depends in extras_require.items():
condition = ''
if extra and ':' in extra: # setuptools extra:condition syntax
extra, condition = extra.split(':', 1)
extra = pkg_resources.safe_extra(extra)
if extra:
yield ('Provides-Extra', extra)
if condition:
condition += " and "
condition += "extra == '%s'" % extra
if condition:
condition = '; ' + condition
for new_req in convert_requirements(depends):
yield ('Requires-Dist', new_req + condition)
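# As an example, with a hypothetical extras_require mapping such as
#   {'': ['requests'], 'ssl': ['pyOpenSSL>=0.14']}
# generate_requirements() yields tuples along the lines of
#   ('Requires-Dist', 'requests')
#   ('Provides-Extra', 'ssl')
#   ('Requires-Dist', "pyOpenSSL (>=0.14); extra == 'ssl'")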
def pkginfo_to_metadata(egg_info_path, pkginfo_path):
"""
Convert .egg-info directory with PKG-INFO to the Metadata 1.3 aka
old-draft Metadata 2.0 format.
"""
pkg_info = read_pkg_info(pkginfo_path)
pkg_info.replace_header('Metadata-Version', '2.0')
requires_path = os.path.join(egg_info_path, 'requires.txt')
if os.path.exists(requires_path):
with open(requires_path) as requires_file:
requires = requires_file.read()
for extra, reqs in sorted(pkg_resources.split_sections(requires),
key=lambda x: x[0] or ''):
for item in generate_requirements({extra: reqs}):
pkg_info[item[0]] = item[1]
description = pkg_info['Description']
if description:
pkg_info.set_payload(dedent_description(pkg_info))
del pkg_info['Description']
return pkg_info
def pkginfo_unicode(pkg_info, field):
"""Hack to coax Unicode out of an email Message() - Python 3.3+"""
text = pkg_info[field]
field = field.lower()
if not isinstance(text, str):
if not hasattr(pkg_info, 'raw_items'): # Python 3.2
return str(text)
for item in pkg_info.raw_items():
if item[0].lower() == field:
text = item[1].encode('ascii', 'surrogateescape') \
.decode('utf-8')
break
return text
def dedent_description(pkg_info):
"""
Dedent and convert pkg_info['Description'] to Unicode.
"""
description = pkg_info['Description']
# Python 3 Unicode handling, sorta.
surrogates = False
if not isinstance(description, str):
surrogates = True
description = pkginfo_unicode(pkg_info, 'Description')
description_lines = description.splitlines()
description_dedent = '\n'.join(
# if the first line of long_description is blank,
# the first line here will be indented.
(description_lines[0].lstrip(),
textwrap.dedent('\n'.join(description_lines[1:])),
'\n'))
if surrogates:
description_dedent = description_dedent \
.encode("utf8") \
.decode("ascii", "surrogateescape")
return description_dedent
if __name__ == "__main__":
import sys
import pprint
pprint.pprint(pkginfo_to_dict(sys.argv[1]))
| gpl-3.0 | 3,996,073,833,613,758,500 | 33.204142 | 99 | 0.594845 | false |
antsant/namebench | nb_third_party/dns/rdata.py | 215 | 14860 | # Copyright (C) 2001-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS rdata.
@var _rdata_modules: A dictionary mapping a (rdclass, rdtype) tuple to
the module which implements that type.
@type _rdata_modules: dict
@var _module_prefix: The prefix to use when forming module names. The
default is 'dns.rdtypes'. Changing this value will break the library.
@type _module_prefix: string
@var _hex_chunksize: At most this many characters of the hexstring produced
by _hexify() appear in each whitespace-separated chunk.
@type _hex_chunksize: int"""
import cStringIO
import dns.exception
import dns.rdataclass
import dns.rdatatype
import dns.tokenizer
_hex_chunksize = 32
def _hexify(data, chunksize=None):
"""Convert a binary string into its hex encoding, broken up into chunks
of I{chunksize} characters separated by a space.
@param data: the binary string
@type data: string
@param chunksize: the chunk size. Default is L{dns.rdata._hex_chunksize}
@rtype: string
"""
if chunksize is None:
chunksize = _hex_chunksize
hex = data.encode('hex_codec')
l = len(hex)
if l > chunksize:
chunks = []
i = 0
while i < l:
chunks.append(hex[i : i + chunksize])
i += chunksize
hex = ' '.join(chunks)
return hex
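# For illustration: _hexify('\x01\x02\xff') returns '0102ff', and
# _hexify('\x00' * 20, 8) breaks the 40-character hexstring into
# '00000000 00000000 00000000 00000000 00000000'.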
_base64_chunksize = 32
def _base64ify(data, chunksize=None):
"""Convert a binary string into its base64 encoding, broken up into chunks
of I{chunksize} characters separated by a space.
@param data: the binary string
@type data: string
@param chunksize: the chunk size. Default is
L{dns.rdata._base64_chunksize}
@rtype: string
"""
if chunksize is None:
chunksize = _base64_chunksize
b64 = data.encode('base64_codec')
b64 = b64.replace('\n', '')
l = len(b64)
if l > chunksize:
chunks = []
i = 0
while i < l:
chunks.append(b64[i : i + chunksize])
i += chunksize
b64 = ' '.join(chunks)
return b64
__escaped = {
'"' : True,
'\\' : True,
}
def _escapify(qstring):
"""Escape the characters in a quoted string which need it.
@param qstring: the string
@type qstring: string
@returns: the escaped string
@rtype: string
"""
text = ''
for c in qstring:
if c in __escaped:
text += '\\' + c
elif ord(c) >= 0x20 and ord(c) < 0x7F:
text += c
else:
text += '\\%03d' % ord(c)
return text
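# For illustration: _escapify() backslash-escapes the double quote and the
# backslash itself, turns a non-printable byte such as chr(7) into the
# decimal escape \007, and passes other printable ASCII through unchanged.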
def _truncate_bitmap(what):
"""Determine the index of greatest byte that isn't all zeros, and
return the bitmap that contains all the bytes less than that index.
@param what: a string of octets representing a bitmap.
@type what: string
@rtype: string
"""
for i in xrange(len(what) - 1, -1, -1):
if what[i] != '\x00':
break
return ''.join(what[0 : i + 1])
class Rdata(object):
"""Base class for all DNS rdata types.
"""
__slots__ = ['rdclass', 'rdtype']
def __init__(self, rdclass, rdtype):
"""Initialize an rdata.
@param rdclass: The rdata class
@type rdclass: int
@param rdtype: The rdata type
@type rdtype: int
"""
self.rdclass = rdclass
self.rdtype = rdtype
def covers(self):
"""DNS SIG/RRSIG rdatas apply to a specific type; this type is
returned by the covers() function. If the rdata type is not
SIG or RRSIG, dns.rdatatype.NONE is returned. This is useful when
creating rdatasets, allowing the rdataset to contain only RRSIGs
of a particular type, e.g. RRSIG(NS).
@rtype: int
"""
return dns.rdatatype.NONE
def extended_rdatatype(self):
"""Return a 32-bit type value, the least significant 16 bits of
which are the ordinary DNS type, and the upper 16 bits of which are
the "covered" type, if any.
@rtype: int
"""
return self.covers() << 16 | self.rdtype
def to_text(self, origin=None, relativize=True, **kw):
"""Convert an rdata to text format.
@rtype: string
"""
raise NotImplementedError
def to_wire(self, file, compress = None, origin = None):
"""Convert an rdata to wire format.
@rtype: string
"""
raise NotImplementedError
def to_digestable(self, origin = None):
"""Convert rdata to a format suitable for digesting in hashes. This
is also the DNSSEC canonical form."""
f = cStringIO.StringIO()
self.to_wire(f, None, origin)
return f.getvalue()
def validate(self):
"""Check that the current contents of the rdata's fields are
valid. If you change an rdata by assigning to its fields,
it is a good idea to call validate() when you are done making
changes.
"""
dns.rdata.from_text(self.rdclass, self.rdtype, self.to_text())
def __repr__(self):
covers = self.covers()
if covers == dns.rdatatype.NONE:
ctext = ''
else:
ctext = '(' + dns.rdatatype.to_text(covers) + ')'
return '<DNS ' + dns.rdataclass.to_text(self.rdclass) + ' ' + \
dns.rdatatype.to_text(self.rdtype) + ctext + ' rdata: ' + \
str(self) + '>'
def __str__(self):
return self.to_text()
def _cmp(self, other):
"""Compare an rdata with another rdata of the same rdtype and
rdclass. Return < 0 if self < other in the DNSSEC ordering,
0 if self == other, and > 0 if self > other.
"""
raise NotImplementedError
def __eq__(self, other):
if not isinstance(other, Rdata):
return False
if self.rdclass != other.rdclass or \
self.rdtype != other.rdtype:
return False
return self._cmp(other) == 0
def __ne__(self, other):
if not isinstance(other, Rdata):
return True
if self.rdclass != other.rdclass or \
self.rdtype != other.rdtype:
return True
return self._cmp(other) != 0
def __lt__(self, other):
if not isinstance(other, Rdata) or \
self.rdclass != other.rdclass or \
self.rdtype != other.rdtype:
return NotImplemented
return self._cmp(other) < 0
def __le__(self, other):
if not isinstance(other, Rdata) or \
self.rdclass != other.rdclass or \
self.rdtype != other.rdtype:
return NotImplemented
return self._cmp(other) <= 0
def __ge__(self, other):
if not isinstance(other, Rdata) or \
self.rdclass != other.rdclass or \
self.rdtype != other.rdtype:
return NotImplemented
return self._cmp(other) >= 0
def __gt__(self, other):
if not isinstance(other, Rdata) or \
self.rdclass != other.rdclass or \
self.rdtype != other.rdtype:
return NotImplemented
return self._cmp(other) > 0
def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
"""Build an rdata object from text format.
@param rdclass: The rdata class
@type rdclass: int
@param rdtype: The rdata type
@type rdtype: int
@param tok: The tokenizer
@type tok: dns.tokenizer.Tokenizer
@param origin: The origin to use for relative names
@type origin: dns.name.Name
@param relativize: should names be relativized?
@type relativize: bool
@rtype: dns.rdata.Rdata instance
"""
raise NotImplementedError
from_text = classmethod(from_text)
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
"""Build an rdata object from wire format
@param rdclass: The rdata class
@type rdclass: int
@param rdtype: The rdata type
@type rdtype: int
@param wire: The wire-format message
@type wire: string
        @param current: The offset in wire of the beginning of the rdata.
@type current: int
@param rdlen: The length of the wire-format rdata
@type rdlen: int
@param origin: The origin to use for relative names
@type origin: dns.name.Name
@rtype: dns.rdata.Rdata instance
"""
raise NotImplementedError
from_wire = classmethod(from_wire)
def choose_relativity(self, origin = None, relativize = True):
"""Convert any domain names in the rdata to the specified
relativization.
"""
pass
class GenericRdata(Rdata):
"""Generate Rdata Class
This class is used for rdata types for which we have no better
implementation. It implements the DNS "unknown RRs" scheme.
"""
__slots__ = ['data']
def __init__(self, rdclass, rdtype, data):
super(GenericRdata, self).__init__(rdclass, rdtype)
self.data = data
def to_text(self, origin=None, relativize=True, **kw):
return r'\# %d ' % len(self.data) + _hexify(self.data)
def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
token = tok.get()
if not token.is_identifier() or token.value != '\#':
raise dns.exception.SyntaxError(r'generic rdata does not start with \#')
length = tok.get_int()
chunks = []
while 1:
token = tok.get()
if token.is_eol_or_eof():
break
chunks.append(token.value)
hex = ''.join(chunks)
data = hex.decode('hex_codec')
if len(data) != length:
raise dns.exception.SyntaxError('generic rdata hex data has wrong length')
return cls(rdclass, rdtype, data)
from_text = classmethod(from_text)
def to_wire(self, file, compress = None, origin = None):
file.write(self.data)
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
return cls(rdclass, rdtype, wire[current : current + rdlen])
from_wire = classmethod(from_wire)
def _cmp(self, other):
return cmp(self.data, other.data)
_rdata_modules = {}
_module_prefix = 'dns.rdtypes'
def get_rdata_class(rdclass, rdtype):
def import_module(name):
mod = __import__(name)
components = name.split('.')
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
mod = _rdata_modules.get((rdclass, rdtype))
rdclass_text = dns.rdataclass.to_text(rdclass)
rdtype_text = dns.rdatatype.to_text(rdtype)
rdtype_text = rdtype_text.replace('-', '_')
if not mod:
mod = _rdata_modules.get((dns.rdatatype.ANY, rdtype))
if not mod:
try:
mod = import_module('.'.join([_module_prefix,
rdclass_text, rdtype_text]))
_rdata_modules[(rdclass, rdtype)] = mod
except ImportError:
try:
mod = import_module('.'.join([_module_prefix,
'ANY', rdtype_text]))
_rdata_modules[(dns.rdataclass.ANY, rdtype)] = mod
except ImportError:
mod = None
if mod:
cls = getattr(mod, rdtype_text)
else:
cls = GenericRdata
return cls
def from_text(rdclass, rdtype, tok, origin = None, relativize = True):
"""Build an rdata object from text format.
This function attempts to dynamically load a class which
implements the specified rdata class and type. If there is no
class-and-type-specific implementation, the GenericRdata class
is used.
Once a class is chosen, its from_text() class method is called
with the parameters to this function.
@param rdclass: The rdata class
@type rdclass: int
@param rdtype: The rdata type
@type rdtype: int
@param tok: The tokenizer
@type tok: dns.tokenizer.Tokenizer
@param origin: The origin to use for relative names
@type origin: dns.name.Name
@param relativize: Should names be relativized?
@type relativize: bool
@rtype: dns.rdata.Rdata instance"""
if isinstance(tok, str):
tok = dns.tokenizer.Tokenizer(tok)
cls = get_rdata_class(rdclass, rdtype)
if cls != GenericRdata:
# peek at first token
token = tok.get()
tok.unget(token)
if token.is_identifier() and \
token.value == r'\#':
#
# Known type using the generic syntax. Extract the
# wire form from the generic syntax, and then run
# from_wire on it.
#
rdata = GenericRdata.from_text(rdclass, rdtype, tok, origin,
relativize)
return from_wire(rdclass, rdtype, rdata.data, 0, len(rdata.data),
origin)
return cls.from_text(rdclass, rdtype, tok, origin, relativize)
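# Illustrative usage, assuming the standard dns.rdataclass.IN and
# dns.rdatatype.A constants:
#   rd = from_text(dns.rdataclass.IN, dns.rdatatype.A, '10.0.0.1')
#   rd.to_text()   # -> '10.0.0.1'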
def from_wire(rdclass, rdtype, wire, current, rdlen, origin = None):
"""Build an rdata object from wire format
This function attempts to dynamically load a class which
implements the specified rdata class and type. If there is no
class-and-type-specific implementation, the GenericRdata class
is used.
Once a class is chosen, its from_wire() class method is called
with the parameters to this function.
@param rdclass: The rdata class
@type rdclass: int
@param rdtype: The rdata type
@type rdtype: int
@param wire: The wire-format message
@type wire: string
    @param current: The offset in wire of the beginning of the rdata.
@type current: int
@param rdlen: The length of the wire-format rdata
@type rdlen: int
@param origin: The origin to use for relative names
@type origin: dns.name.Name
@rtype: dns.rdata.Rdata instance"""
cls = get_rdata_class(rdclass, rdtype)
return cls.from_wire(rdclass, rdtype, wire, current, rdlen, origin)
| apache-2.0 | -1,446,574,658,464,633,900 | 31.587719 | 86 | 0.602355 | false |
nguyentran/openviber | tools/scons-local/scons-local-2.0.1/SCons/Environment.py | 61 | 91318 | """SCons.Environment
Base class for construction Environments. These are
the primary objects used to communicate dependency and
construction information to the build engine.
Keyword arguments supplied when the construction Environment
is created are construction variables used to initialize the
Environment.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Environment.py 5134 2010/08/16 23:02:40 bdeegan"
import copy
import os
import sys
import re
import shlex
from collections import UserDict
import SCons.Action
import SCons.Builder
from SCons.Debug import logInstanceCreation
import SCons.Defaults
import SCons.Errors
import SCons.Memoize
import SCons.Node
import SCons.Node.Alias
import SCons.Node.FS
import SCons.Node.Python
import SCons.Platform
import SCons.SConf
import SCons.SConsign
import SCons.Subst
import SCons.Tool
import SCons.Util
import SCons.Warnings
class _Null(object):
pass
_null = _Null
_warn_copy_deprecated = True
_warn_source_signatures_deprecated = True
_warn_target_signatures_deprecated = True
CleanTargets = {}
CalculatorArgs = {}
semi_deepcopy = SCons.Util.semi_deepcopy
# Pull UserError into the global name space for the benefit of
# Environment().SourceSignatures(), which has some import statements
# which seem to mess up its ability to reference SCons directly.
UserError = SCons.Errors.UserError
def alias_builder(env, target, source):
pass
AliasBuilder = SCons.Builder.Builder(action = alias_builder,
target_factory = SCons.Node.Alias.default_ans.Alias,
source_factory = SCons.Node.FS.Entry,
multi = 1,
is_explicit = None,
name='AliasBuilder')
def apply_tools(env, tools, toolpath):
# Store the toolpath in the Environment.
if toolpath is not None:
env['toolpath'] = toolpath
if not tools:
return
# Filter out null tools from the list.
for tool in [_f for _f in tools if _f]:
if SCons.Util.is_List(tool) or isinstance(tool, tuple):
toolname = tool[0]
toolargs = tool[1] # should be a dict of kw args
tool = env.Tool(toolname, **toolargs)
else:
env.Tool(tool)
# These names are (or will be) controlled by SCons; users should never
# set or override them. This warning can optionally be turned off,
# but scons will still ignore the illegal variable names even if it's off.
reserved_construction_var_names = [
'CHANGED_SOURCES',
'CHANGED_TARGETS',
'SOURCE',
'SOURCES',
'TARGET',
'TARGETS',
'UNCHANGED_SOURCES',
'UNCHANGED_TARGETS',
]
future_reserved_construction_var_names = [
#'HOST_OS',
#'HOST_ARCH',
#'HOST_CPU',
]
def copy_non_reserved_keywords(dict):
result = semi_deepcopy(dict)
for k in result.keys():
if k in reserved_construction_var_names:
msg = "Ignoring attempt to set reserved variable `$%s'"
SCons.Warnings.warn(SCons.Warnings.ReservedVariableWarning, msg % k)
del result[k]
return result
def _set_reserved(env, key, value):
msg = "Ignoring attempt to set reserved variable `$%s'"
SCons.Warnings.warn(SCons.Warnings.ReservedVariableWarning, msg % key)
def _set_future_reserved(env, key, value):
env._dict[key] = value
    msg = "`$%s' will be reserved in a future release and setting it will be ignored"
SCons.Warnings.warn(SCons.Warnings.FutureReservedVariableWarning, msg % key)
def _set_BUILDERS(env, key, value):
try:
bd = env._dict[key]
for k in bd.keys():
del bd[k]
except KeyError:
        bd = BuilderDict({}, env)
env._dict[key] = bd
for k, v in value.items():
if not SCons.Builder.is_a_Builder(v):
raise SCons.Errors.UserError('%s is not a Builder.' % repr(v))
bd.update(value)
def _del_SCANNERS(env, key):
del env._dict[key]
env.scanner_map_delete()
def _set_SCANNERS(env, key, value):
env._dict[key] = value
env.scanner_map_delete()
def _delete_duplicates(l, keep_last):
"""Delete duplicates from a sequence, keeping the first or last."""
seen={}
result=[]
if keep_last: # reverse in & out, then keep first
l.reverse()
for i in l:
try:
if i not in seen:
result.append(i)
seen[i]=1
except TypeError:
# probably unhashable. Just keep it.
result.append(i)
if keep_last:
result.reverse()
return result
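# For illustration: _delete_duplicates([1, 2, 1, 3, 2], keep_last=False)
# returns [1, 2, 3], while keep_last=True keeps the final occurrences and
# returns [1, 3, 2].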
# The following is partly based on code in a comment added by Peter
# Shannon at the following page (there called the "transplant" class):
#
# ASPN : Python Cookbook : Dynamically added methods to a class
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/81732
#
# We had independently been using the idiom as BuilderWrapper, but
# factoring out the common parts into this base class, and making
# BuilderWrapper a subclass that overrides __call__() to enforce specific
# Builder calling conventions, simplified some of our higher-layer code.
class MethodWrapper(object):
"""
A generic Wrapper class that associates a method (which can
actually be any callable) with an object. As part of creating this
MethodWrapper object an attribute with the specified (by default,
the name of the supplied method) is added to the underlying object.
When that new "method" is called, our __call__() method adds the
object as the first argument, simulating the Python behavior of
supplying "self" on method calls.
We hang on to the name by which the method was added to the underlying
base class so that we can provide a method to "clone" ourselves onto
a new underlying object being copied (without which we wouldn't need
to save that info).
"""
def __init__(self, object, method, name=None):
if name is None:
name = method.__name__
self.object = object
self.method = method
self.name = name
setattr(self.object, name, self)
def __call__(self, *args, **kwargs):
nargs = (self.object,) + args
return self.method(*nargs, **kwargs)
def clone(self, new_object):
"""
Returns an object that re-binds the underlying "method" to
the specified new object.
"""
return self.__class__(new_object, self.method, self.name)
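# As an example: given a function
#   def Spam(env, arg):
#       return arg * 2
# constructing MethodWrapper(some_env, Spam) makes some_env.Spam(21) call
# Spam(some_env, 21), mimicking an instance method bound to some_env.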
class BuilderWrapper(MethodWrapper):
"""
A MethodWrapper subclass that that associates an environment with
a Builder.
This mainly exists to wrap the __call__() function so that all calls
to Builders can have their argument lists massaged in the same way
(treat a lone argument as the source, treat two arguments as target
then source, make sure both target and source are lists) without
having to have cut-and-paste code to do it.
As a bit of obsessive backwards compatibility, we also intercept
attempts to get or set the "env" or "builder" attributes, which were
the names we used before we put the common functionality into the
MethodWrapper base class. We'll keep this around for a while in case
people shipped Tool modules that reached into the wrapper (like the
Tool/qt.py module does, or did). There shouldn't be a lot of attribute
fetching or setting on these, so a little extra work shouldn't hurt.
"""
def __call__(self, target=None, source=_null, *args, **kw):
if source is _null:
source = target
target = None
if target is not None and not SCons.Util.is_List(target):
target = [target]
if source is not None and not SCons.Util.is_List(source):
source = [source]
return MethodWrapper.__call__(self, target, source, *args, **kw)
def __repr__(self):
return '<BuilderWrapper %s>' % repr(self.name)
def __str__(self):
return self.__repr__()
def __getattr__(self, name):
if name == 'env':
return self.object
elif name == 'builder':
return self.method
else:
raise AttributeError(name)
def __setattr__(self, name, value):
if name == 'env':
self.object = value
elif name == 'builder':
self.method = value
else:
self.__dict__[name] = value
# This allows a Builder to be executed directly
# through the Environment to which it's attached.
# In practice, we shouldn't need this, because
# builders actually get executed through a Node.
# But we do have a unit test for this, and can't
# yet rule out that it would be useful in the
# future, so leave it for now.
#def execute(self, **kw):
# kw['env'] = self.env
# self.builder.execute(**kw)
class BuilderDict(UserDict):
"""This is a dictionary-like class used by an Environment to hold
the Builders. We need to do this because every time someone changes
the Builders in the Environment's BUILDERS dictionary, we must
update the Environment's attributes."""
def __init__(self, dict, env):
# Set self.env before calling the superclass initialization,
# because it will end up calling our other methods, which will
# need to point the values in this dictionary to self.env.
self.env = env
UserDict.__init__(self, dict)
def __semi_deepcopy__(self):
return self.__class__(self.data, self.env)
def __setitem__(self, item, val):
try:
method = getattr(self.env, item).method
except AttributeError:
pass
else:
self.env.RemoveMethod(method)
UserDict.__setitem__(self, item, val)
BuilderWrapper(self.env, val, item)
def __delitem__(self, item):
UserDict.__delitem__(self, item)
delattr(self.env, item)
def update(self, dict):
for i, v in dict.items():
self.__setitem__(i, v)
_is_valid_var = re.compile(r'[_a-zA-Z]\w*$')
def is_valid_construction_var(varstr):
"""Return if the specified string is a legitimate construction
variable.
"""
return _is_valid_var.match(varstr)
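# Hedged examples of the check above (added for illustration only):
#   is_valid_construction_var('CCFLAGS')    # matches
#   is_valid_construction_var('2FOO')       # None: must not start with a digit
#   is_valid_construction_var('FOO-BAR')    # None: '-' is not a word character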
class SubstitutionEnvironment(object):
"""Base class for different flavors of construction environments.
This class contains a minimal set of methods that handle construction
variable expansion and conversion of strings to Nodes, which may or
may not be actually useful as a stand-alone class. Which methods
ended up in this class is pretty arbitrary right now. They're
basically the ones which we've empirically determined are common to
the different construction environment subclasses, and most of the
others that use or touch the underlying dictionary of construction
variables.
Eventually, this class should contain all the methods that we
determine are necessary for a "minimal" interface to the build engine.
A full "native Python" SCons environment has gotten pretty heavyweight
with all of the methods and Tools and construction variables we've
jammed in there, so it would be nice to have a lighter weight
alternative for interfaces that don't need all of the bells and
whistles. (At some point, we'll also probably rename this class
"Base," since that more reflects what we want this class to become,
but because we've released comments that tell people to subclass
Environment.Base to create their own flavors of construction
environment, we'll save that for a future refactoring when this
class actually becomes useful.)
"""
if SCons.Memoize.use_memoizer:
__metaclass__ = SCons.Memoize.Memoized_Metaclass
def __init__(self, **kw):
"""Initialization of an underlying SubstitutionEnvironment class.
"""
if __debug__: logInstanceCreation(self, 'Environment.SubstitutionEnvironment')
self.fs = SCons.Node.FS.get_default_fs()
self.ans = SCons.Node.Alias.default_ans
self.lookup_list = SCons.Node.arg2nodes_lookups
self._dict = kw.copy()
self._init_special()
self.added_methods = []
#self._memo = {}
def _init_special(self):
"""Initial the dispatch tables for special handling of
special construction variables."""
self._special_del = {}
self._special_del['SCANNERS'] = _del_SCANNERS
self._special_set = {}
for key in reserved_construction_var_names:
self._special_set[key] = _set_reserved
for key in future_reserved_construction_var_names:
self._special_set[key] = _set_future_reserved
self._special_set['BUILDERS'] = _set_BUILDERS
self._special_set['SCANNERS'] = _set_SCANNERS
# Freeze the keys of self._special_set in a list for use by
# methods that need to check. (Empirically, list scanning has
# gotten better than dict.has_key() in Python 2.5.)
self._special_set_keys = list(self._special_set.keys())
def __cmp__(self, other):
return cmp(self._dict, other._dict)
def __delitem__(self, key):
special = self._special_del.get(key)
if special:
special(self, key)
else:
del self._dict[key]
def __getitem__(self, key):
return self._dict[key]
def __setitem__(self, key, value):
# This is heavily used. This implementation is the best we have
# according to the timings in bench/env.__setitem__.py.
#
# The "key in self._special_set_keys" test here seems to perform
# pretty well for the number of keys we have. A hard-coded
# list works a little better in Python 2.5, but that has the
# disadvantage of maybe getting out of sync if we ever add more
# variable names. Using self._special_set.has_key() works a
# little better in Python 2.4, but is worse than this test.
# So right now it seems like a good trade-off, but feel free to
# revisit this with bench/env.__setitem__.py as needed (and
# as newer versions of Python come out).
if key in self._special_set_keys:
self._special_set[key](self, key, value)
else:
# If we already have the entry, then it's obviously a valid
# key and we don't need to check. If we do check, using a
# global, pre-compiled regular expression directly is more
# efficient than calling another function or a method.
if key not in self._dict \
and not _is_valid_var.match(key):
raise SCons.Errors.UserError("Illegal construction variable `%s'" % key)
self._dict[key] = value
def get(self, key, default=None):
"""Emulates the get() method of dictionaries."""
return self._dict.get(key, default)
def has_key(self, key):
return key in self._dict
def __contains__(self, key):
return self._dict.__contains__(key)
def items(self):
return list(self._dict.items())
def arg2nodes(self, args, node_factory=_null, lookup_list=_null, **kw):
if node_factory is _null:
node_factory = self.fs.File
if lookup_list is _null:
lookup_list = self.lookup_list
if not args:
return []
args = SCons.Util.flatten(args)
nodes = []
for v in args:
if SCons.Util.is_String(v):
n = None
for l in lookup_list:
n = l(v)
if n is not None:
break
if n is not None:
if SCons.Util.is_String(n):
# n = self.subst(n, raw=1, **kw)
kw['raw'] = 1
n = self.subst(n, **kw)
if node_factory:
n = node_factory(n)
if SCons.Util.is_List(n):
nodes.extend(n)
else:
nodes.append(n)
elif node_factory:
# v = node_factory(self.subst(v, raw=1, **kw))
kw['raw'] = 1
v = node_factory(self.subst(v, **kw))
if SCons.Util.is_List(v):
nodes.extend(v)
else:
nodes.append(v)
else:
nodes.append(v)
return nodes
def gvars(self):
return self._dict
def lvars(self):
return {}
def subst(self, string, raw=0, target=None, source=None, conv=None, executor=None):
"""Recursively interpolates construction variables from the
Environment into the specified string, returning the expanded
result. Construction variables are specified by a $ prefix
in the string and begin with an initial underscore or
alphabetic character followed by any number of underscores
or alphanumeric characters. The construction variable names
may be surrounded by curly braces to separate the name from
trailing characters.
"""
gvars = self.gvars()
lvars = self.lvars()
lvars['__env__'] = self
if executor:
lvars.update(executor.get_lvars())
return SCons.Subst.scons_subst(string, self, raw, target, source, gvars, lvars, conv)
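# Illustrative sketch (added comment; assumes an environment with CC and
# CCFLAGS already set -- the exact output depends on the Subst machinery):
#
#   env['CC'] = 'gcc'
#   env['CCFLAGS'] = '-O2'
#   env.subst('$CC $CCFLAGS -c foo.c')     # -> roughly 'gcc -O2 -c foo.c'
#   env.subst('${CC}x')                    # braces separate name from text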
def subst_kw(self, kw, raw=0, target=None, source=None):
nkw = {}
for k, v in kw.items():
k = self.subst(k, raw, target, source)
if SCons.Util.is_String(v):
v = self.subst(v, raw, target, source)
nkw[k] = v
return nkw
def subst_list(self, string, raw=0, target=None, source=None, conv=None, executor=None):
"""Calls through to SCons.Subst.scons_subst_list(). See
the documentation for that function."""
gvars = self.gvars()
lvars = self.lvars()
lvars['__env__'] = self
if executor:
lvars.update(executor.get_lvars())
return SCons.Subst.scons_subst_list(string, self, raw, target, source, gvars, lvars, conv)
def subst_path(self, path, target=None, source=None):
"""Substitute a path list, turning EntryProxies into Nodes
and leaving Nodes (and other objects) as-is."""
if not SCons.Util.is_List(path):
path = [path]
def s(obj):
"""This is the "string conversion" routine that we have our
substitutions use to return Nodes, not strings. This relies
on the fact that an EntryProxy object has a get() method that
returns the underlying Node that it wraps, which is a bit of
architectural dependence that we might need to break or modify
in the future in response to additional requirements."""
try:
get = obj.get
except AttributeError:
obj = SCons.Util.to_String_for_subst(obj)
else:
obj = get()
return obj
r = []
for p in path:
if SCons.Util.is_String(p):
p = self.subst(p, target=target, source=source, conv=s)
if SCons.Util.is_List(p):
if len(p) == 1:
p = p[0]
else:
# We have an object plus a string, or multiple
# objects that we need to smush together. No choice
# but to make them into a string.
p = ''.join(map(SCons.Util.to_String_for_subst, p))
else:
p = s(p)
r.append(p)
return r
subst_target_source = subst
def backtick(self, command):
import subprocess
# common arguments
kw = { 'stdin' : 'devnull',
'stdout' : subprocess.PIPE,
'stderr' : subprocess.PIPE,
'universal_newlines' : True,
}
# if the command is a list, assume it's been quoted
# otherwise force a shell
if not SCons.Util.is_List(command): kw['shell'] = True
# run constructed command
p = SCons.Action._subproc(self, command, **kw)
out,err = p.communicate()
status = p.wait()
if err:
sys.stderr.write(unicode(err))
if status:
raise OSError("'%s' exited %d" % (command, status))
return out
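# Illustrative sketch (the command below is hypothetical): backtick() returns
# the command's stdout and raises OSError on a non-zero exit status.
#
#   version_text = env.backtick('gcc --version')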
def AddMethod(self, function, name=None):
"""
Adds the specified function as a method of this construction
environment with the specified name. If the name is omitted,
the default name is the name of the function itself.
"""
method = MethodWrapper(self, function, name)
self.added_methods.append(method)
def RemoveMethod(self, function):
"""
Removes the specified function's MethodWrapper from the
added_methods list, so we don't re-bind it when making a clone.
"""
self.added_methods = [dm for dm in self.added_methods if not dm.method is function]
def Override(self, overrides):
"""
Produce a modified environment whose variables are overridden by
the overrides dictionary. "overrides" is a dictionary that
will override the variables of this environment.
This function is much more efficient than Clone() or creating
a new Environment because it doesn't copy the construction
environment dictionary, it just wraps the underlying construction
environment, and doesn't even create a wrapper object if there
are no overrides.
"""
if not overrides: return self
o = copy_non_reserved_keywords(overrides)
if not o: return self
overrides = {}
merges = None
for key, value in o.items():
if key == 'parse_flags':
merges = value
else:
overrides[key] = SCons.Subst.scons_subst_once(value, self, key)
env = OverrideEnvironment(self, overrides)
if merges: env.MergeFlags(merges)
return env
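# Illustrative sketch (added comment, not part of the original source):
#
#   dbg = env.Override({'CCFLAGS' : '-g'})
#   dbg['CCFLAGS']      # -> '-g'; the original env is left untouched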
def ParseFlags(self, *flags):
"""
Parse the set of flags and return a dict with the flags placed
in the appropriate entry. The flags are treated as a typical
set of command-line flags for a GNU-like toolchain and used to
populate the entries in the dict immediately below. If one of
the flag strings begins with a bang (exclamation mark), it is
assumed to be a command and the rest of the string is executed;
the result of that evaluation is then added to the dict.
"""
dict = {
'ASFLAGS' : SCons.Util.CLVar(''),
'CFLAGS' : SCons.Util.CLVar(''),
'CCFLAGS' : SCons.Util.CLVar(''),
'CPPDEFINES' : [],
'CPPFLAGS' : SCons.Util.CLVar(''),
'CPPPATH' : [],
'FRAMEWORKPATH' : SCons.Util.CLVar(''),
'FRAMEWORKS' : SCons.Util.CLVar(''),
'LIBPATH' : [],
'LIBS' : [],
'LINKFLAGS' : SCons.Util.CLVar(''),
'RPATH' : [],
}
def do_parse(arg):
# if arg is a sequence, recurse with each element
if not arg:
return
if not SCons.Util.is_String(arg):
for t in arg: do_parse(t)
return
# if arg is a command, execute it
if arg[0] == '!':
arg = self.backtick(arg[1:])
# utility function to deal with -D option
def append_define(name, dict = dict):
t = name.split('=')
if len(t) == 1:
dict['CPPDEFINES'].append(name)
else:
dict['CPPDEFINES'].append([t[0], '='.join(t[1:])])
# Loop through the flags and add them to the appropriate option.
# This tries to strike a balance between checking for all possible
# flags and keeping the logic to a finite size, so it doesn't
check for some that don't occur often. In particular, if the
# flag is not known to occur in a config script and there's a way
# of passing the flag to the right place (by wrapping it in a -W
# flag, for example) we don't check for it. Note that most
# preprocessor options are not handled, since unhandled options
# are placed in CCFLAGS, so unless the preprocessor is invoked
# separately, these flags will still get to the preprocessor.
# Other options not currently handled:
# -iquote dir (preprocessor search path)
# -u symbol (linker undefined symbol)
# -s (linker strip files)
# -static* (linker static binding)
# -shared* (linker dynamic binding)
# -symbolic (linker global binding)
# -R dir (deprecated linker rpath)
# IBM compilers may also accept -qframeworkdir=foo
params = shlex.split(arg)
append_next_arg_to = None # for multi-word args
for arg in params:
if append_next_arg_to:
if append_next_arg_to == 'CPPDEFINES':
append_define(arg)
elif append_next_arg_to == '-include':
t = ('-include', self.fs.File(arg))
dict['CCFLAGS'].append(t)
elif append_next_arg_to == '-isysroot':
t = ('-isysroot', arg)
dict['CCFLAGS'].append(t)
dict['LINKFLAGS'].append(t)
elif append_next_arg_to == '-arch':
t = ('-arch', arg)
dict['CCFLAGS'].append(t)
dict['LINKFLAGS'].append(t)
else:
dict[append_next_arg_to].append(arg)
append_next_arg_to = None
elif not arg[0] in ['-', '+']:
dict['LIBS'].append(self.fs.File(arg))
elif arg[:2] == '-L':
if arg[2:]:
dict['LIBPATH'].append(arg[2:])
else:
append_next_arg_to = 'LIBPATH'
elif arg[:2] == '-l':
if arg[2:]:
dict['LIBS'].append(arg[2:])
else:
append_next_arg_to = 'LIBS'
elif arg[:2] == '-I':
if arg[2:]:
dict['CPPPATH'].append(arg[2:])
else:
append_next_arg_to = 'CPPPATH'
elif arg[:4] == '-Wa,':
dict['ASFLAGS'].append(arg[4:])
dict['CCFLAGS'].append(arg)
elif arg[:4] == '-Wl,':
if arg[:11] == '-Wl,-rpath=':
dict['RPATH'].append(arg[11:])
elif arg[:7] == '-Wl,-R,':
dict['RPATH'].append(arg[7:])
elif arg[:6] == '-Wl,-R':
dict['RPATH'].append(arg[6:])
else:
dict['LINKFLAGS'].append(arg)
elif arg[:4] == '-Wp,':
dict['CPPFLAGS'].append(arg)
elif arg[:2] == '-D':
if arg[2:]:
append_define(arg[2:])
else:
append_next_arg_to = 'CPPDEFINES'
elif arg == '-framework':
append_next_arg_to = 'FRAMEWORKS'
elif arg[:14] == '-frameworkdir=':
dict['FRAMEWORKPATH'].append(arg[14:])
elif arg[:2] == '-F':
if arg[2:]:
dict['FRAMEWORKPATH'].append(arg[2:])
else:
append_next_arg_to = 'FRAMEWORKPATH'
elif arg == '-mno-cygwin':
dict['CCFLAGS'].append(arg)
dict['LINKFLAGS'].append(arg)
elif arg == '-mwindows':
dict['LINKFLAGS'].append(arg)
elif arg == '-pthread':
dict['CCFLAGS'].append(arg)
dict['LINKFLAGS'].append(arg)
elif arg[:5] == '-std=':
dict['CFLAGS'].append(arg) # C only
elif arg[0] == '+':
dict['CCFLAGS'].append(arg)
dict['LINKFLAGS'].append(arg)
elif arg in ['-include', '-isysroot', '-arch']:
append_next_arg_to = arg
else:
dict['CCFLAGS'].append(arg)
for arg in flags:
do_parse(arg)
return dict
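# Illustrative sketch of the dict ParseFlags() builds (flag string made up):
#
#   d = env.ParseFlags('-I/opt/include -DFOO=1 -L/opt/lib -lm -pthread')
#   # d['CPPPATH']    -> ['/opt/include']
#   # d['CPPDEFINES'] -> [['FOO', '1']]
#   # d['LIBPATH']    -> ['/opt/lib']
#   # d['LIBS']       -> ['m']
#   # '-pthread' lands in both d['CCFLAGS'] and d['LINKFLAGS']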
def MergeFlags(self, args, unique=1, dict=None):
"""
Merge the dict in args into the construction variables of this
env, or the passed-in dict. If args is not a dict, it is
converted into a dict using ParseFlags. If unique is not set,
the flags are appended rather than merged.
"""
if dict is None:
dict = self
if not SCons.Util.is_Dict(args):
args = self.ParseFlags(args)
if not unique:
self.Append(**args)
return self
for key, value in args.items():
if not value:
continue
try:
orig = self[key]
except KeyError:
orig = value
else:
if not orig:
orig = value
elif value:
# Add orig and value. The logic here was lifted from
# part of env.Append() (see there for a lot of comments
# about the order in which things are tried) and is
# used mainly to handle coercion of strings to CLVar to
# "do the right thing" given (e.g.) an original CCFLAGS
# string variable like '-pipe -Wall'.
try:
orig = orig + value
except (KeyError, TypeError):
try:
add_to_orig = orig.append
except AttributeError:
value.insert(0, orig)
orig = value
else:
add_to_orig(value)
t = []
if key[-4:] == 'PATH':
### keep left-most occurrence
for v in orig:
if v not in t:
t.append(v)
else:
### keep right-most occurrence
orig.reverse()
for v in orig:
if v not in t:
t.insert(0, v)
self[key] = t
return self
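# Illustrative sketch (added comment, not part of the original source):
#
#   env.MergeFlags('-I/opt/include -lm')    # a string goes through ParseFlags
#   env.MergeFlags({'CCFLAGS' : ['-g']})    # a dict is merged directly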
# def MergeShellPaths(self, args, prepend=1):
# """
# Merge the dict in args into the shell environment in env['ENV'].
# Shell path elements are appended or prepended according to prepend.
# Uses Pre/AppendENVPath, so it always appends or prepends uniquely.
# Example: env.MergeShellPaths({'LIBPATH': '/usr/local/lib'})
# prepends /usr/local/lib to env['ENV']['LIBPATH'].
# """
# for pathname, pathval in args.items():
# if not pathval:
# continue
# if prepend:
# self.PrependENVPath(pathname, pathval)
# else:
# self.AppendENVPath(pathname, pathval)
def default_decide_source(dependency, target, prev_ni):
f = SCons.Defaults.DefaultEnvironment().decide_source
return f(dependency, target, prev_ni)
def default_decide_target(dependency, target, prev_ni):
f = SCons.Defaults.DefaultEnvironment().decide_target
return f(dependency, target, prev_ni)
def default_copy_from_cache(src, dst):
f = SCons.Defaults.DefaultEnvironment().copy_from_cache
return f(src, dst)
class Base(SubstitutionEnvironment):
"""Base class for "real" construction Environments. These are the
primary objects used to communicate dependency and construction
information to the build engine.
Keyword arguments supplied when the construction Environment
is created are construction variables used to initialize the
Environment.
"""
memoizer_counters = []
#######################################################################
# This is THE class for interacting with the SCons build engine,
# and it contains a lot of stuff, so we're going to try to keep this
# a little organized by grouping the methods.
#######################################################################
#######################################################################
# Methods that make an Environment act like a dictionary. These have
# the expected standard names for Python mapping objects. Note that
# we don't actually make an Environment a subclass of UserDict for
# performance reasons. Note also that we only supply methods for
# dictionary functionality that we actually need and use.
#######################################################################
def __init__(self,
platform=None,
tools=None,
toolpath=None,
variables=None,
parse_flags = None,
**kw):
"""
Initialization of a basic SCons construction environment,
including setting up special construction variables like BUILDER,
PLATFORM, etc., and searching for and applying available Tools.
Note that we do *not* call the underlying base class
(SubstitutionEnvironment) initialization, because we need to
initialize things in a very specific order that doesn't work
with the much simpler base class initialization.
"""
if __debug__: logInstanceCreation(self, 'Environment.Base')
self._memo = {}
self.fs = SCons.Node.FS.get_default_fs()
self.ans = SCons.Node.Alias.default_ans
self.lookup_list = SCons.Node.arg2nodes_lookups
self._dict = semi_deepcopy(SCons.Defaults.ConstructionEnvironment)
self._init_special()
self.added_methods = []
# We don't use AddMethod, or define these as methods in this
# class, because we *don't* want these functions to be bound
# methods. They need to operate independently so that the
# settings will work properly regardless of whether a given
# target ends up being built with a Base environment or an
# OverrideEnvironment or what have you.
self.decide_target = default_decide_target
self.decide_source = default_decide_source
self.copy_from_cache = default_copy_from_cache
self._dict['BUILDERS'] = BuilderDict(self._dict['BUILDERS'], self)
if platform is None:
platform = self._dict.get('PLATFORM', None)
if platform is None:
platform = SCons.Platform.Platform()
if SCons.Util.is_String(platform):
platform = SCons.Platform.Platform(platform)
self._dict['PLATFORM'] = str(platform)
platform(self)
self._dict['HOST_OS'] = self._dict.get('HOST_OS',None)
self._dict['HOST_ARCH'] = self._dict.get('HOST_ARCH',None)
# Now set defaults for TARGET_{OS|ARCH}
self._dict['TARGET_OS'] = self._dict.get('HOST_OS',None)
self._dict['TARGET_ARCH'] = self._dict.get('HOST_ARCH',None)
# Apply the passed-in and customizable variables to the
# environment before calling the tools, because they may use
# some of them during initialization.
if 'options' in kw:
# Backwards compatibility: they may still be using the
# old "options" keyword.
variables = kw['options']
del kw['options']
self.Replace(**kw)
keys = list(kw.keys())
if variables:
keys = keys + list(variables.keys())
variables.Update(self)
save = {}
for k in keys:
try:
save[k] = self._dict[k]
except KeyError:
# No value may have been set if they tried to pass in a
# reserved variable name like TARGETS.
pass
SCons.Tool.Initializers(self)
if tools is None:
tools = self._dict.get('TOOLS', None)
if tools is None:
tools = ['default']
apply_tools(self, tools, toolpath)
# Now restore the passed-in and customized variables
# to the environment, since the values the user set explicitly
# should override any values set by the tools.
for key, val in save.items():
self._dict[key] = val
# Finally, apply any flags to be merged in
if parse_flags: self.MergeFlags(parse_flags)
#######################################################################
# Utility methods that are primarily for internal use by SCons.
# These begin with lower-case letters.
#######################################################################
def get_builder(self, name):
"""Fetch the builder with the specified name from the environment.
"""
try:
return self._dict['BUILDERS'][name]
except KeyError:
return None
def get_CacheDir(self):
try:
path = self._CacheDir_path
except AttributeError:
path = SCons.Defaults.DefaultEnvironment()._CacheDir_path
try:
if path == self._last_CacheDir_path:
return self._last_CacheDir
except AttributeError:
pass
cd = SCons.CacheDir.CacheDir(path)
self._last_CacheDir_path = path
self._last_CacheDir = cd
return cd
def get_factory(self, factory, default='File'):
"""Return a factory function for creating Nodes for this
construction environment.
"""
name = default
try:
is_node = issubclass(factory, SCons.Node.FS.Base)
except TypeError:
# The specified factory isn't a Node itself--it's
# most likely None, or possibly a callable.
pass
else:
if is_node:
# The specified factory is a Node (sub)class. Try to
# return the FS method that corresponds to the Node's
# name--that is, we return self.fs.Dir if they want a Dir,
# self.fs.File for a File, etc.
try: name = factory.__name__
except AttributeError: pass
else: factory = None
if not factory:
# They passed us None, or we picked up a name from a specified
# class, so return the FS method. (Note that we *don't*
# use our own self.{Dir,File} methods because that would
# cause env.subst() to be called twice on the file name,
# interfering with files that have $$ in them.)
factory = getattr(self.fs, name)
return factory
memoizer_counters.append(SCons.Memoize.CountValue('_gsm'))
def _gsm(self):
try:
return self._memo['_gsm']
except KeyError:
pass
result = {}
try:
scanners = self._dict['SCANNERS']
except KeyError:
pass
else:
# Reverse the scanner list so that, if multiple scanners
# claim they can scan the same suffix, earlier scanners
# in the list will overwrite later scanners, so that
# the result looks like a "first match" to the user.
if not SCons.Util.is_List(scanners):
scanners = [scanners]
else:
scanners = scanners[:] # copy so reverse() doesn't mod original
scanners.reverse()
for scanner in scanners:
for k in scanner.get_skeys(self):
if k and self['PLATFORM'] == 'win32':
k = k.lower()
result[k] = scanner
self._memo['_gsm'] = result
return result
def get_scanner(self, skey):
"""Find the appropriate scanner given a key (usually a file suffix).
"""
if skey and self['PLATFORM'] == 'win32':
skey = skey.lower()
return self._gsm().get(skey)
def scanner_map_delete(self, kw=None):
"""Delete the cached scanner map (if we need to).
"""
try:
del self._memo['_gsm']
except KeyError:
pass
def _update(self, dict):
"""Update an environment's values directly, bypassing the normal
checks that occur when users try to set items.
"""
self._dict.update(dict)
def get_src_sig_type(self):
try:
return self.src_sig_type
except AttributeError:
t = SCons.Defaults.DefaultEnvironment().src_sig_type
self.src_sig_type = t
return t
def get_tgt_sig_type(self):
try:
return self.tgt_sig_type
except AttributeError:
t = SCons.Defaults.DefaultEnvironment().tgt_sig_type
self.tgt_sig_type = t
return t
#######################################################################
# Public methods for manipulating an Environment. These begin with
# upper-case letters. The essential characteristic of methods in
# this section is that they do *not* have corresponding same-named
# global functions. For example, a stand-alone Append() function
# makes no sense, because Append() is all about appending values to
# an Environment's construction variables.
#######################################################################
def Append(self, **kw):
"""Append values to existing construction variables
in an Environment.
"""
kw = copy_non_reserved_keywords(kw)
for key, val in kw.items():
# It would be easier on the eyes to write this using
# "continue" statements whenever we finish processing an item,
# but Python 1.5.2 apparently doesn't let you use "continue"
# within try:-except: blocks, so we have to nest our code.
try:
orig = self._dict[key]
except KeyError:
# No existing variable in the environment, so just set
# it to the new value.
self._dict[key] = val
else:
try:
# Check if the original looks like a dictionary.
# If it is, we can't just try adding the value because
# dictionaries don't have __add__() methods, and
# things like UserList will incorrectly coerce the
# original dict to a list (which we don't want).
update_dict = orig.update
except AttributeError:
try:
# Most straightforward: just try to add them
# together. This will work in most cases, when the
# original and new values are of compatible types.
self._dict[key] = orig + val
except (KeyError, TypeError):
try:
# Check if the original is a list.
add_to_orig = orig.append
except AttributeError:
# The original isn't a list, but the new
# value is (by process of elimination),
# so insert the original in the new value
# (if there's one to insert) and replace
# the variable with it.
if orig:
val.insert(0, orig)
self._dict[key] = val
else:
# The original is a list, so append the new
# value to it (if there's a value to append).
if val:
add_to_orig(val)
else:
# The original looks like a dictionary, so update it
# based on what we think the value looks like.
if SCons.Util.is_List(val):
for v in val:
orig[v] = None
else:
try:
update_dict(val)
except (AttributeError, TypeError, ValueError):
if SCons.Util.is_Dict(val):
for k, v in val.items():
orig[k] = v
else:
orig[val] = None
self.scanner_map_delete(kw)
# allow Dirs and strings beginning with # for top-relative
# Note this uses the current env's fs (in self).
def _canonicalize(self, path):
if not SCons.Util.is_String(path): # typically a Dir
path = str(path)
if path and path[0] == '#':
path = str(self.fs.Dir(path))
return path
def AppendENVPath(self, name, newpath, envname = 'ENV',
sep = os.pathsep, delete_existing=1):
"""Append path elements to the path 'name' in the 'ENV'
dictionary for this environment. Will only add any particular
path once, and will normpath and normcase all paths to help
assure this. This can also handle the case where the env
variable is a list instead of a string.
If delete_existing is 0, a newpath which is already in the path
will not be moved to the end (it will be left where it is).
"""
orig = ''
if envname in self._dict and name in self._dict[envname]:
orig = self._dict[envname][name]
nv = SCons.Util.AppendPath(orig, newpath, sep, delete_existing,
canonicalize=self._canonicalize)
if envname not in self._dict:
self._dict[envname] = {}
self._dict[envname][name] = nv
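# Illustrative sketch (paths are hypothetical):
#
#   env.AppendENVPath('PATH', '/opt/bin')
#   env.AppendENVPath('PATH', '/opt/bin')   # no-op: each element appears once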
def AppendUnique(self, delete_existing=0, **kw):
"""Append values to existing construction variables
in an Environment, if they're not already there.
If delete_existing is 1, removes existing values first, so
values move to end.
"""
kw = copy_non_reserved_keywords(kw)
for key, val in kw.items():
if SCons.Util.is_List(val):
val = _delete_duplicates(val, delete_existing)
if key not in self._dict or self._dict[key] in ('', None):
self._dict[key] = val
elif SCons.Util.is_Dict(self._dict[key]) and \
SCons.Util.is_Dict(val):
self._dict[key].update(val)
elif SCons.Util.is_List(val):
dk = self._dict[key]
if not SCons.Util.is_List(dk):
dk = [dk]
if delete_existing:
dk = [x for x in dk if x not in val]
else:
val = [x for x in val if x not in dk]
self._dict[key] = dk + val
else:
dk = self._dict[key]
if SCons.Util.is_List(dk):
# By elimination, val is not a list. Since dk is a
# list, wrap val in a list first.
if delete_existing:
dk = [x for x in dk if x not in val]
self._dict[key] = dk + [val]
else:
if not val in dk:
self._dict[key] = dk + [val]
else:
if delete_existing:
dk = [x for x in dk if x not in val]
self._dict[key] = dk + val
self.scanner_map_delete(kw)
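# Illustrative sketch (added comment, not part of the original source):
#
#   env['CPPPATH'] = ['include']
#   env.AppendUnique(CPPPATH=['include', 'src'])
#   # env['CPPPATH'] -> ['include', 'src']; the duplicate is not re-added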
def Clone(self, tools=[], toolpath=None, parse_flags = None, **kw):
"""Return a copy of a construction Environment. The
copy is like a Python "deep copy"--that is, independent
copies are made recursively of each object--except that
a reference is copied when an object is not deep-copyable
(like a function). There are no references to any mutable
objects in the original Environment.
"""
clone = copy.copy(self)
clone._dict = semi_deepcopy(self._dict)
try:
cbd = clone._dict['BUILDERS']
except KeyError:
pass
else:
clone._dict['BUILDERS'] = BuilderDict(cbd, clone)
# Check the methods added via AddMethod() and re-bind them to
# the cloned environment. Only do this if the attribute hasn't
# been overwritten by the user explicitly and still points to
# the added method.
clone.added_methods = []
for mw in self.added_methods:
if mw == getattr(self, mw.name):
clone.added_methods.append(mw.clone(clone))
clone._memo = {}
# Apply passed-in variables before the tools
# so the tools can use the new variables
kw = copy_non_reserved_keywords(kw)
new = {}
for key, value in kw.items():
new[key] = SCons.Subst.scons_subst_once(value, self, key)
clone.Replace(**new)
apply_tools(clone, tools, toolpath)
# apply them again in case the tools overwrote them
clone.Replace(**new)
# Finally, apply any flags to be merged in
if parse_flags: clone.MergeFlags(parse_flags)
if __debug__: logInstanceCreation(self, 'Environment.EnvironmentClone')
return clone
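# Illustrative sketch (added comment, not part of the original source):
#
#   debug = env.Clone(CCFLAGS='-g')
#   # debug['CCFLAGS'] -> '-g'; env is unaffected, and builders plus any
#   # AddMethod() additions are re-bound to the clone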
def Copy(self, *args, **kw):
global _warn_copy_deprecated
if _warn_copy_deprecated:
msg = "The env.Copy() method is deprecated; use the env.Clone() method instead."
SCons.Warnings.warn(SCons.Warnings.DeprecatedCopyWarning, msg)
_warn_copy_deprecated = False
return self.Clone(*args, **kw)
def _changed_build(self, dependency, target, prev_ni):
if dependency.changed_state(target, prev_ni):
return 1
return self.decide_source(dependency, target, prev_ni)
def _changed_content(self, dependency, target, prev_ni):
return dependency.changed_content(target, prev_ni)
def _changed_source(self, dependency, target, prev_ni):
target_env = dependency.get_build_env()
type = target_env.get_tgt_sig_type()
if type == 'source':
return target_env.decide_source(dependency, target, prev_ni)
else:
return target_env.decide_target(dependency, target, prev_ni)
def _changed_timestamp_then_content(self, dependency, target, prev_ni):
return dependency.changed_timestamp_then_content(target, prev_ni)
def _changed_timestamp_newer(self, dependency, target, prev_ni):
return dependency.changed_timestamp_newer(target, prev_ni)
def _changed_timestamp_match(self, dependency, target, prev_ni):
return dependency.changed_timestamp_match(target, prev_ni)
def _copy_from_cache(self, src, dst):
return self.fs.copy(src, dst)
def _copy2_from_cache(self, src, dst):
return self.fs.copy2(src, dst)
def Decider(self, function):
copy_function = self._copy2_from_cache
if function in ('MD5', 'content'):
if not SCons.Util.md5:
raise UserError("MD5 signatures are not available in this version of Python.")
function = self._changed_content
elif function == 'MD5-timestamp':
function = self._changed_timestamp_then_content
elif function in ('timestamp-newer', 'make'):
function = self._changed_timestamp_newer
copy_function = self._copy_from_cache
elif function == 'timestamp-match':
function = self._changed_timestamp_match
elif not callable(function):
raise UserError("Unknown Decider value %s" % repr(function))
# We don't use AddMethod because we don't want to turn the
# function, which only expects three arguments, into a bound
# method, which would add self as an initial, fourth argument.
self.decide_target = function
self.decide_source = function
self.copy_from_cache = copy_function
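# Illustrative sketch of the accepted Decider() spellings (see above):
#
#   env.Decider('MD5-timestamp')     # re-hash content only if timestamp changed
#   env.Decider('timestamp-newer')   # Make-style "newer than" behavior
#   # Any callable taking (dependency, target, prev_ni) is also accepted.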
def Detect(self, progs):
"""Return the first available program in progs.
"""
if not SCons.Util.is_List(progs):
progs = [ progs ]
for prog in progs:
path = self.WhereIs(prog)
if path: return prog
return None
def Dictionary(self, *args):
if not args:
return self._dict
dlist = [self._dict[x] for x in args]
if len(dlist) == 1:
dlist = dlist[0]
return dlist
def Dump(self, key = None):
"""
Using the standard Python pretty printer, dump the contents of the
scons build environment to stdout.
If the key passed in is anything other than None, then that will
be used as an index into the build environment dictionary and
whatever is found there will be fed into the pretty printer. Note
that this key is case sensitive.
"""
import pprint
pp = pprint.PrettyPrinter(indent=2)
if key:
dict = self.Dictionary(key)
else:
dict = self.Dictionary()
return pp.pformat(dict)
def FindIxes(self, paths, prefix, suffix):
"""
Search a list of paths for something that matches the prefix and suffix.
paths - the list of paths or nodes.
prefix - construction variable for the prefix.
suffix - construction variable for the suffix.
"""
suffix = self.subst('$'+suffix)
prefix = self.subst('$'+prefix)
for path in paths:
dir,name = os.path.split(str(path))
if name[:len(prefix)] == prefix and name[-len(suffix):] == suffix:
return path
def ParseConfig(self, command, function=None, unique=1):
"""
Use the specified function to parse the output of the command
in order to modify the current environment. The 'command' can
be a string or a list of strings representing a command and
its arguments. 'Function' is an optional argument that takes
the environment, the output of the command, and the unique flag.
If no function is specified, MergeFlags, which treats the output
as the result of a typical 'X-config' command (i.e. gtk-config),
will merge the output into the appropriate variables.
"""
if function is None:
def parse_conf(env, cmd, unique=unique):
return env.MergeFlags(cmd, unique)
function = parse_conf
if SCons.Util.is_List(command):
command = ' '.join(command)
command = self.subst(command)
return function(self, self.backtick(command))
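# Illustrative sketch (the pkg-config command is only an example):
#
#   env.ParseConfig('pkg-config --cflags --libs libpng')
#   # runs the command and MergeFlags() the output into this environment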
def ParseDepends(self, filename, must_exist=None, only_one=0):
"""
Parse a mkdep-style file for explicit dependencies. This is
completely abusable, and should be unnecessary in the "normal"
case of proper SCons configuration, but it may help make
the transition from a Make hierarchy easier for some people
to swallow. It can also be genuinely useful when using a tool
that can write a .d file, but for which writing a scanner would
be too complicated.
"""
filename = self.subst(filename)
try:
fp = open(filename, 'r')
except IOError:
if must_exist:
raise
return
lines = SCons.Util.LogicalLines(fp).readlines()
lines = [l for l in lines if l[0] != '#']
tdlist = []
for line in lines:
try:
target, depends = line.split(':', 1)
except (AttributeError, ValueError):
# Throws AttributeError if line isn't a string. Can throw
# ValueError if line doesn't split into two or more elements.
pass
else:
tdlist.append((target.split(), depends.split()))
if only_one:
targets = []
for td in tdlist:
targets.extend(td[0])
if len(targets) > 1:
raise SCons.Errors.UserError(
"More than one dependency target found in `%s': %s"
% (filename, targets))
for target, depends in tdlist:
self.Depends(target, depends)
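# Illustrative sketch (file name and contents are hypothetical): given a
# mkdep-style file 'foo.d' containing "foo.o: foo.c foo.h", the call below
# records the listed dependencies:
#
#   env.ParseDepends('foo.d')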
def Platform(self, platform):
platform = self.subst(platform)
return SCons.Platform.Platform(platform)(self)
def Prepend(self, **kw):
"""Prepend values to existing construction variables
in an Environment.
"""
kw = copy_non_reserved_keywords(kw)
for key, val in kw.items():
# It would be easier on the eyes to write this using
# "continue" statements whenever we finish processing an item,
# but Python 1.5.2 apparently doesn't let you use "continue"
# within try:-except: blocks, so we have to nest our code.
try:
orig = self._dict[key]
except KeyError:
# No existing variable in the environment, so just set
# it to the new value.
self._dict[key] = val
else:
try:
# Check if the original looks like a dictionary.
# If it is, we can't just try adding the value because
# dictionaries don't have __add__() methods, and
# things like UserList will incorrectly coerce the
# original dict to a list (which we don't want).
update_dict = orig.update
except AttributeError:
try:
# Most straightforward: just try to add them
# together. This will work in most cases, when the
# original and new values are of compatible types.
self._dict[key] = val + orig
except (KeyError, TypeError):
try:
# Check if the added value is a list.
add_to_val = val.append
except AttributeError:
# The added value isn't a list, but the
# original is (by process of elimination),
# so insert the new value in the original
# (if there's one to insert).
if val:
orig.insert(0, val)
else:
# The added value is a list, so append
# the original to it (if there's a value
# to append).
if orig:
add_to_val(orig)
self._dict[key] = val
else:
# The original looks like a dictionary, so update it
# based on what we think the value looks like.
if SCons.Util.is_List(val):
for v in val:
orig[v] = None
else:
try:
update_dict(val)
except (AttributeError, TypeError, ValueError):
if SCons.Util.is_Dict(val):
for k, v in val.items():
orig[k] = v
else:
orig[val] = None
self.scanner_map_delete(kw)
def PrependENVPath(self, name, newpath, envname = 'ENV', sep = os.pathsep,
delete_existing=1):
"""Prepend path elements to the path 'name' in the 'ENV'
dictionary for this environment. Will only add any particular
path once, and will normpath and normcase all paths to help
assure this. This can also handle the case where the env
variable is a list instead of a string.
If delete_existing is 0, a newpath which is already in the path
will not be moved to the front (it will be left where it is).
"""
orig = ''
if envname in self._dict and name in self._dict[envname]:
orig = self._dict[envname][name]
nv = SCons.Util.PrependPath(orig, newpath, sep, delete_existing,
canonicalize=self._canonicalize)
if envname not in self._dict:
self._dict[envname] = {}
self._dict[envname][name] = nv
def PrependUnique(self, delete_existing=0, **kw):
"""Prepend values to existing construction variables
in an Environment, if they're not already there.
If delete_existing is 1, removes existing values first, so
values move to front.
"""
kw = copy_non_reserved_keywords(kw)
for key, val in kw.items():
if SCons.Util.is_List(val):
val = _delete_duplicates(val, not delete_existing)
if key not in self._dict or self._dict[key] in ('', None):
self._dict[key] = val
elif SCons.Util.is_Dict(self._dict[key]) and \
SCons.Util.is_Dict(val):
self._dict[key].update(val)
elif SCons.Util.is_List(val):
dk = self._dict[key]
if not SCons.Util.is_List(dk):
dk = [dk]
if delete_existing:
dk = [x for x in dk if x not in val]
else:
val = [x for x in val if x not in dk]
self._dict[key] = val + dk
else:
dk = self._dict[key]
if SCons.Util.is_List(dk):
# By elimination, val is not a list. Since dk is a
# list, wrap val in a list first.
if delete_existing:
dk = [x for x in dk if x not in val]
self._dict[key] = [val] + dk
else:
if not val in dk:
self._dict[key] = [val] + dk
else:
if delete_existing:
dk = [x for x in dk if x not in val]
self._dict[key] = val + dk
self.scanner_map_delete(kw)
def Replace(self, **kw):
"""Replace existing construction variables in an Environment
with new construction variables and/or values.
"""
try:
kwbd = kw['BUILDERS']
except KeyError:
pass
else:
kwbd = semi_deepcopy(kwbd)
del kw['BUILDERS']
self.__setitem__('BUILDERS', kwbd)
kw = copy_non_reserved_keywords(kw)
self._update(semi_deepcopy(kw))
self.scanner_map_delete(kw)
def ReplaceIxes(self, path, old_prefix, old_suffix, new_prefix, new_suffix):
"""
Replace old_prefix with new_prefix and old_suffix with new_suffix.
env - Environment used to interpolate variables.
path - the path that will be modified.
old_prefix - construction variable for the old prefix.
old_suffix - construction variable for the old suffix.
new_prefix - construction variable for the new prefix.
new_suffix - construction variable for the new suffix.
"""
old_prefix = self.subst('$'+old_prefix)
old_suffix = self.subst('$'+old_suffix)
new_prefix = self.subst('$'+new_prefix)
new_suffix = self.subst('$'+new_suffix)
dir,name = os.path.split(str(path))
if name[:len(old_prefix)] == old_prefix:
name = name[len(old_prefix):]
if name[-len(old_suffix):] == old_suffix:
name = name[:-len(old_suffix)]
return os.path.join(dir, new_prefix+name+new_suffix)
def SetDefault(self, **kw):
for k in kw.keys():
if k in self._dict:
del kw[k]
self.Replace(**kw)
def _find_toolpath_dir(self, tp):
return self.fs.Dir(self.subst(tp)).srcnode().abspath
def Tool(self, tool, toolpath=None, **kw):
if SCons.Util.is_String(tool):
tool = self.subst(tool)
if toolpath is None:
toolpath = self.get('toolpath', [])
toolpath = list(map(self._find_toolpath_dir, toolpath))
tool = SCons.Tool.Tool(tool, toolpath, **kw)
tool(self)
def WhereIs(self, prog, path=None, pathext=None, reject=[]):
"""Find prog in the path.
"""
if path is None:
try:
path = self['ENV']['PATH']
except KeyError:
pass
elif SCons.Util.is_String(path):
path = self.subst(path)
if pathext is None:
try:
pathext = self['ENV']['PATHEXT']
except KeyError:
pass
elif SCons.Util.is_String(pathext):
pathext = self.subst(pathext)
prog = self.subst(prog)
path = SCons.Util.WhereIs(prog, path, pathext, reject)
if path: return path
return None
#######################################################################
# Public methods for doing real "SCons stuff" (manipulating
# dependencies, setting attributes on targets, etc.). These begin
# with upper-case letters. The essential characteristic of methods
# in this section is that they all *should* have corresponding
# same-named global functions.
#######################################################################
def Action(self, *args, **kw):
def subst_string(a, self=self):
if SCons.Util.is_String(a):
a = self.subst(a)
return a
nargs = list(map(subst_string, args))
nkw = self.subst_kw(kw)
return SCons.Action.Action(*nargs, **nkw)
def AddPreAction(self, files, action):
nodes = self.arg2nodes(files, self.fs.Entry)
action = SCons.Action.Action(action)
uniq = {}
for executor in [n.get_executor() for n in nodes]:
uniq[executor] = 1
for executor in uniq.keys():
executor.add_pre_action(action)
return nodes
def AddPostAction(self, files, action):
nodes = self.arg2nodes(files, self.fs.Entry)
action = SCons.Action.Action(action)
uniq = {}
for executor in [n.get_executor() for n in nodes]:
uniq[executor] = 1
for executor in uniq.keys():
executor.add_post_action(action)
return nodes
def Alias(self, target, source=[], action=None, **kw):
tlist = self.arg2nodes(target, self.ans.Alias)
if not SCons.Util.is_List(source):
source = [source]
source = [_f for _f in source if _f]
if not action:
if not source:
# There are no source files and no action, so just
# return a target list of classic Alias Nodes, without
# any builder. The externally visible effect is that
# this will make the wrapping Script.BuildTask class
# say that there's "Nothing to be done" for this Alias,
# instead of that it's "up to date."
return tlist
# No action, but there are sources. Re-call all the target
# builders to add the sources to each target.
result = []
for t in tlist:
bld = t.get_builder(AliasBuilder)
result.extend(bld(self, t, source))
return result
nkw = self.subst_kw(kw)
nkw.update({
'action' : SCons.Action.Action(action),
'source_factory' : self.fs.Entry,
'multi' : 1,
'is_explicit' : None,
})
bld = SCons.Builder.Builder(**nkw)
# Apply the Builder separately to each target so that the Aliases
# stay separate. If we did one "normal" Builder call with the
# whole target list, then all of the target Aliases would be
# associated under a single Executor.
result = []
for t in tlist:
# Calling the convert() method will cause a new Executor to be
# created from scratch, so we have to explicitly initialize
# it with the target's existing sources, plus our new ones,
# so nothing gets lost.
b = t.get_builder()
if b is None or b is AliasBuilder:
b = bld
else:
nkw['action'] = b.action + action
b = SCons.Builder.Builder(**nkw)
t.convert()
result.extend(b(self, t, t.sources + source))
return result
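# Illustrative sketch (target names are made up):
#
#   env.Alias('build', objects)             # alias over existing targets
#   env.Alias('banner', [], 'echo done')    # alias that only runs an action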
def AlwaysBuild(self, *targets):
tlist = []
for t in targets:
tlist.extend(self.arg2nodes(t, self.fs.Entry))
for t in tlist:
t.set_always_build()
return tlist
def BuildDir(self, *args, **kw):
msg = """BuildDir() and the build_dir keyword have been deprecated;\n\tuse VariantDir() and the variant_dir keyword instead."""
SCons.Warnings.warn(SCons.Warnings.DeprecatedBuildDirWarning, msg)
if 'build_dir' in kw:
kw['variant_dir'] = kw['build_dir']
del kw['build_dir']
return self.VariantDir(*args, **kw)
def Builder(self, **kw):
nkw = self.subst_kw(kw)
return SCons.Builder.Builder(**nkw)
def CacheDir(self, path):
import SCons.CacheDir
if path is not None:
path = self.subst(path)
self._CacheDir_path = path
def Clean(self, targets, files):
global CleanTargets
tlist = self.arg2nodes(targets, self.fs.Entry)
flist = self.arg2nodes(files, self.fs.Entry)
for t in tlist:
try:
CleanTargets[t].extend(flist)
except KeyError:
CleanTargets[t] = flist
def Configure(self, *args, **kw):
nargs = [self]
if args:
nargs = nargs + self.subst_list(args)[0]
nkw = self.subst_kw(kw)
nkw['_depth'] = kw.get('_depth', 0) + 1
try:
nkw['custom_tests'] = self.subst_kw(nkw['custom_tests'])
except KeyError:
pass
return SCons.SConf.SConf(*nargs, **nkw)
def Command(self, target, source, action, **kw):
"""Builds the supplied target files from the supplied
source files using the supplied action. Action may
be any type that the Builder constructor will accept
for an action."""
bkw = {
'action' : action,
'target_factory' : self.fs.Entry,
'source_factory' : self.fs.Entry,
}
try: bkw['source_scanner'] = kw['source_scanner']
except KeyError: pass
else: del kw['source_scanner']
bld = SCons.Builder.Builder(**bkw)
return bld(self, target, source, **kw)
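# Illustrative sketch (file names and command are hypothetical):
#
#   env.Command('version.h', 'version.h.in',
#               'sed s/@VER@/1.0/ < $SOURCE > $TARGET')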
def Depends(self, target, dependency):
"""Explicity specify that 'target's depend on 'dependency'."""
tlist = self.arg2nodes(target, self.fs.Entry)
dlist = self.arg2nodes(dependency, self.fs.Entry)
for t in tlist:
t.add_dependency(dlist)
return tlist
def Dir(self, name, *args, **kw):
"""
"""
s = self.subst(name)
if SCons.Util.is_Sequence(s):
result=[]
for e in s:
result.append(self.fs.Dir(e, *args, **kw))
return result
return self.fs.Dir(s, *args, **kw)
def NoClean(self, *targets):
"""Tags a target so that it will not be cleaned by -c"""
tlist = []
for t in targets:
tlist.extend(self.arg2nodes(t, self.fs.Entry))
for t in tlist:
t.set_noclean()
return tlist
def NoCache(self, *targets):
"""Tags a target so that it will not be cached"""
tlist = []
for t in targets:
tlist.extend(self.arg2nodes(t, self.fs.Entry))
for t in tlist:
t.set_nocache()
return tlist
def Entry(self, name, *args, **kw):
"""
"""
s = self.subst(name)
if SCons.Util.is_Sequence(s):
result=[]
for e in s:
result.append(self.fs.Entry(e, *args, **kw))
return result
return self.fs.Entry(s, *args, **kw)
def Environment(self, **kw):
return SCons.Environment.Environment(**self.subst_kw(kw))
def Execute(self, action, *args, **kw):
"""Directly execute an action through an Environment
"""
action = self.Action(action, *args, **kw)
result = action([], [], self)
if isinstance(result, SCons.Errors.BuildError):
errstr = result.errstr
if result.filename:
errstr = result.filename + ': ' + errstr
sys.stderr.write("scons: *** %s\n" % errstr)
return result.status
else:
return result
def File(self, name, *args, **kw):
"""
"""
s = self.subst(name)
if SCons.Util.is_Sequence(s):
result=[]
for e in s:
result.append(self.fs.File(e, *args, **kw))
return result
return self.fs.File(s, *args, **kw)
def FindFile(self, file, dirs):
file = self.subst(file)
nodes = self.arg2nodes(dirs, self.fs.Dir)
return SCons.Node.FS.find_file(file, tuple(nodes))
def Flatten(self, sequence):
return SCons.Util.flatten(sequence)
def GetBuildPath(self, files):
result = list(map(str, self.arg2nodes(files, self.fs.Entry)))
if SCons.Util.is_List(files):
return result
else:
return result[0]
def Glob(self, pattern, ondisk=True, source=False, strings=False):
return self.fs.Glob(self.subst(pattern), ondisk, source, strings)
def Ignore(self, target, dependency):
"""Ignore a dependency."""
tlist = self.arg2nodes(target, self.fs.Entry)
dlist = self.arg2nodes(dependency, self.fs.Entry)
for t in tlist:
t.add_ignore(dlist)
return tlist
def Literal(self, string):
return SCons.Subst.Literal(string)
def Local(self, *targets):
ret = []
for targ in targets:
if isinstance(targ, SCons.Node.Node):
targ.set_local()
ret.append(targ)
else:
for t in self.arg2nodes(targ, self.fs.Entry):
t.set_local()
ret.append(t)
return ret
def Precious(self, *targets):
tlist = []
for t in targets:
tlist.extend(self.arg2nodes(t, self.fs.Entry))
for t in tlist:
t.set_precious()
return tlist
def Repository(self, *dirs, **kw):
dirs = self.arg2nodes(list(dirs), self.fs.Dir)
self.fs.Repository(*dirs, **kw)
def Requires(self, target, prerequisite):
"""Specify that 'prerequisite' must be built before 'target',
(but 'target' does not actually depend on 'prerequisite'
and need not be rebuilt if it changes)."""
tlist = self.arg2nodes(target, self.fs.Entry)
plist = self.arg2nodes(prerequisite, self.fs.Entry)
for t in tlist:
t.add_prerequisite(plist)
return tlist
def Scanner(self, *args, **kw):
nargs = []
for arg in args:
if SCons.Util.is_String(arg):
arg = self.subst(arg)
nargs.append(arg)
nkw = self.subst_kw(kw)
return SCons.Scanner.Base(*nargs, **nkw)
def SConsignFile(self, name=".sconsign", dbm_module=None):
if name is not None:
name = self.subst(name)
if not os.path.isabs(name):
name = os.path.join(str(self.fs.SConstruct_dir), name)
if name:
name = os.path.normpath(name)
sconsign_dir = os.path.dirname(name)
if sconsign_dir and not os.path.exists(sconsign_dir):
self.Execute(SCons.Defaults.Mkdir(sconsign_dir))
SCons.SConsign.File(name, dbm_module)
def SideEffect(self, side_effect, target):
"""Tell scons that side_effects are built as side
effects of building targets."""
side_effects = self.arg2nodes(side_effect, self.fs.Entry)
targets = self.arg2nodes(target, self.fs.Entry)
for side_effect in side_effects:
if side_effect.multiple_side_effect_has_builder():
raise SCons.Errors.UserError("Multiple ways to build the same target were specified for: %s" % str(side_effect))
side_effect.add_source(targets)
side_effect.side_effect = 1
self.Precious(side_effect)
for target in targets:
target.side_effects.append(side_effect)
return side_effects
def SourceCode(self, entry, builder):
"""Arrange for a source code builder for (part of) a tree."""
msg = """SourceCode() has been deprecated and there is no replacement.
\tIf you need this function, please contact [email protected]."""
SCons.Warnings.warn(SCons.Warnings.DeprecatedSourceCodeWarning, msg)
entries = self.arg2nodes(entry, self.fs.Entry)
for entry in entries:
entry.set_src_builder(builder)
return entries
def SourceSignatures(self, type):
global _warn_source_signatures_deprecated
if _warn_source_signatures_deprecated:
msg = "The env.SourceSignatures() method is deprecated;\n" + \
"\tconvert your build to use the env.Decider() method instead."
SCons.Warnings.warn(SCons.Warnings.DeprecatedSourceSignaturesWarning, msg)
_warn_source_signatures_deprecated = False
type = self.subst(type)
self.src_sig_type = type
if type == 'MD5':
if not SCons.Util.md5:
raise UserError("MD5 signatures are not available in this version of Python.")
self.decide_source = self._changed_content
elif type == 'timestamp':
self.decide_source = self._changed_timestamp_match
else:
raise UserError("Unknown source signature type '%s'" % type)
def Split(self, arg):
"""This function converts a string or list into a list of strings
or Nodes. This makes things easier for users by allowing files to
be specified as a white-space separated list to be split.
The input rules are:
- A single string containing names separated by spaces. These will be
split apart at the spaces.
- A single Node instance
- A list containing either strings or Node instances. Any strings
in the list are not split at spaces.
In all cases, the function returns a list of Nodes and strings."""
if SCons.Util.is_List(arg):
return list(map(self.subst, arg))
elif SCons.Util.is_String(arg):
return self.subst(arg).split()
else:
return [self.subst(arg)]
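# Hedged examples of Split() (added for illustration only):
#   env.Split('a.c b.c c.c')     # -> ['a.c', 'b.c', 'c.c']
#   env.Split(['a.c', 'b.c'])    # -> ['a.c', 'b.c'] (no further splitting)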
def TargetSignatures(self, type):
global _warn_target_signatures_deprecated
if _warn_target_signatures_deprecated:
msg = "The env.TargetSignatures() method is deprecated;\n" + \
"\tconvert your build to use the env.Decider() method instead."
SCons.Warnings.warn(SCons.Warnings.DeprecatedTargetSignaturesWarning, msg)
_warn_target_signatures_deprecated = False
type = self.subst(type)
self.tgt_sig_type = type
if type in ('MD5', 'content'):
if not SCons.Util.md5:
raise UserError("MD5 signatures are not available in this version of Python.")
self.decide_target = self._changed_content
elif type == 'timestamp':
self.decide_target = self._changed_timestamp_match
elif type == 'build':
self.decide_target = self._changed_build
elif type == 'source':
self.decide_target = self._changed_source
else:
raise UserError("Unknown target signature type '%s'"%type)
def Value(self, value, built_value=None):
"""
"""
return SCons.Node.Python.Value(value, built_value)
def VariantDir(self, variant_dir, src_dir, duplicate=1):
variant_dir = self.arg2nodes(variant_dir, self.fs.Dir)[0]
src_dir = self.arg2nodes(src_dir, self.fs.Dir)[0]
self.fs.VariantDir(variant_dir, src_dir, duplicate)
def FindSourceFiles(self, node='.'):
""" returns a list of all source files.
"""
node = self.arg2nodes(node, self.fs.Entry)[0]
sources = []
def build_source(ss):
for s in ss:
if isinstance(s, SCons.Node.FS.Dir):
build_source(s.all_children())
elif s.has_builder():
build_source(s.sources)
elif isinstance(s.disambiguate(), SCons.Node.FS.File):
sources.append(s)
build_source(node.all_children())
# THIS CODE APPEARS TO HAVE NO EFFECT
# # get the final srcnode for all nodes, this means stripping any
# # attached build node by calling the srcnode function
# for file in sources:
# srcnode = file.srcnode()
# while srcnode != file.srcnode():
# srcnode = file.srcnode()
# remove duplicates
return list(set(sources))
def FindInstalledFiles(self):
""" returns the list of all targets of the Install and InstallAs Builder.
"""
from SCons.Tool import install
if install._UNIQUE_INSTALLED_FILES is None:
install._UNIQUE_INSTALLED_FILES = SCons.Util.uniquer_hashables(install._INSTALLED_FILES)
return install._UNIQUE_INSTALLED_FILES
class OverrideEnvironment(Base):
"""A proxy that overrides variables in a wrapped construction
environment by returning values from an overrides dictionary in
preference to values from the underlying subject environment.
This is a lightweight (I hope) proxy that passes through most use of
attributes to the underlying Environment.Base class, but has just
enough additional methods defined to act like a real construction
environment with overridden values. It can wrap either a Base
construction environment, or another OverrideEnvironment, which
can in turn nest arbitrary OverrideEnvironments...
Note that we do *not* call the underlying base class
    (SubstitutionEnvironment) initialization, because we get most of those
from proxying the attributes of the subject construction environment.
But because we subclass SubstitutionEnvironment, this class also
has inherited arg2nodes() and subst*() methods; those methods can't
be proxied because they need *this* object's methods to fetch the
values from the overrides dictionary.
"""
def __init__(self, subject, overrides={}):
if __debug__: logInstanceCreation(self, 'Environment.OverrideEnvironment')
self.__dict__['__subject'] = subject
self.__dict__['overrides'] = overrides
# Methods that make this class act like a proxy.
def __getattr__(self, name):
return getattr(self.__dict__['__subject'], name)
def __setattr__(self, name, value):
setattr(self.__dict__['__subject'], name, value)
# Methods that make this class act like a dictionary.
def __getitem__(self, key):
try:
return self.__dict__['overrides'][key]
except KeyError:
return self.__dict__['__subject'].__getitem__(key)
def __setitem__(self, key, value):
if not is_valid_construction_var(key):
raise SCons.Errors.UserError("Illegal construction variable `%s'" % key)
self.__dict__['overrides'][key] = value
def __delitem__(self, key):
try:
del self.__dict__['overrides'][key]
except KeyError:
deleted = 0
else:
deleted = 1
try:
result = self.__dict__['__subject'].__delitem__(key)
except KeyError:
if not deleted:
raise
result = None
return result
def get(self, key, default=None):
"""Emulates the get() method of dictionaries."""
try:
return self.__dict__['overrides'][key]
except KeyError:
return self.__dict__['__subject'].get(key, default)
def has_key(self, key):
try:
self.__dict__['overrides'][key]
return 1
except KeyError:
return key in self.__dict__['__subject']
def __contains__(self, key):
if self.__dict__['overrides'].__contains__(key):
return 1
return self.__dict__['__subject'].__contains__(key)
def Dictionary(self):
"""Emulates the items() method of dictionaries."""
d = self.__dict__['__subject'].Dictionary().copy()
d.update(self.__dict__['overrides'])
return d
def items(self):
"""Emulates the items() method of dictionaries."""
return list(self.Dictionary().items())
# Overridden private construction environment methods.
def _update(self, dict):
"""Update an environment's values directly, bypassing the normal
checks that occur when users try to set items.
"""
self.__dict__['overrides'].update(dict)
def gvars(self):
return self.__dict__['__subject'].gvars()
def lvars(self):
lvars = self.__dict__['__subject'].lvars()
lvars.update(self.__dict__['overrides'])
return lvars
# Overridden public construction environment methods.
def Replace(self, **kw):
kw = copy_non_reserved_keywords(kw)
self.__dict__['overrides'].update(semi_deepcopy(kw))
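# Illustrative usage sketch (not part of the original SCons source); `env` is
# assumed to be a Base construction environment:
#
#   oenv = OverrideEnvironment(env, {'CCFLAGS': '-g'})
#   oenv['CCFLAGS']   # -> '-g', taken from the overrides dictionary
#   oenv['CC']        # falls through to env['CC']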
# The entry point that will be used by the external world
# to refer to a construction environment. This allows the wrapper
# interface to extend a construction environment for its own purposes
# by subclassing SCons.Environment.Base and then assigning the
# class to SCons.Environment.Environment.
Environment = Base
# An entry point for returning a proxy subclass instance that overrides
# the subst*() methods so they don't actually perform construction
# variable substitution. This is specifically intended to be the shim
# layer in between global function calls (which don't want construction
# variable substitution) and the DefaultEnvironment() (which would
# substitute variables if left to its own devices).
#
# We have to wrap this in a function that allows us to delay definition of
# the class until it's necessary, so that when it subclasses Environment
# it will pick up whatever Environment subclass the wrapper interface
# might have assigned to SCons.Environment.Environment.
def NoSubstitutionProxy(subject):
class _NoSubstitutionProxy(Environment):
def __init__(self, subject):
self.__dict__['__subject'] = subject
def __getattr__(self, name):
return getattr(self.__dict__['__subject'], name)
def __setattr__(self, name, value):
return setattr(self.__dict__['__subject'], name, value)
def raw_to_mode(self, dict):
try:
raw = dict['raw']
except KeyError:
pass
else:
del dict['raw']
dict['mode'] = raw
def subst(self, string, *args, **kwargs):
return string
def subst_kw(self, kw, *args, **kwargs):
return kw
def subst_list(self, string, *args, **kwargs):
nargs = (string, self,) + args
nkw = kwargs.copy()
nkw['gvars'] = {}
self.raw_to_mode(nkw)
return SCons.Subst.scons_subst_list(*nargs, **nkw)
def subst_target_source(self, string, *args, **kwargs):
nargs = (string, self,) + args
nkw = kwargs.copy()
nkw['gvars'] = {}
self.raw_to_mode(nkw)
return SCons.Subst.scons_subst(*nargs, **nkw)
return _NoSubstitutionProxy(subject)
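# Illustrative usage sketch (not part of the original SCons source):
#
#   proxy = NoSubstitutionProxy(Environment())
#   proxy.subst('$CC')            # -> '$CC', returned unexpanded
#   proxy.subst_kw({'x': '$CC'})  # -> {'x': '$CC'}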
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mit | 2,234,776,760,919,675,600 | 38.395168 | 135 | 0.558203 | false |
joymarquis/mscc | general/lib/python/pexpect-4.0.1/pexpect/pxssh.py | 12 | 18725 | '''This class extends pexpect.spawn to specialize setting up SSH connections.
This adds methods for login, logout, and expecting the shell prompt.
PEXPECT LICENSE
This license is approved by the OSI and FSF as GPL-compatible.
http://opensource.org/licenses/isc-license.txt
Copyright (c) 2012, Noah Spurrier <[email protected]>
PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
'''
from pexpect import ExceptionPexpect, TIMEOUT, EOF, spawn
import time
import os
__all__ = ['ExceptionPxssh', 'pxssh']
# Exception classes used by this module.
class ExceptionPxssh(ExceptionPexpect):
'''Raised for pxssh exceptions.
'''
class pxssh (spawn):
'''This class extends pexpect.spawn to specialize setting up SSH
connections. This adds methods for login, logout, and expecting the shell
prompt. It does various tricky things to handle many situations in the SSH
login process. For example, if the session is your first login, then pxssh
automatically accepts the remote certificate; or if you have public key
authentication setup then pxssh won't wait for the password prompt.
pxssh uses the shell prompt to synchronize output from the remote host. In
order to make this more robust it sets the shell prompt to something more
unique than just $ or #. This should work on most Borne/Bash or Csh style
shells.
Example that runs a few commands on a remote server and prints the result::
import pxssh
import getpass
try:
s = pxssh.pxssh()
hostname = raw_input('hostname: ')
username = raw_input('username: ')
password = getpass.getpass('password: ')
s.login(hostname, username, password)
s.sendline('uptime') # run a command
s.prompt() # match the prompt
print(s.before) # print everything before the prompt.
s.sendline('ls -l')
s.prompt()
print(s.before)
s.sendline('df')
s.prompt()
print(s.before)
s.logout()
except pxssh.ExceptionPxssh as e:
print("pxssh failed on login.")
print(e)
Example showing how to specify SSH options::
import pxssh
s = pxssh.pxssh(options={
"StrictHostKeyChecking": "no",
"UserKnownHostsFile": "/dev/null"})
...
Note that if you have ssh-agent running while doing development with pxssh
then this can lead to a lot of confusion. Many X display managers (xdm,
gdm, kdm, etc.) will automatically start a GUI agent. You may see a GUI
dialog box popup asking for a password during development. You should turn
off any key agents during testing. The 'force_password' attribute will turn
off public key authentication. This will only work if the remote SSH server
is configured to allow password logins. Example of using 'force_password'
attribute::
s = pxssh.pxssh()
s.force_password = True
hostname = raw_input('hostname: ')
username = raw_input('username: ')
password = getpass.getpass('password: ')
s.login (hostname, username, password)
'''
def __init__ (self, timeout=30, maxread=2000, searchwindowsize=None,
logfile=None, cwd=None, env=None, ignore_sighup=True, echo=True,
options={}, encoding=None, codec_errors='strict'):
spawn.__init__(self, None, timeout=timeout, maxread=maxread,
searchwindowsize=searchwindowsize, logfile=logfile,
cwd=cwd, env=env, ignore_sighup=ignore_sighup, echo=echo,
encoding=encoding, codec_errors=codec_errors)
self.name = '<pxssh>'
#SUBTLE HACK ALERT! Note that the command that SETS the prompt uses a
#slightly different string than the regular expression to match it. This
#is because when you set the prompt the command will echo back, but we
#don't want to match the echoed command. So if we make the set command
#slightly different than the regex we eliminate the problem. To make the
#set command different we add a backslash in front of $. The $ doesn't
#need to be escaped, but it doesn't hurt and serves to make the set
#prompt command different than the regex.
# used to match the command-line prompt
self.UNIQUE_PROMPT = "\[PEXPECT\][\$\#] "
self.PROMPT = self.UNIQUE_PROMPT
# used to set shell command-line prompt to UNIQUE_PROMPT.
self.PROMPT_SET_SH = "PS1='[PEXPECT]\$ '"
self.PROMPT_SET_CSH = "set prompt='[PEXPECT]\$ '"
self.SSH_OPTS = ("-o'RSAAuthentication=no'"
+ " -o 'PubkeyAuthentication=no'")
        # Disabling host key checking makes you vulnerable to MITM attacks.
# + " -o 'StrictHostKeyChecking=no'"
# + " -o 'UserKnownHostsFile /dev/null' ")
# Disabling X11 forwarding gets rid of the annoying SSH_ASKPASS from
# displaying a GUI password dialog. I have not figured out how to
# disable only SSH_ASKPASS without also disabling X11 forwarding.
# Unsetting SSH_ASKPASS on the remote side doesn't disable it! Annoying!
#self.SSH_OPTS = "-x -o'RSAAuthentication=no' -o 'PubkeyAuthentication=no'"
self.force_password = False
        # User-defined SSH options, e.g.,
        # ssh.options = dict(StrictHostKeyChecking="no", UserKnownHostsFile="/dev/null")
self.options = options
def levenshtein_distance(self, a, b):
'''This calculates the Levenshtein distance between a and b.
'''
n, m = len(a), len(b)
if n > m:
a,b = b,a
n,m = m,n
current = range(n+1)
for i in range(1,m+1):
previous, current = current, [i]+[0]*n
for j in range(1,n+1):
add, delete = previous[j]+1, current[j-1]+1
change = previous[j-1]
if a[j-1] != b[i-1]:
change = change + 1
current[j] = min(add, delete, change)
return current[n]
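    # Illustrative example (not part of the original pexpect source); `s` is
    # assumed to be a pxssh instance:
    #
    #   s.levenshtein_distance('kitten', 'sitting')   # -> 3
    #
    # sync_original_prompt() uses this distance to decide whether two captured
    # prompts are similar enough to be considered the same prompt.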
def try_read_prompt(self, timeout_multiplier):
'''This facilitates using communication timeouts to perform
synchronization as quickly as possible, while supporting high latency
connections with a tunable worst case performance. Fast connections
should be read almost immediately. Worst case performance for this
method is timeout_multiplier * 3 seconds.
'''
# maximum time allowed to read the first response
first_char_timeout = timeout_multiplier * 0.5
# maximum time allowed between subsequent characters
inter_char_timeout = timeout_multiplier * 0.1
# maximum time for reading the entire prompt
total_timeout = timeout_multiplier * 3.0
prompt = self.string_type()
begin = time.time()
expired = 0.0
timeout = first_char_timeout
while expired < total_timeout:
try:
prompt += self.read_nonblocking(size=1, timeout=timeout)
expired = time.time() - begin # updated total time expired
timeout = inter_char_timeout
except TIMEOUT:
break
return prompt
def sync_original_prompt (self, sync_multiplier=1.0):
'''This attempts to find the prompt. Basically, press enter and record
the response; press enter again and record the response; if the two
responses are similar then assume we are at the original prompt.
This can be a slow function. Worst case with the default sync_multiplier
can take 12 seconds. Low latency connections are more likely to fail
with a low sync_multiplier. Best case sync time gets worse with a
high sync multiplier (500 ms with default). '''
# All of these timing pace values are magic.
# I came up with these based on what seemed reliable for
# connecting to a heavily loaded machine I have.
self.sendline()
time.sleep(0.1)
try:
# Clear the buffer before getting the prompt.
self.try_read_prompt(sync_multiplier)
except TIMEOUT:
pass
self.sendline()
x = self.try_read_prompt(sync_multiplier)
self.sendline()
a = self.try_read_prompt(sync_multiplier)
self.sendline()
b = self.try_read_prompt(sync_multiplier)
ld = self.levenshtein_distance(a,b)
len_a = len(a)
if len_a == 0:
return False
if float(ld)/len_a < 0.4:
return True
return False
### TODO: This is getting messy and I'm pretty sure this isn't perfect.
### TODO: I need to draw a flow chart for this.
def login (self, server, username, password='', terminal_type='ansi',
original_prompt=r"[#$]", login_timeout=10, port=None,
auto_prompt_reset=True, ssh_key=None, quiet=True,
sync_multiplier=1, check_local_ip=True):
'''This logs the user into the given server.
It uses
'original_prompt' to try to find the prompt right after login. When it
finds the prompt it immediately tries to reset the prompt to something
more easily matched. The default 'original_prompt' is very optimistic
and is easily fooled. It's more reliable to try to match the original
prompt as exactly as possible to prevent false matches by server
strings such as the "Message Of The Day". On many systems you can
disable the MOTD on the remote server by creating a zero-length file
called :file:`~/.hushlogin` on the remote server. If a prompt cannot be found
then this will not necessarily cause the login to fail. In the case of
a timeout when looking for the prompt we assume that the original
prompt was so weird that we could not match it, so we use a few tricks
to guess when we have reached the prompt. Then we hope for the best and
blindly try to reset the prompt to something more unique. If that fails
then login() raises an :class:`ExceptionPxssh` exception.
In some situations it is not possible or desirable to reset the
original prompt. In this case, pass ``auto_prompt_reset=False`` to
inhibit setting the prompt to the UNIQUE_PROMPT. Remember that pxssh
uses a unique prompt in the :meth:`prompt` method. If the original prompt is
not reset then this will disable the :meth:`prompt` method unless you
manually set the :attr:`PROMPT` attribute.
'''
ssh_options = ''.join([" -o '%s=%s'" % (o, v) for (o, v) in self.options.items()])
if quiet:
ssh_options = ssh_options + ' -q'
if not check_local_ip:
ssh_options = ssh_options + " -o'NoHostAuthenticationForLocalhost=yes'"
if self.force_password:
ssh_options = ssh_options + ' ' + self.SSH_OPTS
if port is not None:
ssh_options = ssh_options + ' -p %s'%(str(port))
if ssh_key is not None:
try:
os.path.isfile(ssh_key)
except:
raise ExceptionPxssh('private ssh key does not exist')
ssh_options = ssh_options + ' -i %s' % (ssh_key)
cmd = "ssh %s -l %s %s" % (ssh_options, username, server)
# This does not distinguish between a remote server 'password' prompt
# and a local ssh 'passphrase' prompt (for unlocking a private key).
spawn._spawn(self, cmd)
i = self.expect(["(?i)are you sure you want to continue connecting", original_prompt, "(?i)(?:password)|(?:passphrase for key)", "(?i)permission denied", "(?i)terminal type", TIMEOUT, "(?i)connection closed by remote host"], timeout=login_timeout)
# First phase
if i==0:
# New certificate -- always accept it.
# This is what you get if SSH does not have the remote host's
# public key stored in the 'known_hosts' cache.
self.sendline("yes")
i = self.expect(["(?i)are you sure you want to continue connecting", original_prompt, "(?i)(?:password)|(?:passphrase for key)", "(?i)permission denied", "(?i)terminal type", TIMEOUT])
if i==2: # password or passphrase
self.sendline(password)
i = self.expect(["(?i)are you sure you want to continue connecting", original_prompt, "(?i)(?:password)|(?:passphrase for key)", "(?i)permission denied", "(?i)terminal type", TIMEOUT])
if i==4:
self.sendline(terminal_type)
i = self.expect(["(?i)are you sure you want to continue connecting", original_prompt, "(?i)(?:password)|(?:passphrase for key)", "(?i)permission denied", "(?i)terminal type", TIMEOUT])
# Second phase
if i==0:
# This is weird. This should not happen twice in a row.
self.close()
raise ExceptionPxssh('Weird error. Got "are you sure" prompt twice.')
elif i==1: # can occur if you have a public key pair set to authenticate.
### TODO: May NOT be OK if expect() got tricked and matched a false prompt.
pass
elif i==2: # password prompt again
# For incorrect passwords, some ssh servers will
# ask for the password again, others return 'denied' right away.
# If we get the password prompt again then this means
# we didn't get the password right the first time.
self.close()
raise ExceptionPxssh('password refused')
elif i==3: # permission denied -- password was bad.
self.close()
raise ExceptionPxssh('permission denied')
elif i==4: # terminal type again? WTF?
self.close()
raise ExceptionPxssh('Weird error. Got "terminal type" prompt twice.')
elif i==5: # Timeout
#This is tricky... I presume that we are at the command-line prompt.
#It may be that the shell prompt was so weird that we couldn't match
#it. Or it may be that we couldn't log in for some other reason. I
#can't be sure, but it's safe to guess that we did login because if
#I presume wrong and we are not logged in then this should be caught
#later when I try to set the shell prompt.
pass
elif i==6: # Connection closed by remote host
self.close()
raise ExceptionPxssh('connection closed')
else: # Unexpected
self.close()
raise ExceptionPxssh('unexpected login response')
if not self.sync_original_prompt(sync_multiplier):
self.close()
raise ExceptionPxssh('could not synchronize with original prompt')
# We appear to be in.
# set shell prompt to something unique.
if auto_prompt_reset:
if not self.set_unique_prompt():
self.close()
raise ExceptionPxssh('could not set shell prompt '
'(received: %r, expected: %r).' % (
self.before, self.PROMPT,))
return True
def logout (self):
'''Sends exit to the remote shell.
If there are stopped jobs then this automatically sends exit twice.
'''
self.sendline("exit")
index = self.expect([EOF, "(?i)there are stopped jobs"])
if index==1:
self.sendline("exit")
self.expect(EOF)
self.close()
def prompt(self, timeout=-1):
'''Match the next shell prompt.
This is little more than a short-cut to the :meth:`~pexpect.spawn.expect`
method. Note that if you called :meth:`login` with
``auto_prompt_reset=False``, then before calling :meth:`prompt` you must
set the :attr:`PROMPT` attribute to a regex that it will use for
matching the prompt.
Calling :meth:`prompt` will erase the contents of the :attr:`before`
attribute even if no prompt is ever matched. If timeout is not given or
it is set to -1 then self.timeout is used.
:return: True if the shell prompt was matched, False if the timeout was
reached.
'''
if timeout == -1:
timeout = self.timeout
i = self.expect([self.PROMPT, TIMEOUT], timeout=timeout)
if i==1:
return False
return True
def set_unique_prompt(self):
'''This sets the remote prompt to something more unique than ``#`` or ``$``.
This makes it easier for the :meth:`prompt` method to match the shell prompt
unambiguously. This method is called automatically by the :meth:`login`
method, but you may want to call it manually if you somehow reset the
shell prompt. For example, if you 'su' to a different user then you
will need to manually reset the prompt. This sends shell commands to
the remote host to set the prompt, so this assumes the remote host is
ready to receive commands.
Alternatively, you may use your own prompt pattern. In this case you
should call :meth:`login` with ``auto_prompt_reset=False``; then set the
:attr:`PROMPT` attribute to a regular expression. After that, the
:meth:`prompt` method will try to match your prompt pattern.
'''
self.sendline("unset PROMPT_COMMAND")
self.sendline(self.PROMPT_SET_SH) # sh-style
i = self.expect ([TIMEOUT, self.PROMPT], timeout=10)
if i == 0: # csh-style
self.sendline(self.PROMPT_SET_CSH)
i = self.expect([TIMEOUT, self.PROMPT], timeout=10)
if i == 0:
return False
return True
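    # Illustrative usage sketch (not part of the original pexpect source);
    # after switching users on the remote host the prompt must be reset by
    # hand, e.g.:
    #
    #   s.sendline('su - otheruser')
    #   s.expect('(?i)password')
    #   s.sendline(su_password)
    #   s.set_unique_prompt()    # re-establish the unique [PEXPECT] prompt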
# vi:ts=4:sw=4:expandtab:ft=python:
| gpl-3.0 | 2,238,996,445,178,845,200 | 45.12069 | 255 | 0.619546 | false |
Zkin/pf-kernel-updates | tools/perf/scripts/python/check-perf-trace.py | 1997 | 2539 | # perf script event handlers, generated by perf script -g python
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
unhandled = autodict()
def trace_begin():
print "trace_begin"
pass
def trace_end():
print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, vec):
print_header(event_name, common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
print_uncommon(context)
print "vec=%s\n" % \
(symbol_str("irq__softirq_entry", "vec", vec)),
def kmem__kmalloc(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, call_site, ptr, bytes_req, bytes_alloc,
gfp_flags):
print_header(event_name, common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
print_uncommon(context)
print "call_site=%u, ptr=%u, bytes_req=%u, " \
"bytes_alloc=%u, gfp_flags=%s\n" % \
(call_site, ptr, bytes_req, bytes_alloc,
flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, event_fields_dict):
try:
unhandled[event_name] += 1
except TypeError:
unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
print "%-20s %5u %05u.%09u %8u %-20s " % \
(event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
% (common_pc(context), trace_flag_str(common_flags(context)), \
common_lock_depth(context))
def print_unhandled():
keys = unhandled.keys()
if not keys:
return
print "\nunhandled events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for event_name in keys:
print "%-40s %10d\n" % (event_name, unhandled[event_name])
| gpl-2.0 | 8,903,976,013,017,196,000 | 29.963415 | 78 | 0.64553 | false |
APM602/APM602 | mk/PX4/Tools/genmsg/src/genmsg/template_tools.py | 215 | 9443 | # Software License Agreement (BSD License)
#
# Copyright (c) 2011, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
## ROS Message generation
##
##
import sys
import os
import em
import genmsg.command_line
import genmsg.msgs
import genmsg.msg_loader
import genmsg.gentools
# Generate msg or srv files from a template file.
# template_map is of the form {'template_file': 'output_file'}; output_file may
# contain @NAME@, which will be replaced by the message/service name.
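# Illustrative template_map (an assumption for documentation purposes, not part
# of the original genmsg source):
#
#   msg_template_dict = {'msg.h.template': '@NAME@.h'}
#   srv_template_dict = {'srv.h.template': '@NAME@.h'}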
def _generate_from_spec(input_file, output_dir, template_dir, msg_context, spec, template_map, search_path):
md5sum = genmsg.gentools.compute_md5(msg_context, spec)
# precompute msg definition once
if isinstance(spec, genmsg.msgs.MsgSpec):
msg_definition = genmsg.gentools.compute_full_text(msg_context, spec)
# Loop over all files to generate
for template_file_name, output_file_name in template_map.items():
template_file = os.path.join(template_dir, template_file_name)
output_file = os.path.join(output_dir, output_file_name.replace("@NAME@", spec.short_name))
#print "generate_from_template %s %s %s" % (input_file, template_file, output_file)
ofile = open(output_file, 'w') #todo try
# Set dictionary for the generator interpreter
g = {
"file_name_in": input_file,
"spec": spec,
"md5sum": md5sum,
"search_path": search_path,
"msg_context": msg_context
}
if isinstance(spec, genmsg.msgs.MsgSpec):
g['msg_definition'] = msg_definition
# todo, reuse interpreter
interpreter = em.Interpreter(output=ofile, globals=g, options={em.RAW_OPT:True,em.BUFFERED_OPT:True})
if not os.path.isfile(template_file):
ofile.close()
os.remove(output_file)
raise RuntimeError("Template file %s not found in template dir %s" % (template_file_name, template_dir))
interpreter.file(open(template_file)) #todo try
interpreter.shutdown()
def _generate_msg_from_file(input_file, output_dir, template_dir, search_path, package_name, msg_template_dict):
# Read MsgSpec from .msg file
msg_context = genmsg.msg_loader.MsgContext.create_default()
full_type_name = genmsg.gentools.compute_full_type_name(package_name, os.path.basename(input_file))
spec = genmsg.msg_loader.load_msg_from_file(msg_context, input_file, full_type_name)
# Load the dependencies
genmsg.msg_loader.load_depends(msg_context, spec, search_path)
# Generate the language dependent msg file
_generate_from_spec(input_file,
output_dir,
template_dir,
msg_context,
spec,
msg_template_dict,
search_path)
def _generate_srv_from_file(input_file, output_dir, template_dir, search_path, package_name, srv_template_dict, msg_template_dict):
# Read MsgSpec from .srv.file
msg_context = genmsg.msg_loader.MsgContext.create_default()
full_type_name = genmsg.gentools.compute_full_type_name(package_name, os.path.basename(input_file))
spec = genmsg.msg_loader.load_srv_from_file(msg_context, input_file, full_type_name)
# Load the dependencies
genmsg.msg_loader.load_depends(msg_context, spec, search_path)
# Generate the language dependent srv file
_generate_from_spec(input_file,
output_dir,
template_dir,
msg_context,
spec,
srv_template_dict,
search_path)
# Generate the language dependent msg file for the srv request
_generate_from_spec(input_file,
output_dir,
template_dir,
msg_context,
spec.request,
msg_template_dict,
search_path)
# Generate the language dependent msg file for the srv response
_generate_from_spec(input_file,
output_dir,
template_dir,
msg_context,
spec.response,
msg_template_dict,
search_path)
# Uniform interface for generating either srv or msg files
def generate_from_file(input_file, package_name, output_dir, template_dir, include_path, msg_template_dict, srv_template_dict):
# Normalize paths
input_file = os.path.abspath(input_file)
output_dir = os.path.abspath(output_dir)
# Create output dir
try:
os.makedirs(output_dir)
except OSError as e:
if e.errno != 17: # ignore file exists error
raise
# Parse include path dictionary
if( include_path ):
search_path = genmsg.command_line.includepath_to_dict(include_path)
else:
search_path = {}
# Generate the file(s)
if input_file.endswith(".msg"):
_generate_msg_from_file(input_file, output_dir, template_dir, search_path, package_name, msg_template_dict)
elif input_file.endswith(".srv"):
_generate_srv_from_file(input_file, output_dir, template_dir, search_path, package_name, srv_template_dict, msg_template_dict)
else:
assert False, "Uknown file extension for %s"%input_file
def generate_module(package_name, output_dir, template_dir, template_dict):
# Locate generate msg files
files = os.listdir(output_dir)
# Loop over all files to generate
for template_file_name, output_file_name in template_dict.items():
template_file = os.path.join(template_dir, template_file_name)
output_file = os.path.join(output_dir, output_file_name)
ofile = open(output_file, 'w') #todo try
        # Set dictionary for the generator interpreter
g = dict(files=files,
package=package_name)
# todo, reuse interpreter
interpreter = em.Interpreter(output=ofile, options={em.RAW_OPT:True,em.BUFFERED_OPT:True})
interpreter.updateGlobals(g)
if not os.path.isfile(template_file):
ofile.close()
os.remove(output_file)
raise RuntimeError("Template file %s not found in template dir %s" % (template_file_name, template_dir))
interpreter.file(open(template_file)) #todo try
interpreter.shutdown()
# Uniform interface to support the standard command line options
def generate_from_command_line_options(argv, msg_template_dict, srv_template_dict, module_template_dict = {}):
from optparse import OptionParser
parser = OptionParser("[options] <srv file>")
parser.add_option("-p", dest='package',
help="ros package the generated msg/srv files belongs to")
parser.add_option("-o", dest='outdir',
help="directory in which to place output files")
parser.add_option("-I", dest='includepath',
help="include path to search for messages",
action="append")
parser.add_option("-m", dest='module',
help="write the module file",
action='store_true', default=False)
parser.add_option("-e", dest='emdir',
help="directory containing template files",
default=sys.path[0])
(options, argv) = parser.parse_args(argv)
if( not options.package or not options.outdir or not options.emdir):
parser.print_help()
exit(-1)
if( options.module ):
generate_module(options.package, options.outdir, options.emdir, module_template_dict)
else:
if len(argv) > 1:
generate_from_file(argv[1], options.package, options.outdir, options.emdir, options.includepath, msg_template_dict, srv_template_dict)
else:
parser.print_help()
exit(-1)
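# Illustrative invocation of a generator built on top of
# generate_from_command_line_options() (the script name and paths below are
# hypothetical, not taken from the original source):
#
#   gen_cpp.py -p my_pkg -o build/my_pkg -e templates \
#       -I 'std_msgs:/opt/ros/share/std_msgs/msg' msg/Foo.msg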
| gpl-3.0 | -2,896,237,510,579,740,700 | 42.717593 | 146 | 0.644181 | false |
ville-k/tensorflow | tensorflow/python/kernel_tests/string_join_op_test.py | 134 | 1896 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for string_join_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
class StringJoinOpTest(test.TestCase):
def testStringJoin(self):
input0 = ["a", "b"]
input1 = "a"
input2 = [["b"], ["c"]]
with self.test_session():
output = string_ops.string_join([input0, input1])
self.assertAllEqual(output.eval(), [b"aa", b"ba"])
output = string_ops.string_join([input0, input1], separator="--")
self.assertAllEqual(output.eval(), [b"a--a", b"b--a"])
output = string_ops.string_join([input0, input1, input0], separator="--")
self.assertAllEqual(output.eval(), [b"a--a--a", b"b--a--b"])
output = string_ops.string_join([input1] * 4, separator="!")
self.assertEqual(output.eval(), b"a!a!a!a")
output = string_ops.string_join([input2] * 2, separator="")
self.assertAllEqual(output.eval(), [[b"bb"], [b"cc"]])
with self.assertRaises(ValueError): # Inconsistent shapes
string_ops.string_join([input0, input2]).eval()
if __name__ == "__main__":
test.main()
| apache-2.0 | -8,828,361,939,091,260,000 | 35.461538 | 80 | 0.647152 | false |
brandonium21/snowflake | snowflakeEnv/lib/python2.7/site-packages/pip/vcs/bazaar.py | 280 | 4427 | from __future__ import absolute_import
import logging
import os
import tempfile
import re
# TODO: Get this into six.moves.urllib.parse
try:
from urllib import parse as urllib_parse
except ImportError:
import urlparse as urllib_parse
from pip.utils import rmtree, display_path
from pip.vcs import vcs, VersionControl
from pip.download import path_to_url
logger = logging.getLogger(__name__)
class Bazaar(VersionControl):
name = 'bzr'
dirname = '.bzr'
repo_name = 'branch'
schemes = (
'bzr', 'bzr+http', 'bzr+https', 'bzr+ssh', 'bzr+sftp', 'bzr+ftp',
'bzr+lp',
)
def __init__(self, url=None, *args, **kwargs):
super(Bazaar, self).__init__(url, *args, **kwargs)
# Python >= 2.7.4, 3.3 doesn't have uses_fragment or non_hierarchical
# Register lp but do not expose as a scheme to support bzr+lp.
if getattr(urllib_parse, 'uses_fragment', None):
urllib_parse.uses_fragment.extend(['lp'])
urllib_parse.non_hierarchical.extend(['lp'])
def export(self, location):
"""
Export the Bazaar repository at the url to the destination location
"""
temp_dir = tempfile.mkdtemp('-export', 'pip-')
self.unpack(temp_dir)
if os.path.exists(location):
# Remove the location to make sure Bazaar can export it correctly
rmtree(location)
try:
self.run_command(['export', location], cwd=temp_dir,
show_stdout=False)
finally:
rmtree(temp_dir)
def switch(self, dest, url, rev_options):
self.run_command(['switch', url], cwd=dest)
def update(self, dest, rev_options):
self.run_command(['pull', '-q'] + rev_options, cwd=dest)
def obtain(self, dest):
url, rev = self.get_url_rev()
if rev:
rev_options = ['-r', rev]
rev_display = ' (to revision %s)' % rev
else:
rev_options = []
rev_display = ''
if self.check_destination(dest, url, rev_options, rev_display):
logger.info(
'Checking out %s%s to %s',
url,
rev_display,
display_path(dest),
)
self.run_command(['branch', '-q'] + rev_options + [url, dest])
def get_url_rev(self):
        # Hotfix the URL scheme: after bzr+ has been stripped from bzr+ssh://, re-add it.
url, rev = super(Bazaar, self).get_url_rev()
if url.startswith('ssh://'):
url = 'bzr+' + url
return url, rev
def get_url(self, location):
urls = self.run_command(['info'], show_stdout=False, cwd=location)
for line in urls.splitlines():
line = line.strip()
for x in ('checkout of branch: ',
'parent branch: '):
if line.startswith(x):
repo = line.split(x)[1]
if self._is_local_repository(repo):
return path_to_url(repo)
return repo
return None
def get_revision(self, location):
revision = self.run_command(
['revno'], show_stdout=False, cwd=location)
return revision.splitlines()[-1]
def get_tag_revs(self, location):
tags = self.run_command(
['tags'], show_stdout=False, cwd=location)
tag_revs = []
for line in tags.splitlines():
tags_match = re.search(r'([.\w-]+)\s*(.*)$', line)
if tags_match:
tag = tags_match.group(1)
rev = tags_match.group(2)
tag_revs.append((rev.strip(), tag.strip()))
return dict(tag_revs)
def get_src_requirement(self, dist, location, find_tags):
repo = self.get_url(location)
if not repo:
return None
if not repo.lower().startswith('bzr:'):
repo = 'bzr+' + repo
egg_project_name = dist.egg_name().split('-', 1)[0]
current_rev = self.get_revision(location)
tag_revs = self.get_tag_revs(location)
if current_rev in tag_revs:
# It's a tag
full_egg_name = '%s-%s' % (egg_project_name, tag_revs[current_rev])
else:
full_egg_name = '%s-dev_r%s' % (dist.egg_name(), current_rev)
return '%s@%s#egg=%s' % (repo, current_rev, full_egg_name)
vcs.register(Bazaar)
| bsd-2-clause | -5,683,181,305,543,581,000 | 32.537879 | 79 | 0.545516 | false |
etuna-SBF-kog/Stadsparken | env/lib/python2.7/site-packages/django/contrib/auth/hashers.py | 2 | 15500 | import functools
import hashlib
from django.conf import settings
from django.utils import importlib
from django.utils.datastructures import SortedDict
from django.utils.encoding import smart_str
from django.core.exceptions import ImproperlyConfigured
from django.utils.crypto import (
pbkdf2, constant_time_compare, get_random_string)
from django.utils.translation import ugettext_noop as _
UNUSABLE_PASSWORD = '!' # This will never be a valid encoded hash
MAXIMUM_PASSWORD_LENGTH = 4096 # The maximum length a password can be to prevent DoS
HASHERS = None # lazily loaded from PASSWORD_HASHERS
PREFERRED_HASHER = None # defaults to first item in PASSWORD_HASHERS
def password_max_length(max_length):
def inner(fn):
@functools.wraps(fn)
def wrapper(self, password, *args, **kwargs):
if len(password) > max_length:
raise ValueError("Invalid password; Must be less than or equal"
" to %d bytes" % max_length)
return fn(self, password, *args, **kwargs)
return wrapper
return inner
def is_password_usable(encoded):
return (encoded is not None and encoded != UNUSABLE_PASSWORD)
def check_password(password, encoded, setter=None, preferred='default'):
"""
Returns a boolean of whether the raw password matches the three
part encoded digest.
If setter is specified, it'll be called when you need to
regenerate the password.
"""
if not password or not is_password_usable(encoded):
return False
preferred = get_hasher(preferred)
raw_password = password
password = smart_str(password)
encoded = smart_str(encoded)
# Ancient versions of Django created plain MD5 passwords and accepted
# MD5 passwords with an empty salt.
if ((len(encoded) == 32 and '$' not in encoded) or
(len(encoded) == 37 and encoded.startswith('md5$$'))):
hasher = get_hasher('unsalted_md5')
# Ancient versions of Django accepted SHA1 passwords with an empty salt.
elif len(encoded) == 46 and encoded.startswith('sha1$$'):
hasher = get_hasher('unsalted_sha1')
else:
algorithm = encoded.split('$', 1)[0]
hasher = get_hasher(algorithm)
must_update = hasher.algorithm != preferred.algorithm
is_correct = hasher.verify(password, encoded)
if setter and is_correct and must_update:
setter(raw_password)
return is_correct
def make_password(password, salt=None, hasher='default'):
"""
Turn a plain-text password into a hash for database storage
Same as encode() but generates a new random salt. If
password is None or blank then UNUSABLE_PASSWORD will be
returned which disallows logins.
"""
if not password:
return UNUSABLE_PASSWORD
hasher = get_hasher(hasher)
password = smart_str(password)
if not salt:
salt = hasher.salt()
salt = smart_str(salt)
return hasher.encode(password, salt)
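# Illustrative usage sketch (not part of the original Django source):
#
#   encoded = make_password('s3cret')   # e.g. 'pbkdf2_sha256$10000$<salt>$<hash>'
#   check_password('s3cret', encoded)   # -> True
#   check_password('wrong', encoded)    # -> False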
def load_hashers(password_hashers=None):
global HASHERS
global PREFERRED_HASHER
hashers = []
if not password_hashers:
password_hashers = settings.PASSWORD_HASHERS
for backend in password_hashers:
try:
mod_path, cls_name = backend.rsplit('.', 1)
mod = importlib.import_module(mod_path)
hasher_cls = getattr(mod, cls_name)
except (AttributeError, ImportError, ValueError):
raise ImproperlyConfigured("hasher not found: %s" % backend)
hasher = hasher_cls()
if not getattr(hasher, 'algorithm'):
raise ImproperlyConfigured("hasher doesn't specify an "
"algorithm name: %s" % backend)
hashers.append(hasher)
HASHERS = dict([(hasher.algorithm, hasher) for hasher in hashers])
PREFERRED_HASHER = hashers[0]
def get_hasher(algorithm='default'):
"""
Returns an instance of a loaded password hasher.
If algorithm is 'default', the default hasher will be returned.
This function will also lazy import hashers specified in your
settings file if needed.
"""
if hasattr(algorithm, 'algorithm'):
return algorithm
elif algorithm == 'default':
if PREFERRED_HASHER is None:
load_hashers()
return PREFERRED_HASHER
else:
if HASHERS is None:
load_hashers()
if algorithm not in HASHERS:
raise ValueError("Unknown password hashing algorithm '%s'. "
"Did you specify it in the PASSWORD_HASHERS "
"setting?" % algorithm)
return HASHERS[algorithm]
def mask_hash(hash, show=6, char="*"):
"""
Returns the given hash, with only the first ``show`` number shown. The
rest are masked with ``char`` for security reasons.
"""
masked = hash[:show]
masked += char * len(hash[show:])
return masked
class BasePasswordHasher(object):
"""
Abstract base class for password hashers
When creating your own hasher, you need to override algorithm,
verify(), encode() and safe_summary().
PasswordHasher objects are immutable.
"""
algorithm = None
library = None
def _load_library(self):
if self.library is not None:
if isinstance(self.library, (tuple, list)):
name, mod_path = self.library
else:
name = mod_path = self.library
try:
module = importlib.import_module(mod_path)
except ImportError:
raise ValueError("Couldn't load %s password algorithm "
"library" % name)
return module
raise ValueError("Hasher '%s' doesn't specify a library attribute" %
self.__class__)
def salt(self):
"""
Generates a cryptographically secure nonce salt in ascii
"""
return get_random_string()
def verify(self, password, encoded):
"""
Checks if the given password is correct
"""
raise NotImplementedError()
def encode(self, password, salt):
"""
Creates an encoded database value
The result is normally formatted as "algorithm$salt$hash" and
must be fewer than 128 characters.
"""
raise NotImplementedError()
def safe_summary(self, encoded):
"""
Returns a summary of safe values
The result is a dictionary and will be used where the password field
must be displayed to construct a safe representation of the password.
"""
raise NotImplementedError()
class PBKDF2PasswordHasher(BasePasswordHasher):
"""
Secure password hashing using the PBKDF2 algorithm (recommended)
Configured to use PBKDF2 + HMAC + SHA256 with 10000 iterations.
The result is a 64 byte binary string. Iterations may be changed
safely but you must rename the algorithm if you change SHA256.
"""
algorithm = "pbkdf2_sha256"
iterations = 10000
digest = hashlib.sha256
@password_max_length(MAXIMUM_PASSWORD_LENGTH)
def encode(self, password, salt, iterations=None):
assert password
assert salt and '$' not in salt
if not iterations:
iterations = self.iterations
hash = pbkdf2(password, salt, iterations, digest=self.digest)
hash = hash.encode('base64').strip()
return "%s$%d$%s$%s" % (self.algorithm, iterations, salt, hash)
@password_max_length(MAXIMUM_PASSWORD_LENGTH)
def verify(self, password, encoded):
algorithm, iterations, salt, hash = encoded.split('$', 3)
assert algorithm == self.algorithm
encoded_2 = self.encode(password, salt, int(iterations))
return constant_time_compare(encoded, encoded_2)
def safe_summary(self, encoded):
algorithm, iterations, salt, hash = encoded.split('$', 3)
assert algorithm == self.algorithm
return SortedDict([
(_('algorithm'), algorithm),
(_('iterations'), iterations),
(_('salt'), mask_hash(salt)),
(_('hash'), mask_hash(hash)),
])
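# Illustrative encoded value produced by PBKDF2PasswordHasher.encode()
# (shape only; the salt and hash shown here are placeholders):
#
#   pbkdf2_sha256$10000$<salt>$<base64-encoded hash>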
class PBKDF2SHA1PasswordHasher(PBKDF2PasswordHasher):
"""
Alternate PBKDF2 hasher which uses SHA1, the default PRF
recommended by PKCS #5. This is compatible with other
implementations of PBKDF2, such as openssl's
PKCS5_PBKDF2_HMAC_SHA1().
"""
algorithm = "pbkdf2_sha1"
digest = hashlib.sha1
class BCryptPasswordHasher(BasePasswordHasher):
"""
Secure password hashing using the bcrypt algorithm (recommended)
This is considered by many to be the most secure algorithm but you
must first install the py-bcrypt library. Please be warned that
this library depends on native C code and might cause portability
issues.
"""
algorithm = "bcrypt"
library = ("py-bcrypt", "bcrypt")
rounds = 12
def salt(self):
bcrypt = self._load_library()
return bcrypt.gensalt(self.rounds)
@password_max_length(MAXIMUM_PASSWORD_LENGTH)
def encode(self, password, salt):
bcrypt = self._load_library()
data = bcrypt.hashpw(password, salt)
return "%s$%s" % (self.algorithm, data)
@password_max_length(MAXIMUM_PASSWORD_LENGTH)
def verify(self, password, encoded):
algorithm, data = encoded.split('$', 1)
assert algorithm == self.algorithm
bcrypt = self._load_library()
return constant_time_compare(data, bcrypt.hashpw(password, data))
def safe_summary(self, encoded):
algorithm, empty, algostr, work_factor, data = encoded.split('$', 4)
assert algorithm == self.algorithm
salt, checksum = data[:22], data[22:]
return SortedDict([
(_('algorithm'), algorithm),
(_('work factor'), work_factor),
(_('salt'), mask_hash(salt)),
(_('checksum'), mask_hash(checksum)),
])
class SHA1PasswordHasher(BasePasswordHasher):
"""
The SHA1 password hashing algorithm (not recommended)
"""
algorithm = "sha1"
@password_max_length(MAXIMUM_PASSWORD_LENGTH)
def encode(self, password, salt):
assert password
assert salt and '$' not in salt
hash = hashlib.sha1(salt + password).hexdigest()
return "%s$%s$%s" % (self.algorithm, salt, hash)
@password_max_length(MAXIMUM_PASSWORD_LENGTH)
def verify(self, password, encoded):
algorithm, salt, hash = encoded.split('$', 2)
assert algorithm == self.algorithm
encoded_2 = self.encode(password, salt)
return constant_time_compare(encoded, encoded_2)
def safe_summary(self, encoded):
algorithm, salt, hash = encoded.split('$', 2)
assert algorithm == self.algorithm
return SortedDict([
(_('algorithm'), algorithm),
(_('salt'), mask_hash(salt, show=2)),
(_('hash'), mask_hash(hash)),
])
class MD5PasswordHasher(BasePasswordHasher):
"""
The Salted MD5 password hashing algorithm (not recommended)
"""
algorithm = "md5"
@password_max_length(MAXIMUM_PASSWORD_LENGTH)
def encode(self, password, salt):
assert password
assert salt and '$' not in salt
hash = hashlib.md5(salt + password).hexdigest()
return "%s$%s$%s" % (self.algorithm, salt, hash)
@password_max_length(MAXIMUM_PASSWORD_LENGTH)
def verify(self, password, encoded):
algorithm, salt, hash = encoded.split('$', 2)
assert algorithm == self.algorithm
encoded_2 = self.encode(password, salt)
return constant_time_compare(encoded, encoded_2)
def safe_summary(self, encoded):
algorithm, salt, hash = encoded.split('$', 2)
assert algorithm == self.algorithm
return SortedDict([
(_('algorithm'), algorithm),
(_('salt'), mask_hash(salt, show=2)),
(_('hash'), mask_hash(hash)),
])
class UnsaltedSHA1PasswordHasher(BasePasswordHasher):
"""
Very insecure algorithm that you should *never* use; stores SHA1 hashes
with an empty salt.
This class is implemented because Django used to accept such password
hashes. Some older Django installs still have these values lingering
around so we need to handle and upgrade them properly.
"""
algorithm = "unsalted_sha1"
def salt(self):
return ''
@password_max_length(MAXIMUM_PASSWORD_LENGTH)
def encode(self, password, salt):
assert salt == ''
hash = hashlib.sha1(password).hexdigest()
return 'sha1$$%s' % hash
@password_max_length(MAXIMUM_PASSWORD_LENGTH)
def verify(self, password, encoded):
encoded_2 = self.encode(password, '')
return constant_time_compare(encoded, encoded_2)
def safe_summary(self, encoded):
assert encoded.startswith('sha1$$')
hash = encoded[6:]
return SortedDict([
(_('algorithm'), self.algorithm),
(_('hash'), mask_hash(hash)),
])
class UnsaltedMD5PasswordHasher(BasePasswordHasher):
"""
Incredibly insecure algorithm that you should *never* use; stores unsalted
MD5 hashes without the algorithm prefix, also accepts MD5 hashes with an
empty salt.
This class is implemented because Django used to store passwords this way
and to accept such password hashes. Some older Django installs still have
these values lingering around so we need to handle and upgrade them
properly.
"""
algorithm = "unsalted_md5"
def salt(self):
return ''
@password_max_length(MAXIMUM_PASSWORD_LENGTH)
def encode(self, password, salt):
assert salt == ''
return hashlib.md5(password).hexdigest()
@password_max_length(MAXIMUM_PASSWORD_LENGTH)
def verify(self, password, encoded):
if len(encoded) == 37 and encoded.startswith('md5$$'):
encoded = encoded[5:]
encoded_2 = self.encode(password, '')
return constant_time_compare(encoded, encoded_2)
def safe_summary(self, encoded):
return SortedDict([
(_('algorithm'), self.algorithm),
(_('hash'), mask_hash(encoded, show=3)),
])
class CryptPasswordHasher(BasePasswordHasher):
"""
Password hashing using UNIX crypt (not recommended)
The crypt module is not supported on all platforms.
"""
algorithm = "crypt"
library = "crypt"
def salt(self):
return get_random_string(2)
@password_max_length(MAXIMUM_PASSWORD_LENGTH)
def encode(self, password, salt):
crypt = self._load_library()
assert len(salt) == 2
data = crypt.crypt(password, salt)
# we don't need to store the salt, but Django used to do this
return "%s$%s$%s" % (self.algorithm, '', data)
@password_max_length(MAXIMUM_PASSWORD_LENGTH)
def verify(self, password, encoded):
crypt = self._load_library()
algorithm, salt, data = encoded.split('$', 2)
assert algorithm == self.algorithm
return constant_time_compare(data, crypt.crypt(password, data))
def safe_summary(self, encoded):
algorithm, salt, data = encoded.split('$', 2)
assert algorithm == self.algorithm
return SortedDict([
(_('algorithm'), algorithm),
(_('salt'), salt),
(_('hash'), mask_hash(data, show=3)),
])
| gpl-3.0 | 379,891,299,878,125,100 | 32.477322 | 85 | 0.630129 | false |
srajag/contrail-controller | src/config/common/svc_info.py | 9 | 2236 | #
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
_MGMT_STR = "management"
_LEFT_STR = "left"
_RIGHT_STR = "right"
_SVC_VN_MGMT = "svc-vn-mgmt"
_SVC_VN_LEFT = "svc-vn-left"
_SVC_VN_RIGHT = "svc-vn-right"
_VN_MGMT_SUBNET_CIDR = '10.250.1.0/24'
_VN_LEFT_SUBNET_CIDR = '10.250.2.0/24'
_VN_RIGHT_SUBNET_CIDR = '10.250.3.0/24'
_VN_SNAT_PREFIX_NAME = 'snat-si-left'
_VN_SNAT_SUBNET_CIDR = '100.64.0.0/29'
_CHECK_SVC_VM_HEALTH_INTERVAL = 30
_VM_INSTANCE_TYPE = 'virtual-machine'
_NETNS_INSTANCE_TYPE = 'network-namespace'
_SNAT_SVC_TYPE = 'source-nat'
_LB_SVC_TYPE = 'loadbalancer'
_ACTIVE_LOCAL_PREFERENCE = 200
_STANDBY_LOCAL_PREFERENCE = 100
# Minimum vrouter agent version that can manage service instances
_VROUTER_NETNS_SUPPORTED_VERSION = '1.10'
def get_management_if_str():
return _MGMT_STR
def get_left_if_str():
return _LEFT_STR
def get_right_if_str():
return _RIGHT_STR
def get_if_str_list():
if_str_list = []
if_str_list.append(get_management_if_str())
if_str_list.append(get_left_if_str())
if_str_list.append(get_right_if_str())
return if_str_list
def get_management_vn_name():
return _SVC_VN_MGMT
def get_left_vn_name():
return _SVC_VN_LEFT
def get_right_vn_name():
return _SVC_VN_RIGHT
def get_shared_vn_list():
shared_vn_list = []
shared_vn_list.append(get_management_vn_name())
shared_vn_list.append(get_left_vn_name())
shared_vn_list.append(get_right_vn_name())
return shared_vn_list
def get_management_vn_subnet():
return _VN_MGMT_SUBNET_CIDR
def get_left_vn_subnet():
return _VN_LEFT_SUBNET_CIDR
def get_right_vn_subnet():
return _VN_RIGHT_SUBNET_CIDR
def get_snat_left_vn_prefix():
return _VN_SNAT_PREFIX_NAME
def get_snat_left_subnet():
return _VN_SNAT_SUBNET_CIDR
def get_vm_instance_type():
return _VM_INSTANCE_TYPE
def get_netns_instance_type():
return _NETNS_INSTANCE_TYPE
def get_snat_service_type():
return _SNAT_SVC_TYPE
def get_lb_service_type():
return _LB_SVC_TYPE
def get_vm_health_interval():
return _CHECK_SVC_VM_HEALTH_INTERVAL
def get_active_preference():
return _ACTIVE_LOCAL_PREFERENCE
def get_standby_preference():
return _STANDBY_LOCAL_PREFERENCE
| apache-2.0 | 3,348,741,132,744,944,600 | 21.585859 | 64 | 0.681574 | false |
google/starthinker | dags/sheets_copy_dag.py | 1 | 4462 | ###########################################################################
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
'''
--------------------------------------------------------------
Before running this Airflow module...
Install StarThinker in cloud composer ( recommended ):
From Release: pip install starthinker
From Open Source: pip install git+https://github.com/google/starthinker
Or push local code to the cloud composer plugins directory ( if pushing local code changes ):
source install/deploy.sh
4) Composer Menu
l) Install All
--------------------------------------------------------------
If any recipe task has "auth" set to "user" add user credentials:
1. Ensure an RECIPE['setup']['auth']['user'] = [User Credentials JSON]
OR
1. Visit Airflow UI > Admin > Connections.
2. Add an Entry called "starthinker_user", fill in the following fields. Last step paste JSON from authentication.
- Conn Type: Google Cloud Platform
- Project: Get from https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md
- Keyfile JSON: Get from: https://github.com/google/starthinker/blob/master/tutorials/deploy_commandline.md#optional-setup-user-credentials
--------------------------------------------------------------
If any recipe task has "auth" set to "service" add service credentials:
1. Ensure an RECIPE['setup']['auth']['service'] = [Service Credentials JSON]
OR
1. Visit Airflow UI > Admin > Connections.
2. Add an Entry called "starthinker_service", fill in the following fields. Last step paste JSON from authentication.
- Conn Type: Google Cloud Platform
- Project: Get from https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md
- Keyfile JSON: Get from: https://github.com/google/starthinker/blob/master/tutorials/cloud_service.md
--------------------------------------------------------------
Sheet Copy
Copy tab from a sheet to a sheet.
- Provide the full edit URL for both sheets.
- Provide the tab name for both sheets.
- The tab will only be copied if it does not already exist.
--------------------------------------------------------------
This StarThinker DAG can be extended with any additional tasks from the following sources:
- https://google.github.io/starthinker/
- https://github.com/google/starthinker/tree/master/dags
'''
from starthinker.airflow.factory import DAG_Factory
INPUTS = {
'auth_read': 'user', # Credentials used for reading data.
'from_sheet': '',
'from_tab': '',
'to_sheet': '',
'to_tab': '',
}
RECIPE = {
'tasks': [
{
'sheets': {
'auth': {
'field': {
'name': 'auth_read',
'kind': 'authentication',
'order': 1,
'default': 'user',
'description': 'Credentials used for reading data.'
}
},
'template': {
'sheet': {
'field': {
'name': 'from_sheet',
'kind': 'string',
'order': 1,
'default': ''
}
},
'tab': {
'field': {
'name': 'from_tab',
'kind': 'string',
'order': 2,
'default': ''
}
}
},
'sheet': {
'field': {
'name': 'to_sheet',
'kind': 'string',
'order': 3,
'default': ''
}
},
'tab': {
'field': {
'name': 'to_tab',
'kind': 'string',
'order': 4,
'default': ''
}
}
}
}
]
}
dag_maker = DAG_Factory('sheets_copy', RECIPE, INPUTS)
dag = dag_maker.generate()
if __name__ == "__main__":
dag_maker.print_commandline()
| apache-2.0 | 1,329,078,104,768,230,100 | 29.353741 | 145 | 0.53429 | false |
jtattermusch/grpc | src/python/grpcio/grpc/experimental/session_cache.py | 27 | 1533 | # Copyright 2018 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""gRPC's APIs for TLS Session Resumption support"""
from grpc._cython import cygrpc as _cygrpc
def ssl_session_cache_lru(capacity):
"""Creates an SSLSessionCache with LRU replacement policy
Args:
capacity: Size of the cache
Returns:
An SSLSessionCache with LRU replacement policy that can be passed as a value for
the grpc.ssl_session_cache option to a grpc.Channel. SSL session caches are used
to store session tickets, which clients can present to resume previous TLS sessions
with a server.
"""
return SSLSessionCache(_cygrpc.SSLSessionCacheLRU(capacity))
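# Illustrative usage (added commentary, not part of the original module); the
# exact encoding of the option value should be confirmed against the gRPC
# Python documentation:
#
#   cache = ssl_session_cache_lru(1024)
#   channel = grpc.secure_channel(
#       'example.com:443',
#       grpc.ssl_channel_credentials(),
#       options=[('grpc.ssl_session_cache', cache)])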
class SSLSessionCache(object):
"""An encapsulation of a session cache used for TLS session resumption.
Instances of this class can be passed to a Channel as values for the
grpc.ssl_session_cache option
"""
def __init__(self, cache):
self._cache = cache
def __int__(self):
return int(self._cache)
| apache-2.0 | -8,292,878,371,721,113,000 | 33.066667 | 89 | 0.72407 | false |
jstoja/TsinghuaMailSystem | src/com/mailsystem/__main__.py | 1 | 1654 | #!/usr/bin/env python
# coding: utf-8
import sys
import json
import argparse
from bottle import run
import src.com.mailsystem.api.routes as api
import src.com.mailsystem.codes as codes
from src.com.mailsystem.orm.Database import Database
from src.com.mailsystem.populate import populate_db
def read_config(setup_file):
try:
with open(setup_file) as f:
setup = json.load(f)
except Exception as e:
print(
"Can't process setup file '{}' : {}", setup_file, e
)
sys.exit(1)
return setup
def connect_dbs(setup):
databases = {}
databases['users'] = Database(
'thumailusers',
setup['users']['uri']
)
for db in setup['departments']:
infos = setup['departments'][db]
if db not in databases:
databases[db] = Database(
db,
infos['uri']
)
return databases
if __name__ == '__main__':
parser = argparse.ArgumentParser("thumail")
parser.add_argument(
'--setup', default='setup.json',
help="A JSON file with DB's configuration (defaults to setup.json)"
)
parser.add_argument(
'--populate', action='store_true', default=False,
help="Generate data to populate the databases"
)
args = parser.parse_args()
setup_file = args.setup
populate = args.populate
setup = read_config(setup_file)
dbs = connect_dbs(setup)
api.app.dbs = dbs
codes.codes = setup['codes']
codes.rev_codes = {v: k for k, v in setup['codes'].items()}
if populate:
populate_db(dbs)
run(api.app, host='0.0.0.0', port=8080)
| mit | -3,172,127,379,199,545,000 | 24.446154 | 75 | 0.598549 | false |
openstack/keystone | keystone/common/sql/expand_repo/versions/066_expand_add_role_and_project_option_tables.py | 2 | 1880 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sql
from keystone.common import sql as ks_sql
def upgrade(migrate_engine):
meta = sql.MetaData()
meta.bind = migrate_engine
role_table = sql.Table('role', meta, autoload=True)
project_table = sql.Table('project', meta, autoload=True)
role_resource_options_table = sql.Table(
'role_option',
meta,
sql.Column('role_id', sql.String(64), sql.ForeignKey(role_table.c.id,
ondelete='CASCADE'), nullable=False, primary_key=True),
sql.Column('option_id', sql.String(4), nullable=False,
primary_key=True),
sql.Column('option_value', ks_sql.JsonBlob, nullable=True),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
project_resource_options_table = sql.Table(
'project_option',
meta,
sql.Column('project_id', sql.String(64),
sql.ForeignKey(project_table.c.id, ondelete='CASCADE'),
nullable=False, primary_key=True),
sql.Column('option_id', sql.String(4), nullable=False,
primary_key=True),
sql.Column('option_value', ks_sql.JsonBlob, nullable=True),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
project_resource_options_table.create()
role_resource_options_table.create()
| apache-2.0 | 1,087,692,778,344,981,500 | 35.862745 | 77 | 0.65266 | false |
SecurityFTW/cs-suite | tools/Scout2/AWSScout2/services/cloudformation.py | 3 | 1472 | # -*- coding: utf-8 -*-
import json
from AWSScout2.configs.regions import RegionalServiceConfig, RegionConfig, api_clients
########################################
# CloudFormationRegionConfig
########################################
class CloudFormationRegionConfig(RegionConfig):
"""
CloudFormation configuration for a single AWS region
"""
def parse_stack(self, global_params, region, stack):
"""
Parse a single stack and fetch additional attributes
:param global_params: Parameters shared for all regions
:param region: Name of the AWS region
        :param stack: Stack dictionary as returned by the CloudFormation API
"""
stack['id'] = stack.pop('StackId')
stack['name'] = stack.pop('StackName')
        stack_policy = api_clients[region].get_stack_policy(StackName=stack['name'])
if 'StackPolicyBody' in stack_policy:
stack['policy'] = json.loads(stack_policy['StackPolicyBody'])
self.stacks[stack['name']] = stack
########################################
# CloudFormationConfig
########################################
class CloudFormationConfig(RegionalServiceConfig):
"""
CloudFormation configuration for all AWS regions
"""
region_config_class = CloudFormationRegionConfig
    def __init__(self, service_metadata, thread_config=4):
super(CloudFormationConfig, self).__init__(service_metadata, thread_config)
| gpl-3.0 | -305,419,418,660,980,740 | 30.319149 | 86 | 0.58288 | false |
xuweiliang/Codelibrary | openstack_dashboard/api/authcode_back.py | 1 | 2953 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import time
import base64
import hashlib
class AuthCode(object):
@classmethod
def code_init(cls, cipher):
try:
encrypted_data = getattr(cipher, 'encrypted_license')
encrypted_str = encrypted_data.strip()
decryption = None
if hasattr(cipher, 'system_uuid'):
                key, pwd = cipher.system_uuid.split(":")
                if getattr(cipher, 'disabled'):
                    decryption = cls.decode(encrypted_str, 'fr1e54b8t4n4m47')
                else:
                    decryption = cls.decode(encrypted_str, key)
return eval(decryption)
except Exception:
return None
@classmethod
def decode(cls, string, key, expiry=0):
try:
return cls._auth_code(string, 'DECODE', key, expiry)
        except Exception:
return
@staticmethod
def _md5(source_string):
return hashlib.md5(source_string).hexdigest()
@classmethod
def _auth_code(cls, input_string, operation='DECODE', key='', expiry=3600):
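        # Added commentary (not in the original source): this is a Python port
        # of the classic PHP "authcode" routine.  The key is MD5-expanded into
        # key_a (keystream key) and key_b (signature key); key_c is a short
        # random prefix that salts the keystream; the plaintext is wrapped with
        # a 10-digit expiry timestamp plus a 16-character MD5 checksum, then
        # XOR-ed with an RC4-style keystream and base64-encoded.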
rand_key_length = 4
key = cls._md5(key)
key_a = cls._md5(key[:16])
key_b = cls._md5(key[16:])
if rand_key_length:
if operation == 'DECODE':
key_c = input_string[:rand_key_length]
else:
key_c = cls._md5(str(time.time()))[-rand_key_length:]
else:
key_c = ''
crypt_key = key_a + cls._md5(key_a + key_c)
if operation == 'DECODE':
handled_string = base64.b64decode(input_string[rand_key_length:])
else:
            expiration_time = expiry + int(time.time()) if expiry else 0
handled_string = '%010d' % expiration_time + cls._md5(input_string + key_b)[:16] + input_string
rand_key = list()
for i in xrange(256):
rand_key.append(ord(crypt_key[i % len(crypt_key)]))
# ----------------------------------------------------------
box = range(256)
j = 0
for i in xrange(256):
j = (j + box[i] + rand_key[i]) % 256
tmp = box[i]
box[i] = box[j]
box[j] = tmp
result = ''
a = 0
j = 0
for i in xrange(len(handled_string)):
a = (a + 1) % 256
j = (j + box[a]) % 256
tmp = box[a]
box[a] = box[j]
box[j] = tmp
result += chr(ord(handled_string[i])^(box[(box[a]+box[j])%256]))
if operation == 'DECODE':
if (int(result[:10]) == 0 or (int(result[:10]) - time.time() > 0)) and \
(result[10:26] == cls._md5(result[26:] + key_b)[:16]):
output_string = result[26:]
else:
output_string = ''
else:
output_string = key_c + base64.b64encode(result)
return output_string
| apache-2.0 | -6,609,390,570,486,604,000 | 28.237624 | 107 | 0.483915 | false |
ashhher3/cvxpy | cvxpy/atoms/elementwise/elementwise.py | 10 | 1354 | """
Copyright 2013 Steven Diamond
This file is part of CVXPY.
CVXPY is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CVXPY is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CVXPY. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
import abc
from cvxpy.atoms.atom import Atom
import operator as op
if sys.version_info >= (3, 0):
from functools import reduce
class Elementwise(Atom):
""" Abstract base class for elementwise atoms. """
__metaclass__ = abc.ABCMeta
def shape_from_args(self):
"""Shape is the same as the sum of the arguments.
"""
return reduce(op.add, [arg._dcp_attr.shape for arg in self.args])
def validate_arguments(self):
"""
Verify that all the shapes are the same
or can be promoted.
"""
shape = self.args[0]._dcp_attr.shape
for arg in self.args[1:]:
shape = shape + arg._dcp_attr.shape
| gpl-3.0 | 6,384,567,152,979,306,000 | 29.772727 | 73 | 0.694239 | false |
JackKelly/neuralnilm_prototype | scripts/e127.py | 2 | 4534 | from __future__ import print_function, division
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer
from lasagne.nonlinearities import sigmoid, rectify
from lasagne.objectives import crossentropy, mse
from lasagne.init import Uniform, Normal
from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer
from lasagne.updates import adagrad, nesterov_momentum
from functools import partial
import os
from neuralnilm.source import standardise
from neuralnilm.experiment import run_experiment
from neuralnilm.net import TrainingError
import __main__
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 250
GRADIENT_STEPS = 100
"""
e103
Discovered that bottom layer is hardly changing. So will try
just a single lstm layer
e104
standard init
lower learning rate
e106
lower learning rate to 0.001
e108
is e107 but with batch size of 5
e109
Normal(1) for LSTM
e110
* Back to Uniform(5) for LSTM
* Using nntools eb17bd923ef9ff2cacde2e92d7323b4e51bb5f1f
RESULTS: Seems to run fine again!
e111
* Try with nntools head
* peepholes=False
RESULTS: appears to be working well. Haven't seen a NaN,
even with training rate of 0.1
e112
* n_seq_per_batch = 50
e114
* Trying looking at layer by layer training again.
* Start with single LSTM layer
e115
* Learning rate = 1
e116
* Standard inits
e117
* Uniform(1) init
e119
* Learning rate 10
# Result: didn't work well!
e120
* init: Normal(1)
* not as good as Uniform(5)
e121
* Uniform(25)
e122
* Just 10 cells
* Uniform(5)
e125
* Pre-train lower layers
"""
def exp_a(name):
source = RealApplianceSource(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television'
# 'dish washer',
# ['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200], #, 2500, 2400],
on_power_thresholds=[20, 20, 20], #, 20, 20],
max_input_power=1000,
min_on_durations=[60, 60, 60], #, 1800, 1800],
window=("2013-06-01", "2014-07-01"),
seq_length=1000,
output_one_appliance=False,
boolean_targets=False,
min_off_duration=60,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0,
n_seq_per_batch=50
)
net = Net(
experiment_name=name,
source=source,
save_plot_interval=SAVE_PLOT_INTERVAL,
loss_function=crossentropy,
updates=partial(nesterov_momentum, learning_rate=1.0),
layers_config=[
{
'type': LSTMLayer,
'num_units': 50,
'W_in_to_cell': Uniform(25),
'gradient_steps': GRADIENT_STEPS,
'peepholes': False
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
],
layer_changes={
501: {
'remove_from': -3,
'new_layers':
[
{
'type': LSTMLayer,
'num_units': 50,
'W_in_to_cell': Uniform(1),
'gradient_steps': GRADIENT_STEPS,
'peepholes': False
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
]
}
}
)
return net
def init_experiment(experiment):
full_exp_name = NAME + experiment
func_call = 'exp_{:s}(full_exp_name)'.format(experiment)
print("***********************************")
print("Preparing", full_exp_name, "...")
net = eval(func_call)
return net
def main():
for experiment in list('a'):
full_exp_name = NAME + experiment
path = os.path.join(PATH, full_exp_name)
try:
net = init_experiment(experiment)
run_experiment(net, path, epochs=5000)
except KeyboardInterrupt:
break
except TrainingError as e:
print("EXCEPTION:", e)
if __name__ == "__main__":
main()
| mit | -3,386,471,041,713,052,000 | 24.47191 | 92 | 0.576533 | false |