repo_name (stringlengths 5-100) | path (stringlengths 4-299) | copies (stringclasses, 990 values) | size (stringlengths 4-7) | content (stringlengths 666-1.03M) | license (stringclasses, 15 values) | hash (int64, -9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean (float64, 3.17-100) | line_max (int64, 7-1k) | alpha_frac (float64, 0.25-0.98) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
Tennyson53/SUR | magnum/common/pythonk8sclient/client/models/V1beta3_NodeList.py | 15 | 2562 | #!/usr/bin/env python
"""
Copyright 2015 Reverb Technologies, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class V1beta3_NodeList(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually."""
def __init__(self):
"""
Attributes:
swaggerTypes (dict): The key is attribute name and the value is attribute type.
attributeMap (dict): The key is attribute name and the value is json key in definition.
"""
self.swaggerTypes = {
'apiVersion': 'str',
'items': 'list[V1beta3_Node]',
'kind': 'str',
'resourceVersion': 'str',
'selfLink': 'str'
}
self.attributeMap = {
'apiVersion': 'apiVersion',
'items': 'items',
'kind': 'kind',
'resourceVersion': 'resourceVersion',
'selfLink': 'selfLink'
}
#version of the schema the object should have
self.apiVersion = None # str
#list of nodes
self.items = None # list[V1beta3_Node]
#kind of object, in CamelCase; cannot be updated
self.kind = None # str
#string that identifies the internal version of this object that can be used by clients to determine when objects have changed; populated by the system, read-only; value must be treated as opaque by clients and passed unmodified back to the server: https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/api-conventions.md#concurrency-control-and-consistency
self.resourceVersion = None # str
#URL for the object; populated by the system, read-only
self.selfLink = None # str
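# Illustrative sketch only (not part of the generated client): a generic
# deserializer would typically use attributeMap to locate each value in the API
# response and swaggerTypes to decide how to coerce it, e.g.:
#
#   obj = V1beta3_NodeList()
#   data = {'apiVersion': 'v1beta3', 'kind': 'NodeList', 'items': []}
#   for attr, json_key in obj.attributeMap.items():
#       if json_key in data:
#           setattr(obj, attr, data[json_key])  # type coercion via swaggerTypes omitted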
| apache-2.0 | -9,012,315,937,409,428,000 | 30.62963 | 378 | 0.569477 | false |
ccellis/WHACK2016 | flask/lib/python2.7/site-packages/migrate/versioning/pathed.py | 146 | 2059 | """
A path/directory class.
"""
import os
import shutil
import logging
from migrate import exceptions
from migrate.versioning.config import *
from migrate.versioning.util import KeyedInstance
log = logging.getLogger(__name__)
class Pathed(KeyedInstance):
"""
A class associated with a path/directory tree.
Only one instance of this class may exist for a particular file;
__new__ will return an existing instance if possible
"""
parent = None
@classmethod
def _key(cls, path):
return str(path)
def __init__(self, path):
self.path = path
if self.__class__.parent is not None:
self._init_parent(path)
def _init_parent(self, path):
"""Try to initialize this object's parent, if it has one"""
parent_path = self.__class__._parent_path(path)
self.parent = self.__class__.parent(parent_path)
log.debug("Getting parent %r:%r" % (self.__class__.parent, parent_path))
self.parent._init_child(path, self)
def _init_child(self, child, path):
"""Run when a child of this object is initialized.
Parameters: the child object; the path to this object (its
parent)
"""
@classmethod
def _parent_path(cls, path):
"""
Fetch the path of this object's parent from this object's path.
"""
# os.path.dirname(), but strip directories like files (like
# unix basename)
#
# Treat directories like files...
if path[-1] == '/':
path = path[:-1]
ret = os.path.dirname(path)
return ret
@classmethod
def require_notfound(cls, path):
"""Ensures a given path does not already exist"""
if os.path.exists(path):
raise exceptions.PathFoundError(path)
@classmethod
def require_found(cls, path):
"""Ensures a given path already exists"""
if not os.path.exists(path):
raise exceptions.PathNotFoundError(path)
def __str__(self):
return self.path
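# Illustrative sketch only (not part of the original module): because Pathed
# keys on str(path) via KeyedInstance, constructing it twice with the same path
# is expected to hand back the same instance, and _parent_path() strips a
# trailing slash before taking the dirname:
#
#   Pathed._parent_path('/repo/versions/')  # -> '/repo'
#   Pathed._parent_path('/repo/versions')   # -> '/repo'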
| bsd-3-clause | -8,203,305,717,697,806,000 | 26.453333 | 80 | 0.606119 | false |
tomMoulard/python-projetcs | scripts3/say_chat.py | 1 | 2153 | __author__ = "Brian Lenihan <[email protected]>"
__copyright__ = "Copyright (c) 2012 Python for Android Project"
__license__ = "Apache License, Version 2.0"
import logging
import sl4a
from pyxmpp2.jid import JID
from pyxmpp2.client import Client
from pyxmpp2.settings import XMPPSettings
from pyxmpp2.interfaces import XMPPFeatureHandler
from pyxmpp2.interfaces import EventHandler, event_handler, QUIT
from pyxmpp2.interfaces import message_stanza_handler
from pyxmpp2.streamevents import DisconnectedEvent
from pyxmpp2.ext.version import VersionProvider
logging.basicConfig(level = logging.INFO)
xmpp_trace = False
class SayChat(EventHandler, XMPPFeatureHandler):
def __init__(self):
self.droid = sl4a.Android()
settings = XMPPSettings({"software_name": "Say Chat"})
settings["jid"] = self.droid.dialogGetInput("Google Talk Username").result
settings["password"] = self.droid.dialogGetInput("Google Talk Password").result
settings["server"] = "talk.google.com"
settings["starttls"] = True
self.client = Client(
JID(settings["jid"]),
[self, VersionProvider(settings)],
settings)
def connect(self):
self.client.connect()
self.client.run()
def disconnect(self):
self.client.disconnect()
self.client.run(timeout = 2)
@message_stanza_handler()
def handle_message(self, stanza):
self.droid.ttsSpeak(
"{!s} says {!s}".format(stanza.from_jid.as_unicode(),
stanza.body))
return ""
@event_handler(DisconnectedEvent)
def handle_disconnected(self, event):
return QUIT
@event_handler()
def handle_all(self, event):
"""If it's not logged, it didn't happen."""
logging.info("-- {}".format(event))
def run(self):
try:
self.connect()
except KeyboardInterrupt:
self.disconnect()
if xmpp_trace:
handler = logging.StreamHandler()
handler.setLevel(logging.DEBUG)
for logger in ("pyxmpp2.IN", "pyxmpp2.OUT"):
logger = logging.getLogger(logger)
logger.setLevel(logging.DEBUG)
logger.addHandler(handler)
logger.propagate = False
saychat = SayChat()
saychat.run()
| apache-2.0 | -4,344,941,393,585,683,500 | 28.493151 | 85 | 0.694844 | false |
vlinhd11/vlinhd11-android-scripting | python/src/Lib/test/pydocfodder.py | 194 | 6329 | """Something just to look at via pydoc."""
import types
class A_classic:
"A classic class."
def A_method(self):
"Method defined in A."
def AB_method(self):
"Method defined in A and B."
def AC_method(self):
"Method defined in A and C."
def AD_method(self):
"Method defined in A and D."
def ABC_method(self):
"Method defined in A, B and C."
def ABD_method(self):
"Method defined in A, B and D."
def ACD_method(self):
"Method defined in A, C and D."
def ABCD_method(self):
"Method defined in A, B, C and D."
class B_classic(A_classic):
"A classic class, derived from A_classic."
def AB_method(self):
"Method defined in A and B."
def ABC_method(self):
"Method defined in A, B and C."
def ABD_method(self):
"Method defined in A, B and D."
def ABCD_method(self):
"Method defined in A, B, C and D."
def B_method(self):
"Method defined in B."
def BC_method(self):
"Method defined in B and C."
def BD_method(self):
"Method defined in B and D."
def BCD_method(self):
"Method defined in B, C and D."
class C_classic(A_classic):
"A classic class, derived from A_classic."
def AC_method(self):
"Method defined in A and C."
def ABC_method(self):
"Method defined in A, B and C."
def ACD_method(self):
"Method defined in A, C and D."
def ABCD_method(self):
"Method defined in A, B, C and D."
def BC_method(self):
"Method defined in B and C."
def BCD_method(self):
"Method defined in B, C and D."
def C_method(self):
"Method defined in C."
def CD_method(self):
"Method defined in C and D."
class D_classic(B_classic, C_classic):
"A classic class, derived from B_classic and C_classic."
def AD_method(self):
"Method defined in A and D."
def ABD_method(self):
"Method defined in A, B and D."
def ACD_method(self):
"Method defined in A, C and D."
def ABCD_method(self):
"Method defined in A, B, C and D."
def BD_method(self):
"Method defined in B and D."
def BCD_method(self):
"Method defined in B, C and D."
def CD_method(self):
"Method defined in C and D."
def D_method(self):
"Method defined in D."
class A_new(object):
"A new-style class."
def A_method(self):
"Method defined in A."
def AB_method(self):
"Method defined in A and B."
def AC_method(self):
"Method defined in A and C."
def AD_method(self):
"Method defined in A and D."
def ABC_method(self):
"Method defined in A, B and C."
def ABD_method(self):
"Method defined in A, B and D."
def ACD_method(self):
"Method defined in A, C and D."
def ABCD_method(self):
"Method defined in A, B, C and D."
def A_classmethod(cls, x):
"A class method defined in A."
A_classmethod = classmethod(A_classmethod)
def A_staticmethod():
"A static method defined in A."
A_staticmethod = staticmethod(A_staticmethod)
def _getx(self):
"A property getter function."
def _setx(self, value):
"A property setter function."
def _delx(self):
"A property deleter function."
A_property = property(fdel=_delx, fget=_getx, fset=_setx,
doc="A sample property defined in A.")
A_int_alias = int
class B_new(A_new):
"A new-style class, derived from A_new."
def AB_method(self):
"Method defined in A and B."
def ABC_method(self):
"Method defined in A, B and C."
def ABD_method(self):
"Method defined in A, B and D."
def ABCD_method(self):
"Method defined in A, B, C and D."
def B_method(self):
"Method defined in B."
def BC_method(self):
"Method defined in B and C."
def BD_method(self):
"Method defined in B and D."
def BCD_method(self):
"Method defined in B, C and D."
class C_new(A_new):
"A new-style class, derived from A_new."
def AC_method(self):
"Method defined in A and C."
def ABC_method(self):
"Method defined in A, B and C."
def ACD_method(self):
"Method defined in A, C and D."
def ABCD_method(self):
"Method defined in A, B, C and D."
def BC_method(self):
"Method defined in B and C."
def BCD_method(self):
"Method defined in B, C and D."
def C_method(self):
"Method defined in C."
def CD_method(self):
"Method defined in C and D."
class D_new(B_new, C_new):
"""A new-style class, derived from B_new and C_new.
"""
def AD_method(self):
"Method defined in A and D."
def ABD_method(self):
"Method defined in A, B and D."
def ACD_method(self):
"Method defined in A, C and D."
def ABCD_method(self):
"Method defined in A, B, C and D."
def BD_method(self):
"Method defined in B and D."
def BCD_method(self):
"Method defined in B, C and D."
def CD_method(self):
"Method defined in C and D."
def D_method(self):
"Method defined in D."
class FunkyProperties(object):
"""From SF bug 472347, by Roeland Rengelink.
Property getters etc may not be vanilla functions or methods,
and this used to make GUI pydoc blow up.
"""
def __init__(self):
self.desc = {'x':0}
class get_desc:
def __init__(self, attr):
self.attr = attr
def __call__(self, inst):
print 'Get called', self, inst
return inst.desc[self.attr]
class set_desc:
def __init__(self, attr):
self.attr = attr
def __call__(self, inst, val):
print 'Set called', self, inst, val
inst.desc[self.attr] = val
class del_desc:
def __init__(self, attr):
self.attr = attr
def __call__(self, inst):
print 'Del called', self, inst
del inst.desc[self.attr]
x = property(get_desc('x'), set_desc('x'), del_desc('x'), 'prop x')
submodule = types.ModuleType(__name__ + '.submodule',
"""A submodule, which should appear in its parent's summary""")
| apache-2.0 | 134,855,285,307,707,060 | 28.300926 | 71 | 0.570232 | false |
frreiss/tensorflow-fred | tensorflow/python/autograph/pyct/static_analysis/__init__.py | 27 | 1375 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Static information resolution.
This module contains utilities to help annotate AST nodes with as much runtime
information as can be possibly extracted without actually executing the code,
under that assumption that the context in which the code will run is known.
Overall, the different analyses have the functions listed below:
* activity: inventories symbols read, written to, params, etc. at different
levels
* liveness, reaching_definitions: dataflow analyses based on the program's CFG
and using the symbol information gathered by activity analysis
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
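# Illustrative example only (not part of the original package): what the
# analyses listed above compute for a tiny function, in standard dataflow terms.
#
#   def f(a):
#       b = a + 1   # activity: reads {a}, writes {b}
#       c = b * 2   # reaching definitions: the assignment to b above reaches here
#       return c    # liveness: b is not live after its last use; c stays live until the return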
| apache-2.0 | 4,562,122,267,984,788,000 | 43.354839 | 80 | 0.731636 | false |
ramielrowe/magnum | magnum/common/pythonk8sclient/client/models/V1beta3_ServiceStatus.py | 15 | 1172 | #!/usr/bin/env python
"""
Copyright 2015 Reverb Technologies, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class V1beta3_ServiceStatus(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually."""
def __init__(self):
"""
Attributes:
swaggerTypes (dict): The key is attribute name and the value is attribute type.
attributeMap (dict): The key is attribute name and the value is json key in definition.
"""
self.swaggerTypes = {
}
self.attributeMap = {
}
| apache-2.0 | 7,261,299,544,535,258,000 | 30.675676 | 97 | 0.653584 | false |
manjunaths/tensorflow | tensorflow/contrib/framework/python/framework/tensor_util.py | 37 | 13232 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tensor utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
__all__ = [
'assert_same_float_dtype',
'assert_scalar',
'assert_scalar_int',
'convert_to_tensor_or_sparse_tensor',
'is_tensor',
'reduce_sum_n',
'remove_squeezable_dimensions',
'with_shape',
'with_same_shape']
convert_to_tensor_or_sparse_tensor = (
sparse_tensor.convert_to_tensor_or_sparse_tensor)
def _assert_same_base_type(items, expected_type=None):
r"""Asserts all items are of the same base type.
Args:
items: List of graph items (e.g., `Variable`, `Tensor`, `SparseTensor`,
`Operation`, or `IndexedSlices`). Can include `None` elements, which
will be ignored.
expected_type: Expected type. If not specified, assert all items are
of the same base type.
Returns:
Validated type, or none if neither expected_type nor items provided.
Raises:
ValueError: If any types do not match.
"""
original_item_str = None
for item in items:
if item is not None:
item_type = item.dtype.base_dtype
if not expected_type:
expected_type = item_type
original_item_str = item.name if hasattr(item, 'name') else str(item)
elif expected_type != item_type:
raise ValueError('%s, type=%s, must be of the same type (%s)%s.' % (
item.name if hasattr(item, 'name') else str(item),
item_type, expected_type,
(' as %s' % original_item_str) if original_item_str else ''))
return expected_type
def assert_same_float_dtype(tensors=None, dtype=None):
"""Validate and return float type based on `tensors` and `dtype`.
For ops such as matrix multiplication, inputs and weights must be of the
same float type. This function validates that all `tensors` are the same type,
validates that type is `dtype` (if supplied), and returns the type. Type must
be `dtypes.float32` or `dtypes.float64`. If neither `tensors` nor
`dtype` is supplied, default to `dtypes.float32`.
Args:
tensors: Tensors of input values. Can include `None` elements, which will be
ignored.
dtype: Expected type.
Returns:
Validated type.
Raises:
ValueError: if neither `tensors` nor `dtype` is supplied, or result is not
float.
"""
if tensors:
dtype = _assert_same_base_type(tensors, dtype)
if not dtype:
dtype = dtypes.float32
elif not dtype.is_floating:
raise ValueError('Expected float, got %s.' % dtype)
return dtype
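# Illustrative usage sketch (not part of the original module); the tensors
# below are assumed examples:
#
#   a = array_ops.ones([2], dtype=dtypes.float32)
#   b = array_ops.zeros([2], dtype=dtypes.float32)
#   assert_same_float_dtype([a, b])  # -> dtypes.float32
#   assert_same_float_dtype([])      # no tensors or dtype given -> defaults to dtypes.float32
#   # mixing in an int32 tensor would raise ValueError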
def assert_scalar_int(tensor, name=None):
"""Assert `tensor` is 0-D, of type `tf.int32` or `tf.int64`.
Args:
tensor: `Tensor` to test.
name: Name of the op and of the new `Tensor` if one is created.
Returns:
`tensor`, for chaining.
Raises:
ValueError: if `tensor` is not 0-D, of type `tf.int32` or `tf.int64`.
"""
with ops.name_scope(name, 'assert_scalar_int', [tensor]) as name_scope:
tensor = ops.convert_to_tensor(tensor)
data_type = tensor.dtype
if data_type.base_dtype not in [dtypes.int32, dtypes.int64]:
raise ValueError('Unexpected type %s for %s.' % (data_type, tensor.name))
return assert_scalar(tensor, name=name_scope)
def assert_scalar(tensor, name=None):
with ops.name_scope(name, 'assert_scalar', [tensor]) as name_scope:
tensor = ops.convert_to_tensor(tensor, name=name_scope)
shape = tensor.get_shape()
if shape.ndims != 0:
raise ValueError('Unexpected shape %s for %s.' % (shape, tensor.name))
return tensor
def reduce_sum_n(tensors, name=None):
"""Reduce tensors to a scalar sum.
This reduces each tensor in `tensors` to a scalar via `tf.reduce_sum`, then
adds them via `tf.add_n`.
Args:
tensors: List of tensors, all of the same numeric type.
name: Tensor name, and scope for all other ops.
Returns:
Total loss tensor, or None if no losses have been configured.
Raises:
ValueError: if `losses` is missing or empty.
"""
if not tensors:
raise ValueError('No tensors provided.')
with ops.name_scope(name, 'reduce_sum_n', tensors) as name_scope:
tensors = [
math_ops.reduce_sum(t, name='%s/sum' % t.op.name) for t in tensors]
if len(tensors) == 1:
return tensors[0]
return math_ops.add_n(tensors, name=name_scope)
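# Illustrative usage sketch (not part of the original module):
#
#   t1 = array_ops.ones([2, 2])      # sums to 4
#   t2 = array_ops.ones([3])         # sums to 3
#   total = reduce_sum_n([t1, t2])   # scalar tensor that evaluates to 7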
def remove_squeezable_dimensions(predictions, labels, name=None):
"""Squeeze last dim if ranks of `predictions` and `labels` differ by 1.
This will use static shape if available. Otherwise, it will add graph
operations, which could result in a performance hit.
Args:
predictions: Predicted values, a `Tensor` of arbitrary dimensions.
labels: Label values, a `Tensor` whose dimensions match `predictions`.
name: Name of the op.
Returns:
Tuple of `predictions` and `labels`, possibly with last dim squeezed.
"""
with ops.name_scope(name, 'remove_squeezable_dimensions',
[predictions, labels]):
predictions = ops.convert_to_tensor(predictions)
labels = ops.convert_to_tensor(labels)
predictions_shape = predictions.get_shape()
predictions_rank = predictions_shape.ndims
labels_shape = labels.get_shape()
labels_rank = labels_shape.ndims
if (labels_rank is not None) and (predictions_rank is not None):
# Use static rank.
rank_diff = predictions_rank - labels_rank
if rank_diff == -1:
labels = array_ops.squeeze(labels, [-1])
elif rank_diff == 1:
predictions = array_ops.squeeze(predictions, [-1])
return predictions, labels
# Use dynamic rank.
rank_diff = array_ops.rank(predictions) - array_ops.rank(labels)
if (predictions_rank is None) or (
predictions_shape.dims[-1].is_compatible_with(1)):
predictions = control_flow_ops.cond(
math_ops.equal(1, rank_diff),
lambda: array_ops.squeeze(predictions, [-1]),
lambda: predictions)
if (labels_rank is None) or (
labels_shape.dims[-1].is_compatible_with(1)):
labels = control_flow_ops.cond(
math_ops.equal(-1, rank_diff),
lambda: array_ops.squeeze(labels, [-1]),
lambda: labels)
return predictions, labels
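# Illustrative shape example (not part of the original module):
#
#   preds = array_ops.ones([4, 1])
#   labels = array_ops.ones([4])
#   preds, labels = remove_squeezable_dimensions(preds, labels)
#   # preds now has static shape [4]; with equal ranks both pass through unchanged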
def _all_equal(tensor0, tensor1):
with ops.name_scope('all_equal', values=[tensor0, tensor1]) as scope:
return math_ops.reduce_all(
math_ops.equal(tensor0, tensor1, name='equal'), name=scope)
def _is_rank(expected_rank, actual_tensor):
"""Returns whether actual_tensor's rank is expected_rank.
Args:
expected_rank: Integer defining the expected rank, or tensor of same.
actual_tensor: Tensor to test.
Returns:
New tensor.
"""
with ops.name_scope('is_rank', values=[actual_tensor]) as scope:
expected = ops.convert_to_tensor(expected_rank, name='expected')
actual = array_ops.rank(actual_tensor, name='actual')
return math_ops.equal(expected, actual, name=scope)
def _is_shape(expected_shape, actual_tensor, actual_shape=None):
"""Returns whether actual_tensor's shape is expected_shape.
Args:
expected_shape: Integer list defining the expected shape, or tensor of same.
actual_tensor: Tensor to test.
actual_shape: Shape of actual_tensor, if we already have it.
Returns:
New tensor.
"""
with ops.name_scope('is_shape', values=[actual_tensor]) as scope:
is_rank = _is_rank(array_ops.size(expected_shape), actual_tensor)
if actual_shape is None:
actual_shape = array_ops.shape(actual_tensor, name='actual')
shape_equal = _all_equal(
ops.convert_to_tensor(expected_shape, name='expected'),
actual_shape)
return math_ops.logical_and(is_rank, shape_equal, name=scope)
def _assert_shape_op(expected_shape, actual_tensor):
"""Asserts actual_tensor's shape is expected_shape.
Args:
expected_shape: List of integers defining the expected shape, or tensor of
same.
actual_tensor: Tensor to test.
Returns:
New assert tensor.
"""
with ops.name_scope('assert_shape', values=[actual_tensor]) as scope:
actual_shape = array_ops.shape(actual_tensor, name='actual')
is_shape = _is_shape(expected_shape, actual_tensor, actual_shape)
return control_flow_ops.Assert(
is_shape, [
'Wrong shape for %s [expected] [actual].' % actual_tensor.name,
expected_shape,
actual_shape
], name=scope)
def with_same_shape(expected_tensor, tensor):
"""Assert tensors are the same shape, from the same graph.
Args:
expected_tensor: Tensor with expected shape.
tensor: Tensor of actual values.
Returns:
Tuple of (actual_tensor, label_tensor), possibly with assert ops added.
"""
with ops.name_scope('%s/' % tensor.op.name, values=[expected_tensor, tensor]):
tensor_shape = expected_tensor.get_shape()
expected_shape = (
tensor_shape.as_list() if tensor_shape.is_fully_defined()
else array_ops.shape(expected_tensor, name='expected_shape'))
return with_shape(expected_shape, tensor)
def is_tensor(x):
"""Check for tensor types.
Check whether an object is a tensor. Equivalent to
`isinstance(x, [tf.Tensor, tf.SparseTensor, tf.Variable])`.
Args:
x: An python object to check.
Returns:
`True` if `x` is a tensor, `False` if not.
"""
tensor_types = (ops.Tensor, sparse_tensor.SparseTensor, variables.Variable)
return isinstance(x, tensor_types)
def with_shape(expected_shape, tensor):
"""Asserts tensor has expected shape.
If tensor shape and expected_shape, are fully defined, assert they match.
Otherwise, add assert op that will validate the shape when tensor is
evaluated, and set shape on tensor.
Args:
expected_shape: Expected shape to assert, as a 1D array of ints, or tensor
of same.
tensor: Tensor whose shape we're validating.
Returns:
tensor, perhaps with a dependent assert operation.
Raises:
ValueError: if tensor has an invalid shape.
"""
if isinstance(tensor, sparse_tensor.SparseTensor):
raise ValueError('SparseTensor not supported.')
# Shape type must be 1D int32.
if is_tensor(expected_shape):
if expected_shape.dtype.base_dtype != dtypes.int32:
raise ValueError(
'Invalid dtype %s for shape %s expected of tensor %s.' % (
expected_shape.dtype, expected_shape, tensor.name))
if isinstance(expected_shape, (list, tuple)):
if not expected_shape:
expected_shape = np.asarray([], dtype=np.int32)
else:
np_expected_shape = np.asarray(expected_shape)
expected_shape = (
np.asarray(expected_shape, dtype=np.int32)
if np_expected_shape.dtype == np.int64 else np_expected_shape)
if isinstance(expected_shape, np.ndarray):
if expected_shape.ndim > 1:
raise ValueError(
'Invalid rank %s for shape %s expected of tensor %s.' % (
expected_shape.ndim, expected_shape, tensor.name))
if expected_shape.dtype != np.int32:
raise ValueError(
'Invalid dtype %s for shape %s expected of tensor %s.' % (
expected_shape.dtype, expected_shape, tensor.name))
actual_shape = tensor.get_shape()
if not actual_shape.is_fully_defined() or is_tensor(expected_shape):
with ops.name_scope('%s/' % tensor.op.name, values=[tensor]):
if not is_tensor(expected_shape) and (len(expected_shape) < 1):
# TODO(irving): Remove scalar special case
return array_ops.reshape(tensor, [])
with ops.control_dependencies([_assert_shape_op(expected_shape, tensor)]):
result = array_ops.identity(tensor)
if not is_tensor(expected_shape):
result.set_shape(expected_shape)
return result
if (not is_tensor(expected_shape) and
not actual_shape.is_compatible_with(expected_shape)):
if (len(expected_shape) < 1) and actual_shape.is_compatible_with([1]):
# TODO(irving): Remove scalar special case.
with ops.name_scope('%s/' % tensor.op.name, values=[tensor]):
return array_ops.reshape(tensor, [])
raise ValueError('Invalid shape for tensor %s, expected %s, got %s.' % (
tensor.name, expected_shape, actual_shape))
return tensor
| apache-2.0 | 8,406,202,043,612,333,000 | 35.054496 | 80 | 0.67367 | false |
cwisecarver/osf.io | scripts/osfstorage/migrate_to_generic.py | 5 | 6466 | from __future__ import unicode_literals
import sys
import logging
import datetime
from django.utils import timezone
from modularodm import Q
from modularodm.storage.base import KeyExistsException
from scripts import utils as script_utils
from framework.transactions.context import TokuTransaction
from website.files import models
from website.app import init_app
from addons.osfstorage import model as osfstorage_model
NOW = timezone.now()
logger = logging.getLogger(__name__)
def paginated(model, query=None, increment=200):
last_id = ''
pages = (model.find(query).count() / increment) + 1
for i in xrange(pages):
q = Q('_id', 'gt', last_id)
if query:
q &= query
page = list(model.find(q).limit(increment))
for item in page:
yield item
if page:
last_id = item._id
def do_migration():
logger.info('Migration: OsfStorageFileNode -> FileNode')
migrate_filenodes()
logger.info('Migration: OsfStorageTrashedFileNode -> TrashedFileNode')
migrate_trashedfilenodes()
logger.info('Checking that all Files have been migrated...')
diff = osfstorage_model.OsfStorageFileNode.find().count() - models.FileNode.find().count()
if diff > 0:
logger.error('Missing {} FileNodes; canceling transaction')
raise Exception('{} unmigrated FileNodes'.format(diff))
logger.info('Checking that all File versions have been migrated...')
diff = osfstorage_model.OsfStorageFileVersion.find().count() - models.FileVersion.find().count()
if diff != 0:
logger.error('{} OsfStorageFileVersions did not get migrated'.format(diff))
logger.error('This is most likely because they are orphaned')
logger.error('This is not a show stopper; The migration was still successful')
else:
logger.info('Migration successful')
def migrate_trashedfilenodes():
for trashed in osfstorage_model.OsfStorageTrashedFileNode.find():
logger.debug('Migrating OsfStorageTrashedFileNode {}'.format(trashed._id))
if trashed.node_settings is None:
logger.warning('OsfStorageTrashedFileNode {} has no node_settings; skipping'.format(trashed._id))
continue
parent_id = trashed.to_storage()['parent']
parent = osfstorage_model.OsfStorageTrashedFileNode.load(parent_id) or osfstorage_model.OsfStorageFileNode.load(parent_id)
if parent:
if isinstance(parent, osfstorage_model.OsfStorageFileNode):
parent = (parent._id, 'storedfilenode')
else:
parent = (parent._id, 'trashedfilenode')
models.TrashedFileNode(
_id=trashed._id,
versions=translate_versions(trashed.versions),
node=trashed.node_settings.owner,
parent=parent,
is_file=trashed.kind == 'file',
provider='osfstorage',
name=trashed.name,
path='/' + trashed._id + ('' if trashed.kind == 'file' else '/'),
materialized_path=''
).save()
def migrate_filenodes():
for node_settings in paginated(osfstorage_model.OsfStorageNodeSettings):
if node_settings.owner is None:
logger.warning('NodeSettings {} has no parent; skipping'.format(node_settings._id))
continue
logger.info('Migrating files for {!r}'.format(node_settings.owner))
listing = []
for filenode in osfstorage_model.OsfStorageFileNode.find(Q('node_settings', 'eq', node_settings._id)):
logger.debug('Migrating OsfStorageFileNode {}'.format(filenode._id))
versions = translate_versions(filenode.versions)
if filenode.is_file and not filenode.node.is_deleted:
if not filenode.versions:
logger.warning('File {!r} has no versions'.format(filenode))
elif not versions:
logger.warning('{!r} is a file with no translatable versions'.format(filenode))
new_node = models.StoredFileNode(
_id=filenode._id,
versions=versions,
node=node_settings.owner,
parent=None if not filenode.parent else filenode.parent._id,
is_file=filenode.kind == 'file',
provider='osfstorage',
name=filenode.name,
last_touched=NOW
)
# Wrapped's save will populate path and materialized_path
new_node.save()
listing.append(new_node)
assert node_settings.get_root()
for x in listing:
# Make sure everything transfered properly
if x.to_storage()['parent']:
assert x.parent, '{!r}\'s parent {} does not exist'.format(x, x.to_storage()['parent'])
def translate_versions(versions):
translated = []
for index, version in enumerate(versions):
if version is None:
raise Exception('Version {} missing from database'.format(version))
if not version.metadata or not version.location:
logger.error('Version {} missing metadata or location'.format(version))
continue
translated.append(translate_version(version, index))
return translated
def translate_version(version, index):
version = models.FileVersion(
_id=version._id,
creator=version.creator,
identifier=index + 1,
date_created=version.date_created,
location=version.location,
metadata=version.metadata,
size=version.size,
content_type=version.content_type,
date_modified=version.date_modified,
)
try:
version.save()
except KeyExistsException:
version = models.FileVersion.load(version._id)
return version
def main(dry=True):
init_app(set_backends=True, routes=False) # Sets the storage backends on all models
with TokuTransaction():
do_migration()
if dry:
raise Exception('Abort Transaction - Dry Run')
if __name__ == '__main__':
dry = 'dry' in sys.argv
if not dry:
script_utils.add_file_logger(logger, __file__)
if 'debug' in sys.argv:
logger.setLevel(logging.DEBUG)
elif 'warning' in sys.argv:
logger.setLevel(logging.WARNING)
elif 'info' in sys.argv:
logger.setLevel(logging.INFO)
elif 'error' in sys.argv:
logger.setLevel(logging.ERROR)
main(dry=dry)
| apache-2.0 | 92,737,624,281,065,180 | 34.922222 | 130 | 0.633003 | false |
storborg/manhattan | manhattan/tests/test_middleware.py | 1 | 5386 | from __future__ import absolute_import, division, print_function
import re
from unittest import TestCase
from webob import Request, Response
from webtest import TestApp, TestRequest
from manhattan.middleware import ManhattanMiddleware
from manhattan.record import Record
from manhattan.log.memory import MemoryLog
class SampleApp(object):
def __call__(self, environ, start_response):
req = Request(environ)
if req.path_info.endswith('.txt'):
s = 'Hello %s' % req.path_info
resp = Response(s)
resp.content_type = 'text/plain'
elif req.path_info.endswith('.iter'):
resp = Response()
s = 'Hello %s' % req.path_info.encode('ascii')
def app_iter(sample):
for piece in ('<html><body>', sample, '</body>', '</html>'):
yield piece
self.consumed_iter = True
yield ' '
self.consumed_iter = False
resp.content_type = 'text/html'
resp.app_iter = app_iter(s)
else:
s = '<html><body><h1>Hello %s</h1></body></html>' % req.path_info
resp = Response(s)
resp.content_type = 'text/html'
return resp(environ, start_response)
log = MemoryLog()
host_map = {'localhost': 3,
'example.com': 5}
inner_app = SampleApp()
wrapped_app = ManhattanMiddleware(inner_app, log, 'secret', host_map=host_map)
app = TestApp(wrapped_app)
class TestMiddleware(TestCase):
def setUp(self):
app.reset()
log.purge()
def process(self):
records = list(log.process())
self.assertEqual(len(records), 1)
record = Record.from_list(records[0][0])
return record
def test_request(self):
resp = app.get('/')
record = self.process()
self.assertEqual(record.key, 'page')
self.assertEqual(record.site_id, '3')
first_vid = record.vid
m = re.search('<img (.+)src="(.+)" alt="" />', resp.body)
pixel_path = m.group(2)
resp = app.get(pixel_path)
self.assertEqual(resp.content_type, 'image/gif')
record = self.process()
self.assertEqual(record.key, 'pixel')
self.assertEqual(record.site_id, '3')
self.assertEqual(first_vid, record.vid)
resp = app.get('/foo')
record = self.process()
self.assertEqual(record.key, 'page')
self.assertTrue(record.url.endswith('/foo'))
self.assertEqual(record.site_id, '3')
self.assertEqual(first_vid, record.vid)
def test_host_map(self):
resp = app.get('/hello', extra_environ={'HTTP_HOST': 'example.com'})
self.assertEqual(resp.content_type, 'text/html')
record = self.process()
self.assertEqual(record.key, 'page')
self.assertTrue(record.url.endswith('/hello'))
self.assertEqual(record.site_id, '5')
def test_unknown_host(self):
resp = app.get('/somepage',
extra_environ={'HTTP_HOST':
'supercalifragilicious.com'})
self.assertEqual(resp.content_type, 'text/html')
record = self.process()
self.assertEqual(record.key, 'page')
self.assertTrue(record.url.endswith('/somepage'))
self.assertEqual(record.site_id, '0')
def test_pixel_req(self):
resp = app.get('/vpixel.gif')
self.assertEqual(resp.content_type, 'image/gif',
'An html response should have a pixel tag.')
def test_non_html_pixel(self):
resp = app.get('/non-html-page.txt')
self.assertNotIn('/vpixel.gif', resp.body,
'A non-html response should not have a pixel tag.')
def test_generator_response(self):
req = Request.blank('/quux.iter')
resp = req.get_response(wrapped_app)
self.assertFalse(inner_app.consumed_iter,
'The generator response has been buffered by '
'middleware before instead of being returned as an '
'iterable.')
self.assertIn('/vpixel.gif', resp.body)
self.assertTrue(inner_app.consumed_iter)
def test_latin1_user_agent(self):
# Example user agent is latin1-encoded, so should be preserved.
sample_ua = '\xc0 \xe0 hello'
app.get('/somepage', extra_environ={'HTTP_USER_AGENT': sample_ua})
record = self.process()
self.assertEqual(record.user_agent, sample_ua.decode('latin1'))
def test_nongetpost_methods_not_processed(self):
app.put('/somepage')
app.delete('/somepage')
app.options('/somepage')
records = list(log.process())
self.assertEqual(len(records), 0)
def test_safari_top_sites_not_counted(self):
app.get('/blah', headers={'X-Purpose': 'preview'})
records = list(log.process())
self.assertEqual(len(records), 0)
def test_signature_mangled(self):
app.get('/')
orig_cookie = app.cookies['manhattan']
# truncate the last 4 chars, which will blow the sig
bad_cookie = orig_cookie[:-4]
bad_request = TestRequest.blank('/', cookies={'manhattan': bad_cookie})
app.request(bad_request)
new_cookie = app.cookies['manhattan']
self.assertNotEqual(bad_cookie, new_cookie)
| mit | 1,058,024,441,326,132,900 | 32.874214 | 79 | 0.585592 | false |
kubeflow/pipelines | components/arena/docker/job_generator.py | 3 | 2817 | import argparse
import datetime
import json
import os
import sys
import logging
import requests
import subprocess
import six
import time
import yaml
from subprocess import Popen,PIPE
from shlex import split
from utils import *
# Generate common options
def generate_options(args):
gpus = args.gpus
cpu = args.cpu
memory = args.memory
tensorboard = args.tensorboard
output_data = args.output_data
data = args.data
env = args.env
tensorboard_image = args.tensorboard_image
tensorboard = str2bool(args.tensorboard)
log_dir = args.log_dir
sync_source = args.sync_source
options = []
if gpus > 0:
options.extend(['--gpus', str(gpus)])
if cpu != '0':
options.extend(['--cpu', str(cpu)])
if memory != '0':
options.extend(['--memory', str(memory)])
if tensorboard_image != "tensorflow/tensorflow:1.12.0":
options.extend(['--tensorboardImage', tensorboard_image])
if tensorboard:
options.append("--tensorboard")
if os.path.isdir(args.log_dir):
options.extend(['--logdir', args.log_dir])
else:
logging.info("skip log dir :{0}".format(args.log_dir))
if len(data) > 0:
for d in data:
if ":" in d:
options.append("--data={0}".format(d))
else:
logging.info("--data={0} is illegal, skip.".format(d))
if len(env) > 0:
for e in env:
if "=" in e:
options.append("--env={0}".format(e))
else:
logging.info("--env={0} is illegal, skip.".format(e))
if len(args.workflow_name) > 0:
options.append("--env=WORKFLOW_NAME={0}".format(args.workflow_name))
if len(args.step_name) > 0:
options.append("--env=STEP_NAME={0}".format(args.step_name))
if len(sync_source) > 0:
if not sync_source.endswith(".git"):
raise ValueError("sync_source must be an http git url")
options.extend(['--sync-mode','git'])
options.extend(['--sync-source',sync_source])
return options
# Generate standalone job
def generate_job_command(args):
name = args.name
image = args.image
commandArray = [
'arena', 'submit', 'tfjob',
'--name={0}'.format(name),
'--image={0}'.format(image),
]
commandArray.extend(generate_options(args))
return commandArray, "tfjob"
# Generate mpi job
def generate_mpjob_command(args):
name = args.name
workers = args.workers
image = args.image
rdma = args.rdma
commandArray = [
'arena', 'submit', 'mpijob',
'--name={0}'.format(name),
'--workers={0}'.format(workers),
'--image={0}'.format(image),
]
if rdma.lower() == "true":
commandArray.append("--rdma")
commandArray.extend(generate_options(args))
return commandArray, "mpijob"
| apache-2.0 | -3,693,265,036,789,507,000 | 23.284483 | 76 | 0.606674 | false |
cgstudiomap/cgstudiomap | main/parts/odoo/addons/account_analytic_plans/__openerp__.py | 264 | 3114 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Multiple Analytic Plans',
'version': '1.0',
'category': 'Accounting & Finance',
'description': """
This module allows you to use several analytic plans according to the general journal.
=======================================================================================
Here multiple analytic lines are created when the invoice or the entries
are confirmed.
For example, you can define the following analytic structure:
-------------------------------------------------------------
* **Projects**
* Project 1
+ SubProj 1.1
+ SubProj 1.2
* Project 2
* **Salesman**
* Eric
* Fabien
Here, we have two plans: Projects and Salesman. An invoice line must be able to write analytic entries in the 2 plans: SubProj 1.1 and Fabien. The amount can also be split.
The following example is for an invoice that touches the two subprojects and assigned to one salesman:
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
**Plan1:**
* SubProject 1.1 : 50%
* SubProject 1.2 : 50%
**Plan2:**
Eric: 100%
So when this invoice line is confirmed, it will generate 3 analytic lines, for one account entry.
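For example, if the invoice line amounts to 1000.00, this distribution would produce analytic lines of 500.00 on SubProj 1.1, 500.00 on SubProj 1.2 and 1000.00 on Eric.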
The analytic plan validates the minimum and maximum percentage at the time of creation of distribution models.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/accounting',
'depends': ['account', 'account_analytic_default'],
'data': [
'security/account_analytic_plan_security.xml',
'security/ir.model.access.csv',
'account_analytic_plans_view.xml',
'account_analytic_plans_report.xml',
'wizard/analytic_plan_create_model_view.xml',
'wizard/account_crossovered_analytic_view.xml',
'views/report_crossoveredanalyticplans.xml',
'views/account_analytic_plans.xml',
],
'demo': [],
'test': ['test/acount_analytic_plans_report.yml'],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 2,192,563,892,042,360,800 | 36.071429 | 172 | 0.585742 | false |
HybridF5/jacket | jacket/db/migration.py | 1 | 1220 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Database setup and migration commands."""
from oslo_log import log as logging
from jacket.db.sqlalchemy import migration
LOG = logging.getLogger(__name__)
IMPL = migration
def db_sync(version=None, database='main'):
"""Migrate the database to `version` or the most recent version."""
return IMPL.db_sync(version=version, database=database)
def db_version(database='main'):
"""Display the current database version."""
return IMPL.db_version(database=database)
| apache-2.0 | -8,622,411,863,977,372,000 | 31.972973 | 78 | 0.736066 | false |
rahulguptakota/paper-To-Reviewer-Matching-System | citeSentClassifier_gurki.py | 1 | 9088 | import xml.etree.ElementTree as ET
import re
import time
import os, csv
from nltk.tokenize import sent_tokenize
from textblob.classifiers import NaiveBayesClassifier
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem import PorterStemmer
from sklearn import naive_bayes
from random import shuffle
import numpy as np
import scipy as sc
import matplotlib.pyplot as plt
from prettyprint import pp
import os, re, pickle
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.naive_bayes import BernoulliNB, GaussianNB, MultinomialNB
from sklearn.metrics import confusion_matrix, f1_score, accuracy_score, precision_score, recall_score, classification_report
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import LinearSVC, NuSVC, SVC
from sklearn.grid_search import GridSearchCV
from datetime import datetime as dt
from ipy_table import *
def testClassifier(x_train, y_train, x_test, y_test, clf, name):
"""
this method will first train the classifier on the training data
and will then test the trained classifier on test data.
Finally it will report some metrics on the classifier performance.
Parameters
----------
x_train: np.ndarray
train data matrix
y_train: list
train data label
x_test: np.ndarray
test data matrix
y_test: list
test data label
clf: sklearn classifier object implementing fit() and predict() methods
Returns
-------
metrics: list
[training time, testing time, recall and precision for every class, macro-averaged F1 score]
"""
print(name)
metrics = []
start = dt.now()
clf.fit(x_train, y_train)
end = dt.now()
print 'training time: ', (end - start)
pickle.dump( clf, open( name+".p", "wb" ) )
# add training time to metrics
metrics.append(end-start)
start = dt.now()
yhat = clf.predict(x_test)
end = dt.now()
print 'testing time: ', (end - start)
# add testing time to metrics
metrics.append(end-start)
print 'classification report: '
# print classification_report(y_test, yhat)
pp(classification_report(y_test, yhat))
print 'f1 score'
print f1_score(y_test, yhat, average='macro')
print 'accuracy score'
print accuracy_score(y_test, yhat)
precision = precision_score(y_test, yhat, average=None)
recall = recall_score(y_test, yhat, average=None)
# add precision and recall values to metrics
for p, r in zip(precision, recall):
metrics.append(p)
metrics.append(r)
#add macro-averaged F1 score to metrics
metrics.append(f1_score(y_test, yhat, average='macro'))
print 'confusion matrix:'
print confusion_matrix(y_test, yhat)
# plotting the confusion matrix
plt.imshow(confusion_matrix(y_test, yhat), interpolation='nearest')
# plt.show()
return metrics
stop_words = set(stopwords.words('english'))
clfrNB = naive_bayes.MultinomialNB()
train = []
test = []
rootDir = './data_label'
one_label = 0
zero_label = 0
ps = PorterStemmer()
for dirName, subdirList, fileList in os.walk(rootDir, topdown=False):
try:
# print(dirName)
fo = open(dirName + "/citeSents.csv", "r")
except:
continue
lines = fo.readlines()
for line in lines:
line = line.strip().lower()
# print(line)
splitsent = line.split(",,")
# print(splitsent)
word_tokens = word_tokenize(splitsent[0])
if splitsent[1] != '1' and splitsent[1] != '0' :
print(splitsent)
elif splitsent[1] == "1":
one_label += 1
else:
zero_label += 1
filtered_sentence = [w for w in word_tokens if not w in stop_words]
line = " ".join(filtered_sentence)
stemmed = [ps.stem(word) for word in line.split()]
stemmed = filter(lambda x: not(len(x)<3 or re.findall(r"[0-9]+",x)) , stemmed)
stemmed = list(stemmed)
line = " ".join(stemmed)
# print(line)
train.append((line, splitsent[1]))
shuffle(train)
# testindex = int(len(train)*4/5)
# test = train[testindex:]
# train = train[:testindex]
train_arr = []
# test_arr = []
train_lbl = []
# test_lbl = []
for x in train:
train_arr.append(x[0])
train_lbl.append(x[1])
# for x in test:
# test_arr.append(x[0])
# test_lbl.append(x[1])
vectorizer = CountVectorizer()
vectorizer.fit(train_arr)
pickle.dump(vectorizer, open("vectorizer.p", "wb"))
train_mat = vectorizer.transform(train_arr)
print train_mat
# print train_mat.shape
# test_mat = vectorizer.transform(test_arr)
# print test_mat.shape
tfidf = TfidfTransformer()
tfidf.fit(train_mat)
pickle.dump(tfidf, open("tfidf.p", "wb"))
train_tfmat = tfidf.transform(train_mat)
print train_tfmat.shape
print train_tfmat[0]
# test_tfmat = tfidf.transform(test_mat)
# print test_tfmat.shape
testindex = int(len(train)*4/5)
test_tfmat = train_tfmat[testindex:]
test_lbl = train_lbl[testindex:]
train_tfmat = train_tfmat[:testindex]
train_lbl = train_lbl[:testindex]
metrics_dict = []
bnb = BernoulliNB()
bnb_me = testClassifier(train_tfmat, train_lbl, test_tfmat, test_lbl, bnb, "bernoulliNB")
metrics_dict.append({'name':'BernoulliNB', 'metrics':bnb_me})
gnb = GaussianNB()
gnb_me = testClassifier(train_tfmat.toarray(), train_lbl, test_tfmat.toarray(), test_lbl, gnb, "guassianNB")
metrics_dict.append({'name':'GaussianNB', 'metrics':gnb_me})
mnb = MultinomialNB()
mnb_me = testClassifier(train_tfmat.toarray(), train_lbl, test_tfmat.toarray(), test_lbl, mnb, "MultinomialNB")
metrics_dict.append({'name':'MultinomialNB', 'metrics':mnb_me})
for nn in [5]:
print 'knn with ', nn, ' neighbors'
knn = KNeighborsClassifier(n_neighbors=nn)
knn_me = testClassifier(train_tfmat, train_lbl, test_tfmat, test_lbl, knn, "knn"+str(nn))
metrics_dict.append({'name':'5NN', 'metrics':knn_me})
print ' '
print("linear SVM starts:")
lsvm = LinearSVC( class_weight={'1': 1, '0' : 1})
lsvm_me = testClassifier(train_tfmat, train_lbl, test_tfmat, test_lbl, lsvm, "linearSVM")
metrics_dict.append({'name':'LinearSVM', 'metrics':lsvm_me})
rbfsvm = SVC(kernel = 'poly',degree=2,coef0=1 ,class_weight={'1': zero_label, '0' : one_label})
rbfsvm_me = testClassifier(train_tfmat, train_lbl, test_tfmat, test_lbl, rbfsvm, "rbfSVM")
metrics_dict.append({'name':'SVM with RBF kernel', 'metrics':rbfsvm_me})
bnb_params = {'alpha': [a*0.1 for a in range(0,11)]}
bnb_clf = GridSearchCV(BernoulliNB(), bnb_params, cv=10)
bnb_clf.fit(train_tfmat, train_lbl)
print 'best parameters'
print bnb_clf.best_params_
best_bnb = BernoulliNB(alpha=bnb_clf.best_params_['alpha'])
best_bnb_me = testClassifier(train_tfmat, train_lbl, test_tfmat, test_lbl, best_bnb,"bernoulliNB")
metrics_dict.append({'name':'Best BernoulliNB', 'metrics':best_bnb_me})
best_gnb = GaussianNB()
best_gnb_me = testClassifier(train_tfmat.toarray(), train_lbl, test_tfmat.toarray(), test_lbl, best_gnb, "guassianNB")
metrics_dict.append({'name':'Best GaussianNB', 'metrics':best_gnb_me})
mbn_params = {'alpha': [a*0.1 for a in range(0,11)]}
mbn_clf = GridSearchCV(MultinomialNB(), mbn_params, cv=10)
mbn_clf.fit(train_tfmat, train_lbl)
print 'best parameters'
print mbn_clf.best_params_
best_mbn = MultinomialNB(alpha=mbn_clf.best_params_['alpha'])
best_mbn_me = testClassifier(train_tfmat, train_lbl, test_tfmat, test_lbl, best_mbn, "MultinomialNB")
metrics_dict.append({'name':'Best MultinomialNB', 'metrics':best_mbn_me})
print metrics_dict
# knn_params = {'n_neighbors': range(1,21), 'weights': ['uniform', 'distance'], 'algorithm': ['ball_tree', 'kd_tree'],
# 'leaf_size': [15, 30, 50, 100], 'p': [1,2]}
# knn_clf = GridSearchCV(KNeighborsClassifier(), knn_params, cv=10)
# knn_clf.fit(train_tfmat, train_lbl)
# print 'best parameters'
# print knn_clf.best_params_
# best_knn = KNeighborsClassifier(n_neighbors=knn_clf.best_params_['n_neighbors'], weights=knn_clf.best_params_['weights'],
# algorithm=knn_clf.best_params_['algorithm'], leaf_size=knn_clf.best_params_['leaf_size'])
# best_knn_me = testClassifier(train_tfmat, train_lbl, test_tfmat, test_lbl, best_knn)
# metrics_dict.append({'name':'Best KNN', 'metrics':best_knn_me})
# nusvm = NuSVC()
# nusvm_me = testClassifier(train_tfmat, train_lbl, test_tfmat, test_lbl, nusvm)
# metrics_dict.append({'name':'nuSVM', 'metrics':nusvm_me})
# traindata = [data[0] for data in train]
# trainlabel = [data[1] for data in train]
# clfrNB.fit(traindata, trainlabel)
# print(test)
# cl = NaiveBayesClassifier(train)
# print(cl.classify("It is also possible to focus on non-compositional compounds, a key point in bilingual applications (CITATION; CITATION; Lin, 99)")) # "pos"
# print(cl.classify("I don't like their pizza.")) # "neg"
# for item in test:
# if(cl.classify(item[0]) == '1'):
# print(item, cl.classify(item[0]))
# print(cl.accuracy(test))
# print(cl.show_informative_features(100))
# print(train)
| mit | -835,216,885,992,110,500 | 34.639216 | 161 | 0.674846 | false |
cfe-lab/MiCall | micall/utils/find_missing_samples.py | 1 | 2703 | import re
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
import logging
from csv import DictReader
from pathlib import Path
from micall.utils.sample_sheet_parser import sample_sheet_parser
logger = logging.getLogger(__name__)
def parse_args():
parser = ArgumentParser(
description="Look for samples that didn't get processed.",
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument('start_folder',
nargs='?',
default='/media/raw_data/MiSeq/runs',
help='a run folder, or a parent of many run folders',
type=Path)
parser.add_argument('--skip_mid_hcv',
action='store_true',
help="Don't report missing samples with the MidHCV project.")
return parser.parse_args()
def process_run(run_folder: Path, skip_mid_hcv: bool):
if not (run_folder / 'needsprocessing').exists():
return False
if (run_folder / 'errorprocessing').exists():
return True
sample_sheet_path = run_folder / 'SampleSheet.csv'
with sample_sheet_path.open() as f:
try:
run_info = sample_sheet_parser(f)
except Exception:
raise RuntimeError(f'Failed to process run {run_folder.name}.')
sample_names = set(run_info['Data'])
if skip_mid_hcv:
sample_names = {sample_name
for sample_name in sample_names
if not re.match(r'.*MidHCV_S\d+$', sample_name)}
cascade_path = run_folder / 'Results' / 'version_7.9' / 'cascade.csv'
with cascade_path.open() as f:
reader = DictReader(f)
cascade_samples = {row['sample'] for row in reader}
missing_samples = sample_names - cascade_samples
if missing_samples:
logger.error('Missing samples in run %s: %s',
run_folder.name,
sorted(missing_samples))
return True
def process_runs(runs_folder: Path, skip_mid_hcv: bool):
for file_path in sorted(runs_folder.iterdir()):
if file_path.is_dir():
# noinspection PyBroadException
try:
process_run(file_path, skip_mid_hcv)
except Exception:
logger.warning('Run %s failed.', file_path.name, exc_info=True)
def main():
logging.basicConfig(level=logging.INFO,
format='%(asctime)s[%(levelname)s]%(name)s: %(message)s')
logger.info('Starting.')
args = parse_args()
if not process_run(args.start_folder, args.skip_mid_hcv):
process_runs(args.start_folder, args.skip_mid_hcv)
logger.info('Done.')
main()
| agpl-3.0 | -970,750,006,730,824,600 | 35.04 | 85 | 0.600074 | false |
haroldl/homeworklog | django/contrib/auth/tests/permissions.py | 231 | 1654 | try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from django.contrib.auth.management import create_permissions
from django.contrib.auth import models as auth_models
from django.contrib.contenttypes import models as contenttypes_models
from django.core.management import call_command
from django.test import TestCase
class TestAuthPermissions(TestCase):
def tearDown(self):
# These tests mess with content types, but content type lookups
# are cached, so we need to make sure the effects of this test
# are cleaned up.
contenttypes_models.ContentType.objects.clear_cache()
def test_permission_register_order(self):
"""Test that the order of registered permissions doesn't break"""
# Changeset 14413 introduced a regression in the ordering of
# newly created permissions for objects. When loading a fixture
# after the initial creation (such as during unit tests), the
# expected IDs for the permissions may not match up, leading to
# SQL errors. This is ticket 14731
# Start with a clean slate and build the permissions as we
# expect to see them in the fixtures.
auth_models.Permission.objects.all().delete()
contenttypes_models.ContentType.objects.all().delete()
create_permissions(auth_models, [], verbosity=0)
create_permissions(contenttypes_models, [], verbosity=0)
stderr = StringIO()
call_command('loaddata', 'test_permissions.json',
verbosity=0, commit=False, stderr=stderr)
self.assertEqual(stderr.getvalue(), '')
| bsd-3-clause | -6,697,439,261,716,369,000 | 42.526316 | 73 | 0.704353 | false |
ruuk/script.module.youtube.dl | lib/youtube_dl/extractor/streamable.py | 53 | 3891 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
float_or_none,
int_or_none,
)
class StreamableIE(InfoExtractor):
_VALID_URL = r'https?://streamable\.com/(?:[es]/)?(?P<id>\w+)'
_TESTS = [
{
'url': 'https://streamable.com/dnd1',
'md5': '3e3bc5ca088b48c2d436529b64397fef',
'info_dict': {
'id': 'dnd1',
'ext': 'mp4',
'title': 'Mikel Oiarzabal scores to make it 0-3 for La Real against Espanyol',
'thumbnail': r're:https?://.*\.jpg$',
'uploader': 'teabaker',
'timestamp': 1454964157.35115,
'upload_date': '20160208',
'duration': 61.516,
'view_count': int,
}
},
# older video without bitrate, width/height, etc. info
{
'url': 'https://streamable.com/moo',
'md5': '2cf6923639b87fba3279ad0df3a64e73',
'info_dict': {
'id': 'moo',
'ext': 'mp4',
'title': '"Please don\'t eat me!"',
'thumbnail': r're:https?://.*\.jpg$',
'timestamp': 1426115495,
'upload_date': '20150311',
'duration': 12,
'view_count': int,
}
},
{
'url': 'https://streamable.com/e/dnd1',
'only_matching': True,
},
{
'url': 'https://streamable.com/s/okkqk/drxjds',
'only_matching': True,
}
]
@staticmethod
def _extract_url(webpage):
mobj = re.search(
r'<iframe[^>]+src=(?P<q1>[\'"])(?P<src>(?:https?:)?//streamable\.com/(?:(?!\1).+))(?P=q1)',
webpage)
if mobj:
return mobj.group('src')
def _real_extract(self, url):
video_id = self._match_id(url)
# Note: Using the ajax API, as the public Streamable API doesn't seem
# to return video info like the title properly sometimes, and doesn't
# include info like the video duration
video = self._download_json(
'https://ajax.streamable.com/videos/%s' % video_id, video_id)
# Format IDs:
# 0 The video is being uploaded
# 1 The video is being processed
# 2 The video has at least one file ready
# 3 The video is unavailable due to an error
status = video.get('status')
if status != 2:
raise ExtractorError(
'This video is currently unavailable. It may still be uploading or processing.',
expected=True)
title = video.get('reddit_title') or video['title']
formats = []
for key, info in video['files'].items():
if not info.get('url'):
continue
formats.append({
'format_id': key,
'url': self._proto_relative_url(info['url']),
'width': int_or_none(info.get('width')),
'height': int_or_none(info.get('height')),
'filesize': int_or_none(info.get('size')),
'fps': int_or_none(info.get('framerate')),
'vbr': float_or_none(info.get('bitrate'), 1000)
})
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'description': video.get('description'),
'thumbnail': self._proto_relative_url(video.get('thumbnail_url')),
'uploader': video.get('owner', {}).get('user_name'),
'timestamp': float_or_none(video.get('date_added')),
'duration': float_or_none(video.get('duration')),
'view_count': int_or_none(video.get('plays')),
'formats': formats
}
| gpl-2.0 | -6,422,476,062,328,418,000 | 33.741071 | 103 | 0.492675 | false |
mmarchini/python-mingus | mingus/extra/tunings.py | 10 | 26880 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# mingus - Music theory Python package, tunings module.
# Copyright (C) 2009, Bart Spaans
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Dozens of standard tunings, a StringTuning class and some functions to help
you search through them."""
from mingus.containers.note import Note
from mingus.containers.note_container import NoteContainer
from mingus.core.mt_exceptions import RangeError
import mingus.core.notes as notes
class StringTuning(object):
"""A class to store and work with tunings and fingerings."""
def __init__(self, instrument, description, tuning):
"""Create a new StringTuning instance.
The instrument and description parameters should be strings; tuning
should be a list of strings or a list of lists of strings that
denote courses.
See tunings.add_tuning for examples.
"""
self.instrument = instrument
self.tuning = []
# convert to Note
for x in tuning:
if type(x) == list:
self.tuning.append([Note(n) for n in x])
else:
self.tuning.append(Note(x))
self.description = description
def count_strings(self):
"""Return the number of strings."""
return len(self.tuning)
def count_courses(self):
"""Return the average number of courses per string."""
c = 0
for x in self.tuning:
if type(x) == list:
c += len(x)
else:
c += 1
return float(c) / len(self.tuning)
def find_frets(self, note, maxfret=24):
"""Return a list with for each string the fret on which the note is
played or None if it can't be played on that particular string.
The maxfret parameter is the highest fret that can be played; note
should either be a string or a Note object.
Example:
>>> t = tunings.StringTuning('test', 'test', ['A-3', 'E-4'])
        >>> t.find_frets(Note('C-4'))
        [3, None]
        >>> t.find_frets(Note('A-4'))
        [12, 5]
"""
result = []
if type(note) == str:
note = Note(note)
for x in self.tuning:
if type(x) == list:
base = x[0]
else:
base = x
diff = base.measure(note)
if 0 <= diff <= maxfret:
result.append(diff)
else:
result.append(None)
return result
def find_fingering(self, notes, max_distance=4, not_strings=[]):
"""Return a list [(string, fret)] of possible fingerings for
'notes'.
The notes parameter should be a list of strings or Notes or a
NoteContainer; max_distance denotes the maximum distance between
        frets; not_strings can be used to exclude certain strings and is
used internally to recurse.
Example:
>>> t = tunings.StringTuning('test', 'test', ['A-3', 'E-4', 'A-5'])
>>> t.find_fingering(['E-4', 'B-4'])
[[(0, 7), (1, 7)], [(1, 0), (0, 14)]]
"""
if notes is None:
return []
if len(notes) == 0:
return []
first = notes[0]
notes = notes[1:]
frets = self.find_frets(first)
result = []
for (string, fret) in enumerate(frets):
if fret is not None and string not in not_strings:
if len(notes) > 0:
# recursively find fingerings for
# remaining notes
r = self.find_fingering(notes, max_distance, not_strings
+ [string])
if r != []:
for f in r:
result.append([(string, fret)] + f)
else:
result.append([(string, fret)])
# filter impossible fingerings and sort
res = []
for r in result:
(min, max) = (1000, -1)
frets = 0
for (string, fret) in r:
if fret > max:
max = fret
if fret < min and fret != 0:
min = fret
frets += fret
if 0 <= max - min < max_distance or min == 1000 or max == -1:
res.append((frets, r))
return [r for (_, r) in sorted(res)]
def find_chord_fingering(self, notes, max_distance=4, maxfret=18,
max_fingers=4, return_best_as_NoteContainer=False):
"""Return a list of fret lists that are considered possible fingerings.
This function only looks at and matches on the note _names_ so it
does more than find_fingering.
Example:
>>> t = tunings.get_tuning('guitar', 'standard', 6, 1)
>>> t.find_chord_fingering(NoteContainer().from_chord('Am'))
[[0, 0, 2, 2, 1, 0], [0, 3, 2, 2, 1, 0], ......]
"""
def follow(string, next, name, prev=-1):
"""Follow the fret 'next' on 'string'; build result on the way."""
if string >= len(self.tuning) - 1:
return [[(next, name)]]
result = []
cur = res[string][next]
if cur != []:
for y in cur[1]:
for sub in follow(string + 1, y[0], y[1]):
if prev < 0:
result.append([(next, name)] + sub)
else:
if sub[0][0] == 0 or abs(sub[0][0] - prev)\
< max_distance:
result.append([(next, name)] + sub)
for s in follow(string + 1, maxfret + 1, None, next):
result.append([(next, name)] + s)
return [[(next, name)]] if result == [] else result
def make_lookup_table():
"""Prepare the lookup table.
table[string][fret] = (name, dest_frets)
"""
res = [[[] for x in xrange(maxfret + 2)] for x in
xrange(len(self.tuning) - 1)]
for x in xrange(0, len(self.tuning) - 1):
addedNone = -1
next = fretdict[x + 1]
for (fret, name) in fretdict[x]:
for (f2, n2) in next:
if n2 != name and (f2 == 0 or abs(fret - f2)
< max_distance):
if res[x][fret] != []:
res[x][fret][1].append((f2, n2))
else:
res[x][fret] = (name, [(f2, n2)])
if addedNone < x:
if res[x][maxfret + 1] != []:
res[x][maxfret + 1][1].append((f2, n2))
else:
res[x][maxfret + 1] = (None, [(f2, n2)])
addedNone = x
return res
# Convert to NoteContainer if necessary
n = notes
if notes != [] and type(notes) == list and type(notes[0]) == str:
n = NoteContainer(notes)
# Check number of note names.
notenames = [x.name for x in n]
if len(notenames) == 0 or len(notenames) > len(self.tuning):
return []
# Make string-fret dictionary
fretdict = []
for x in xrange(0, len(self.tuning)):
fretdict.append(self.find_note_names(notes, x, maxfret))
# Build table
res = make_lookup_table()
# Build result using table
result = []
# For each fret on the first string
for (i, y) in enumerate(res[0]):
if y != []:
(yname, next) = (y[0], y[1])
# For each destination fret in y
for (fret, name) in next:
# For each followed result
for s in follow(1, fret, name):
subresult = [(i, yname)] + s
# Get boundaries
(mi, ma, names) = (1000, -1000, [])
for (f, n) in subresult:
if n is not None:
if f != 0 and f <= mi:
mi = f
if f != 0 and f >= ma:
ma = f
names.append(n)
# Enforce boundaries
if abs(ma - mi) < max_distance:
# Check if all note
# names are present
covered = True
for n in notenames:
if n not in names:
covered = False
# Add to result
if covered and names != []:
result.append([y[0] if y[1]
is not None else y[1] for y in
subresult])
# Return semi-sorted list
s = sorted(result, key=lambda x: sum([t if t is not None else 1000
for (i, t) in enumerate(x)]))
s = filter(lambda a: fingers_needed(a) <= max_fingers, s)
if not return_best_as_NoteContainer:
return s
else:
rnotes = self.frets_to_NoteContainer(s[0])
for (i, x) in enumerate(rnotes):
if x.string < len(self.tuning) - 1:
if res[x.string][x.fret] != []:
rnotes[i].name = res[x.string][x.fret][0]
return rnotes
def frets_to_NoteContainer(self, fingering):
"""Convert a list such as returned by find_fret to a NoteContainer."""
res = []
for (string, fret) in enumerate(fingering):
if fret is not None:
res.append(self.get_Note(string, fret))
return NoteContainer(res)
def find_note_names(self, notelist, string=0, maxfret=24):
"""Return a list [(fret, notename)] in ascending order.
Notelist should be a list of Notes, note-strings or a NoteContainer.
Example:
>>> t = tunings.StringTuning('test', 'test', ['A-3', 'A-4'])
>>> t.find_note_names(['A', 'C', 'E'], 0, 12)
        [(0, 'A'), (3, 'C'), (7, 'E'), (12, 'A')]
"""
n = notelist
if notelist != [] and type(notelist[0]) == str:
n = NoteContainer(notelist)
result = []
names = [x.name for x in n]
int_notes = [notes.note_to_int(x) for x in names]
# Base of the string
s = int(self.tuning[string]) % 12
for x in xrange(0, maxfret + 1):
if (s + x) % 12 in int_notes:
result.append((x, names[int_notes.index((s + x) % 12)]))
return result
def get_Note(self, string=0, fret=0, maxfret=24):
"""Return the Note on 'string', 'fret'.
Throw a RangeError if either the fret or string is unplayable.
Examples:
>>> t = tunings.StringTuning('test', 'test', ['A-3', 'A-4'])
        >>> t.get_Note(0, 0)
'A-3'
>>> t.get_Note(0, 1)
'A#-3'
>>> t.get_Note(1, 0)
'A-4'
"""
if 0 <= string < self.count_strings():
if 0 <= fret <= maxfret:
s = self.tuning[string]
if type(s) == list:
s = s[0]
n = Note(int(s) + fret)
n.string = string
n.fret = fret
return n
else:
raise RangeError("Fret '%d' on string '%d' is out of range"
% (string, fret))
else:
raise RangeError("String '%d' out of range" % string)
def fingers_needed(fingering):
"""Return the number of fingers needed to play the given fingering."""
split = False # True if an open string must be played, thereby making any
# subsequent strings impossible to bar with the index finger
indexfinger = False # True if the index finger was already accounted for
# in the count
minimum = min(finger for finger in fingering if finger) # the index finger
# plays the lowest
# finger position
result = 0
for finger in reversed(fingering):
if finger == 0: # an open string is played
split = True # subsequent strings are impossible to bar with the
# index finger
else:
if not split and finger == minimum: # if an open string hasn't been
# played and this is a job for
# the index finger:
if not indexfinger: # if the index finger hasn't been accounted
# for:
result += 1
indexfinger = True # index finger has now been accounted for
else:
result += 1
return result
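# Illustrative behaviour of fingers_needed (comment added for clarity, not part
# of the original module); the shapes below assume standard six-string guitar
# fingerings, listed from the lowest string to the highest:
#   >>> fingers_needed([0, 2, 2, 1, 0, 0])   # open E major shape
#   3
#   >>> fingers_needed([1, 3, 3, 2, 1, 1])   # F major barre shape
#   4
# The barre shape needs four fingers because the index finger covering the
# lowest fretted position is only counted once, however many strings it bars.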
# The index
_known = {}
def add_tuning(instrument, description, tuning):
"""Add a new tuning to the index.
The instrument and description parameters should be strings; tuning
should be a list of strings or a list of lists to denote courses.
Example:
>>> std_strings = ['E-2', 'A-2', 'D-3', 'G-3', 'B-3', 'E-4']
>>> tuning.add_tuning('Guitar', 'standard', std_strings)
>>> tw_strings = [['E-2', 'E-3'], ['A-2', 'A-3'], ...........]
    >>> tuning.add_tuning('Guitar', 'twelve string', tw_strings)
"""
t = StringTuning(instrument, description, tuning)
if _known.has_key(str.upper(instrument)):
_known[str.upper(instrument)][1][str.upper(description)] = t
else:
_known[str.upper(instrument)] = (instrument,
{str.upper(description): t})
def get_tuning(instrument, description, nr_of_strings=None, nr_of_courses=None):
"""Get the first tuning that satisfies the constraints.
The instrument and description arguments are treated like
case-insensitive prefixes. So search for 'bass' is the same is
'Bass Guitar'.
Example:
>>> tunings.get_tuning('guitar', 'standard')
<tunings.StringTuning instance at 0x139ac20>
"""
searchi = str.upper(instrument)
searchd = str.upper(description)
keys = _known.keys()
for x in keys:
if (searchi not in keys and x.find(searchi) == 0 or searchi in keys and
x == searchi):
for (desc, tun) in _known[x][1].iteritems():
if desc.find(searchd) == 0:
if nr_of_strings is None and nr_of_courses is None:
return tun
elif nr_of_strings is not None and nr_of_courses is None:
if tun.count_strings() == nr_of_strings:
return tun
elif nr_of_strings is None and nr_of_courses is not None:
if tun.count_courses() == nr_of_courses:
return tun
else:
if tun.count_courses() == nr_of_courses\
and tun.count_strings() == nr_of_strings:
return tun
def get_tunings(instrument=None, nr_of_strings=None, nr_of_courses=None):
"""Search tunings on instrument, strings, courses or a combination.
The instrument is actually treated like a case-insensitive prefix. So
asking for 'bass' yields the same tunings as 'Bass Guitar'; the string
'ba' yields all the instruments starting with 'ba'.
Example:
    >>> tunings.get_tunings(nr_of_strings = 4)
>>> tunings.get_tunings('bass')
"""
search = ''
if instrument is not None:
search = str.upper(instrument)
result = []
keys = _known.keys()
inkeys = search in keys
for x in keys:
if (instrument is None or not inkeys and x.find(search) == 0 or
inkeys and search == x):
if nr_of_strings is None and nr_of_courses is None:
result += _known[x][1].values()
elif nr_of_strings is not None and nr_of_courses is None:
result += [y for y in _known[x][1].itervalues()
if y.count_strings() == nr_of_strings]
elif nr_of_strings is None and nr_of_courses is not None:
result += [y for y in _known[x][1].itervalues()
if y.count_courses() == nr_of_courses]
else:
result += [y for y in _known[x][1].itervalues()
if y.count_strings() == nr_of_strings
and y.count_courses() == nr_of_courses]
return result
def get_instruments():
"""Return a sorted list of instruments that have string tunings defined
for them."""
return sorted([_known[upname][0] for upname in _known])
add_tuning('Baglamas (Greek)', 'Modal D standard tuning', [['D-4', 'D-5'],
['A-4', 'A-4'], ['D-5', 'D-5']])
add_tuning('Bajo quinto', 'Standard tuning.', [['A-2', 'A-1'], ['D-3', 'D-2'],
['G-2', 'G-2'], ['C-3', 'C-3'], ['F-3', 'F-3']])
add_tuning('Bajo Sexto', 'Standard tuning', [
['E-2', 'E-1'],
['A-2', 'A-1'],
['D-3', 'D-2'],
['G-2', 'G-2'],
['C-3', 'C-3'],
['F-3', 'F-3'],
])
add_tuning('Bandola Oriental', 'Standard tuning.', [['G-3', 'G-3'], ['D-4',
'D-4'], ['A-4', 'A-4'], ['E-5', 'E-5']])
add_tuning('Banjo (bass)',
           'A cello banjo is sometimes called a "bass banjo", but there are true bass banjos as well'
, ['E-1', 'A-1', 'D-2', 'G-2'])
add_tuning('Banjo (cello)', 'Standard tuning. Same as cello and mandocello',
['C-2', 'G-2', 'D-3', 'A-3'])
add_tuning('Banjo (tenor)', 'Standard tenor jazz tuning', ['C-3', 'G-3', 'D-4',
'A-4'])
add_tuning('Banjo (tenor)', 'Irish tenor tuning', ['G-2', 'D-3', 'A-3', 'E-4'])
add_tuning('Banjo (5-string)', 'Open G tuning', ['G-4', 'D-3', 'G-3', 'B-3',
'D-4'])
add_tuning('Baritone guitar', 'Standard 5th lower tuning', [
'A-1',
'D-2',
'G-2',
'C-3',
'E-3',
'A-3',
])
add_tuning('Baritone guitar', 'Octave lower tuning', [
'E-1',
'A-1',
'D-2',
'G-2',
'B-2',
'E-3',
])
add_tuning('Bass guitar', 'Standard 4-string tuning', ['E-1', 'A-1', 'D-2',
'G-2'])
add_tuning('Bass guitar', 'Standard 5-string tuning', ['B-0', 'E-1', 'A-1',
'D-2', 'G-2'])
add_tuning('Bass guitar', 'Alternate 5-string tuning', ['E-1', 'A-1', 'D-2',
'G-2', 'C-3'])
add_tuning('Bass guitar', 'Standard 6-string tuning', [
'B-0',
'E-1',
'A-1',
'D-2',
'G-2',
'C-3',
])
add_tuning('Cello', 'Standard tuning', ['C-2', 'G-2', 'D-3', 'A-3'])
add_tuning('Cello', '"5th Suite" tuning', ['C-2', 'G-2', 'D-3', 'G-3'])
add_tuning('Cello banjo', 'Standard tuning', ['C-2', 'G-2', 'D-3', 'A-3'])
add_tuning('Charango', 'Standard C6 tuning. 3rd course is an octave pair.',
[['G-4', 'G-4'], ['C-4', 'C-4'], ['E-5', 'E-4'], ['A-4', 'A-4'],
['E-5', 'E-5']])
add_tuning('Charangon', 'F6 tuning', [['C-4', 'C-4'], ['F-4', 'F-4'], ['A-5',
'A-4'], ['D-5', 'D-5'], ['A-5', 'A-5']])
add_tuning('Charangon', 'G6 tuning', [['D-4', 'D-4'], ['G-4', 'G-4'], ['B-5',
'B-4'], ['E-5', 'E-5'], ['B-5', 'B-5']])
add_tuning('Cuatro', 'Standard tuning', [['B-3', 'B-2'], ['E-4', 'E-3'], ['A-3'
, 'A-3'], ['D-4', 'D-4'], ['G-4', 'G-4']])
add_tuning('Double bass', 'Orchestral tuning', ['E-1', 'A-1', 'D-2', 'G-2'])
add_tuning('Dulcimer',
'Ionian Tuning (The traditional dulcimer is fretted diatonically whole, whole, half, whole, whole, half, whole. )'
, ['A-3', 'A-3', 'D-3'])
add_tuning('Dulcimer', 'Mixolydian Tuning', ['D-4', 'A-3', 'D-3'])
add_tuning('Dulcimer', 'Dorian Tuning', ['G-3', 'A-3', 'D-3'])
add_tuning('Dulcimer', 'Aeolian Tuning', ['C-4', 'A-3', 'D-3'])
add_tuning('Fiddle', 'Standard tuning', ['G-3', 'D-4', 'A-4', 'E-5'])
add_tuning('Fiddle', 'Cajun tuning', ['F-3', 'C-4', 'G-4', 'F-5'])
add_tuning('Fiddle', 'Open G tuning', ['G-3', 'D-4', 'G-4', 'B-4'])
add_tuning('Fiddle', 'Sawmill tuning', ['G-3', 'D-4', 'G-4', 'D-5'])
add_tuning('Fiddle', '"Gee-dad"', ['G-3', 'D-4', 'A-4', 'D-5'])
add_tuning('Fiddle', 'Open D tuning', ['D-3', 'D-4', 'A-4', 'D-5'])
add_tuning('Fiddle', 'Old-timey D tuning', ['A-3', 'D-4', 'A-4', 'E-5'])
add_tuning('Fiddle', 'Cross Tuning, High bass, high counter', ['A-3', 'E-4',
'A-4', 'E-5'])
add_tuning('Gadulka', '3 playing strings, with up to 10 sympathetic strings.',
['A-3', 'E-3', 'A-4'])
add_tuning('Greek Bouzouki', 'Standard F6 tuning', [['C-3', 'C-4'], ['F-3',
'F-4'], ['A-3', 'A-3'], ['D-4', 'D-4']])
add_tuning('Greek Bouzouki', 'Standard F6 tuning', [['D-3', 'D-4'], ['A-3',
'A-3'], ['D-4', 'D-4']])
add_tuning('Guitar', 'Standard tuning', [
'E-2',
'A-2',
'D-3',
'G-3',
'B-3',
'E-4',
])
add_tuning('Guitar', '*DADGAD* Dsus4 tuning', [
'D-2',
'A-2',
'D-3',
'G-3',
'A-3',
'D-4',
])
add_tuning('Guitar', 'Double drop D tuning', [
'D-2',
'A-2',
'D-3',
'G-3',
'B-3',
'D-4',
])
add_tuning('Guitar', 'Drop D tuning', [
'D-2',
'A-2',
'D-3',
'G-3',
'B-3',
'E-4',
])
add_tuning('Guitar', 'Open C major tuning', [
'C-2',
'G-2',
'C-3',
'G-3',
'C-3',
'E-4',
])
add_tuning('Guitar', 'Open E minor tuning', [
'E-2',
'B-2',
'E-3',
'G-3',
'B-3',
'E-4',
])
add_tuning('Guitar', 'Open G major tuning', [
'D-2',
'G-2',
'D-3',
'G-3',
'B-3',
'D-4',
])
add_tuning('Guitar',
'Standard tuning. Some players tune the second course G string to unison to minimize breakage.'
, [
['E-2', 'E-3'],
['A-2', 'A-3'],
['D-3', 'D-4'],
['G-3', 'G-4'],
['B-3', 'B-3'],
['E-4', 'E-4'],
])
add_tuning('Guitar Banjo', 'Standard guitar tuning', [
'E-2',
'A-2',
'D-3',
'G-3',
'B-3',
'E-4',
])
add_tuning("Guitarrón", 'Standard tuning', [
'A-1',
'D-2',
'G-2',
'C-3',
'E-3',
'A-2',
])
add_tuning('Huapanguera', '', ['G-2', ['D-3', 'D-4'], ['G-3', 'G-3'], ['B-3',
'B-3'], 'E-3'])
add_tuning('Irish bouzouki', 'Irish tuning (octaves)', [['G-3', 'G-2'], ['D-4',
'D-3'], ['A-3', 'A-3'], ['D-4', 'D-4']])
add_tuning('Irish bouzouki', 'Irish tuning (unison pairs)', [['G-2', 'G-2'],
['D-3', 'D-3'], ['A-3', 'A-3'], ['D-4', 'D-4']])
add_tuning('Irish bouzouki', '"Mandolin" tuning (octaves)', [['G-3', 'G-2'],
['D-4', 'D-3'], ['A-3', 'A-3'], ['E-4', 'E-4']])
add_tuning('Irish bouzouki', '"Mandolin" tuning (unison pairs)', [['G-2', 'G-2'
], ['D-3', 'D-3'], ['A-3', 'A-3'], ['E-4', 'E-4']])
add_tuning('Irish bouzouki', 'Modal D tuning (octaves)', [['A-3', 'A-2'], ['D-4'
, 'D-3'], ['A-3', 'A-3'], ['D-4', 'D-4']])
add_tuning('Irish bouzouki', 'Modal D tuning (unison pairs)', [['A-2', 'A-2'],
['D-3', 'D-3'], ['A-3', 'A-3'], ['D-4', 'D-4']])
add_tuning('Mandobass', 'Standard tuning', ['E-1', 'A-1', 'D-2', 'G-2'])
add_tuning('Mandola',
'Standard tuning. Pitched a 5th below mandolin tuning. Known in Europe as the tenor mandola.'
, [['C-3', 'C-3'], ['G-3', 'G-3'], ['D-4', 'D-4'], ['A-4', 'A-4']])
add_tuning('Mandocello', 'Standard tuning. Pitched an octave below the mandola.'
, [['C-2', 'C-2'], ['G-2', 'G-2'], ['D-3', 'D-3'], ['A-3', 'A-3']])
add_tuning('Mandolin', 'Standard tuning', [['G-3', 'G-3'], ['D-4', 'D-4'],
['A-4', 'A-4'], ['E-5', 'E-5']])
add_tuning('Mandolin (piccolo)', 'Standard tuning', [['C-4', 'C-4'], ['G-4',
'G-4'], ['D-5', 'D-5'], ['A-5', 'A-5']])
add_tuning('Mandolin (Octave)',
'Standard tuning. Known in Europe as the octave mandola. Pitched an octave below the mandolin.'
, [['G-2', 'G-2'], ['D-3', 'D-3'], ['A-3', 'A-3'], ['E-4', 'E-4']])
add_tuning('Mejorana', 'Standard tuning', ['D-4', 'A-4', 'A-3', 'B-3', 'E-4'])
add_tuning('Mejorana', 'Alternative tuning', ['D-4', 'G-4', 'G-3', 'B-3', 'E-3'
])
add_tuning('Octave Guitar', 'see *Soprano guitar*', [
'E-3',
'A-4',
'D-4',
'G-4',
'B-4',
'E-5',
])
add_tuning('Requinto', 'Standard tuning', [
'A-2',
'D-3',
'G-3',
'C-4',
'E-4',
'A-4',
])
add_tuning('Ronroco', 'Standard C6 tuning (tuned an octave below the charango).'
, [['G-3', 'G-3'], ['C-3', 'C-3'], ['E-4', 'E-3'], ['A-3', 'A-3'],
['E-4', 'E-4']])
add_tuning('Soprano guitar', 'Standard tuning', [
'E-3',
'A-4',
'D-4',
'G-4',
'B-4',
'E-5',
])
add_tuning('Taro patch',
'Standard C6 tuning. The taro patch is a double-string ukulele.',
[['G-3', 'G-4'], ['C-3', 'C-4'], ['E-4', 'E-4'], ['A-4', 'A-4']])
add_tuning('Tenor guitar', 'Standard tuning.', ['C-3', 'G-3', 'D-4', 'A-4'])
add_tuning('Tiple', 'Standard Colombian G6 tuning.', [['D-4', 'D-3', 'D-4'],
['G-4', 'G-3', 'G-4'], ['B-3', 'B-3', 'B-3'], ['E-4', 'E-4', 'E-4']])
add_tuning('Tres', 'Standard C major tuning', [['G-4', 'G-3'], ['C-4', 'C-4'],
['E-4', 'E-3']])
add_tuning('Ukulele', 'Standard C6 tuning for soprano, concert and tenor.',
['G-4', 'C-4', 'E-4', 'A-4'])
add_tuning('Viola', 'Standard tuning. Pitched a 5th below the violin.', ['C-3',
'G-3', 'D-4', 'A-4'])
add_tuning('Violin', 'Standard tuning', ['G-3', 'D-4', 'A-4', 'E-5'])
add_tuning('Violin', 'Cajun tuning to accompany accordion', ['F-3', 'C-4', 'G-4'
, 'D-5'])
add_tuning('Walaycho', 'F6 tuning', [['C-4', 'C-4'], ['F-4', 'F-4'], ['A-5',
'A-4'], ['D-5', 'D-5'], ['A-5', 'A-5']])
add_tuning('Walaycho', 'G6 tuning', [['D-4', 'D-4'], ['G-4', 'G-4'], ['B-5',
'B-4'], ['E-5', 'E-5'], ['B-5', 'B-5']])
| gpl-3.0 | 2,327,941,679,829,215,000 | 37.343795 | 125 | 0.478515 | false |
rizzatti/luigi | luigi/tools/range.py | 12 | 31004 | # -*- coding: utf-8 -*-
# Copyright (c) 2014 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""
Produces contiguous completed ranges of recurring tasks.
See RangeDaily and RangeHourly for basic usage.
Caveat - if gaps accumulate and their causes (e.g. missing dependencies) go
unmonitored/unmitigated, then this will eventually keep retrying the same gaps
over and over and make no progress to more recent times. (See 'task_limit' and
'reverse' parameters.)
TODO foolproof against that kind of misuse?
"""
import itertools
import functools
import logging
import warnings
import operator
import re
import time
from datetime import datetime, timedelta
from luigi import six
import luigi
from luigi.parameter import ParameterException
from luigi.target import FileSystemTarget
from luigi.task import Register, flatten_output
logger = logging.getLogger('luigi-interface')
class RangeEvent(luigi.Event): # Not sure if subclassing currently serves a purpose. Stringly typed, events are.
"""
Events communicating useful metrics.
COMPLETE_COUNT would normally be nondecreasing, and its derivative would
describe performance (how many instances complete
invocation-over-invocation).
COMPLETE_FRACTION reaching 1 would be a telling event in case of a backfill
with defined start and stop. Would not be strikingly useful for a typical
recurring task without stop defined, fluctuating close to 1.
DELAY is measured from the first found missing datehour till (current time
+ hours_forward), or till stop if it is defined. In hours for Hourly.
TBD different units for other frequencies?
TODO any different for reverse mode? From first missing till last missing?
From last gap till stop?
"""
COMPLETE_COUNT = "event.tools.range.complete.count"
COMPLETE_FRACTION = "event.tools.range.complete.fraction"
DELAY = "event.tools.range.delay"
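# A minimal sketch of consuming these events (added for illustration, not part
# of the original file). It assumes luigi's usual event_handler registration;
# 'statsd' stands for any hypothetical metrics client:
#
#   @RangeHourly.event_handler(RangeEvent.DELAY)
#   def report_delay(task_family, delay):
#       statsd.gauge('range.delay.' + task_family, delay)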
class RangeBase(luigi.WrapperTask):
"""
Produces a contiguous completed range of a recurring task.
Made for the common use case where a task is parameterized by e.g.
DateParameter, and assurance is needed that any gaps arising from downtime
are eventually filled.
Emits events that one can use to monitor gaps and delays.
At least one of start and stop needs to be specified.
(This is quite an abstract base class for subclasses with different
datetime parameter class, e.g. DateParameter, DateHourParameter, ..., and
different parameter naming, e.g. days_back/forward, hours_back/forward,
..., as well as different documentation wording, for good user experience.)
Subclasses will need to use the ``of`` parameter when overriding methods.
"""
# TODO lift the single parameter constraint by passing unknown parameters through WrapperTask?
of = luigi.TaskParameter(
description="task name to be completed. The task must take a single datetime parameter")
of_params = luigi.DictParameter(default=dict(), description="Arguments to be provided to the 'of' class when instantiating")
# The common parameters 'start' and 'stop' have type (e.g. DateParameter,
# DateHourParameter) dependent on the concrete subclass, cumbersome to
# define here generically without dark magic. Refer to the overrides.
start = luigi.Parameter()
stop = luigi.Parameter()
reverse = luigi.BoolParameter(
default=False,
description="specifies the preferred order for catching up. False - work from the oldest missing outputs onward; True - from the newest backward")
task_limit = luigi.IntParameter(
default=50,
description="how many of 'of' tasks to require. Guards against scheduling insane amounts of tasks in one go")
# TODO overridable exclude_datetimes or something...
now = luigi.IntParameter(
default=None,
description="set to override current time. In seconds since epoch")
param_name = luigi.Parameter(
default=None,
description="parameter name used to pass in parameterized value. Defaults to None, meaning use first positional parameter",
positional=False)
@property
def of_cls(self):
"""
DONT USE. Will be deleted soon. Use ``self.of``!
"""
if isinstance(self.of, six.string_types):
            warnings.warn('When using Range programmatically, do not pass "of" param as string!')
return Register.get_task_cls(self.of)
return self.of
# a bunch of datetime arithmetic building blocks that need to be provided in subclasses
def datetime_to_parameter(self, dt):
raise NotImplementedError
def parameter_to_datetime(self, p):
raise NotImplementedError
def datetime_to_parameters(self, dt):
"""
Given a date-time, will produce a dictionary of of-params combined with the ranged task parameter
"""
raise NotImplementedError
def parameters_to_datetime(self, p):
"""
Given a dictionary of parameters, will extract the ranged task parameter value
"""
raise NotImplementedError
def moving_start(self, now):
"""
Returns a datetime from which to ensure contiguousness in the case when
start is None or unfeasibly far back.
"""
raise NotImplementedError
def moving_stop(self, now):
"""
Returns a datetime till which to ensure contiguousness in the case when
stop is None or unfeasibly far forward.
"""
raise NotImplementedError
def finite_datetimes(self, finite_start, finite_stop):
"""
Returns the individual datetimes in interval [finite_start, finite_stop)
for which task completeness should be required, as a sorted list.
"""
raise NotImplementedError
def _emit_metrics(self, missing_datetimes, finite_start, finite_stop):
"""
For consistent metrics one should consider the entire range, but
it is open (infinite) if stop or start is None.
Hence make do with metrics respective to the finite simplification.
"""
datetimes = self.finite_datetimes(
finite_start if self.start is None else min(finite_start, self.parameter_to_datetime(self.start)),
finite_stop if self.stop is None else max(finite_stop, self.parameter_to_datetime(self.stop)))
delay_in_jobs = len(datetimes) - datetimes.index(missing_datetimes[0]) if datetimes and missing_datetimes else 0
self.trigger_event(RangeEvent.DELAY, self.of_cls.task_family, delay_in_jobs)
expected_count = len(datetimes)
complete_count = expected_count - len(missing_datetimes)
self.trigger_event(RangeEvent.COMPLETE_COUNT, self.of_cls.task_family, complete_count)
self.trigger_event(RangeEvent.COMPLETE_FRACTION, self.of_cls.task_family, float(complete_count) / expected_count if expected_count else 1)
def _format_datetime(self, dt):
return self.datetime_to_parameter(dt)
def _format_range(self, datetimes):
param_first = self._format_datetime(datetimes[0])
param_last = self._format_datetime(datetimes[-1])
return '[%s, %s]' % (param_first, param_last)
def _instantiate_task_cls(self, param):
return self.of(**self._task_parameters(param))
@property
def _param_name(self):
if self.param_name is None:
return next(x[0] for x in self.of.get_params() if x[1].positional)
else:
return self.param_name
def _task_parameters(self, param):
kwargs = dict(**self.of_params)
kwargs[self._param_name] = param
return kwargs
def requires(self):
# cache because we anticipate a fair amount of computation
if hasattr(self, '_cached_requires'):
return self._cached_requires
if not self.start and not self.stop:
raise ParameterException("At least one of start and stop needs to be specified")
if not self.start and not self.reverse:
raise ParameterException("Either start needs to be specified or reverse needs to be True")
if self.start and self.stop and self.start > self.stop:
raise ParameterException("Can't have start > stop")
# TODO check overridden complete() and exists()
now = datetime.utcfromtimestamp(time.time() if self.now is None else self.now)
moving_start = self.moving_start(now)
finite_start = moving_start if self.start is None else max(self.parameter_to_datetime(self.start), moving_start)
moving_stop = self.moving_stop(now)
finite_stop = moving_stop if self.stop is None else min(self.parameter_to_datetime(self.stop), moving_stop)
datetimes = self.finite_datetimes(finite_start, finite_stop) if finite_start <= finite_stop else []
if datetimes:
logger.debug('Actually checking if range %s of %s is complete',
self._format_range(datetimes), self.of_cls.task_family)
missing_datetimes = sorted(self._missing_datetimes(datetimes))
logger.debug('Range %s lacked %d of expected %d %s instances',
self._format_range(datetimes), len(missing_datetimes), len(datetimes), self.of_cls.task_family)
else:
missing_datetimes = []
logger.debug('Empty range. No %s instances expected', self.of_cls.task_family)
self._emit_metrics(missing_datetimes, finite_start, finite_stop)
if self.reverse:
required_datetimes = missing_datetimes[-self.task_limit:]
else:
required_datetimes = missing_datetimes[:self.task_limit]
if required_datetimes:
logger.debug('Requiring %d missing %s instances in range %s',
len(required_datetimes), self.of_cls.task_family, self._format_range(required_datetimes))
if self.reverse:
required_datetimes.reverse() # TODO priorities, so that within the batch tasks are ordered too
self._cached_requires = [self._instantiate_task_cls(self.datetime_to_parameter(d)) for d in required_datetimes]
return self._cached_requires
def missing_datetimes(self, finite_datetimes):
"""
Override in subclasses to do bulk checks.
Returns a sorted list.
This is a conservative base implementation that brutally checks completeness, instance by instance.
Inadvisable as it may be slow.
"""
return [d for d in finite_datetimes if not self._instantiate_task_cls(self.datetime_to_parameter(d)).complete()]
def _missing_datetimes(self, finite_datetimes):
"""
Backward compatible wrapper. Will be deleted eventually (stated on Dec 2015)
"""
try:
return self.missing_datetimes(finite_datetimes)
except TypeError as ex:
if 'missing_datetimes()' in repr(ex):
warnings.warn('In your Range* subclass, missing_datetimes() should only take 1 argument (see latest docs)')
return self.missing_datetimes(self.of_cls, finite_datetimes)
else:
raise
class RangeDailyBase(RangeBase):
"""
Produces a contiguous completed range of a daily recurring task.
"""
start = luigi.DateParameter(
default=None,
description="beginning date, inclusive. Default: None - work backward forever (requires reverse=True)")
stop = luigi.DateParameter(
default=None,
description="ending date, exclusive. Default: None - work forward forever")
days_back = luigi.IntParameter(
default=100, # slightly more than three months
description=("extent to which contiguousness is to be assured into "
"past, in days from current time. Prevents infinite loop "
"when start is none. If the dataset has limited retention"
" (i.e. old outputs get removed), this should be set "
"shorter to that, too, to prevent the oldest outputs "
"flapping. Increase freely if you intend to process old "
"dates - worker's memory is the limit"))
days_forward = luigi.IntParameter(
default=0,
description="extent to which contiguousness is to be assured into future, in days from current time. Prevents infinite loop when stop is none")
def datetime_to_parameter(self, dt):
return dt.date()
def parameter_to_datetime(self, p):
return datetime(p.year, p.month, p.day)
def datetime_to_parameters(self, dt):
"""
Given a date-time, will produce a dictionary of of-params combined with the ranged task parameter
"""
return self._task_parameters(dt.date())
def parameters_to_datetime(self, p):
"""
Given a dictionary of parameters, will extract the ranged task parameter value
"""
dt = p[self._param_name]
return datetime(dt.year, dt.month, dt.day)
def moving_start(self, now):
return now - timedelta(days=self.days_back)
def moving_stop(self, now):
return now + timedelta(days=self.days_forward)
def finite_datetimes(self, finite_start, finite_stop):
"""
Simply returns the points in time that correspond to turn of day.
"""
date_start = datetime(finite_start.year, finite_start.month, finite_start.day)
dates = []
for i in itertools.count():
t = date_start + timedelta(days=i)
if t >= finite_stop:
return dates
if t >= finite_start:
dates.append(t)
class RangeHourlyBase(RangeBase):
"""
Produces a contiguous completed range of an hourly recurring task.
"""
start = luigi.DateHourParameter(
default=None,
description="beginning datehour, inclusive. Default: None - work backward forever (requires reverse=True)")
stop = luigi.DateHourParameter(
default=None,
description="ending datehour, exclusive. Default: None - work forward forever")
hours_back = luigi.IntParameter(
default=100 * 24, # slightly more than three months
description=("extent to which contiguousness is to be assured into "
"past, in hours from current time. Prevents infinite "
"loop when start is none. If the dataset has limited "
"retention (i.e. old outputs get removed), this should "
"be set shorter to that, too, to prevent the oldest "
"outputs flapping. Increase freely if you intend to "
"process old dates - worker's memory is the limit"))
# TODO always entire interval for reprocessings (fixed start and stop)?
hours_forward = luigi.IntParameter(
default=0,
description="extent to which contiguousness is to be assured into future, in hours from current time. Prevents infinite loop when stop is none")
def datetime_to_parameter(self, dt):
return dt
def parameter_to_datetime(self, p):
return p
def datetime_to_parameters(self, dt):
"""
Given a date-time, will produce a dictionary of of-params combined with the ranged task parameter
"""
return self._task_parameters(dt)
def parameters_to_datetime(self, p):
"""
Given a dictionary of parameters, will extract the ranged task parameter value
"""
return p[self._param_name]
def moving_start(self, now):
return now - timedelta(hours=self.hours_back)
def moving_stop(self, now):
return now + timedelta(hours=self.hours_forward)
def finite_datetimes(self, finite_start, finite_stop):
"""
Simply returns the points in time that correspond to whole hours.
"""
datehour_start = datetime(finite_start.year, finite_start.month, finite_start.day, finite_start.hour)
datehours = []
for i in itertools.count():
t = datehour_start + timedelta(hours=i)
if t >= finite_stop:
return datehours
if t >= finite_start:
datehours.append(t)
def _format_datetime(self, dt):
return luigi.DateHourParameter().serialize(dt)
class RangeByMinutesBase(RangeBase):
"""
Produces a contiguous completed range of an recurring tasks separated a specified number of minutes.
"""
start = luigi.DateMinuteParameter(
default=None,
description="beginning date-hour-minute, inclusive. Default: None - work backward forever (requires reverse=True)")
stop = luigi.DateMinuteParameter(
default=None,
description="ending date-hour-minute, exclusive. Default: None - work forward forever")
minutes_back = luigi.IntParameter(
default=60*24, # one day
description=("extent to which contiguousness is to be assured into "
"past, in minutes from current time. Prevents infinite "
"loop when start is none. If the dataset has limited "
"retention (i.e. old outputs get removed), this should "
"be set shorter to that, too, to prevent the oldest "
"outputs flapping. Increase freely if you intend to "
"process old dates - worker's memory is the limit"))
minutes_forward = luigi.IntParameter(
default=0,
description="extent to which contiguousness is to be assured into future, "
"in minutes from current time. Prevents infinite loop when stop is none")
minutes_interval = luigi.IntParameter(
default=1,
description="separation between events in minutes. It must evenly divide 60"
)
def datetime_to_parameter(self, dt):
return dt
def parameter_to_datetime(self, p):
return p
def datetime_to_parameters(self, dt):
"""
Given a date-time, will produce a dictionary of of-params combined with the ranged task parameter
"""
return self._task_parameters(dt)
def parameters_to_datetime(self, p):
"""
Given a dictionary of parameters, will extract the ranged task parameter value
"""
dt = p[self._param_name]
return datetime(dt.year, dt.month, dt.day, dt.hour, dt.minute)
def moving_start(self, now):
return now - timedelta(minutes=self.minutes_back)
def moving_stop(self, now):
return now + timedelta(minutes=self.minutes_forward)
def finite_datetimes(self, finite_start, finite_stop):
"""
Simply returns the points in time that correspond to a whole number of minutes intervals.
"""
# Validate that the minutes_interval can divide 60 and it is greater than 0 and lesser than 60
if not (0 < self.minutes_interval < 60):
raise ParameterException('minutes-interval must be within 0..60')
if (60 / self.minutes_interval) * self.minutes_interval != 60:
raise ParameterException('minutes-interval does not evenly divide 60')
# start of a complete interval, e.g. 20:13 and the interval is 5 -> 20:10
start_minute = int(finite_start.minute/self.minutes_interval)*self.minutes_interval
datehour_start = datetime(
year=finite_start.year,
month=finite_start.month,
day=finite_start.day,
hour=finite_start.hour,
minute=start_minute)
datehours = []
for i in itertools.count():
t = datehour_start + timedelta(minutes=i*self.minutes_interval)
if t >= finite_stop:
return datehours
if t >= finite_start:
datehours.append(t)
def _format_datetime(self, dt):
return luigi.DateMinuteParameter().serialize(dt)
def _constrain_glob(glob, paths, limit=5):
"""
Tweaks glob into a list of more specific globs that together still cover paths and not too much extra.
Saves us minutes long listings for long dataset histories.
Specifically, in this implementation the leftmost occurrences of "[0-9]"
give rise to a few separate globs that each specialize the expression to
digits that actually occur in paths.
"""
def digit_set_wildcard(chars):
"""
Makes a wildcard expression for the set, a bit readable, e.g. [1-5].
"""
chars = sorted(chars)
if len(chars) > 1 and ord(chars[-1]) - ord(chars[0]) == len(chars) - 1:
return '[%s-%s]' % (chars[0], chars[-1])
else:
return '[%s]' % ''.join(chars)
current = {glob: paths}
while True:
pos = list(current.keys())[0].find('[0-9]')
if pos == -1:
# no wildcard expressions left to specialize in the glob
return list(current.keys())
char_sets = {}
for g, p in six.iteritems(current):
char_sets[g] = sorted(set(path[pos] for path in p))
if sum(len(s) for s in char_sets.values()) > limit:
return [g.replace('[0-9]', digit_set_wildcard(char_sets[g]), 1) for g in current]
for g, s in six.iteritems(char_sets):
for c in s:
new_glob = g.replace('[0-9]', c, 1)
new_paths = list(filter(lambda p: p[pos] == c, current[g]))
current[new_glob] = new_paths
del current[g]
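# Rough illustration (not part of the original file): with only two matching
# paths the glob collapses all the way down to them,
#   _constrain_glob('/log/[0-9][0-9][0-9][0-9]', ['/log/2013', '/log/2014'])
#   -> ['/log/2013', '/log/2014']
# whereas with many distinct years it stops once the limit is hit and keeps
# compact digit-set wildcards such as '/log/201[0-9]'.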
def most_common(items):
"""
Wanted functionality from Counters (new in Python 2.7).
"""
counts = {}
for i in items:
counts.setdefault(i, 0)
counts[i] += 1
return max(six.iteritems(counts), key=operator.itemgetter(1))
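# Illustrative (added comment): most_common(['a', 'b', 'a']) evaluates to
# ('a', 2), i.e. the most frequent item together with its count.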
def _get_per_location_glob(tasks, outputs, regexes):
"""
Builds a glob listing existing output paths.
Esoteric reverse engineering, but worth it given that (compared to an
equivalent contiguousness guarantee by naive complete() checks)
requests to the filesystem are cut by orders of magnitude, and users
don't even have to retrofit existing tasks anyhow.
"""
paths = [o.path for o in outputs]
# naive, because some matches could be confused by numbers earlier
# in path, e.g. /foo/fifa2000k/bar/2000-12-31/00
matches = [r.search(p) for r, p in zip(regexes, paths)]
for m, p, t in zip(matches, paths, tasks):
if m is None:
raise NotImplementedError("Couldn't deduce datehour representation in output path %r of task %s" % (p, t))
n_groups = len(matches[0].groups())
# the most common position of every group is likely
# to be conclusive hit or miss
positions = [most_common((m.start(i), m.end(i)) for m in matches)[0] for i in range(1, n_groups + 1)]
glob = list(paths[0]) # FIXME sanity check that it's the same for all paths
for start, end in positions:
glob = glob[:start] + ['[0-9]'] * (end - start) + glob[end:]
# chop off the last path item
# (wouldn't need to if `hadoop fs -ls -d` equivalent were available)
return ''.join(glob).rsplit('/', 1)[0]
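# Loose example of the deduction above (added for illustration): for hourly
# outputs such as '/data/2000-12-31/00/result.tsv' the derived glob would look
# roughly like '/data/[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]/[0-9][0-9]',
# i.e. the datetime digits are wildcarded and the final path component is
# chopped off.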
def _get_filesystems_and_globs(datetime_to_task, datetime_to_re):
"""
Yields a (filesystem, glob) tuple per every output location of task.
The task can have one or several FileSystemTarget outputs.
For convenience, the task can be a luigi.WrapperTask,
in which case outputs of all its dependencies are considered.
"""
# probe some scattered datetimes unlikely to all occur in paths, other than by being sincere datetime parameter's representations
# TODO limit to [self.start, self.stop) so messages are less confusing? Done trivially it can kill correctness
sample_datetimes = [datetime(y, m, d, h) for y in range(2000, 2050, 10) for m in range(1, 4) for d in range(5, 8) for h in range(21, 24)]
regexes = [re.compile(datetime_to_re(d)) for d in sample_datetimes]
sample_tasks = [datetime_to_task(d) for d in sample_datetimes]
sample_outputs = [flatten_output(t) for t in sample_tasks]
for o, t in zip(sample_outputs, sample_tasks):
if len(o) != len(sample_outputs[0]):
raise NotImplementedError("Outputs must be consistent over time, sorry; was %r for %r and %r for %r" % (o, t, sample_outputs[0], sample_tasks[0]))
# TODO fall back on requiring last couple of days? to avoid astonishing blocking when changes like that are deployed
# erm, actually it's not hard to test entire hours_back..hours_forward and split into consistent subranges FIXME?
for target in o:
if not isinstance(target, FileSystemTarget):
raise NotImplementedError("Output targets must be instances of FileSystemTarget; was %r for %r" % (target, t))
for o in zip(*sample_outputs): # transposed, so here we're iterating over logical outputs, not datetimes
glob = _get_per_location_glob(sample_tasks, o, regexes)
yield o[0].fs, glob
def _list_existing(filesystem, glob, paths):
"""
Get all the paths that do in fact exist. Returns a set of all existing paths.
Takes a luigi.target.FileSystem object, a str which represents a glob and
a list of strings representing paths.
"""
globs = _constrain_glob(glob, paths)
time_start = time.time()
listing = []
for g in sorted(globs):
logger.debug('Listing %s', g)
if filesystem.exists(g):
listing.extend(filesystem.listdir(g))
logger.debug('%d %s listings took %f s to return %d items',
len(globs), filesystem.__class__.__name__, time.time() - time_start, len(listing))
return set(listing)
def infer_bulk_complete_from_fs(datetimes, datetime_to_task, datetime_to_re):
"""
Efficiently determines missing datetimes by filesystem listing.
The current implementation works for the common case of a task writing
output to a FileSystemTarget whose path is built using strftime with format
like '...%Y...%m...%d...%H...', without custom complete() or exists().
(Eventually Luigi could have ranges of completion as first-class citizens.
Then this listing business could be factored away/be provided for
explicitly in target API or some kind of a history server.)
"""
filesystems_and_globs_by_location = _get_filesystems_and_globs(datetime_to_task, datetime_to_re)
paths_by_datetime = [[o.path for o in flatten_output(datetime_to_task(d))] for d in datetimes]
listing = set()
for (f, g), p in zip(filesystems_and_globs_by_location, zip(*paths_by_datetime)): # transposed, so here we're iterating over logical outputs, not datetimes
listing |= _list_existing(f, g, p)
# quickly learn everything that's missing
missing_datetimes = []
for d, p in zip(datetimes, paths_by_datetime):
if not set(p) <= listing:
missing_datetimes.append(d)
return missing_datetimes
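# For intuition (not part of the original file): a task whose output() is e.g.
#   luigi.LocalTarget(self.date.strftime('/data/%Y-%m-%d/report.csv'))
# fits the pattern described above, so the missing dates can be found with a
# handful of filesystem listings instead of one exists() check per date.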
class RangeDaily(RangeDailyBase):
"""Efficiently produces a contiguous completed range of a daily recurring
task that takes a single DateParameter.
Falls back to infer it from output filesystem listing to facilitate the
common case usage.
Convenient to use even from command line, like:
.. code-block:: console
luigi --module your.module RangeDaily --of YourActualTask --start 2014-01-01
"""
def missing_datetimes(self, finite_datetimes):
try:
cls_with_params = functools.partial(self.of, **self.of_params)
complete_parameters = self.of.bulk_complete.__func__(cls_with_params, map(self.datetime_to_parameter, finite_datetimes))
return set(finite_datetimes) - set(map(self.parameter_to_datetime, complete_parameters))
except NotImplementedError:
return infer_bulk_complete_from_fs(
finite_datetimes,
lambda d: self._instantiate_task_cls(self.datetime_to_parameter(d)),
lambda d: d.strftime('(%Y).*(%m).*(%d)'))
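# Hedged programmatic counterpart to the command line example above (added for
# illustration; YourActualTask is the same placeholder as in the docstring):
#
#   import datetime
#   luigi.build(
#       [RangeDaily(of=YourActualTask, start=datetime.date(2014, 1, 1))],
#       local_scheduler=True)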
class RangeHourly(RangeHourlyBase):
"""Efficiently produces a contiguous completed range of an hourly recurring
task that takes a single DateHourParameter.
Benefits from bulk_complete information to efficiently cover gaps.
Falls back to infer it from output filesystem listing to facilitate the
common case usage.
Convenient to use even from command line, like:
.. code-block:: console
luigi --module your.module RangeHourly --of YourActualTask --start 2014-01-01T00
"""
def missing_datetimes(self, finite_datetimes):
try:
# TODO: Why is there a list() here but not for the RangeDaily??
cls_with_params = functools.partial(self.of, **self.of_params)
complete_parameters = self.of.bulk_complete.__func__(cls_with_params, list(map(self.datetime_to_parameter, finite_datetimes)))
return set(finite_datetimes) - set(map(self.parameter_to_datetime, complete_parameters))
except NotImplementedError:
return infer_bulk_complete_from_fs(
finite_datetimes,
lambda d: self._instantiate_task_cls(self.datetime_to_parameter(d)),
lambda d: d.strftime('(%Y).*(%m).*(%d).*(%H)'))
class RangeByMinutes(RangeByMinutesBase):
"""Efficiently produces a contiguous completed range of an recurring
task every interval minutes that takes a single DateMinuteParameter.
Benefits from bulk_complete information to efficiently cover gaps.
Falls back to infer it from output filesystem listing to facilitate the
common case usage.
Convenient to use even from command line, like:
.. code-block:: console
luigi --module your.module RangeByMinutes --of YourActualTask --start 2014-01-01T0123
"""
def missing_datetimes(self, finite_datetimes):
try:
cls_with_params = functools.partial(self.of, **self.of_params)
complete_parameters = self.of.bulk_complete.__func__(cls_with_params, map(self.datetime_to_parameter, finite_datetimes))
return set(finite_datetimes) - set(map(self.parameter_to_datetime, complete_parameters))
except NotImplementedError:
return infer_bulk_complete_from_fs(
finite_datetimes,
lambda d: self._instantiate_task_cls(self.datetime_to_parameter(d)),
lambda d: d.strftime('(%Y).*(%m).*(%d).*(%H).*(%M)'))
| apache-2.0 | 2,253,821,299,300,542,200 | 41.705234 | 160 | 0.65756 | false |
sunlianqiang/kbengine | kbe/src/lib/python/Lib/idlelib/IOBinding.py | 70 | 19745 | import os
import types
import shlex
import sys
import codecs
import tempfile
import tkinter.filedialog as tkFileDialog
import tkinter.messagebox as tkMessageBox
import re
from tkinter import *
from tkinter.simpledialog import askstring
from idlelib.configHandler import idleConf
from codecs import BOM_UTF8
# Try setting the locale, so that we can find out
# what encoding to use
try:
import locale
locale.setlocale(locale.LC_CTYPE, "")
except (ImportError, locale.Error):
pass
# Encoding for file names
filesystemencoding = sys.getfilesystemencoding() ### currently unused
locale_encoding = 'ascii'
if sys.platform == 'win32':
# On Windows, we could use "mbcs". However, to give the user
# a portable encoding name, we need to find the code page
try:
locale_encoding = locale.getdefaultlocale()[1]
codecs.lookup(locale_encoding)
except LookupError:
pass
else:
try:
# Different things can fail here: the locale module may not be
# loaded, it may not offer nl_langinfo, or CODESET, or the
# resulting codeset may be unknown to Python. We ignore all
# these problems, falling back to ASCII
locale_encoding = locale.nl_langinfo(locale.CODESET)
        if locale_encoding is None or locale_encoding == '':
# situation occurs on Mac OS X
locale_encoding = 'ascii'
codecs.lookup(locale_encoding)
except (NameError, AttributeError, LookupError):
# Try getdefaultlocale: it parses environment variables,
# which may give a clue. Unfortunately, getdefaultlocale has
# bugs that can cause ValueError.
try:
locale_encoding = locale.getdefaultlocale()[1]
            if locale_encoding is None or locale_encoding == '':
# situation occurs on Mac OS X
locale_encoding = 'ascii'
codecs.lookup(locale_encoding)
except (ValueError, LookupError):
pass
locale_encoding = locale_encoding.lower()
encoding = locale_encoding ### KBK 07Sep07 This is used all over IDLE, check!
### 'encoding' is used below in encode(), check!
coding_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)
blank_re = re.compile(r'^[ \t\f]*(?:[#\r\n]|$)', re.ASCII)
def coding_spec(data):
"""Return the encoding declaration according to PEP 263.
When checking encoded data, only the first two lines should be passed
in to avoid a UnicodeDecodeError if the rest of the data is not unicode.
The first two lines would contain the encoding specification.
Raise a LookupError if the encoding is declared but unknown.
"""
if isinstance(data, bytes):
# This encoding might be wrong. However, the coding
# spec must be ASCII-only, so any non-ASCII characters
# around here will be ignored. Decoding to Latin-1 should
# never fail (except for memory outage)
lines = data.decode('iso-8859-1')
else:
lines = data
# consider only the first two lines
if '\n' in lines:
lst = lines.split('\n', 2)[:2]
elif '\r' in lines:
lst = lines.split('\r', 2)[:2]
else:
lst = [lines]
for line in lst:
match = coding_re.match(line)
if match is not None:
break
if not blank_re.match(line):
return None
else:
return None
name = match.group(1)
try:
codecs.lookup(name)
except LookupError:
# The standard encoding error does not indicate the encoding
raise LookupError("Unknown encoding: "+name)
return name
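# Doctest-style illustration (comment added for clarity, not in the original):
#   >>> coding_spec("#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n")
#   'utf-8'
#   >>> coding_spec("x = 1\n") is None
#   True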
class IOBinding:
def __init__(self, editwin):
self.editwin = editwin
self.text = editwin.text
self.__id_open = self.text.bind("<<open-window-from-file>>", self.open)
self.__id_save = self.text.bind("<<save-window>>", self.save)
self.__id_saveas = self.text.bind("<<save-window-as-file>>",
self.save_as)
self.__id_savecopy = self.text.bind("<<save-copy-of-window-as-file>>",
self.save_a_copy)
self.fileencoding = None
self.__id_print = self.text.bind("<<print-window>>", self.print_window)
def close(self):
# Undo command bindings
self.text.unbind("<<open-window-from-file>>", self.__id_open)
self.text.unbind("<<save-window>>", self.__id_save)
self.text.unbind("<<save-window-as-file>>",self.__id_saveas)
self.text.unbind("<<save-copy-of-window-as-file>>", self.__id_savecopy)
self.text.unbind("<<print-window>>", self.__id_print)
# Break cycles
self.editwin = None
self.text = None
self.filename_change_hook = None
def get_saved(self):
return self.editwin.get_saved()
def set_saved(self, flag):
self.editwin.set_saved(flag)
def reset_undo(self):
self.editwin.reset_undo()
filename_change_hook = None
def set_filename_change_hook(self, hook):
self.filename_change_hook = hook
filename = None
dirname = None
def set_filename(self, filename):
if filename and os.path.isdir(filename):
self.filename = None
self.dirname = filename
else:
self.filename = filename
self.dirname = None
self.set_saved(1)
if self.filename_change_hook:
self.filename_change_hook()
def open(self, event=None, editFile=None):
flist = self.editwin.flist
# Save in case parent window is closed (ie, during askopenfile()).
if flist:
if not editFile:
filename = self.askopenfile()
else:
filename=editFile
if filename:
# If editFile is valid and already open, flist.open will
# shift focus to its existing window.
# If the current window exists and is a fresh unnamed,
# unmodified editor window (not an interpreter shell),
# pass self.loadfile to flist.open so it will load the file
# in the current window (if the file is not already open)
# instead of a new window.
if (self.editwin and
not getattr(self.editwin, 'interp', None) and
not self.filename and
self.get_saved()):
flist.open(filename, self.loadfile)
else:
flist.open(filename)
else:
if self.text:
self.text.focus_set()
return "break"
# Code for use outside IDLE:
if self.get_saved():
reply = self.maybesave()
if reply == "cancel":
self.text.focus_set()
return "break"
if not editFile:
filename = self.askopenfile()
else:
filename=editFile
if filename:
self.loadfile(filename)
else:
self.text.focus_set()
return "break"
eol = r"(\r\n)|\n|\r" # \r\n (Windows), \n (UNIX), or \r (Mac)
eol_re = re.compile(eol)
eol_convention = os.linesep # default
def loadfile(self, filename):
try:
# open the file in binary mode so that we can handle
# end-of-line convention ourselves.
with open(filename, 'rb') as f:
two_lines = f.readline() + f.readline()
f.seek(0)
bytes = f.read()
except OSError as msg:
tkMessageBox.showerror("I/O Error", str(msg), master=self.text)
return False
chars, converted = self._decode(two_lines, bytes)
if chars is None:
tkMessageBox.showerror("Decoding Error",
"File %s\nFailed to Decode" % filename,
parent=self.text)
return False
# We now convert all end-of-lines to '\n's
firsteol = self.eol_re.search(chars)
if firsteol:
self.eol_convention = firsteol.group(0)
chars = self.eol_re.sub(r"\n", chars)
self.text.delete("1.0", "end")
self.set_filename(None)
self.text.insert("1.0", chars)
self.reset_undo()
self.set_filename(filename)
if converted:
# We need to save the conversion results first
# before being able to execute the code
self.set_saved(False)
self.text.mark_set("insert", "1.0")
self.text.yview("insert")
self.updaterecentfileslist(filename)
return True
def _decode(self, two_lines, bytes):
"Create a Unicode string."
chars = None
# Check presence of a UTF-8 signature first
if bytes.startswith(BOM_UTF8):
try:
chars = bytes[3:].decode("utf-8")
except UnicodeDecodeError:
# has UTF-8 signature, but fails to decode...
return None, False
else:
# Indicates that this file originally had a BOM
self.fileencoding = 'BOM'
return chars, False
# Next look for coding specification
try:
enc = coding_spec(two_lines)
except LookupError as name:
tkMessageBox.showerror(
title="Error loading the file",
message="The encoding '%s' is not known to this Python "\
"installation. The file may not display correctly" % name,
master = self.text)
enc = None
except UnicodeDecodeError:
return None, False
if enc:
try:
chars = str(bytes, enc)
self.fileencoding = enc
return chars, False
except UnicodeDecodeError:
pass
# Try ascii:
try:
chars = str(bytes, 'ascii')
self.fileencoding = None
return chars, False
except UnicodeDecodeError:
pass
# Try utf-8:
try:
chars = str(bytes, 'utf-8')
self.fileencoding = 'utf-8'
return chars, False
except UnicodeDecodeError:
pass
# Finally, try the locale's encoding. This is deprecated;
# the user should declare a non-ASCII encoding
try:
# Wait for the editor window to appear
self.editwin.text.update()
enc = askstring(
"Specify file encoding",
"The file's encoding is invalid for Python 3.x.\n"
"IDLE will convert it to UTF-8.\n"
"What is the current encoding of the file?",
initialvalue = locale_encoding,
parent = self.editwin.text)
if enc:
chars = str(bytes, enc)
self.fileencoding = None
return chars, True
except (UnicodeDecodeError, LookupError):
pass
return None, False # None on failure
def maybesave(self):
if self.get_saved():
return "yes"
message = "Do you want to save %s before closing?" % (
self.filename or "this untitled document")
confirm = tkMessageBox.askyesnocancel(
title="Save On Close",
message=message,
default=tkMessageBox.YES,
master=self.text)
if confirm:
reply = "yes"
self.save(None)
if not self.get_saved():
reply = "cancel"
elif confirm is None:
reply = "cancel"
else:
reply = "no"
self.text.focus_set()
return reply
def save(self, event):
if not self.filename:
self.save_as(event)
else:
if self.writefile(self.filename):
self.set_saved(True)
try:
self.editwin.store_file_breaks()
except AttributeError: # may be a PyShell
pass
self.text.focus_set()
return "break"
def save_as(self, event):
filename = self.asksavefile()
if filename:
if self.writefile(filename):
self.set_filename(filename)
self.set_saved(1)
try:
self.editwin.store_file_breaks()
except AttributeError:
pass
self.text.focus_set()
self.updaterecentfileslist(filename)
return "break"
def save_a_copy(self, event):
filename = self.asksavefile()
if filename:
self.writefile(filename)
self.text.focus_set()
self.updaterecentfileslist(filename)
return "break"
def writefile(self, filename):
self.fixlastline()
text = self.text.get("1.0", "end-1c")
if self.eol_convention != "\n":
text = text.replace("\n", self.eol_convention)
chars = self.encode(text)
try:
with open(filename, "wb") as f:
f.write(chars)
return True
except OSError as msg:
tkMessageBox.showerror("I/O Error", str(msg),
master=self.text)
return False
def encode(self, chars):
if isinstance(chars, bytes):
# This is either plain ASCII, or Tk was returning mixed-encoding
# text to us. Don't try to guess further.
return chars
# Preserve a BOM that might have been present on opening
if self.fileencoding == 'BOM':
return BOM_UTF8 + chars.encode("utf-8")
# See whether there is anything non-ASCII in it.
# If not, no need to figure out the encoding.
try:
return chars.encode('ascii')
except UnicodeError:
pass
# Check if there is an encoding declared
try:
# a string, let coding_spec slice it to the first two lines
enc = coding_spec(chars)
failed = None
except LookupError as msg:
failed = msg
enc = None
else:
if not enc:
# PEP 3120: default source encoding is UTF-8
enc = 'utf-8'
if enc:
try:
return chars.encode(enc)
except UnicodeError:
failed = "Invalid encoding '%s'" % enc
tkMessageBox.showerror(
"I/O Error",
"%s.\nSaving as UTF-8" % failed,
master = self.text)
# Fallback: save as UTF-8, with BOM - ignoring the incorrect
# declared encoding
return BOM_UTF8 + chars.encode("utf-8")
def fixlastline(self):
c = self.text.get("end-2c")
if c != '\n':
self.text.insert("end-1c", "\n")
def print_window(self, event):
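        # Write the buffer to a temporary file if it has unsaved changes, then
        # pipe that file to the configured platform print command and surface
        # any command output in an error dialog.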
confirm = tkMessageBox.askokcancel(
title="Print",
message="Print to Default Printer",
default=tkMessageBox.OK,
master=self.text)
if not confirm:
self.text.focus_set()
return "break"
tempfilename = None
saved = self.get_saved()
if saved:
filename = self.filename
# shell undo is reset after every prompt, looks saved, probably isn't
if not saved or filename is None:
(tfd, tempfilename) = tempfile.mkstemp(prefix='IDLE_tmp_')
filename = tempfilename
os.close(tfd)
if not self.writefile(tempfilename):
os.unlink(tempfilename)
return "break"
platform = os.name
printPlatform = True
if platform == 'posix': #posix platform
command = idleConf.GetOption('main','General',
'print-command-posix')
command = command + " 2>&1"
elif platform == 'nt': #win32 platform
command = idleConf.GetOption('main','General','print-command-win')
else: #no printing for this platform
printPlatform = False
if printPlatform: #we can try to print for this platform
command = command % shlex.quote(filename)
pipe = os.popen(command, "r")
# things can get ugly on NT if there is no printer available.
output = pipe.read().strip()
status = pipe.close()
if status:
output = "Printing failed (exit status 0x%x)\n" % \
status + output
if output:
output = "Printing command: %s\n" % repr(command) + output
tkMessageBox.showerror("Print status", output, master=self.text)
else: #no printing for this platform
message = "Printing is not enabled for this platform: %s" % platform
tkMessageBox.showinfo("Print status", message, master=self.text)
if tempfilename:
os.unlink(tempfilename)
return "break"
opendialog = None
savedialog = None
filetypes = [
("Python files", "*.py *.pyw", "TEXT"),
("Text files", "*.txt", "TEXT"),
("All files", "*"),
]
defaultextension = '.py' if sys.platform == 'darwin' else ''
def askopenfile(self):
dir, base = self.defaultfilename("open")
if not self.opendialog:
self.opendialog = tkFileDialog.Open(master=self.text,
filetypes=self.filetypes)
filename = self.opendialog.show(initialdir=dir, initialfile=base)
return filename
def defaultfilename(self, mode="open"):
if self.filename:
return os.path.split(self.filename)
elif self.dirname:
return self.dirname, ""
else:
try:
pwd = os.getcwd()
except OSError:
pwd = ""
return pwd, ""
def asksavefile(self):
dir, base = self.defaultfilename("save")
if not self.savedialog:
self.savedialog = tkFileDialog.SaveAs(
master=self.text,
filetypes=self.filetypes,
defaultextension=self.defaultextension)
filename = self.savedialog.show(initialdir=dir, initialfile=base)
return filename
def updaterecentfileslist(self,filename):
"Update recent file list on all editor windows"
if self.editwin.flist:
self.editwin.update_recent_files_list(filename)
def _io_binding(parent):
root = Tk()
root.title("Test IOBinding")
width, height, x, y = list(map(int, re.split('[x+]', parent.geometry())))
root.geometry("+%d+%d"%(x, y + 150))
class MyEditWin:
def __init__(self, text):
self.text = text
self.flist = None
self.text.bind("<Control-o>", self.open)
self.text.bind("<Control-s>", self.save)
def get_saved(self): return 0
def set_saved(self, flag): pass
def reset_undo(self): pass
def open(self, event):
self.text.event_generate("<<open-window-from-file>>")
def save(self, event):
self.text.event_generate("<<save-window>>")
text = Text(root)
text.pack()
text.focus_set()
editwin = MyEditWin(text)
io = IOBinding(editwin)
if __name__ == "__main__":
from idlelib.idle_test.htest import run
run(_io_binding)
| lgpl-3.0 | -3,723,960,921,664,250,000 | 34.576577 | 80 | 0.54905 | false |
ilexius/odoo | addons/website_project_issue/tests/test_access_rights.py | 45 | 6654 | # -*- coding: utf-8 -*-
from openerp.addons.project.tests.test_access_rights import TestPortalProjectBase
from openerp.exceptions import AccessError
from openerp.tools import mute_logger
class TestPortalProjectBase(TestPortalProjectBase):
def setUp(self):
super(TestPortalProjectBase, self).setUp()
Issue = self.env['project.issue'].with_context({'mail_create_nolog': True})
self.issue_1 = Issue.create({
'name': 'Test1', 'user_id': False, 'project_id': self.project_pigs.id})
self.issue_2 = Issue.create({
'name': 'Test2', 'user_id': False, 'project_id': self.project_pigs.id})
self.issue_3 = Issue.create({
'name': 'Test3', 'user_id': False, 'project_id': self.project_pigs.id})
self.issue_4 = Issue.create({
'name': 'Test4', 'user_id': self.user_projectuser.id, 'project_id': self.project_pigs.id})
self.issue_5 = Issue.create({
'name': 'Test5', 'user_id': self.user_portal.id, 'project_id': self.project_pigs.id})
self.issue_6 = Issue.create({
'name': 'Test6', 'user_id': self.user_public.id, 'project_id': self.project_pigs.id})
class TestPortalIssue(TestPortalProjectBase):
@mute_logger('openerp.addons.base.ir.ir_model', 'openerp.models')
def test_00_project_access_rights(self):
""" Test basic project access rights, for project and portal_project """
pigs_id = self.project_pigs.id
Issue = self.env['project.issue']
# ----------------------------------------
# CASE1: portal project
# ----------------------------------------
self.project_pigs.write({'privacy_visibility': 'portal'})
# Do: Alfred reads project -> ok (employee ok public)
# Test: all project issues visible
issues = Issue.sudo(self.user_projectuser.id).search([('project_id', '=', pigs_id)])
test_issue_ids = set([self.issue_1.id, self.issue_2.id, self.issue_3.id, self.issue_4.id, self.issue_5.id, self.issue_6.id])
self.assertEqual(set(issues.ids), test_issue_ids,
'access rights: project user cannot see all issues of a portal project')
# Do: Bert reads project -> crash, no group
# Test: no project issue searchable
self.assertRaises(AccessError, Issue.sudo(self.user_noone.id).search, [('project_id', '=', pigs_id)])
# Data: issue follower
self.issue_1.sudo(self.user_projectuser.id).message_subscribe_users(user_ids=[self.user_portal.id])
self.issue_3.sudo(self.user_projectuser.id).message_subscribe_users(user_ids=[self.user_portal.id])
# Do: Chell reads project -> ok (portal ok public)
# Test: only followed project issues visible + assigned
issues = Issue.sudo(self.user_portal.id).search([('project_id', '=', pigs_id)])
self.assertEqual(set(issues.ids), set([self.issue_1.id, self.issue_3.id, self.issue_5.id]),
'access rights: portal user should see the followed issues of a portal project')
# Data: issue follower cleaning
self.issue_1.sudo(self.user_projectuser.id).message_unsubscribe_users(user_ids=[self.user_portal.id])
self.issue_3.sudo(self.user_projectuser.id).message_unsubscribe_users(user_ids=[self.user_portal.id])
# ----------------------------------------
# CASE2: employee project
# ----------------------------------------
self.project_pigs.write({'privacy_visibility': 'employees'})
# Do: Alfred reads project -> ok (employee ok employee)
# Test: all project issues visible
issues = Issue.sudo(self.user_projectuser.id).search([('project_id', '=', pigs_id)])
self.assertEqual(set(issues.ids), set([self.issue_1.id, self.issue_2.id, self.issue_3.id,
self.issue_4.id, self.issue_5.id, self.issue_6.id]),
'access rights: project user cannot see all issues of an employees project')
# Do: Chell reads project -> ko (portal ko employee)
# Test: no project issue visible + assigned
issues = Issue.sudo(self.user_portal.id).search([('project_id', '=', pigs_id)])
self.assertFalse(issues.ids, 'access rights: portal user should not see issues of an employees project, even if assigned')
# ----------------------------------------
# CASE3: followers project
# ----------------------------------------
self.project_pigs.write({'privacy_visibility': 'followers'})
# Do: Alfred reads project -> ko (employee ko followers)
# Test: no project issue visible
issues = Issue.sudo(self.user_projectuser.id).search([('project_id', '=', pigs_id)])
self.assertEqual(set(issues.ids), set([self.issue_4.id]),
'access rights: employee user should not see issues of a not-followed followers project, only assigned')
# Do: Chell reads project -> ko (portal ko employee)
# Test: no project issue visible
issues = Issue.sudo(self.user_portal.id).search([('project_id', '=', pigs_id)])
self.assertEqual(set(issues.ids), set([self.issue_5.id]),
'access rights: portal user should not see issues of a not-followed followers project, only assigned')
# Data: subscribe Alfred, Chell and Donovan as follower
self.project_pigs.message_subscribe_users(user_ids=[self.user_projectuser.id, self.user_portal.id, self.user_public.id])
self.issue_1.sudo(self.user_projectmanager.id).message_subscribe_users(user_ids=[self.user_portal.id, self.user_projectuser.id])
self.issue_3.sudo(self.user_projectmanager.id).message_subscribe_users(user_ids=[self.user_portal.id, self.user_projectuser.id])
# Do: Alfred reads project -> ok (follower ok followers)
# Test: followed + assigned issues visible
issues = Issue.sudo(self.user_projectuser.id).search([('project_id', '=', pigs_id)])
self.assertEqual(set(issues.ids), set([self.issue_1.id, self.issue_3.id, self.issue_4.id]),
'access rights: employee user should not see followed + assigned issues of a follower project')
# Do: Chell reads project -> ok (follower ok follower)
# Test: followed + assigned issues visible
issues = Issue.sudo(self.user_portal.id).search([('project_id', '=', pigs_id)])
self.assertEqual(set(issues.ids), set([self.issue_1.id, self.issue_3.id, self.issue_5.id]),
'access rights: employee user should not see followed + assigned issues of a follower project')
| gpl-3.0 | -2,707,740,358,732,591,600 | 57.368421 | 136 | 0.615269 | false |
lightcn/odoo | addons/base_report_designer/plugin/openerp_report_designer/bin/script/Expression.py | 384 | 4146 | #########################################################################
#
# Copyright (c) 2003-2004 Danny Brewer [email protected]
# Copyright (C) 2004-2010 OpenERP SA (<http://openerp.com>).
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# See: http://www.gnu.org/licenses/lgpl.html
#
#############################################################################
import uno
import string
import unohelper
import xmlrpclib
from com.sun.star.task import XJobExecutor
if __name__<>"package":
from lib.gui import *
from lib.error import ErrorDialog
from lib.functions import *
database="test"
uid = 3
class Expression(unohelper.Base, XJobExecutor ):
def __init__(self, sExpression="", sName="", bFromModify=False):
LoginTest()
if not loginstatus and __name__=="package":
exit(1)
self.win = DBModalDialog(60, 50, 180, 65, "Expression Builder")
self.win.addFixedText("lblExpression",17 , 10, 35, 15, "Expression :")
self.win.addEdit("txtExpression", -5, 5, 123, 15)
self.win.addFixedText("lblName", 2, 30, 50, 15, "Displayed Name :")
self.win.addEdit("txtName", -5, 25, 123, 15)
self.win.addButton( "btnOK", -5, -5, 40, 15, "OK", actionListenerProc = self.btnOk_clicked )
self.win.addButton( "btnCancel", -5 - 40 -5, -5, 40, 15, "Cancel", actionListenerProc = self.btnCancel_clicked )
self.bModify=bFromModify
if self.bModify==True:
self.win.setEditText("txtExpression",sExpression)
self.win.setEditText("txtName",sName)
self.win.doModalDialog("",None)
def btnOk_clicked(self, oActionEvent):
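        # Build a "[[ expression ]]" drop-down text field from the dialog values
        # and insert it at the view cursor (plain text or table cell), or update
        # the currently selected field when the dialog was opened in modify mode.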
desktop=getDesktop()
doc = desktop.getCurrentComponent()
text = doc.Text
cursor = doc.getCurrentController().getViewCursor()
if self.bModify==True:
oCurObj=cursor.TextField
sKey=u""+self.win.getEditText("txtName")
sValue=u"[[ " + self.win.getEditText("txtExpression") + " ]]"
oCurObj.Items = (sKey,sValue)
oCurObj.update()
self.win.endExecute()
else:
oInputList = doc.createInstance("com.sun.star.text.TextField.DropDown")
if self.win.getEditText("txtName")!="" and self.win.getEditText("txtExpression")!="":
sKey=u""+self.win.getEditText("txtName")
sValue=u"[[ " + self.win.getEditText("txtExpression") + " ]]"
if cursor.TextTable==None:
oInputList.Items = (sKey,sValue)
text.insertTextContent(cursor,oInputList,False)
else:
oTable = cursor.TextTable
oCurCell = cursor.Cell
tableText = oTable.getCellByName( oCurCell.CellName )
oInputList.Items = (sKey,sValue)
tableText.insertTextContent(cursor,oInputList,False)
self.win.endExecute()
else:
ErrorDialog("Please fill appropriate data in Name field or in Expression field.")
def btnCancel_clicked(self, oActionEvent):
self.win.endExecute()
if __name__<>"package" and __name__=="__main__":
Expression()
elif __name__=="package":
g_ImplementationHelper.addImplementation( Expression, "org.openoffice.openerp.report.expression", ("com.sun.star.task.Job",),)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 5,914,507,552,340,006,000 | 43.106383 | 130 | 0.614086 | false |
ray-project/ray | python/ray/autoscaler/_private/aliyun/node_provider.py | 1 | 12663 | import random
import threading
from collections import defaultdict
import logging
import time
from typing import Any, Dict, List, Optional
from ray.autoscaler.node_provider import NodeProvider
from ray.autoscaler.tags import TAG_RAY_CLUSTER_NAME, TAG_RAY_NODE_NAME, \
TAG_RAY_LAUNCH_CONFIG, TAG_RAY_NODE_KIND, \
TAG_RAY_USER_NODE_TYPE, TAG_RAY_NODE_STATUS
from ray.autoscaler._private.constants import BOTO_MAX_RETRIES
from ray.autoscaler._private.log_timer import LogTimer
from ray.autoscaler._private.cli_logger import cli_logger
from ray.autoscaler._private.aliyun.utils import AcsClient
from ray.autoscaler._private.aliyun.config import PENDING, STOPPED, \
STOPPING, RUNNING, bootstrap_aliyun
logger = logging.getLogger(__name__)
TAG_BATCH_DELAY = 1
STOPPING_NODE_DELAY = 1
class AliyunNodeProvider(NodeProvider):
def __init__(self, provider_config, cluster_name):
NodeProvider.__init__(self, provider_config, cluster_name)
self.cache_stopped_nodes = provider_config.get("cache_stopped_nodes",
True)
self.acs = AcsClient(
access_key=provider_config["access_key"],
access_key_secret=provider_config["access_key_secret"],
region_id=provider_config["region"],
max_retries=BOTO_MAX_RETRIES,
)
# Try availability zones round-robin, starting from random offset
self.subnet_idx = random.randint(0, 100)
# Tags that we believe to actually be on the node.
self.tag_cache = {}
# Tags that we will soon upload.
self.tag_cache_pending = defaultdict(dict)
# Number of threads waiting for a batched tag update.
self.batch_thread_count = 0
self.batch_update_done = threading.Event()
self.batch_update_done.set()
self.ready_for_new_batch = threading.Event()
self.ready_for_new_batch.set()
self.tag_cache_lock = threading.Lock()
self.count_lock = threading.Lock()
# Cache of node objects from the last nodes() call. This avoids
# excessive DescribeInstances requests.
self.cached_nodes = {}
def non_terminated_nodes(self, tag_filters: Dict[str, str]) -> List[str]:
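        # List instances tagged with this cluster name plus the caller's tag
        # filters; only RUNNING or PENDING instances are returned, and the
        # node cache is refreshed as a side effect.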
tags = [
{
"Key": TAG_RAY_CLUSTER_NAME,
"Value": self.cluster_name,
},
]
for k, v in tag_filters.items():
tags.append({
"Key": k,
"Value": v,
})
instances = self.acs.describe_instances(tags=tags)
non_terminated_instance = []
for instance in instances:
if instance.get("Status") == RUNNING or instance.get(
"Status") == PENDING:
non_terminated_instance.append(instance.get("InstanceId"))
self.cached_nodes[instance.get("InstanceId")] = instance
return non_terminated_instance
def is_running(self, node_id: str) -> bool:
instances = self.acs.describe_instances(instance_ids=[node_id])
if instances is not None:
instance = instances[0]
return instance.get("Status") == "Running"
cli_logger.error("Invalid node id: %s", node_id)
return False
def is_terminated(self, node_id: str) -> bool:
instances = self.acs.describe_instances(instance_ids=[node_id])
if instances is not None:
assert len(instances) == 1
instance = instances[0]
return instance.get("Status") == "Stopped"
cli_logger.error("Invalid node id: %s", node_id)
return False
def node_tags(self, node_id: str) -> Dict[str, str]:
instances = self.acs.describe_instances(instance_ids=[node_id])
if instances is not None:
assert len(instances) == 1
instance = instances[0]
if instance.get("Tags") is not None:
node_tags = dict()
for tag in instance.get("Tags").get("Tag"):
node_tags[tag.get("TagKey")] = tag.get("TagValue")
return node_tags
return dict()
def external_ip(self, node_id: str) -> str:
while True:
instances = self.acs.describe_instances(instance_ids=[node_id])
if instances is not None:
assert len(instances)
instance = instances[0]
if instance.get("PublicIpAddress") is not None \
and instance.get(
"PublicIpAddress").get("IpAddress") is not None:
if len(instance.get("PublicIpAddress").get(
"IpAddress")) > 0:
return instance.get("PublicIpAddress").get(
"IpAddress")[0]
cli_logger.error(
"PublicIpAddress attribute is not exist. %s" % instance)
time.sleep(STOPPING_NODE_DELAY)
def internal_ip(self, node_id: str) -> str:
while True:
instances = self.acs.describe_instances(instance_ids=[node_id])
if instances is not None:
assert len(instances) == 1
instance = instances[0]
if instance.get("VpcAttributes") is not None and instance.get(
"VpcAttributes").get(
"PrivateIpAddress") is not None and len(
instance.get("VpcAttributes").get(
"PrivateIpAddress").get("IpAddress")) > 0:
return instance.get("VpcAttributes").get(
"PrivateIpAddress").get("IpAddress")[0]
cli_logger.error(
"InnerIpAddress attribute is not exist. %s" % instance)
time.sleep(STOPPING_NODE_DELAY)
def set_node_tags(self, node_id: str, tags: Dict[str, str]) -> None:
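        # Tag writes are batched: the first caller in a window becomes the
        # batching thread, sleeps TAG_BATCH_DELAY, then flushes all pending
        # updates at once while the other callers wait on batch_update_done.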
is_batching_thread = False
with self.tag_cache_lock:
if not self.tag_cache_pending:
is_batching_thread = True
# Wait for threads in the last batch to exit
self.ready_for_new_batch.wait()
self.ready_for_new_batch.clear()
self.batch_update_done.clear()
self.tag_cache_pending[node_id].update(tags)
if is_batching_thread:
time.sleep(TAG_BATCH_DELAY)
with self.tag_cache_lock:
self._update_node_tags()
self.batch_update_done.set()
with self.count_lock:
self.batch_thread_count += 1
self.batch_update_done.wait()
with self.count_lock:
self.batch_thread_count -= 1
if self.batch_thread_count == 0:
self.ready_for_new_batch.set()
def _update_node_tags(self):
batch_updates = defaultdict(list)
for node_id, tags in self.tag_cache_pending.items():
for x in tags.items():
batch_updates[x].append(node_id)
self.tag_cache[node_id] = tags
self.tag_cache_pending = defaultdict(dict)
self._create_tags(batch_updates)
def _create_tags(self, batch_updates):
for (k, v), node_ids in batch_updates.items():
m = "Set tag {}={} on {}".format(k, v, node_ids)
with LogTimer("AliyunNodeProvider: {}".format(m)):
if k == TAG_RAY_NODE_NAME:
k = "Name"
self.acs.tag_resource(node_ids, [{"Key": k, "Value": v}])
def create_node(self, node_config: Dict[str, Any], tags: Dict[str, str],
count: int) -> Optional[Dict[str, Any]]:
filter_tags = [{
"Key": TAG_RAY_CLUSTER_NAME,
"Value": self.cluster_name,
}, {
"Key": TAG_RAY_NODE_KIND,
"Value": tags[TAG_RAY_NODE_KIND]
}, {
"Key": TAG_RAY_USER_NODE_TYPE,
"Value": tags[TAG_RAY_USER_NODE_TYPE]
}, {
"Key": TAG_RAY_LAUNCH_CONFIG,
"Value": tags[TAG_RAY_LAUNCH_CONFIG]
}, {
"Key": TAG_RAY_NODE_NAME,
"Value": tags[TAG_RAY_NODE_NAME]
}]
reused_nodes_dict = {}
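        # With cache_stopped_nodes enabled, restart matching stopped (or
        # stopping) instances and re-tag them before launching new instances.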
if self.cache_stopped_nodes:
reuse_nodes_candidate = self.acs.describe_instances(
tags=filter_tags)
if reuse_nodes_candidate:
with cli_logger.group("Stopping instances to reuse"):
reuse_node_ids = []
for node in reuse_nodes_candidate:
node_id = node.get("InstanceId")
status = node.get("Status")
if status != STOPPING and status != STOPPED:
continue
if status == STOPPING:
# wait for node stopped
while self.acs.describe_instances(
instance_ids=[node_id])[0].get(
"Status") == STOPPING:
logging.info("wait for %s stop" % node_id)
time.sleep(STOPPING_NODE_DELAY)
# logger.info("reuse %s" % node_id)
reuse_node_ids.append(node_id)
reused_nodes_dict[node.get("InstanceId")] = node
self.acs.start_instance(node_id)
self.tag_cache[node_id] = node.get("Tags")
self.set_node_tags(node_id, tags)
if len(reuse_node_ids) == count:
break
count -= len(reuse_node_ids)
created_nodes_dict = {}
if count > 0:
filter_tags.append({
"Key": TAG_RAY_NODE_STATUS,
"Value": tags[TAG_RAY_NODE_STATUS]
})
instance_id_sets = self.acs.run_instances(
instance_type=node_config["InstanceType"],
image_id=node_config["ImageId"],
tags=filter_tags,
amount=count,
vswitch_id=self.provider_config["v_switch_id"],
security_group_id=self.provider_config["security_group_id"],
key_pair_name=self.provider_config["key_name"])
instances = self.acs.describe_instances(
instance_ids=instance_id_sets)
if instances is not None:
for instance in instances:
created_nodes_dict[instance.get("InstanceId")] = instance
all_created_nodes = reused_nodes_dict
all_created_nodes.update(created_nodes_dict)
return all_created_nodes
def terminate_node(self, node_id: str) -> None:
logger.info("terminate node: %s" % node_id)
if self.cache_stopped_nodes:
            logger.info(
                "Stopping instance {} (to terminate instead, "
                "set `cache_stopped_nodes: False` "
                "under `provider` in the cluster configuration)".format(
                    node_id))
self.acs.stop_instance(node_id)
else:
self.acs.delete_instance(node_id)
def terminate_nodes(self, node_ids: List[str]) -> None:
if not node_ids:
return
if self.cache_stopped_nodes:
logger.info(
"Stopping instances {} (to terminate instead, "
"set `cache_stopped_nodes: False` "
"under `provider` in the cluster configuration)".format(
node_ids))
self.acs.stop_instances(node_ids)
else:
self.acs.delete_instances(node_ids)
def _get_node(self, node_id):
"""Refresh and get info for this node, updating the cache."""
self.non_terminated_nodes({}) # Side effect: updates cache
if node_id in self.cached_nodes:
return self.cached_nodes[node_id]
# Node not in {pending, running} -- retry with a point query. This
# usually means the node was recently preempted or terminated.
matches = self.acs.describe_instances(instance_ids=[node_id])
assert len(matches) == 1, "Invalid instance id {}".format(node_id)
return matches[0]
def _get_cached_node(self, node_id):
"""Return node info from cache if possible, otherwise fetches it."""
if node_id in self.cached_nodes:
return self.cached_nodes[node_id]
return self._get_node(node_id)
@staticmethod
def bootstrap_config(cluster_config):
return bootstrap_aliyun(cluster_config)
| apache-2.0 | -5,249,107,222,568,656,000 | 39.328025 | 78 | 0.5445 | false |
dkubiak789/odoo | addons/payment_ogone/data/ogone.py | 395 | 30321 | # -*- coding: utf-8 -*-
OGONE_ERROR_MAP = {
'0020001001': "Authorization failed, please retry",
'0020001002': "Authorization failed, please retry",
'0020001003': "Authorization failed, please retry",
'0020001004': "Authorization failed, please retry",
'0020001005': "Authorization failed, please retry",
'0020001006': "Authorization failed, please retry",
'0020001007': "Authorization failed, please retry",
'0020001008': "Authorization failed, please retry",
'0020001009': "Authorization failed, please retry",
'0020001010': "Authorization failed, please retry",
'0030001999': "Our payment system is currently under maintenance, please try later",
'0050001005': "Expiry date error",
'0050001007': "Requested Operation code not allowed",
'0050001008': "Invalid delay value",
'0050001010': "Input date in invalid format",
'0050001013': "Unable to parse socket input stream",
'0050001014': "Error in parsing stream content",
'0050001015': "Currency error",
'0050001016': "Transaction still posted at end of wait",
'0050001017': "Sync value not compatible with delay value",
'0050001019': "Transaction duplicate of a pre-existing transaction",
'0050001020': "Acceptation code empty while required for the transaction",
'0050001024': "Maintenance acquirer differs from original transaction acquirer",
'0050001025': "Maintenance merchant differs from original transaction merchant",
'0050001028': "Maintenance operation not accurate for the original transaction",
'0050001031': "Host application unknown for the transaction",
'0050001032': "Unable to perform requested operation with requested currency",
'0050001033': "Maintenance card number differs from original transaction card number",
'0050001034': "Operation code not allowed",
'0050001035': "Exception occurred in socket input stream treatment",
    '0050001036': "Card length does not correspond to an acceptable value for the brand",
'0050001068': "A technical problem occurred, please contact helpdesk",
'0050001069': "Invalid check for CardID and Brand",
'0050001070': "A technical problem occurred, please contact helpdesk",
'0050001116': "Unknown origin IP",
'0050001117': "No origin IP detected",
'0050001118': "Merchant configuration problem, please contact support",
'10001001': "Communication failure",
'10001002': "Communication failure",
'10001003': "Communication failure",
'10001004': "Communication failure",
'10001005': "Communication failure",
'20001001': "We received an unknown status for the transaction. We will contact your acquirer and update the status of the transaction within one working day. Please check the status later.",
'20001002': "We received an unknown status for the transaction. We will contact your acquirer and update the status of the transaction within one working day. Please check the status later.",
'20001003': "We received an unknown status for the transaction. We will contact your acquirer and update the status of the transaction within one working day. Please check the status later.",
'20001004': "We received an unknown status for the transaction. We will contact your acquirer and update the status of the transaction within one working day. Please check the status later.",
'20001005': "We received an unknown status for the transaction. We will contact your acquirer and update the status of the transaction within one working day. Please check the status later.",
'20001006': "We received an unknown status for the transaction. We will contact your acquirer and update the status of the transaction within one working day. Please check the status later.",
'20001007': "We received an unknown status for the transaction. We will contact your acquirer and update the status of the transaction within one working day. Please check the status later.",
'20001008': "We received an unknown status for the transaction. We will contact your acquirer and update the status of the transaction within one working day. Please check the status later.",
'20001009': "We received an unknown status for the transaction. We will contact your acquirer and update the status of the transaction within one working day. Please check the status later.",
'20001010': "We received an unknown status for the transaction. We will contact your acquirer and update the status of the transaction within one working day. Please check the status later.",
'20001101': "A technical problem occurred, please contact helpdesk",
'20001105': "We received an unknown status for the transaction. We will contact your acquirer and update the status of the transaction within one working day. Please check the status later.",
'20001111': "A technical problem occurred, please contact helpdesk",
'20002001': "Origin for the response of the bank can not be checked",
'20002002': "Beneficiary account number has been modified during processing",
'20002003': "Amount has been modified during processing",
'20002004': "Currency has been modified during processing",
'20002005': "No feedback from the bank server has been detected",
'30001001': "Payment refused by the acquirer",
'30001002': "Duplicate request",
'30001010': "A technical problem occurred, please contact helpdesk",
'30001011': "A technical problem occurred, please contact helpdesk",
'30001012': "Card black listed - Contact acquirer",
'30001015': "Your merchant's acquirer is temporarily unavailable, please try later or choose another payment method.",
'30001051': "A technical problem occurred, please contact helpdesk",
'30001054': "A technical problem occurred, please contact helpdesk",
'30001057': "Your merchant's acquirer is temporarily unavailable, please try later or choose another payment method.",
'30001058': "Your merchant's acquirer is temporarily unavailable, please try later or choose another payment method.",
'30001060': "Aquirer indicates that a failure occured during payment processing",
'30001070': "RATEPAY Invalid Response Type (Failure)",
'30001071': "RATEPAY Missing Mandatory status code field (failure)",
'30001072': "RATEPAY Missing Mandatory Result code field (failure)",
'30001073': "RATEPAY Response parsing Failed",
'30001090': "CVC check required by front end and returned invalid by acquirer",
'30001091': "ZIP check required by front end and returned invalid by acquirer",
'30001092': "Address check required by front end and returned as invalid by acquirer.",
'30001100': "Unauthorized buyer's country",
'30001101': "IP country <> card country",
'30001102': "Number of different countries too high",
'30001103': "unauthorized card country",
'30001104': "unauthorized ip address country",
'30001105': "Anonymous proxy",
'30001110': "If the problem persists, please contact Support, or go to paysafecard's card balance page (https://customer.cc.at.paysafecard.com/psccustomer/GetWelcomePanelServlet?language=en) to see when the amount reserved on your card will be available again.",
'30001120': "IP address in merchant's black list",
'30001130': "BIN in merchant's black list",
'30001131': "Wrong BIN for 3xCB",
'30001140': "Card in merchant's card blacklist",
'30001141': "Email in blacklist",
'30001142': "Passenger name in blacklist",
'30001143': "Card holder name in blacklist",
'30001144': "Passenger name different from owner name",
'30001145': "Time to departure too short",
'30001149': "Card Configured in Card Supplier Limit for another relation (CSL)",
'30001150': "Card not configured in the system for this customer (CSL)",
'30001151': "REF1 not allowed for this relationship (Contract number",
'30001152': "Card/Supplier Amount limit reached (CSL)",
'30001153': "Card not allowed for this supplier (Date out of contract bounds)",
'30001154': "You have reached the usage limit allowed",
'30001155': "You have reached the usage limit allowed",
'30001156': "You have reached the usage limit allowed",
'30001157': "Unauthorized IP country for itinerary",
'30001158': "email usage limit reached",
'30001159': "Unauthorized card country/IP country combination",
'30001160': "Postcode in highrisk group",
'30001161': "generic blacklist match",
'30001162': "Billing Address is a PO Box",
'30001180': "maximum scoring reached",
'30001997': "Authorization canceled by simulation",
'30001998': "A technical problem occurred, please try again.",
'30001999': "Your merchant's acquirer is temporarily unavailable, please try later or choose another payment method.",
    '30002001': "Payment refused by the financial institution",
'30021001': "Call acquirer support call number.",
'30022001': "Payment must be approved by the acquirer before execution.",
'30031001': "Invalid merchant number.",
'30041001': "Retain card.",
'30051001': "Authorization declined",
'30071001': "Retain card - special conditions.",
'30121001': "Invalid transaction",
'30131001': "Invalid amount",
'30131002': "You have reached the total amount allowed",
'30141001': "Invalid card number",
'30151001': "Unknown acquiring institution.",
'30171001': "Payment method cancelled by the buyer",
'30171002': "The maximum time allowed is elapsed.",
'30191001': "Try again later.",
'30201001': "A technical problem occurred, please contact helpdesk",
'30301001': "Invalid format",
'30311001': "Unknown acquirer ID.",
'30331001': "Card expired.",
'30341001': "Suspicion of fraud.",
'30341002': "Suspicion of fraud (3rdMan)",
'30341003': "Suspicion of fraud (Perseuss)",
'30341004': "Suspicion of fraud (ETHOCA)",
'30381001': "A technical problem occurred, please contact helpdesk",
'30401001': "Invalid function.",
'30411001': "Lost card.",
'30431001': "Stolen card, pick up",
'30511001': "Insufficient funds.",
'30521001': "No Authorization. Contact the issuer of your card.",
'30541001': "Card expired.",
'30551001': "Invalid PIN.",
'30561001': "Card not in authorizer's database.",
'30571001': "Transaction not permitted on card.",
'30581001': "Transaction not allowed on this terminal",
'30591001': "Suspicion of fraud.",
'30601001': "The merchant must contact the acquirer.",
'30611001': "Amount exceeds card ceiling.",
'30621001': "Restricted card.",
'30631001': "Security policy not respected.",
'30641001': "Amount changed from ref. trn.",
'30681001': "Tardy response.",
'30751001': "PIN entered incorrectly too often",
'30761001': "Card holder already contesting.",
'30771001': "PIN entry required.",
'30811001': "Message flow error.",
'30821001': "Authorization center unavailable",
'30831001': "Authorization center unavailable",
'30901001': "Temporary system shutdown.",
'30911001': "Acquirer unavailable.",
'30921001': "Invalid card type for acquirer.",
'30941001': "Duplicate transaction",
'30961001': "Processing temporarily not possible",
'30971001': "A technical problem occurred, please contact helpdesk",
'30981001': "A technical problem occurred, please contact helpdesk",
'31011001': "Unknown acceptance code",
'31021001': "Invalid currency",
'31031001': "Acceptance code missing",
'31041001': "Inactive card",
'31051001': "Merchant not active",
'31061001': "Invalid expiration date",
'31071001': "Interrupted host communication",
'31081001': "Card refused",
'31091001': "Invalid password",
'31101001': "Plafond transaction (majoré du bonus) dépassé",
'31111001': "Plafond mensuel (majoré du bonus) dépassé",
'31121001': "Plafond centre de facturation dépassé",
'31131001': "Plafond entreprise dépassé",
'31141001': "Code MCC du fournisseur non autorisé pour la carte",
'31151001': "Numéro SIRET du fournisseur non autorisé pour la carte",
'31161001': "This is not a valid online banking account",
'32001004': "A technical problem occurred, please try again.",
'34011001': "Bezahlung mit RatePAY nicht möglich.",
'39991001': "A technical problem occurred, please contact the helpdesk of your acquirer",
'40001001': "A technical problem occurred, please try again.",
'40001002': "A technical problem occurred, please try again.",
'40001003': "A technical problem occurred, please try again.",
'40001004': "A technical problem occurred, please try again.",
'40001005': "A technical problem occurred, please try again.",
'40001006': "A technical problem occurred, please try again.",
'40001007': "A technical problem occurred, please try again.",
'40001008': "A technical problem occurred, please try again.",
'40001009': "A technical problem occurred, please try again.",
'40001010': "A technical problem occurred, please try again.",
'40001011': "A technical problem occurred, please contact helpdesk",
'40001012': "Your merchant's acquirer is temporarily unavailable, please try later or choose another payment method.",
'40001013': "A technical problem occurred, please contact helpdesk",
'40001016': "A technical problem occurred, please contact helpdesk",
'40001018': "A technical problem occurred, please try again.",
'40001019': "Sorry, an error occurred during processing. Please retry the operation (use back button of the browser). If problem persists, contact your merchant's helpdesk.",
'40001020': "Sorry, an error occurred during processing. Please retry the operation (use back button of the browser). If problem persists, contact your merchant's helpdesk.",
'40001050': "A technical problem occurred, please contact helpdesk",
'40001133': "Authentication failed, the signature of your bank access control server is incorrect",
'40001134': "Authentication failed, please retry or cancel.",
'40001135': "Authentication temporary unavailable, please retry or cancel.",
'40001136': "Technical problem with your browser, please retry or cancel",
'40001137': "Your bank access control server is temporary unavailable, please retry or cancel",
'40001998': "Temporary technical problem. Please retry a little bit later.",
'50001001': "Unknown card type",
'50001002': "Card number format check failed for given card number.",
'50001003': "Merchant data error",
'50001004': "Merchant identification missing",
'50001005': "Expiry date error",
'50001006': "Amount is not a number",
'50001007': "A technical problem occurred, please contact helpdesk",
'50001008': "A technical problem occurred, please contact helpdesk",
'50001009': "A technical problem occurred, please contact helpdesk",
'50001010': "A technical problem occurred, please contact helpdesk",
'50001011': "Brand not supported for that merchant",
'50001012': "A technical problem occurred, please contact helpdesk",
'50001013': "A technical problem occurred, please contact helpdesk",
'50001014': "A technical problem occurred, please contact helpdesk",
'50001015': "Invalid currency code",
'50001016': "A technical problem occurred, please contact helpdesk",
'50001017': "A technical problem occurred, please contact helpdesk",
'50001018': "A technical problem occurred, please contact helpdesk",
'50001019': "A technical problem occurred, please contact helpdesk",
'50001020': "A technical problem occurred, please contact helpdesk",
'50001021': "A technical problem occurred, please contact helpdesk",
'50001022': "A technical problem occurred, please contact helpdesk",
'50001023': "A technical problem occurred, please contact helpdesk",
'50001024': "A technical problem occurred, please contact helpdesk",
'50001025': "A technical problem occurred, please contact helpdesk",
'50001026': "A technical problem occurred, please contact helpdesk",
'50001027': "A technical problem occurred, please contact helpdesk",
'50001028': "A technical problem occurred, please contact helpdesk",
'50001029': "A technical problem occurred, please contact helpdesk",
'50001030': "A technical problem occurred, please contact helpdesk",
'50001031': "A technical problem occurred, please contact helpdesk",
'50001032': "A technical problem occurred, please contact helpdesk",
'50001033': "A technical problem occurred, please contact helpdesk",
'50001034': "A technical problem occurred, please contact helpdesk",
'50001035': "A technical problem occurred, please contact helpdesk",
'50001036': "Card length does not correspond to an acceptable value for the brand",
'50001037': "Purchasing card number for a regular merchant",
'50001038': "Non Purchasing card for a Purchasing card merchant",
'50001039': "Details sent for a non-Purchasing card merchant, please contact helpdesk",
'50001040': "Details not sent for a Purchasing card transaction, please contact helpdesk",
'50001041': "Payment detail validation failed",
'50001042': "Given transactions amounts (tax,discount,shipping,net,etc…) do not compute correctly together",
'50001043': "A technical problem occurred, please contact helpdesk",
'50001044': "No acquirer configured for this operation",
'50001045': "No UID configured for this operation",
'50001046': "Operation not allowed for the merchant",
'50001047': "A technical problem occurred, please contact helpdesk",
'50001048': "A technical problem occurred, please contact helpdesk",
'50001049': "A technical problem occurred, please contact helpdesk",
'50001050': "A technical problem occurred, please contact helpdesk",
'50001051': "A technical problem occurred, please contact helpdesk",
'50001052': "A technical problem occurred, please contact helpdesk",
'50001053': "A technical problem occurred, please contact helpdesk",
'50001054': "Card number incorrect or incompatible",
'50001055': "A technical problem occurred, please contact helpdesk",
'50001056': "A technical problem occurred, please contact helpdesk",
'50001057': "A technical problem occurred, please contact helpdesk",
'50001058': "A technical problem occurred, please contact helpdesk",
'50001059': "A technical problem occurred, please contact helpdesk",
'50001060': "A technical problem occurred, please contact helpdesk",
'50001061': "A technical problem occurred, please contact helpdesk",
'50001062': "A technical problem occurred, please contact helpdesk",
'50001063': "Card Issue Number does not correspond to range or not present",
'50001064': "Start Date not valid or not present",
'50001066': "Format of CVC code invalid",
'50001067': "The merchant is not enrolled for 3D-Secure",
'50001068': "The card number or account number (PAN) is invalid",
'50001069': "Invalid check for CardID and Brand",
'50001070': "The ECI value given is either not supported, or in conflict with other data in the transaction",
'50001071': "Incomplete TRN demat",
'50001072': "Incomplete PAY demat",
'50001073': "No demat APP",
'50001074': "Authorisation too old",
'50001075': "VERRes was an error message",
'50001076': "DCP amount greater than authorisation amount",
'50001077': "Details negative amount",
'50001078': "Details negative quantity",
'50001079': "Could not decode/decompress received PARes (3D-Secure)",
'50001080': "Received PARes was an erereor message from ACS (3D-Secure)",
'50001081': "Received PARes format was invalid according to the 3DS specifications (3D-Secure)",
'50001082': "PAReq/PARes reconciliation failure (3D-Secure)",
'50001084': "Maximum amount reached",
'50001087': "The transaction type requires authentication, please check with your bank.",
'50001090': "CVC missing at input, but CVC check asked",
'50001091': "ZIP missing at input, but ZIP check asked",
'50001092': "Address missing at input, but Address check asked",
'50001095': "Invalid date of birth",
'50001096': "Invalid commodity code",
'50001097': "The requested currency and brand are incompatible.",
'50001111': "Data validation error",
'50001113': "This order has already been processed",
'50001114': "Error pre-payment check page access",
'50001115': "Request not received in secure mode",
'50001116': "Unknown IP address origin",
'50001117': "NO IP address origin",
'50001118': "Pspid not found or not correct",
'50001119': "Password incorrect or disabled due to numbers of errors",
'50001120': "Invalid currency",
'50001121': "Invalid number of decimals for the currency",
'50001122': "Currency not accepted by the merchant",
'50001123': "Card type not active",
'50001124': "Number of lines don't match with number of payments",
'50001125': "Format validation error",
'50001126': "Overflow in data capture requests for the original order",
'50001127': "The original order is not in a correct status",
'50001128': "missing authorization code for unauthorized order",
'50001129': "Overflow in refunds requests",
'50001130': "Error access to original order",
'50001131': "Error access to original history item",
'50001132': "The Selected Catalog is empty",
'50001133': "Duplicate request",
'50001134': "Authentication failed, please retry or cancel.",
'50001135': "Authentication temporary unavailable, please retry or cancel.",
'50001136': "Technical problem with your browser, please retry or cancel",
'50001137': "Your bank access control server is temporary unavailable, please retry or cancel",
'50001150': "Fraud Detection, Technical error (IP not valid)",
'50001151': "Fraud detection : technical error (IPCTY unknown or error)",
'50001152': "Fraud detection : technical error (CCCTY unknown or error)",
'50001153': "Overflow in redo-authorisation requests",
'50001170': "Dynamic BIN check failed",
'50001171': "Dynamic country check failed",
'50001172': "Error in Amadeus signature",
'50001174': "Card Holder Name is too long",
'50001175': "Name contains invalid characters",
'50001176': "Card number is too long",
'50001177': "Card number contains non-numeric info",
'50001178': "Card Number Empty",
'50001179': "CVC too long",
'50001180': "CVC contains non-numeric info",
'50001181': "Expiration date contains non-numeric info",
'50001182': "Invalid expiration month",
'50001183': "Expiration date must be in the future",
'50001184': "SHA Mismatch",
'50001205': "Missing mandatory fields for billing address.",
'50001206': "Missing mandatory field date of birth.",
'50001207': "Missing required shopping basket details.",
'50001208': "Missing social security number",
'50001209': "Invalid country code",
'50001210': "Missing yearly salary",
'50001211': "Missing gender",
'50001212': "Missing email",
'50001213': "Missing IP address",
'50001214': "Missing part payment campaign ID",
'50001215': "Missing invoice number",
'50001216': "The alias must be different than the card number",
'60000001': "account number unknown",
'60000003': "not credited dd-mm-yy",
'60000005': "name/number do not correspond",
'60000007': "account number blocked",
'60000008': "specific direct debit block",
'60000009': "account number WKA",
'60000010': "administrative reason",
'60000011': "account number expired",
'60000012': "no direct debit authorisation given",
'60000013': "debit not approved",
'60000014': "double payment",
'60000018': "name/address/city not entered",
'60001001': "no original direct debit for revocation",
'60001002': "payer’s account number format error",
'60001004': "payer’s account at different bank",
'60001005': "payee’s account at different bank",
'60001006': "payee’s account number format error",
'60001007': "payer’s account number blocked",
'60001008': "payer’s account number expired",
'60001009': "payee’s account number expired",
'60001010': "direct debit not possible",
'60001011': "creditor payment not possible",
'60001012': "payer’s account number unknown WKA-number",
'60001013': "payee’s account number unknown WKA-number",
'60001014': "impermissible WKA transaction",
'60001015': "period for revocation expired",
'60001017': "reason for revocation not correct",
'60001018': "original run number not numeric",
'60001019': "payment ID incorrect",
'60001020': "amount not numeric",
'60001021': "amount zero not permitted",
'60001022': "negative amount not permitted",
'60001023': "payer and payee giro account number",
'60001025': "processing code (verwerkingscode) incorrect",
'60001028': "revocation not permitted",
'60001029': "guaranteed direct debit on giro account number",
'60001030': "NBC transaction type incorrect",
'60001031': "description too large",
'60001032': "book account number not issued",
'60001034': "book account number incorrect",
'60001035': "payer’s account number not numeric",
'60001036': "payer’s account number not eleven-proof",
'60001037': "payer’s account number not issued",
'60001039': "payer’s account number of DNB/BGC/BLA",
'60001040': "payee’s account number not numeric",
'60001041': "payee’s account number not eleven-proof",
'60001042': "payee’s account number not issued",
'60001044': "payee’s account number unknown",
'60001050': "payee’s name missing",
'60001051': "indicate payee’s bank account number instead of 3102",
'60001052': "no direct debit contract",
'60001053': "amount beyond bounds",
'60001054': "selective direct debit block",
'60001055': "original run number unknown",
'60001057': "payer’s name missing",
'60001058': "payee’s account number missing",
'60001059': "restore not permitted",
'60001060': "bank’s reference (navraaggegeven) missing",
'60001061': "BEC/GBK number incorrect",
'60001062': "BEC/GBK code incorrect",
'60001087': "book account number not numeric",
'60001090': "cancelled on request",
'60001091': "cancellation order executed",
'60001092': "cancelled instead of bended",
'60001093': "book account number is a shortened account number",
'60001094': "instructing party account number not identical with payer",
'60001095': "payee unknown GBK acceptor",
'60001097': "instructing party account number not identical with payee",
'60001099': "clearing not permitted",
'60001101': "payer’s account number not spaces",
'60001102': "PAN length not numeric",
'60001103': "PAN length outside limits",
'60001104': "track number not numeric",
'60001105': "track number not valid",
'60001106': "PAN sequence number not numeric",
'60001107': "domestic PAN not numeric",
'60001108': "domestic PAN not eleven-proof",
'60001109': "domestic PAN not issued",
'60001110': "foreign PAN not numeric",
'60001111': "card valid date not numeric",
'60001112': "book period number (boekperiodenr) not numeric",
'60001113': "transaction number not numeric",
'60001114': "transaction time not numeric",
'60001115': "transaction no valid time",
'60001116': "transaction date not numeric",
'60001117': "transaction no valid date",
'60001118': "STAN not numeric",
'60001119': "instructing party’s name missing",
'60001120': "foreign amount (bedrag-vv) not numeric",
'60001122': "rate (verrekenkoers) not numeric",
'60001125': "number of decimals (aantaldecimalen) incorrect",
'60001126': "tariff (tarifering) not B/O/S",
'60001127': "domestic costs (kostenbinnenland) not numeric",
'60001128': "domestic costs (kostenbinnenland) not higher than zero",
'60001129': "foreign costs (kostenbuitenland) not numeric",
'60001130': "foreign costs (kostenbuitenland) not higher than zero",
'60001131': "domestic costs (kostenbinnenland) not zero",
'60001132': "foreign costs (kostenbuitenland) not zero",
'60001134': "Euro record not fully filled in",
'60001135': "Client currency incorrect",
'60001136': "Amount NLG not numeric",
'60001137': "Amount NLG not higher than zero",
'60001138': "Amount NLG not equal to Amount",
'60001139': "Amount NLG incorrectly converted",
'60001140': "Amount EUR not numeric",
'60001141': "Amount EUR not greater than zero",
'60001142': "Amount EUR not equal to Amount",
'60001143': "Amount EUR incorrectly converted",
'60001144': "Client currency not NLG",
'60001145': "rate euro-vv (Koerseuro-vv) not numeric",
'60001146': "comma rate euro-vv (Kommakoerseuro-vv) incorrect",
'60001147': "acceptgiro distributor not valid",
'60001148': "Original run number and/or BRN are missing",
'60001149': "Amount/Account number/ BRN different",
'60001150': "Direct debit already revoked/restored",
'60001151': "Direct debit already reversed/revoked/restored",
'60001153': "Payer’s account number not known",
}
DATA_VALIDATION_ERROR = '50001111'
def retryable(error):
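    """Return True for Ogone error codes that denote transient failures worth retrying."""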
return error in [
'0020001001', '0020001002', '0020001003', '0020001004', '0020001005',
'0020001006', '0020001007', '0020001008', '0020001009', '0020001010',
'30001010', '30001011', '30001015',
'30001057', '30001058',
'30001998', '30001999',
#'30611001', # amount exceeds card limit
'30961001',
'40001001', '40001002', '40001003', '40001004', '40001005',
'40001006', '40001007', '40001008', '40001009', '40001010',
'40001012',
'40001018', '40001019', '40001020',
'40001134', '40001135', '40001136', '40001137',
#'50001174', # cardholder name too long
]
| agpl-3.0 | 8,211,988,891,751,535,000 | 59.875252 | 266 | 0.70785 | false |
louisswarren/hieretikz | drinkerclass.py | 1 | 6686 | import subprocess
from hierarchyclass import *
from tikzify import *
formulae = 'tt lem wlem dgp glpoa gmp dp he dnsu dnse ud'.split()
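# Bind each abbreviation to itself as a string so the principles can be written
# as bare names in the proof and countermodel tables below.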
globals().update({f: f for f in formulae})
efq = 'efq'
globals().update({future: future for future in
        'dpn glpon mgmp'.split()})
# These are actually equivalent.
ip = he
glpo = lem
hen = dpn
wgmp = dnsu
formula_layout = '''\
glpoa
lem
dp he
ud gmp dgp
dnsu dnse
wlem
'''
formula_strs = {f: f.upper() for f in formulae}
formula_strs[dnse] = R'DNS$\exists$'
formula_strs[glpoa] = "GLPO$'$"
formula_strs[glpon] = R'GLPO$_\neg$'
formula_strs[dnsu] = R'DNS$\forall$,WGMP'
formula_strs[lem] = R'LEM,GLPO'
formula_strs[dpn] = R'DP$_\lnot$,HE$_\lnot$'
unnamed_proofs = {
# (he, ip), (ip, he),
# (lem, glpo), (glpo, lem),
# (dpn, hen), (hen, dpn),
# (dnsu, wgmp), (wgmp, dnsu),
(lem, wlem),
(dp, dpn),
(he, hen),
(gmp, wgmp),
(dgp, wlem),
(glpoa, lem),
(glpoa, gmp),
(dp, ud),
(dp, gmp),
# (dp, dnsu),
(glpo, dpn),
(he, dnse),
(glpo, dnse),
(gmp, dnse),
(dpn, dnse),
# (glpoa, wgmp),
(dp, efq, tt, dgp),
(he, efq, tt, dgp),
# (dp, tt, wlem),
(he, tt, wlem),
(gmp, tt, wlem),
(dp, lem, glpoa),
# (gmp, lem, glpoa), # Not in tome
(dnse, tt, wlem),
(gmp, mgmp), (glpo, glpon), (glpon, wlem), (glpon, dnse), # Speculation
}
# EFQ isn't on the diagram, so these won't be plotted
unnamed_proofs.update({(efq, lem, f) for f in formulae if f not in (efq, lem, tt)})
proofs = {p: '{}-{}'.format(','.join(p[:-1]), p[-1]) for p in unnamed_proofs}
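# Each named countermodel maps to (principles it validates, principles it refutes).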
named_models = {
'dp-cm': (
{tt, efq, he, dgp, wlem, glpon, ud},
{dp, lem, dnsu, wgmp, mgmp},
),
'dp-cm-lobot': (
{tt, he, lem, dpn, hen, dgp, wlem, dnsu, dnse, glpo, glpoa, glpon, gmp, ud},
{dp},
),
'he-cm': (
{tt, efq, dp, dgp, wlem, glpon, ud},
{he, lem},
),
'he-cm-lobot': (
{tt, dp, lem, dpn, hen, dgp, wlem, dnsu, dnse, glpo, glpoa, glpon, gmp, ud},
{he},
),
'linear-growing-terms': (
{tt, efq, wlem, dgp},
{dp, he, lem, dnse, glpoa, ud},
),
'two-world-constant-terms': (
{tt, efq, dp, he, wlem, dgp, ud},
{lem},
),
'two-world-growing-terms': (
{tt, efq, wlem, dgp, wgmp},
{glpoa, dp, he, dpn, hen, gmp, dnse, glpon, ud},
),
'two-world-growing-terms-lobot': (
{tt, gmp, glpoa},
{ud},
),
'two-world-growing-terms-with-bot': (
{tt, lem, wlem, dgp},
{glpoa, dp, he, gmp, wgmp, ud, mgmp},
),
'v-const-term': (
{tt, efq, dnsu, ud},
{wlem, dgp, dnse},
),
'v-const-term-lobot': (
{tt, glpoa, lem, dpn, hen, gmp, dnse, glpon, ud},
{dgp},
),
'diamond-constant-terms': (
{tt, efq, wlem, gmp, ud},
{dgp, lem},
),
'beth-width-two': (
{lem, he, dp},
set(),
),
'one-term-v': (
{efq, dp, he},
{wlem, dgp},
),
'one-term-v-lobot': (
{tt, dp, he},
{dgp},
),
'one-term-v-lem': (
{dp, he, lem, ud, glpoa},
{dgp},
),
'trivial-lobot': (
{f for f in formulae if f is not efq},
{efq},
),
'one-world-one-term': (
{f for f in formulae if f is not tt} | {efq},
{tt},
),
'non-full-dp-cm-with-single-term-root': (
{he, efq},
{ud},
),
'non-full-dp-cm-with-single-term-root-lem': (
{tt, he, lem},
{ud},
),
}
models = [(k, *map(frozenset, v)) for k, v in named_models.items()]
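# Each entry of `models` is (name, low, high); judging from the Tier(low, high, name)
# constructor used below, `low` appears to be the set of formulae the countermodel
# validates and `high` the set it refutes (an assumption about hierarchyclass semantics).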
if __name__ == '__main__':
h = Hierarchy((Arrow(tails, head) for *tails, head in unnamed_proofs),
(Tier(low, high, name) for name, (low, high) in named_models.items()))
qarrows = h.find_qarrows(set(formulae))
ev_qarrows = {arrow.edge: h.evaluate_qarrow(arrow, set(formulae), 1) for arrow in qarrows}
minimal_diagram = TikzHierarchy(name_dict=formula_strs)
minimal_diagram.add_string_node_layout(formula_layout)
minimal_diagram.add_edges((set(proofs)), color=False)
minimal_diagram.add_edges(set(arrow.edge for arrow in qarrows), 'dashed')
qarrows2 = h.find_qarrows(set(formulae), 2)
ev_qarrows2 = {arrow.edge: h.evaluate_qarrow(arrow, set(formulae), 2) for arrow in qarrows2}
minimal_diagram2 = TikzHierarchy(name_dict=formula_strs)
minimal_diagram2.add_string_node_layout(formula_layout)
minimal_diagram2.add_edges((set(proofs)), color=False)
minimal_diagram2.add_edges(set(arrow.edge for arrow in qarrows2), 'dashed')
inth = h.under_quotient(efq)
int_qarrows = inth.find_qarrows(set(formulae) - {efq})
int_ev_qarrows = {arrow.edge: inth.evaluate_qarrow(arrow, set(formulae), 1) for arrow in int_qarrows}
int_diagram = TikzHierarchy(name_dict=formula_strs)
int_diagram.add_string_node_layout(formula_layout)
int_diagram.add_edges(set(arrow.edge for arrow in inth.arrows), color=False)
int_diagram.add_edges(set(arrow.edge for arrow in int_qarrows), 'dashed')
tth = h.under_quotient(tt)
tt_qarrows = tth.find_qarrows(set(formulae) - {tt})
tt_ev_qarrows = {arrow.edge: tth.evaluate_qarrow(arrow, set(formulae), 1) for arrow in tt_qarrows}
tt_diagram = TikzHierarchy(name_dict=formula_strs)
tt_diagram.add_string_node_layout(formula_layout)
tt_diagram.add_edges(set(arrow.edge for arrow in tth.arrows), color=False)
tt_diagram.add_edges(set(arrow.edge for arrow in tt_qarrows), 'dashed')
tex = make_sections(
('Minimal Logic', minimal_diagram),
('Investigations ({})'.format(len(qarrows)),
make_columns(make_connections_list(ev_qarrows)), 1),
('Minimal Logic 2', minimal_diagram2),
('Investigations ({})'.format(len(qarrows2)),
make_columns(make_connections_list(ev_qarrows2)), 1),
('Intuitionistic Logic', int_diagram),
('Investigations ({})'.format(len(int_qarrows)),
make_columns(make_connections_list(int_ev_qarrows)), 1),
('Two-termed semantics', tt_diagram),
('Investigations ({})'.format(len(tt_qarrows)),
make_columns(make_connections_list(tt_ev_qarrows)), 1),
)
document = make_latex_document(tex)
with open('drinker.tex', 'w') as f:
f.write(document)
subprocess.call(['pdflatex', 'drinker.tex'], stdout=subprocess.DEVNULL)
#with open('backdrinker.tex', 'r') as f:
# assert(f.read() == document)
| mit | 2,143,332,116,224,817,700 | 30.389671 | 105 | 0.554592 | false |
waynegm/OpendTect-External-Attributes | Python_3/Filtering/ex_prewitt.py | 3 | 1093 | # Prewitt External Attribute
import sys,os
import numpy as np
from scipy.ndimage import prewitt
sys.path.insert(0, os.path.join(sys.path[0], '..'))
import extattrib as xa
xa.params = {
'Inputs': ['Input'],
'Output' : ['Average Gradient', 'In-line gradient', 'Cross-line gradient', 'Z gradient'],
'ZSampMargin' : {'Value': [-1,1], 'Hidden': True},
'StepOut' : {'Value': [1,1], 'Hidden': True},
'Parallel' : False,
'Help' : 'http://waynegm.github.io/OpendTect-Plugin-Docs/Attributes/ExternalAttrib/'
}
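# ZSampMargin and StepOut are presumably fixed (and hidden) at one sample/trace on
# each side because scipy's prewitt operator works on a 3x3x3 neighbourhood around
# the evaluated sample; this is inferred from the filter, not documented behaviour.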
def doCompute():
inlpos = xa.SI['nrinl']//2
crlpos = xa.SI['nrcrl']//2
while True:
xa.doInput()
indata = xa.Input['Input']
xa.Output['In-line gradient'] = prewitt(indata, axis=0)[inlpos,crlpos,:]
xa.Output['Cross-line gradient'] = prewitt(indata, axis=1)[inlpos,crlpos,:]
xa.Output['Z gradient'] = prewitt(indata, axis=2)[inlpos,crlpos,:]
xa.Output['Average Gradient'] = ( xa.Output['In-line gradient']
+ xa.Output['Cross-line gradient']
+ xa.Output['Z gradient'] )/3
xa.doOutput()
xa.doCompute = doCompute
xa.run(sys.argv[1:])
| mit | -7,116,982,936,372,556,000 | 30.228571 | 90 | 0.645929 | false |
josenavas/QiiTa | qiita_pet/test/test_download.py | 1 | 13165 | # -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from unittest import main
from mock import Mock
from os.path import exists, isdir, join, basename
from os import remove, makedirs, close
from shutil import rmtree
from tempfile import mkdtemp, mkstemp
from biom.util import biom_open
from biom import example_table as et
from qiita_pet.test.tornado_test_base import TestHandlerBase
from qiita_pet.handlers.base_handlers import BaseHandler
from qiita_db.user import User
from qiita_db.study import Study
from qiita_db.artifact import Artifact
from qiita_db.software import Parameters, Command
class TestDownloadHandler(TestHandlerBase):
def setUp(self):
super(TestDownloadHandler, self).setUp()
self._clean_up_files = []
def tearDown(self):
super(TestDownloadHandler, self).tearDown()
for fp in self._clean_up_files:
if exists(fp):
if isdir(fp):
rmtree(fp)
else:
remove(fp)
def test_download(self):
# check success
response = self.get('/download/1')
self.assertEqual(response.code, 200)
self.assertEqual(response.body, (
"This installation of Qiita was not equipped with nginx, so it "
"is incapable of serving files. The file you attempted to "
"download is located at raw_data/1_s_G1_L001_sequences.fastq.gz"))
# failure
response = self.get('/download/1000')
self.assertEqual(response.code, 403)
# directory
a = Artifact(1)
fd, fp = mkstemp(suffix='.html')
close(fd)
with open(fp, 'w') as f:
f.write('\n')
self._clean_up_files.append(fp)
dirpath = mkdtemp()
fd, fp2 = mkstemp(suffix='.txt', dir=dirpath)
close(fd)
with open(fp2, 'w') as f:
f.write('\n')
self._clean_up_files.append(dirpath)
a.set_html_summary(fp, support_dir=dirpath)
for fp_id, _, fp_type in a.filepaths:
if fp_type == 'html_summary_dir':
break
response = self.get('/download/%d' % fp_id)
self.assertEqual(response.code, 200)
fp_name = basename(fp2)
dirname = basename(dirpath)
self.assertEqual(
response.body, "- 1 /protected/FASTQ/1/%s/%s FASTQ/1/%s/%s\n"
% (dirname, fp_name, dirname, fp_name))
class TestDownloadStudyBIOMSHandler(TestHandlerBase):
def setUp(self):
super(TestDownloadStudyBIOMSHandler, self).setUp()
self._clean_up_files = []
def tearDown(self):
super(TestDownloadStudyBIOMSHandler, self).tearDown()
for fp in self._clean_up_files:
if exists(fp):
if isdir(fp):
rmtree(fp)
else:
remove(fp)
def test_download_study(self):
tmp_dir = mkdtemp()
self._clean_up_files.append(tmp_dir)
biom_fp = join(tmp_dir, 'otu_table.biom')
smr_dir = join(tmp_dir, 'sortmerna_picked_otus')
log_dir = join(smr_dir, 'seqs_otus.log')
tgz = join(tmp_dir, 'sortmerna_picked_otus.tgz')
with biom_open(biom_fp, 'w') as f:
et.to_hdf5(f, "test")
makedirs(smr_dir)
with open(log_dir, 'w') as f:
f.write('\n')
with open(tgz, 'w') as f:
f.write('\n')
files_biom = [(biom_fp, 'biom'), (smr_dir, 'directory'), (tgz, 'tgz')]
params = Parameters.from_default_params(
Command(3).default_parameter_sets.next(), {'input_data': 1})
a = Artifact.create(files_biom, "BIOM", parents=[Artifact(2)],
processing_parameters=params)
for _, fp, _ in a.filepaths:
self._clean_up_files.append(fp)
response = self.get('/download_study_bioms/1')
self.assertEqual(response.code, 200)
exp = (
'- 1256812 /protected/processed_data/1_study_1001_closed_'
'reference_otu_table.biom processed_data/1_study_1001_closed_'
'reference_otu_table.biom\n'
'- 36615 /protected/templates/1_prep_1_qiime_[0-9]*-'
'[0-9]*.txt mapping_files/4_mapping_file.txt\n'
'- 1256812 /protected/processed_data/'
'1_study_1001_closed_reference_otu_table.biom processed_data/'
'1_study_1001_closed_reference_otu_table.biom\n'
'- 36615 /protected/templates/1_prep_1_qiime_[0-9]*-'
'[0-9]*.txt mapping_files/5_mapping_file.txt\n'
'- 1256812 /protected/processed_data/'
'1_study_1001_closed_reference_otu_table_Silva.biom processed_data'
'/1_study_1001_closed_reference_otu_table_Silva.biom\n'
'- 36615 /protected/templates/1_prep_1_qiime_[0-9]*-'
'[0-9]*.txt mapping_files/6_mapping_file.txt\n'
'- 36615 /protected/templates/1_prep_2_qiime_[0-9]*-'
'[0-9]*.txt mapping_files/7_mapping_file.txt\n'
'- [0-9]* /protected/BIOM/{0}/otu_table.biom '
'BIOM/{0}/otu_table.biom\n'
'- 1 /protected/BIOM/{0}/sortmerna_picked_otus/seqs_otus.log '
'BIOM/{0}/sortmerna_picked_otus/seqs_otus.log\n'
'- 36615 /protected/templates/1_prep_1_qiime_[0-9]*-[0-9]*.'
'txt mapping_files/{0}_mapping_file.txt\n'.format(a.id))
self.assertRegexpMatches(response.body, exp)
response = self.get('/download_study_bioms/200')
self.assertEqual(response.code, 405)
# changing user so we can test the failures
BaseHandler.get_current_user = Mock(
return_value=User("[email protected]"))
response = self.get('/download_study_bioms/1')
self.assertEqual(response.code, 405)
a.visibility = 'public'
response = self.get('/download_study_bioms/1')
self.assertEqual(response.code, 200)
exp = (
'- [0-9]* /protected/BIOM/{0}/otu_table.biom '
'BIOM/{0}/otu_table.biom\n'
'- 1 /protected/BIOM/{0}/sortmerna_picked_otus/seqs_otus.log '
'BIOM/{0}/sortmerna_picked_otus/seqs_otus.log\n'
'- 36615 /protected/templates/1_prep_1_qiime_[0-9]*-[0-9]*.'
'txt mapping_files/{0}_mapping_file.txt\n'.format(a.id))
self.assertRegexpMatches(response.body, exp)
class TestDownloadRelease(TestHandlerBase):
def setUp(self):
super(TestDownloadRelease, self).setUp()
def tearDown(self):
super(TestDownloadRelease, self).tearDown()
def test_download(self):
# check success
response = self.get('/release/download/1')
self.assertEqual(response.code, 200)
self.assertIn(
"This installation of Qiita was not equipped with nginx, so it is "
"incapable of serving files. The file you attempted to download "
"is located at", response.body)
class TestDownloadRawData(TestHandlerBase):
def setUp(self):
super(TestDownloadRawData, self).setUp()
self._clean_up_files = []
def tearDown(self):
super(TestDownloadRawData, self).tearDown()
for fp in self._clean_up_files:
if exists(fp):
if isdir(fp):
rmtree(fp)
else:
remove(fp)
def test_download_raw_data(self):
# it's possible that one of the tests is deleting the raw data
# so we will make sure that the files exists so this test passes
all_files = [fp for a in Study(1).artifacts()
for _, fp, _ in a.filepaths]
for fp in all_files:
if not exists(fp):
with open(fp, 'w') as f:
f.write('')
response = self.get('/download_raw_data/1')
self.assertEqual(response.code, 200)
exp = (
'- 58 /protected/raw_data/1_s_G1_L001_sequences.fastq.gz '
'raw_data/1_s_G1_L001_sequences.fastq.gz\n'
'- 58 /protected/raw_data/1_s_G1_L001_sequences_barcodes.fastq.gz '
'raw_data/1_s_G1_L001_sequences_barcodes.fastq.gz\n'
'- 36615 /protected/templates/1_prep_1_qiime_[0-9]*-[0-9]*.txt '
'mapping_files/1_mapping_file.txt\n'
'- 36615 /protected/templates/1_prep_2_qiime_[0-9]*-[0-9]*.txt '
'mapping_files/7_mapping_file.txt\n')
self.assertRegexpMatches(response.body, exp)
response = self.get('/download_study_bioms/200')
self.assertEqual(response.code, 405)
# changing user so we can test the failures
BaseHandler.get_current_user = Mock(
return_value=User("[email protected]"))
response = self.get('/download_study_bioms/1')
self.assertEqual(response.code, 405)
class TestDownloadEBISampleAccessions(TestHandlerBase):
def setUp(self):
super(TestDownloadEBISampleAccessions, self).setUp()
def tearDown(self):
super(TestDownloadEBISampleAccessions, self).tearDown()
def test_download(self):
# check success
response = self.get('/download_ebi_accessions/samples/1')
exp = ("sample_name\tsample_accession\n1.SKB2.640194\tERS000008\n"
"1.SKM4.640180\tERS000004\n1.SKB3.640195\tERS000024\n"
"1.SKB6.640176\tERS000025\n1.SKD6.640190\tERS000007\n"
"1.SKM6.640187\tERS000022\n1.SKD9.640182\tERS000019\n"
"1.SKM8.640201\tERS000014\n1.SKM2.640199\tERS000015\n"
"1.SKD2.640178\tERS000009\n1.SKB7.640196\tERS000002\n"
"1.SKD4.640185\tERS000023\n1.SKB8.640193\tERS000000\n"
"1.SKM3.640197\tERS000018\n1.SKD5.640186\tERS000017\n"
"1.SKB1.640202\tERS000011\n1.SKM1.640183\tERS000025\n"
"1.SKD1.640179\tERS000012\n1.SKD3.640198\tERS000013\n"
"1.SKB5.640181\tERS000006\n1.SKB4.640189\tERS000020\n"
"1.SKB9.640200\tERS000016\n1.SKM9.640192\tERS000003\n"
"1.SKD8.640184\tERS000001\n1.SKM5.640177\tERS000005\n"
"1.SKM7.640188\tERS000010\n1.SKD7.640191\tERS000021")
self.assertEqual(response.code, 200)
self.assertRegexpMatches(response.body, exp)
# changing user so we can test the failures
BaseHandler.get_current_user = Mock(
return_value=User("[email protected]"))
response = self.get('/download_ebi_accessions/samples/1')
self.assertEqual(response.code, 405)
class TestDownloadEBIPrepAccessions(TestHandlerBase):
def setUp(self):
super(TestDownloadEBIPrepAccessions, self).setUp()
def tearDown(self):
super(TestDownloadEBIPrepAccessions, self).tearDown()
def test_download(self):
# check success
response = self.get('/download_ebi_accessions/experiments/1')
exp = ("sample_name\texperiment_accession\n1.SKB2.640194\tERX0000008\n"
"1.SKM4.640180\tERX0000004\n1.SKB3.640195\tERX0000024\n"
"1.SKB6.640176\tERX0000025\n1.SKD6.640190\tERX0000007\n"
"1.SKM6.640187\tERX0000022\n1.SKD9.640182\tERX0000019\n"
"1.SKM8.640201\tERX0000014\n1.SKM2.640199\tERX0000015\n"
"1.SKD2.640178\tERX0000009\n1.SKB7.640196\tERX0000002\n"
"1.SKD4.640185\tERX0000023\n1.SKB8.640193\tERX0000000\n"
"1.SKM3.640197\tERX0000018\n1.SKD5.640186\tERX0000017\n"
"1.SKB1.640202\tERX0000011\n1.SKM1.640183\tERX0000026\n"
"1.SKD1.640179\tERX0000012\n1.SKD3.640198\tERX0000013\n"
"1.SKB5.640181\tERX0000006\n1.SKB4.640189\tERX0000020\n"
"1.SKB9.640200\tERX0000016\n1.SKM9.640192\tERX0000003\n"
"1.SKD8.640184\tERX0000001\n1.SKM5.640177\tERX0000005\n"
"1.SKM7.640188\tERX0000010\n1.SKD7.640191\tERX0000021")
self.assertEqual(response.code, 200)
self.assertRegexpMatches(response.body, exp)
# changing user so we can test the failures
BaseHandler.get_current_user = Mock(
return_value=User("[email protected]"))
response = self.get('/download_ebi_accessions/experiments/1')
self.assertEqual(response.code, 405)
class TestDownloadUpload(TestHandlerBase):
def setUp(self):
super(TestDownloadUpload, self).setUp()
def tearDown(self):
super(TestDownloadUpload, self).tearDown()
def test_download(self):
# check failure
response = self.get('/download_upload/1/uploaded_file.txt')
self.assertEqual(response.code, 403)
# check success
BaseHandler.get_current_user = Mock(return_value=User("[email protected]"))
response = self.get('/download_upload/1/uploaded_file.txt')
self.assertEqual(response.code, 200)
if __name__ == '__main__':
main()
| bsd-3-clause | 7,230,521,670,577,990,000 | 39.015198 | 79 | 0.599468 | false |
iut-ibk/DynaMind-UrbanSim | 3rdparty/opus/src/urbansim_parcel/job_x_building/same_sector_employment_in_building.py | 2 | 2096 | # Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
from urbansim.abstract_variables.abstract_number_of_agents_with_same_attribute_value import abstract_number_of_agents_with_same_attribute_value
class same_sector_employment_in_building(abstract_number_of_agents_with_same_attribute_value):
""""""
agent_attribute_name = "job.sector_id"
agent_dependencies = []
choice_set_dependencies = []
#unique_agent_attribute_value = range(1, 20)
geography_dataset_name = 'building'
## use default
#expression_agents_of_attribute_by_geography = "'agents_of_attribute_%(agent_attribute_value)s = %(geography_dataset_name)s.aggregate(%(agent_attribute_name)s==%(agent_attribute_value)s)'"
from opus_core.tests import opus_unittest
from opus_core.tests.utils.variable_tester import VariableTester
from numpy import arange, array
from numpy import ma
class Tests(opus_unittest.OpusTestCase):
def test_my_inputs(self):
tester = VariableTester(
__file__,
package_order=['urbansim_parcel', 'urbansim', 'opus_core'],
test_data={
"job":{
'job_id': array([1, 2, 3, 4, 5, 6]),
'building_id':array([1, 1, 5, 3, 3, 3]),
'sector_id': array([1, 1, 2, 1, 3, 3]),
},
"building":{
'building_id': array([1, 2, 3, 4, 5,]),
},
})
        ## mind the mirror of gridcells in the walking_distance calculus
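        # rows correspond to the jobs above (in job_id order) and columns to
        # buildings 1-5; each value is the number of jobs of the same sector
        # located in that building, e.g. job 1 (sector 1) sees two sector-1
        # jobs in building 1 and one in building 3.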
should_be = array([[2, 0, 1, 0, 0],
[2, 0, 1, 0, 0],
[0, 0, 0, 0, 1],
[2, 0, 1, 0, 0],
[0, 0, 2, 0, 0],
[0, 0, 2, 0, 0]])
tester.test_is_close_for_variable_defined_by_this_module(self, should_be)
if __name__=='__main__':
opus_unittest.main()
| gpl-2.0 | -6,012,556,590,974,193,000 | 39.098039 | 192 | 0.525286 | false |
mrf345/FQM | migrations/versions/b41c62db00a1_.py | 1 | 1488 | """ Convert printer `vendor` and `product` to int type. And add `name`.
Revision ID: b41c62db00a1
Revises: d37b1524c3fc
Create Date: 2020-06-06 16:49:00.859545
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'b41c62db00a1'
down_revision = 'd37b1524c3fc'
branch_labels = None
depends_on = None
def upgrade():
try:
with op.batch_alter_table('printers') as batch:
batch.alter_column('vendor',
existing_type=sa.VARCHAR(length=100),
type_=sa.Integer(),
existing_nullable=True)
batch.alter_column('product',
existing_type=sa.VARCHAR(length=100),
type_=sa.Integer(),
existing_nullable=True)
batch.add_column(sa.Column('name', sa.String(100), nullable=True))
except Exception:
pass
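        # Swallowing the exception presumably makes the migration a no-op when
        # the columns already have the target types (an assumption; Alembic
        # itself does not require this).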
def downgrade():
with op.batch_alter_table('printers') as batch:
batch.alter_column('vendor',
existing_type=sa.Integer(),
type_=sa.VARCHAR(length=100),
existing_nullable=True)
batch.alter_column('product',
existing_type=sa.Integer(),
type_=sa.VARCHAR(length=100),
existing_nullable=True)
batch.drop_column('name')
| mpl-2.0 | -2,178,126,333,975,643,400 | 32.066667 | 78 | 0.530914 | false |
AOKP/external_chromium_org | ppapi/c/documentation/doxy_cleanup.py | 177 | 4451 | #!/usr/bin/python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''This utility cleans up the html files as emitted by doxygen so
that they are suitable for publication on a Google documentation site.
'''
import optparse
import os
import re
import shutil
import string
import sys
try:
from BeautifulSoup import BeautifulSoup, Tag
except (ImportError, NotImplementedError):
print ("This tool requires the BeautifulSoup package "
"(see http://www.crummy.com/software/BeautifulSoup/).\n"
"Make sure that the file BeautifulSoup.py is either in this directory "
"or is available in your PYTHON_PATH")
raise
class HTMLFixer(object):
'''This class cleans up the html strings as produced by Doxygen
'''
def __init__(self, html):
self.soup = BeautifulSoup(html)
def FixTableHeadings(self):
'''Fixes the doxygen table headings.
This includes:
- Using bare <h2> title row instead of row embedded in <tr><td> in table
- Putting the "name" attribute into the "id" attribute of the <tr> tag.
- Splitting up tables into multiple separate tables if a table
heading appears in the middle of a table.
For example, this html:
<table>
<tr><td colspan="2"><h2><a name="pub-attribs"></a>
Data Fields List</h2></td></tr>
...
</table>
would be converted to this:
<h2>Data Fields List</h2>
<table>
...
</table>
'''
table_headers = []
for tag in self.soup.findAll('tr'):
if tag.td and tag.td.h2 and tag.td.h2.a and tag.td.h2.a['name']:
#tag['id'] = tag.td.h2.a['name']
tag.string = tag.td.h2.a.next
tag.name = 'h2'
table_headers.append(tag)
# reverse the list so that earlier tags don't delete later tags
table_headers.reverse()
# Split up tables that have multiple table header (th) rows
for tag in table_headers:
print "Header tag: %s is %s" % (tag.name, tag.string.strip())
# Is this a heading in the middle of a table?
if tag.findPreviousSibling('tr') and tag.parent.name == 'table':
print "Splitting Table named %s" % tag.string.strip()
table = tag.parent
table_parent = table.parent
table_index = table_parent.contents.index(table)
new_table = Tag(self.soup, name='table', attrs=table.attrs)
table_parent.insert(table_index + 1, new_table)
tag_index = table.contents.index(tag)
for index, row in enumerate(table.contents[tag_index:]):
new_table.insert(index, row)
# Now move the <h2> tag to be in front of the <table> tag
assert tag.parent.name == 'table'
table = tag.parent
table_parent = table.parent
table_index = table_parent.contents.index(table)
table_parent.insert(table_index, tag)
def RemoveTopHeadings(self):
'''Removes <div> sections with a header, tabs, or navpath class attribute'''
header_tags = self.soup.findAll(
name='div',
attrs={'class' : re.compile('^(header|tabs[0-9]*|navpath)$')})
[tag.extract() for tag in header_tags]
def FixAll(self):
self.FixTableHeadings()
self.RemoveTopHeadings()
def __str__(self):
return str(self.soup)
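# Minimal standalone usage sketch (mirrors what main() does below; the file
# path is hypothetical):
#
#   with open('group__structs.html') as f:
#     fixer = HTMLFixer(f.read())
#   fixer.FixAll()
#   with open('group__structs.html', 'w') as f:
#     f.write(str(fixer))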
def main():
'''Main entry for the doxy_cleanup utility
doxy_cleanup takes a list of html files and modifies them in place.'''
parser = optparse.OptionParser(usage='Usage: %prog [options] files...')
parser.add_option('-m', '--move', dest='move', action='store_true',
default=False, help='move html files to "original_html"')
options, files = parser.parse_args()
if not files:
parser.print_usage()
return 1
for filename in files:
try:
with open(filename, 'r') as file:
html = file.read()
print "Processing %s" % filename
fixer = HTMLFixer(html)
fixer.FixAll()
with open(filename, 'w') as file:
file.write(str(fixer))
if options.move:
new_directory = os.path.join(
os.path.dirname(os.path.dirname(filename)), 'original_html')
if not os.path.exists(new_directory):
os.mkdir(new_directory)
shutil.move(filename, new_directory)
except:
print "Error while processing %s" % filename
raise
return 0
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause | 6,580,175,742,424,933,000 | 30.34507 | 80 | 0.638508 | false |
valmynd/MediaFetcher | src/plugins/youtube_dl/test/test_compat.py | 1 | 5072 | #!/usr/bin/env python
# coding: utf-8
from __future__ import unicode_literals
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from youtube_dl.compat import (
compat_getenv,
compat_setenv,
compat_etree_fromstring,
compat_expanduser,
compat_shlex_split,
compat_str,
compat_struct_unpack,
compat_urllib_parse_unquote,
compat_urllib_parse_unquote_plus,
compat_urllib_parse_urlencode,
)
class TestCompat(unittest.TestCase):
def test_compat_getenv(self):
test_str = 'тест'
compat_setenv('YOUTUBE_DL_COMPAT_GETENV', test_str)
self.assertEqual(compat_getenv('YOUTUBE_DL_COMPAT_GETENV'), test_str)
def test_compat_setenv(self):
test_var = 'YOUTUBE_DL_COMPAT_SETENV'
test_str = 'тест'
compat_setenv(test_var, test_str)
compat_getenv(test_var)
self.assertEqual(compat_getenv(test_var), test_str)
def test_compat_expanduser(self):
old_home = os.environ.get('HOME')
test_str = r'C:\Documents and Settings\тест\Application Data'
compat_setenv('HOME', test_str)
self.assertEqual(compat_expanduser('~'), test_str)
compat_setenv('HOME', old_home or '')
def test_all_present(self):
import youtube_dl.compat
all_names = youtube_dl.compat.__all__
present_names = set(filter(
lambda c: '_' in c and not c.startswith('_'),
dir(youtube_dl.compat))) - set(['unicode_literals'])
self.assertEqual(all_names, sorted(present_names))
def test_compat_urllib_parse_unquote(self):
self.assertEqual(compat_urllib_parse_unquote('abc%20def'), 'abc def')
self.assertEqual(compat_urllib_parse_unquote('%7e/abc+def'), '~/abc+def')
self.assertEqual(compat_urllib_parse_unquote(''), '')
self.assertEqual(compat_urllib_parse_unquote('%'), '%')
self.assertEqual(compat_urllib_parse_unquote('%%'), '%%')
self.assertEqual(compat_urllib_parse_unquote('%%%'), '%%%')
self.assertEqual(compat_urllib_parse_unquote('%2F'), '/')
self.assertEqual(compat_urllib_parse_unquote('%2f'), '/')
self.assertEqual(compat_urllib_parse_unquote('%E6%B4%A5%E6%B3%A2'), '津波')
self.assertEqual(
compat_urllib_parse_unquote('''<meta property="og:description" content="%E2%96%81%E2%96%82%E2%96%83%E2%96%84%25%E2%96%85%E2%96%86%E2%96%87%E2%96%88" />
%<a href="https://ar.wikipedia.org/wiki/%D8%AA%D8%B3%D9%88%D9%86%D8%A7%D9%85%D9%8A">%a'''),
'''<meta property="og:description" content="▁▂▃▄%▅▆▇█" />
%<a href="https://ar.wikipedia.org/wiki/تسونامي">%a''')
self.assertEqual(
compat_urllib_parse_unquote(
'''%28%5E%E2%97%A3_%E2%97%A2%5E%29%E3%81%A3%EF%B8%BB%E3%83%87%E2%95%90%E4%B8%80 %E2%87%80 %E2%87%80 %E2%87%80 %E2%87%80 %E2%87%80 %E2%86%B6%I%Break%25Things%'''),
'''(^◣_◢^)っ︻デ═一 ⇀ ⇀ ⇀ ⇀ ⇀ ↶%I%Break%Things%''')
def test_compat_urllib_parse_unquote_plus(self):
self.assertEqual(compat_urllib_parse_unquote_plus('abc%20def'), 'abc def')
self.assertEqual(compat_urllib_parse_unquote_plus('%7e/abc+def'), '~/abc def')
def test_compat_urllib_parse_urlencode(self):
self.assertEqual(compat_urllib_parse_urlencode({'abc': 'def'}), 'abc=def')
self.assertEqual(compat_urllib_parse_urlencode({'abc': b'def'}), 'abc=def')
self.assertEqual(compat_urllib_parse_urlencode({b'abc': 'def'}), 'abc=def')
self.assertEqual(compat_urllib_parse_urlencode({b'abc': b'def'}), 'abc=def')
self.assertEqual(compat_urllib_parse_urlencode([('abc', 'def')]), 'abc=def')
self.assertEqual(compat_urllib_parse_urlencode([('abc', b'def')]), 'abc=def')
self.assertEqual(compat_urllib_parse_urlencode([(b'abc', 'def')]), 'abc=def')
self.assertEqual(compat_urllib_parse_urlencode([(b'abc', b'def')]), 'abc=def')
def test_compat_shlex_split(self):
self.assertEqual(compat_shlex_split('-option "one two"'), ['-option', 'one two'])
self.assertEqual(compat_shlex_split('-option "one\ntwo" \n -flag'), ['-option', 'one\ntwo', '-flag'])
self.assertEqual(compat_shlex_split('-val 中文'), ['-val', '中文'])
def test_compat_etree_fromstring(self):
xml = '''
<root foo="bar" spam="中文">
<normal>foo</normal>
<chinese>中文</chinese>
<foo><bar>spam</bar></foo>
</root>
'''
doc = compat_etree_fromstring(xml.encode('utf-8'))
self.assertTrue(isinstance(doc.attrib['foo'], compat_str))
self.assertTrue(isinstance(doc.attrib['spam'], compat_str))
self.assertTrue(isinstance(doc.find('normal').text, compat_str))
self.assertTrue(isinstance(doc.find('chinese').text, compat_str))
self.assertTrue(isinstance(doc.find('foo/bar').text, compat_str))
def test_compat_etree_fromstring_doctype(self):
xml = '''<?xml version="1.0"?>
<!DOCTYPE smil PUBLIC "-//W3C//DTD SMIL 2.0//EN" "http://www.w3.org/2001/SMIL20/SMIL20.dtd">
<smil xmlns="http://www.w3.org/2001/SMIL20/Language"></smil>'''
compat_etree_fromstring(xml)
def test_struct_unpack(self):
self.assertEqual(compat_struct_unpack('!B', b'\x00'), (0,))
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | 7,772,701,422,566,580,000 | 40.591667 | 184 | 0.670607 | false |
Bitl/RBXLegacy-src | Cut/RBXLegacyDiscordBot/lib/youtube_dl/extractor/porn91.py | 40 | 1888 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
parse_duration,
int_or_none,
ExtractorError,
)
class Porn91IE(InfoExtractor):
IE_NAME = '91porn'
_VALID_URL = r'(?:https?://)(?:www\.|)91porn\.com/.+?\?viewkey=(?P<id>[\w\d]+)'
_TEST = {
'url': 'http://91porn.com/view_video.php?viewkey=7e42283b4f5ab36da134',
'md5': '7fcdb5349354f40d41689bd0fa8db05a',
'info_dict': {
'id': '7e42283b4f5ab36da134',
'title': '18岁大一漂亮学妹,水嫩性感,再爽一次!',
'ext': 'mp4',
'duration': 431,
'age_limit': 18,
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
self._set_cookie('91porn.com', 'language', 'cn_CN')
webpage = self._download_webpage(
'http://91porn.com/view_video.php?viewkey=%s' % video_id, video_id)
if '作为游客,你每天只可观看10个视频' in webpage:
raise ExtractorError('91 Porn says: Daily limit 10 videos exceeded', expected=True)
title = self._search_regex(
r'<div id="viewvideo-title">([^<]+)</div>', webpage, 'title')
title = title.replace('\n', '')
info_dict = self._parse_html5_media_entries(url, webpage, video_id)[0]
duration = parse_duration(self._search_regex(
r'时长:\s*</span>\s*(\d+:\d+)', webpage, 'duration', fatal=False))
comment_count = int_or_none(self._search_regex(
r'留言:\s*</span>\s*(\d+)', webpage, 'comment count', fatal=False))
info_dict.update({
'id': video_id,
'title': title,
'duration': duration,
'comment_count': comment_count,
'age_limit': self._rta_search(webpage),
})
return info_dict
| gpl-3.0 | 3,025,237,762,915,472,400 | 30.275862 | 95 | 0.551268 | false |
jshum/dd-agent | tests/checks/mock/test_supervisord.py | 37 | 18752 | # stdlib
from socket import socket
import unittest
import xmlrpclib
# 3p
from mock import patch
# project
from checks import AgentCheck
from tests.checks.common import get_check
class TestSupervisordCheck(unittest.TestCase):
TEST_CASES = [{
'yaml': """
init_config:
instances:
- name: server1
host: localhost
port: 9001""",
'expected_instances': [{
'host': 'localhost',
'name': 'server1',
'port': 9001
}],
'expected_metrics': {
'server1': [
('supervisord.process.count', 1, {'type': 'gauge', 'tags': ['supervisord_server:server1', 'status:up']}),
('supervisord.process.count', 1, {'type': 'gauge', 'tags': ['supervisord_server:server1', 'status:down']}),
('supervisord.process.count', 1, {'type': 'gauge', 'tags': ['supervisord_server:server1', 'status:unknown']}),
('supervisord.process.uptime', 0, {'type': 'gauge', 'tags': ['supervisord_server:server1', 'supervisord_process:python']}),
('supervisord.process.uptime', 125, {'type': 'gauge', 'tags': ['supervisord_server:server1', 'supervisord_process:mysql']}),
('supervisord.process.uptime', 0, {'type': 'gauge', 'tags': ['supervisord_server:server1', 'supervisord_process:java']})
]
},
'expected_service_checks': {
'server1': [{
'status': AgentCheck.OK,
'tags': ['supervisord_server:server1'],
'check': 'supervisord.can_connect',
}, {
'status': AgentCheck.OK,
'tags': ['supervisord_server:server1', 'supervisord_process:mysql'],
'check': 'supervisord.process.status'
}, {
'status': AgentCheck.CRITICAL,
'tags': ['supervisord_server:server1', 'supervisord_process:java'],
'check': 'supervisord.process.status'
}, {
'status': AgentCheck.UNKNOWN,
'tags': ['supervisord_server:server1', 'supervisord_process:python'],
'check': 'supervisord.process.status'
}]
}
}, {
'yaml': """
init_config:
instances:
- name: server0
host: localhost
port: 9001
user: user
pass: pass
proc_names:
- apache2
- webapp
- name: server1
host: 10.60.130.82""",
'expected_instances': [{
'name': 'server0',
'host': 'localhost',
'port': 9001,
'user': 'user',
'pass': 'pass',
'proc_names': ['apache2', 'webapp'],
}, {
'host': '10.60.130.82',
'name': 'server1'
}],
'expected_metrics': {
'server0': [
('supervisord.process.count', 0, {'type': 'gauge', 'tags': ['supervisord_server:server0', 'status:up']}),
('supervisord.process.count', 2, {'type': 'gauge', 'tags': ['supervisord_server:server0', 'status:down']}),
('supervisord.process.count', 0, {'type': 'gauge', 'tags': ['supervisord_server:server0', 'status:unknown']}),
('supervisord.process.uptime', 0, {'type': 'gauge', 'tags': ['supervisord_server:server0', 'supervisord_process:apache2']}),
('supervisord.process.uptime', 2, {'type': 'gauge', 'tags': ['supervisord_server:server0', 'supervisord_process:webapp']}),
],
'server1': [
('supervisord.process.count', 0, {'type': 'gauge', 'tags': ['supervisord_server:server1', 'status:up']}),
('supervisord.process.count', 1, {'type': 'gauge', 'tags': ['supervisord_server:server1', 'status:down']}),
('supervisord.process.count', 0, {'type': 'gauge', 'tags': ['supervisord_server:server1', 'status:unknown']}),
('supervisord.process.uptime', 0, {'type': 'gauge', 'tags': ['supervisord_server:server1', 'supervisord_process:ruby']})
]
},
'expected_service_checks': {
'server0': [{
'status': AgentCheck.OK,
'tags': ['supervisord_server:server0'],
'check': 'supervisord.can_connect',
}, {
'status': AgentCheck.CRITICAL,
'tags': ['supervisord_server:server0', 'supervisord_process:apache2'],
'check': 'supervisord.process.status'
}, {
'status': AgentCheck.CRITICAL,
'tags': ['supervisord_server:server0', 'supervisord_process:webapp'],
'check': 'supervisord.process.status'
}],
'server1': [{
'status': AgentCheck.OK,
'tags': ['supervisord_server:server1'],
'check': 'supervisord.can_connect',
}, {
'status': AgentCheck.CRITICAL,
'tags': ['supervisord_server:server1', 'supervisord_process:ruby'],
'check': 'supervisord.process.status'
}]
}
}, {
'yaml': """
init_config:
instances:
- name: server0
host: invalid_host
port: 9009""",
'expected_instances': [{
'name': 'server0',
'host': 'invalid_host',
'port': 9009
}],
'error_message': """Cannot connect to http://invalid_host:9009. Make sure supervisor is running and XML-RPC inet interface is enabled."""
}, {
'yaml': """
init_config:
instances:
- name: server0
host: localhost
port: 9010
user: invalid_user
pass: invalid_pass""",
'expected_instances': [{
'name': 'server0',
'host': 'localhost',
'port': 9010,
'user': 'invalid_user',
'pass': 'invalid_pass'
}],
'error_message': """Username or password to server0 are incorrect."""
}, {
'yaml': """
init_config:
instances:
- name: server0
host: localhost
port: 9001
proc_names:
- mysql
- invalid_process""",
'expected_instances': [{
'name': 'server0',
'host': 'localhost',
'port': 9001,
'proc_names': ['mysql', 'invalid_process']
}],
'expected_metrics': {
'server0': [
('supervisord.process.count', 1, {'type': 'gauge', 'tags': ['supervisord_server:server0', 'status:up']}),
('supervisord.process.count', 0, {'type': 'gauge', 'tags': ['supervisord_server:server0', 'status:down']}),
('supervisord.process.count', 0, {'type': 'gauge', 'tags': ['supervisord_server:server0', 'status:unknown']}),
('supervisord.process.uptime', 125, {'type': 'gauge', 'tags': ['supervisord_server:server0', 'supervisord_process:mysql']})
]
},
'expected_service_checks': {
'server0': [{
'status': AgentCheck.OK,
'tags': ['supervisord_server:server0'],
'check': 'supervisord.can_connect',
}, {
'status': AgentCheck.OK,
'tags': ['supervisord_server:server0', 'supervisord_process:mysql'],
'check': 'supervisord.process.status'
}]
}
}, {
'yaml': """
init_config:
instances:
- name: server0
host: localhost
port: 9001
proc_regex:
- '^mysq.$'
- invalid_process""",
'expected_instances': [{
'name': 'server0',
'host': 'localhost',
'port': 9001,
'proc_regex': ['^mysq.$', 'invalid_process']
}],
'expected_metrics': {
'server0': [
('supervisord.process.count', 1,
{'type': 'gauge', 'tags': ['supervisord_server:server0', 'status:up']}),
('supervisord.process.count', 0,
{'type': 'gauge', 'tags': ['supervisord_server:server0', 'status:down']}),
('supervisord.process.count', 0,
{'type': 'gauge', 'tags': ['supervisord_server:server0', 'status:unknown']}),
('supervisord.process.uptime', 125, {'type': 'gauge',
'tags': ['supervisord_server:server0',
'supervisord_process:mysql']})
]
},
'expected_service_checks': {
'server0': [{
'status': AgentCheck.OK,
'tags': ['supervisord_server:server0'],
'check': 'supervisord.can_connect',
}, {
'status': AgentCheck.OK,
'tags': ['supervisord_server:server0', 'supervisord_process:mysql'],
'check': 'supervisord.process.status'
}]
}
}]
def setUp(self):
self.patcher = patch('xmlrpclib.Server', self.mock_server)
self.patcher.start()
def tearDown(self):
self.patcher.stop()
# Integration Test #####################################################
def test_check(self):
"""Integration test for supervisord check. Using a mocked supervisord."""
for tc in self.TEST_CASES:
check, instances = get_check('supervisord', tc['yaml'])
self.assertTrue(check is not None, msg=check)
self.assertEquals(tc['expected_instances'], instances)
for instance in instances:
name = instance['name']
try:
# Run the check
check.check(instance)
except Exception, e:
if 'error_message' in tc: # excepted error
self.assertEquals(str(e), tc['error_message'])
else:
self.assertTrue(False, msg=str(e))
else:
# Assert that the check collected the right metrics
expected_metrics = tc['expected_metrics'][name]
self.assert_metrics(expected_metrics, check.get_metrics())
# Assert that the check generated the right service checks
expected_service_checks = tc['expected_service_checks'][name]
self.assert_service_checks(expected_service_checks,
check.get_service_checks())
# Unit Tests ###########################################################
def test_build_message(self):
"""Unit test supervisord build service check message."""
process = {
'now': 1414815513,
'group': 'mysql',
'description': 'pid 787, uptime 0:02:05',
'pid': 787,
'stderr_logfile': '/var/log/supervisor/mysql-stderr---supervisor-3ATI82.log',
'stop': 0,
'statename': 'RUNNING',
'start': 1414815388,
'state': 20,
'stdout_logfile': '/var/log/mysql/mysql.log',
'logfile': '/var/log/mysql/mysql.log',
'exitstatus': 0,
'spawnerr': '',
'name': 'mysql'
}
expected_message = """Current time: 2014-11-01 04:18:33
Process name: mysql
Process group: mysql
Description: pid 787, uptime 0:02:05
Error log file: /var/log/supervisor/mysql-stderr---supervisor-3ATI82.log
Stdout log file: /var/log/mysql/mysql.log
Log file: /var/log/mysql/mysql.log
State: RUNNING
Start time: 2014-11-01 04:16:28
Stop time: \nExit Status: 0"""
check, _ = get_check('supervisord', self.TEST_CASES[0]['yaml'])
self.assertEquals(expected_message, check._build_message(process))
# Helper Methods #######################################################
@staticmethod
def mock_server(url):
return MockXmlRcpServer(url)
def assert_metrics(self, expected, actual):
actual = [TestSupervisordCheck.norm_metric(metric) for metric in actual]
self.assertEquals(len(actual), len(expected), msg='Invalid # metrics reported.\n'
'Expected: {0}. Found: {1}'.format(len(expected), len(actual)))
self.assertTrue(all([expected_metric in actual for expected_metric in expected]),
msg='Reported metrics are incorrect.\nExpected: {0}.\n'
'Found: {1}'.format(expected, actual))
def assert_service_checks(self, expected, actual):
actual = [TestSupervisordCheck.norm_service_check(service_check)
for service_check in actual]
self.assertEquals(len(actual), len(expected), msg='Invalid # service checks reported.'
'\nExpected: {0}. Found: {1}.'.format(expected, actual))
self.assertTrue(all([expected_service_check in actual
for expected_service_check in expected]),
msg='Reported service checks are incorrect.\nExpected:{0}\n'
'Found:{1}'.format(expected, actual))
@staticmethod
def norm_metric(metric):
'''Removes hostname and timestamp'''
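        # metric tuples are assumed to be (name, timestamp, value, attributes);
        # dropping the timestamp and hostname keeps comparisons deterministic.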
metric[3].pop('hostname')
return (metric[0], metric[2], metric[3])
@staticmethod
def norm_service_check(service_check):
'''Removes timestamp, host_name, message and id'''
for field in ['timestamp', 'host_name', 'message', 'id']:
service_check.pop(field)
return service_check
class MockXmlRcpServer:
"""Class that mocks an XML RPC server. Initialized using a mocked
supervisord server url, which is used to initialize the supervisord
server.
"""
def __init__(self, url):
self.supervisor = MockSupervisor(url)
class MockSupervisor:
"""Class that mocks a supervisord sever. Initialized using the server url
and mocks process methods providing mocked process information for testing
purposes.
"""
MOCK_PROCESSES = {
'http://localhost:9001/RPC2': [{
'now': 1414815513,
'group': 'mysql',
'description': 'pid 787, uptime 0:02:05',
'pid': 787,
'stderr_logfile': '/var/log/supervisor/mysql-stderr---supervisor-3ATI82.log',
'stop': 0,
'statename': 'RUNNING',
'start': 1414815388,
'state': 20,
'stdout_logfile': '/var/log/mysql/mysql.log',
'logfile': '/var/log/mysql/mysql.log',
'exitstatus': 0,
'spawnerr': '',
'name': 'mysql'
}, {
'now': 1414815738,
'group': 'java',
'description': 'Nov 01 04:22 AM',
'pid': 0,
'stderr_logfile': '/var/log/supervisor/java-stderr---supervisor-lSdcKZ.log',
'stop': 1414815722,
'statename': 'STOPPED',
'start': 1414815388,
'state': 0,
'stdout_logfile': '/var/log/java/java.log',
'logfile': '/var/log/java/java.log',
'exitstatus': 21,
'spawnerr': '',
'name': 'java'
}, {
'now': 1414815738,
'group': 'python',
'description': '',
'pid': 2765,
'stderr_logfile': '/var/log/supervisor/python-stderr---supervisor-vFzxIg.log',
'stop': 1414815737,
'statename': 'STARTING',
'start': 1414815737,
'state': 10,
'stdout_logfile': '/var/log/python/python.log',
'logfile': '/var/log/python/python.log',
'exitstatus': 0,
'spawnerr': '',
'name': 'python'
}],
'http://user:pass@localhost:9001/RPC2': [{
'now': 1414869824,
'group': 'apache2',
'description': 'Exited too quickly (process log may have details)',
'pid': 0,
'stderr_logfile': '/var/log/supervisor/apache2-stderr---supervisor-0PkXWd.log',
'stop': 1414867047,
'statename': 'FATAL',
'start': 1414867047,
'state': 200,
'stdout_logfile': '/var/log/apache2/apache2.log',
'logfile': '/var/log/apache2/apache2.log',
'exitstatus': 0,
'spawnerr': 'Exited too quickly (process log may have details)',
'name': 'apache2'
}, {
'now': 1414871104,
'group': 'webapp',
'description': '',
'pid': 17600,
'stderr_logfile': '/var/log/supervisor/webapp-stderr---supervisor-onZK__.log',
'stop': 1414871101,
'statename': 'STOPPING',
'start': 1414871102,
'state': 40,
'stdout_logfile': '/var/log/company/webapp.log',
'logfile': '/var/log/company/webapp.log',
'exitstatus': 1,
'spawnerr': '',
'name': 'webapp'
}],
'http://10.60.130.82:9001/RPC2': [{
'now': 1414871588,
'group': 'ruby',
'description': 'Exited too quickly (process log may have details)',
'pid': 0,
'stderr_logfile': '/var/log/supervisor/ruby-stderr---supervisor-BU7Wat.log',
'stop': 1414871588,
'statename': 'BACKOFF',
'start': 1414871588,
'state': 30,
'stdout_logfile': '/var/log/ruby/ruby.log',
'logfile': '/var/log/ruby/ruby.log',
'exitstatus': 0,
'spawnerr': 'Exited too quickly (process log may have details)',
'name': 'ruby'
}]
}
def __init__(self, url):
self.url = url
def getAllProcessInfo(self):
self._validate_request()
return self.MOCK_PROCESSES[self.url]
def getProcessInfo(self, proc_name):
self._validate_request(proc=proc_name)
for proc in self.MOCK_PROCESSES[self.url]:
if proc['name'] == proc_name:
return proc
raise Exception('Process not found: %s' % proc_name)
def _validate_request(self, proc=None):
'''Validates request and simulates errors when not valid'''
if 'invalid_host' in self.url:
# Simulate connecting to an invalid host/port in order to
# raise `socket.error: [Errno 111] Connection refused`
socket().connect(('localhost', 38837))
elif 'invalid_pass' in self.url:
# Simulate xmlrpc exception for invalid credentials
raise xmlrpclib.ProtocolError(self.url[7:], 401,
'Unauthorized', None)
elif proc is not None and 'invalid' in proc:
# Simulate xmlrpc exception for process not found
raise xmlrpclib.Fault(10, 'BAD_NAME')
| bsd-3-clause | -5,686,121,151,280,651,000 | 38.230126 | 145 | 0.515092 | false |
Scalr/libcloud | libcloud/storage/drivers/atmos.py | 12 | 17006 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import base64
import hashlib
import hmac
import time
from libcloud.utils.py3 import PY3
from libcloud.utils.py3 import b
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import next
from libcloud.utils.py3 import urlparse
from libcloud.utils.py3 import urlencode
from libcloud.utils.py3 import urlquote
from libcloud.utils.py3 import urlunquote
if PY3:
from io import FileIO as file
from libcloud.utils.files import read_in_chunks, guess_file_mime_type
from libcloud.common.base import ConnectionUserAndKey, XmlResponse
from libcloud.common.types import LibcloudError
from libcloud.storage.base import Object, Container, StorageDriver, CHUNK_SIZE
from libcloud.storage.types import ContainerAlreadyExistsError, \
ContainerDoesNotExistError, ContainerIsNotEmptyError, \
ObjectDoesNotExistError
def collapse(s):
return ' '.join([x for x in s.split(' ') if x])
class AtmosError(LibcloudError):
def __init__(self, code, message, driver=None):
super(AtmosError, self).__init__(value=message, driver=driver)
self.code = code
class AtmosResponse(XmlResponse):
def success(self):
return self.status in (httplib.OK, httplib.CREATED, httplib.NO_CONTENT,
httplib.PARTIAL_CONTENT)
def parse_error(self):
tree = self.parse_body()
if tree is None:
return None
code = int(tree.find('Code').text)
message = tree.find('Message').text
raise AtmosError(code=code, message=message,
driver=self.connection.driver)
class AtmosConnection(ConnectionUserAndKey):
responseCls = AtmosResponse
def add_default_headers(self, headers):
headers['x-emc-uid'] = self.user_id
headers['Date'] = time.strftime('%a, %d %b %Y %H:%M:%S GMT',
time.gmtime())
headers['x-emc-date'] = headers['Date']
if 'Content-Type' not in headers:
headers['Content-Type'] = 'application/octet-stream'
if 'Accept' not in headers:
headers['Accept'] = '*/*'
return headers
def pre_connect_hook(self, params, headers):
headers['x-emc-signature'] = self._calculate_signature(params, headers)
return params, headers
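    # The signature below follows the EMC Atmos REST scheme as implemented here:
    # HTTP verb, Content-Type, Range and Date headers, the lowercased request
    # path (plus query string), and the sorted x-emc-* headers are joined with
    # newlines and HMAC-SHA1 signed with the base64-decoded shared secret.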
def _calculate_signature(self, params, headers):
pathstring = urlunquote(self.action)
if pathstring.startswith(self.driver.path):
pathstring = pathstring[len(self.driver.path):]
if params:
if type(params) is dict:
params = list(params.items())
pathstring += '?' + urlencode(params)
pathstring = pathstring.lower()
xhdrs = [(k, v) for k, v in list(headers.items()) if
k.startswith('x-emc-')]
xhdrs.sort(key=lambda x: x[0])
signature = [
self.method,
headers.get('Content-Type', ''),
headers.get('Range', ''),
headers.get('Date', ''),
pathstring,
]
signature.extend([k + ':' + collapse(v) for k, v in xhdrs])
signature = '\n'.join(signature)
key = base64.b64decode(self.key)
signature = hmac.new(b(key), b(signature), hashlib.sha1).digest()
return base64.b64encode(b(signature)).decode('utf-8')
class AtmosDriver(StorageDriver):
connectionCls = AtmosConnection
host = None
path = None
api_name = 'atmos'
supports_chunked_encoding = True
website = 'http://atmosonline.com/'
name = 'atmos'
DEFAULT_CDN_TTL = 60 * 60 * 24 * 7 # 1 week
def __init__(self, key, secret=None, secure=True, host=None, port=None):
host = host or self.host
super(AtmosDriver, self).__init__(key, secret, secure, host, port)
def iterate_containers(self):
result = self.connection.request(self._namespace_path(''))
entries = self._list_objects(result.object, object_type='directory')
for entry in entries:
extra = {
'object_id': entry['id']
}
yield Container(entry['name'], extra, self)
def get_container(self, container_name):
path = self._namespace_path(container_name) + '/?metadata/system'
try:
result = self.connection.request(path)
except AtmosError:
e = sys.exc_info()[1]
if e.code != 1003:
raise
raise ContainerDoesNotExistError(e, self, container_name)
meta = self._emc_meta(result)
extra = {
'object_id': meta['objectid']
}
return Container(container_name, extra, self)
def create_container(self, container_name):
path = self._namespace_path(container_name) + '/'
try:
self.connection.request(path, method='POST')
except AtmosError:
e = sys.exc_info()[1]
if e.code != 1016:
raise
raise ContainerAlreadyExistsError(e, self, container_name)
return self.get_container(container_name)
def delete_container(self, container):
try:
self.connection.request(self._namespace_path(container.name) + '/',
method='DELETE')
except AtmosError:
e = sys.exc_info()[1]
if e.code == 1003:
raise ContainerDoesNotExistError(e, self, container.name)
elif e.code == 1023:
raise ContainerIsNotEmptyError(e, self, container.name)
return True
def get_object(self, container_name, object_name):
container = self.get_container(container_name)
object_name_cleaned = self._clean_object_name(object_name)
path = self._namespace_path(container_name) + '/' + object_name_cleaned
try:
result = self.connection.request(path + '?metadata/system')
system_meta = self._emc_meta(result)
result = self.connection.request(path + '?metadata/user')
user_meta = self._emc_meta(result)
except AtmosError:
e = sys.exc_info()[1]
if e.code != 1003:
raise
raise ObjectDoesNotExistError(e, self, object_name)
last_modified = time.strptime(system_meta['mtime'],
'%Y-%m-%dT%H:%M:%SZ')
last_modified = time.strftime('%a, %d %b %Y %H:%M:%S GMT',
last_modified)
extra = {
'object_id': system_meta['objectid'],
'last_modified': last_modified
}
data_hash = user_meta.pop('md5', '')
return Object(object_name, int(system_meta['size']), data_hash, extra,
user_meta, container, self)
def upload_object(self, file_path, container, object_name, extra=None,
verify_hash=True):
method = 'PUT'
extra = extra or {}
object_name_cleaned = self._clean_object_name(object_name)
request_path = self._namespace_path(container.name) + '/' +\
object_name_cleaned
content_type = extra.get('content_type', None)
try:
self.connection.request(request_path + '?metadata/system')
except AtmosError:
e = sys.exc_info()[1]
if e.code != 1003:
raise
method = 'POST'
result_dict = self._upload_object(
object_name=object_name,
content_type=content_type,
request_path=request_path,
request_method=method,
headers={}, file_path=file_path)
bytes_transferred = result_dict['bytes_transferred']
if extra is None:
meta_data = {}
else:
meta_data = extra.get('meta_data', {})
meta_data['md5'] = result_dict['data_hash']
user_meta = ', '.join([k + '=' + str(v) for k, v in
list(meta_data.items())])
self.connection.request(request_path + '?metadata/user', method='POST',
headers={'x-emc-meta': user_meta})
result = self.connection.request(request_path + '?metadata/system')
meta = self._emc_meta(result)
del meta_data['md5']
extra = {
'object_id': meta['objectid'],
'meta_data': meta_data,
}
return Object(object_name, bytes_transferred, result_dict['data_hash'],
extra, meta_data, container, self)
def upload_object_via_stream(self, iterator, container, object_name,
extra=None):
if isinstance(iterator, file):
iterator = iter(iterator)
data_hash = hashlib.md5()
generator = read_in_chunks(iterator, CHUNK_SIZE, True)
bytes_transferred = 0
try:
chunk = next(generator)
except StopIteration:
chunk = ''
path = self._namespace_path(container.name + '/' + object_name)
method = 'PUT'
if extra is not None:
content_type = extra.get('content_type', None)
else:
content_type = None
if not content_type:
content_type, _ = guess_file_mime_type(object_name)
if not content_type:
raise AttributeError(
'File content-type could not be guessed and' +
' no content_type value provided')
try:
self.connection.request(path + '?metadata/system')
except AtmosError:
e = sys.exc_info()[1]
if e.code != 1003:
raise
method = 'POST'
while True:
end = bytes_transferred + len(chunk) - 1
data_hash.update(b(chunk))
headers = {
'x-emc-meta': 'md5=' + data_hash.hexdigest(),
'Content-Type': content_type,
}
if len(chunk) > 0 and bytes_transferred > 0:
headers['Range'] = 'Bytes=%d-%d' % (bytes_transferred, end)
method = 'PUT'
result = self.connection.request(path, method=method, data=chunk,
headers=headers)
bytes_transferred += len(chunk)
try:
chunk = next(generator)
except StopIteration:
break
if len(chunk) == 0:
break
data_hash = data_hash.hexdigest()
if extra is None:
meta_data = {}
else:
meta_data = extra.get('meta_data', {})
meta_data['md5'] = data_hash
user_meta = ', '.join([k + '=' + str(v) for k, v in
list(meta_data.items())])
self.connection.request(path + '?metadata/user', method='POST',
headers={'x-emc-meta': user_meta})
result = self.connection.request(path + '?metadata/system')
meta = self._emc_meta(result)
extra = {
'object_id': meta['objectid'],
'meta_data': meta_data,
}
return Object(object_name, bytes_transferred, data_hash, extra,
meta_data, container, self)
def download_object(self, obj, destination_path, overwrite_existing=False,
delete_on_failure=True):
path = self._namespace_path(obj.container.name + '/' + obj.name)
response = self.connection.request(path, method='GET', raw=True)
return self._get_object(obj=obj, callback=self._save_object,
response=response,
callback_kwargs={
'obj': obj,
'response': response.response,
'destination_path': destination_path,
'overwrite_existing': overwrite_existing,
'delete_on_failure': delete_on_failure
},
success_status_code=httplib.OK)
def download_object_as_stream(self, obj, chunk_size=None):
path = self._namespace_path(obj.container.name + '/' + obj.name)
response = self.connection.request(path, method='GET', raw=True)
return self._get_object(obj=obj, callback=read_in_chunks,
response=response,
callback_kwargs={
'iterator': response.response,
'chunk_size': chunk_size
},
success_status_code=httplib.OK)
def delete_object(self, obj):
path = self._namespace_path(obj.container.name) + '/' +\
self._clean_object_name(obj.name)
try:
self.connection.request(path, method='DELETE')
except AtmosError:
e = sys.exc_info()[1]
if e.code != 1003:
raise
raise ObjectDoesNotExistError(e, self, obj.name)
return True
def enable_object_cdn(self, obj):
return True
def get_object_cdn_url(self, obj, expiry=None, use_object=False):
"""
Return an object CDN URL.
:param obj: Object instance
:type obj: :class:`Object`
        :param expiry: Expiration time as a Unix timestamp string; defaults to
                       now plus ``DEFAULT_CDN_TTL``
        :type expiry: ``str``
        :param use_object: If True, build the URL from the object id instead of
                           the namespace path
        :type use_object: ``bool``
:rtype: ``str``
"""
if use_object:
path = '/rest/objects' + obj.meta_data['object_id']
else:
path = '/rest/namespace/' + obj.container.name + '/' + obj.name
if self.secure:
protocol = 'https'
else:
protocol = 'http'
expiry = str(expiry or int(time.time()) + self.DEFAULT_CDN_TTL)
params = [
('uid', self.key),
('expires', expiry),
]
params.append(('signature', self._cdn_signature(path, params, expiry)))
params = urlencode(params)
path = self.path + path
return urlparse.urlunparse((protocol, self.host, path, '', params, ''))
def _cdn_signature(self, path, params, expiry):
key = base64.b64decode(self.secret)
signature = '\n'.join(['GET', path.lower(), self.key, expiry])
signature = hmac.new(key, signature, hashlib.sha1).digest()
return base64.b64encode(signature)
def _list_objects(self, tree, object_type=None):
listing = tree.find(self._emc_tag('DirectoryList'))
entries = []
for entry in listing.findall(self._emc_tag('DirectoryEntry')):
file_type = entry.find(self._emc_tag('FileType')).text
if object_type is not None and object_type != file_type:
continue
entries.append({
'id': entry.find(self._emc_tag('ObjectID')).text,
'type': file_type,
'name': entry.find(self._emc_tag('Filename')).text
})
return entries
def _clean_object_name(self, name):
return urlquote(name.encode('ascii'))
def _namespace_path(self, path):
return self.path + '/rest/namespace/' + urlquote(path.encode('ascii'))
def _object_path(self, object_id):
return self.path + '/rest/objects/' + object_id.encode('ascii')
@staticmethod
def _emc_tag(tag):
return '{http://www.emc.com/cos/}' + tag
def _emc_meta(self, response):
meta = response.headers.get('x-emc-meta', '')
if len(meta) == 0:
return {}
meta = meta.split(', ')
return dict([x.split('=', 1) for x in meta])
def iterate_container_objects(self, container):
headers = {'x-emc-include-meta': '1'}
path = self._namespace_path(container.name) + '/'
result = self.connection.request(path, headers=headers)
entries = self._list_objects(result.object, object_type='regular')
for entry in entries:
metadata = {'object_id': entry['id']}
yield Object(entry['name'], 0, '', {}, metadata, container, self)
| apache-2.0 | -2,901,010,467,178,901,500 | 35.337607 | 79 | 0.555569 | false |
haeusser/tensorflow | tensorflow/contrib/linalg/python/kernel_tests/linear_operator_composition_test.py | 13 | 8641 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import linalg as linalg_lib
from tensorflow.contrib.linalg.python.ops import linear_operator_test_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
linalg = linalg_lib
random_seed.set_random_seed(23)
rng = np.random.RandomState(0)
class SquareLinearOperatorCompositionTest(
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""Most tests done in the base class LinearOperatorDerivedClassTest."""
def setUp(self):
# Increase from 1e-6 to 1e-4
self._atol[dtypes.float32] = 1e-4
self._atol[dtypes.complex64] = 1e-4
self._rtol[dtypes.float32] = 1e-4
self._rtol[dtypes.complex64] = 1e-4
def _operator_and_mat_and_feed_dict(self, shape, dtype, use_placeholder):
sess = ops.get_default_session()
shape = list(shape)
# Either 1 or 2 matrices, depending.
num_operators = rng.randint(low=1, high=3)
matrices = [
linear_operator_test_util.random_positive_definite_matrix(
shape, dtype, force_well_conditioned=True)
for _ in range(num_operators)
]
if use_placeholder:
matrices_ph = [
array_ops.placeholder(dtype=dtype) for _ in range(num_operators)
]
# Evaluate here because (i) you cannot feed a tensor, and (ii)
# values are random and we want the same value used for both mat and
# feed_dict.
matrices = sess.run(matrices)
operator = linalg.LinearOperatorComposition(
[linalg.LinearOperatorMatrix(m_ph) for m_ph in matrices_ph])
feed_dict = {m_ph: m for (m_ph, m) in zip(matrices_ph, matrices)}
else:
operator = linalg.LinearOperatorComposition(
[linalg.LinearOperatorMatrix(m) for m in matrices])
feed_dict = None
# Convert back to Tensor. Needed if use_placeholder, since then we have
# already evaluated each matrix to a numpy array.
apply_order_list = list(reversed(matrices))
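    # LinearOperatorComposition([A_1, ..., A_n]) behaves like the matrix
    # product A_1 ... A_n, so the dense reference matrix is accumulated from
    # the rightmost factor outwards.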
mat = ops.convert_to_tensor(apply_order_list[0])
for other_mat in apply_order_list[1:]:
mat = math_ops.matmul(other_mat, mat)
return operator, mat, feed_dict
def test_is_x_flags(self):
    # Matrix with two positive eigenvalues, 1 and 1.
    # The matrix values do not affect auto-setting of the flags.
matrix = [[1., 0.], [1., 1.]]
operator = linalg.LinearOperatorComposition(
[linalg.LinearOperatorMatrix(matrix)],
is_positive_definite=True,
is_non_singular=True,
is_self_adjoint=False)
self.assertTrue(operator.is_positive_definite)
self.assertTrue(operator.is_non_singular)
self.assertFalse(operator.is_self_adjoint)
def test_is_non_singular_auto_set(self):
# Matrix with two positive eigenvalues, 11 and 8.
    # The matrix values do not affect auto-setting of the flags.
matrix = [[11., 0.], [1., 8.]]
operator_1 = linalg.LinearOperatorMatrix(matrix, is_non_singular=True)
operator_2 = linalg.LinearOperatorMatrix(matrix, is_non_singular=True)
operator = linalg.LinearOperatorComposition(
[operator_1, operator_2],
is_positive_definite=False, # No reason it HAS to be False...
is_non_singular=None)
self.assertFalse(operator.is_positive_definite)
self.assertTrue(operator.is_non_singular)
with self.assertRaisesRegexp(ValueError, "always non-singular"):
linalg.LinearOperatorComposition(
[operator_1, operator_2], is_non_singular=False)
def test_name(self):
matrix = [[11., 0.], [1., 8.]]
operator_1 = linalg.LinearOperatorMatrix(matrix, name="left")
operator_2 = linalg.LinearOperatorMatrix(matrix, name="right")
operator = linalg.LinearOperatorComposition([operator_1, operator_2])
self.assertEqual("left_o_right", operator.name)
def test_different_dtypes_raises(self):
operators = [
linalg.LinearOperatorMatrix(rng.rand(2, 3, 3)),
linalg.LinearOperatorMatrix(rng.rand(2, 3, 3).astype(np.float32))
]
with self.assertRaisesRegexp(TypeError, "same dtype"):
linalg.LinearOperatorComposition(operators)
def test_empty_operators_raises(self):
with self.assertRaisesRegexp(ValueError, "non-empty"):
linalg.LinearOperatorComposition([])
class NonSquareLinearOperatorCompositionTest(
linear_operator_test_util.NonSquareLinearOperatorDerivedClassTest):
"""Most tests done in the base class LinearOperatorDerivedClassTest."""
def setUp(self):
# Increase from 1e-6 to 1e-4
self._atol[dtypes.float32] = 1e-4
self._atol[dtypes.complex64] = 1e-4
self._rtol[dtypes.float32] = 1e-4
self._rtol[dtypes.complex64] = 1e-4
def _operator_and_mat_and_feed_dict(self, shape, dtype, use_placeholder):
sess = ops.get_default_session()
shape = list(shape)
# Test only the case of 2 matrices.
# The Square test uses either 1 or 2, so we have tested the case of 1 matrix
# sufficiently.
num_operators = 2
# Create 2 matrices/operators, A1, A2, which becomes A = A1 A2.
# Use inner dimension of 2.
k = 2
batch_shape = shape[:-2]
shape_1 = batch_shape + [shape[-2], k]
shape_2 = batch_shape + [k, shape[-1]]
matrices = [
linear_operator_test_util.random_normal(
shape_1, dtype=dtype), linear_operator_test_util.random_normal(
shape_2, dtype=dtype)
]
if use_placeholder:
matrices_ph = [
array_ops.placeholder(dtype=dtype) for _ in range(num_operators)
]
# Evaluate here because (i) you cannot feed a tensor, and (ii)
# values are random and we want the same value used for both mat and
# feed_dict.
matrices = sess.run(matrices)
operator = linalg.LinearOperatorComposition(
[linalg.LinearOperatorMatrix(m_ph) for m_ph in matrices_ph])
feed_dict = {m_ph: m for (m_ph, m) in zip(matrices_ph, matrices)}
else:
operator = linalg.LinearOperatorComposition(
[linalg.LinearOperatorMatrix(m) for m in matrices])
feed_dict = None
# Convert back to Tensor. Needed if use_placeholder, since then we have
# already evaluated each matrix to a numpy array.
apply_order_list = list(reversed(matrices))
mat = ops.convert_to_tensor(apply_order_list[0])
for other_mat in apply_order_list[1:]:
mat = math_ops.matmul(other_mat, mat)
return operator, mat, feed_dict
def test_static_shapes(self):
operators = [
linalg.LinearOperatorMatrix(rng.rand(2, 3, 4)),
linalg.LinearOperatorMatrix(rng.rand(2, 4, 5))
]
operator = linalg.LinearOperatorComposition(operators)
self.assertAllEqual((2, 3, 5), operator.shape)
def test_shape_tensors_when_statically_available(self):
operators = [
linalg.LinearOperatorMatrix(rng.rand(2, 3, 4)),
linalg.LinearOperatorMatrix(rng.rand(2, 4, 5))
]
operator = linalg.LinearOperatorComposition(operators)
with self.test_session():
self.assertAllEqual((2, 3, 5), operator.shape_tensor().eval())
def test_shape_tensors_when_only_dynamically_available(self):
mat_1 = rng.rand(1, 2, 3, 4)
mat_2 = rng.rand(1, 2, 4, 5)
mat_ph_1 = array_ops.placeholder(dtypes.float64)
mat_ph_2 = array_ops.placeholder(dtypes.float64)
feed_dict = {mat_ph_1: mat_1, mat_ph_2: mat_2}
operators = [
linalg.LinearOperatorMatrix(mat_ph_1),
linalg.LinearOperatorMatrix(mat_ph_2)
]
operator = linalg.LinearOperatorComposition(operators)
with self.test_session():
self.assertAllEqual(
(1, 2, 3, 5), operator.shape_tensor().eval(feed_dict=feed_dict))
if __name__ == "__main__":
test.main()
| apache-2.0 | -3,901,312,115,052,500,000 | 36.569565 | 80 | 0.680477 | false |
PLyczkowski/Sticky-Keymap | 2.74/python/lib/encodings/cp866.py | 272 | 34396 | """ Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP866.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp866',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
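# Usage sketch (illustrative; in practice the codec is looked up through the
# standard encodings search path rather than imported directly):
#   b'\x8f\xe0\xa8\xa2\xa5\xe2'.decode('cp866')   # -> 'Привет'
#   'Привет'.encode('cp866')                      # -> b'\x8f\xe0\xa8\xa2\xa5\xe2'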
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x0410, # CYRILLIC CAPITAL LETTER A
0x0081: 0x0411, # CYRILLIC CAPITAL LETTER BE
0x0082: 0x0412, # CYRILLIC CAPITAL LETTER VE
0x0083: 0x0413, # CYRILLIC CAPITAL LETTER GHE
0x0084: 0x0414, # CYRILLIC CAPITAL LETTER DE
0x0085: 0x0415, # CYRILLIC CAPITAL LETTER IE
0x0086: 0x0416, # CYRILLIC CAPITAL LETTER ZHE
0x0087: 0x0417, # CYRILLIC CAPITAL LETTER ZE
0x0088: 0x0418, # CYRILLIC CAPITAL LETTER I
0x0089: 0x0419, # CYRILLIC CAPITAL LETTER SHORT I
0x008a: 0x041a, # CYRILLIC CAPITAL LETTER KA
0x008b: 0x041b, # CYRILLIC CAPITAL LETTER EL
0x008c: 0x041c, # CYRILLIC CAPITAL LETTER EM
0x008d: 0x041d, # CYRILLIC CAPITAL LETTER EN
0x008e: 0x041e, # CYRILLIC CAPITAL LETTER O
0x008f: 0x041f, # CYRILLIC CAPITAL LETTER PE
0x0090: 0x0420, # CYRILLIC CAPITAL LETTER ER
0x0091: 0x0421, # CYRILLIC CAPITAL LETTER ES
0x0092: 0x0422, # CYRILLIC CAPITAL LETTER TE
0x0093: 0x0423, # CYRILLIC CAPITAL LETTER U
0x0094: 0x0424, # CYRILLIC CAPITAL LETTER EF
0x0095: 0x0425, # CYRILLIC CAPITAL LETTER HA
0x0096: 0x0426, # CYRILLIC CAPITAL LETTER TSE
0x0097: 0x0427, # CYRILLIC CAPITAL LETTER CHE
0x0098: 0x0428, # CYRILLIC CAPITAL LETTER SHA
0x0099: 0x0429, # CYRILLIC CAPITAL LETTER SHCHA
0x009a: 0x042a, # CYRILLIC CAPITAL LETTER HARD SIGN
0x009b: 0x042b, # CYRILLIC CAPITAL LETTER YERU
0x009c: 0x042c, # CYRILLIC CAPITAL LETTER SOFT SIGN
0x009d: 0x042d, # CYRILLIC CAPITAL LETTER E
0x009e: 0x042e, # CYRILLIC CAPITAL LETTER YU
0x009f: 0x042f, # CYRILLIC CAPITAL LETTER YA
0x00a0: 0x0430, # CYRILLIC SMALL LETTER A
0x00a1: 0x0431, # CYRILLIC SMALL LETTER BE
0x00a2: 0x0432, # CYRILLIC SMALL LETTER VE
0x00a3: 0x0433, # CYRILLIC SMALL LETTER GHE
0x00a4: 0x0434, # CYRILLIC SMALL LETTER DE
0x00a5: 0x0435, # CYRILLIC SMALL LETTER IE
0x00a6: 0x0436, # CYRILLIC SMALL LETTER ZHE
0x00a7: 0x0437, # CYRILLIC SMALL LETTER ZE
0x00a8: 0x0438, # CYRILLIC SMALL LETTER I
0x00a9: 0x0439, # CYRILLIC SMALL LETTER SHORT I
0x00aa: 0x043a, # CYRILLIC SMALL LETTER KA
0x00ab: 0x043b, # CYRILLIC SMALL LETTER EL
0x00ac: 0x043c, # CYRILLIC SMALL LETTER EM
0x00ad: 0x043d, # CYRILLIC SMALL LETTER EN
0x00ae: 0x043e, # CYRILLIC SMALL LETTER O
0x00af: 0x043f, # CYRILLIC SMALL LETTER PE
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x258c, # LEFT HALF BLOCK
0x00de: 0x2590, # RIGHT HALF BLOCK
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x0440, # CYRILLIC SMALL LETTER ER
0x00e1: 0x0441, # CYRILLIC SMALL LETTER ES
0x00e2: 0x0442, # CYRILLIC SMALL LETTER TE
0x00e3: 0x0443, # CYRILLIC SMALL LETTER U
0x00e4: 0x0444, # CYRILLIC SMALL LETTER EF
0x00e5: 0x0445, # CYRILLIC SMALL LETTER HA
0x00e6: 0x0446, # CYRILLIC SMALL LETTER TSE
0x00e7: 0x0447, # CYRILLIC SMALL LETTER CHE
0x00e8: 0x0448, # CYRILLIC SMALL LETTER SHA
0x00e9: 0x0449, # CYRILLIC SMALL LETTER SHCHA
0x00ea: 0x044a, # CYRILLIC SMALL LETTER HARD SIGN
0x00eb: 0x044b, # CYRILLIC SMALL LETTER YERU
0x00ec: 0x044c, # CYRILLIC SMALL LETTER SOFT SIGN
0x00ed: 0x044d, # CYRILLIC SMALL LETTER E
0x00ee: 0x044e, # CYRILLIC SMALL LETTER YU
0x00ef: 0x044f, # CYRILLIC SMALL LETTER YA
0x00f0: 0x0401, # CYRILLIC CAPITAL LETTER IO
0x00f1: 0x0451, # CYRILLIC SMALL LETTER IO
0x00f2: 0x0404, # CYRILLIC CAPITAL LETTER UKRAINIAN IE
0x00f3: 0x0454, # CYRILLIC SMALL LETTER UKRAINIAN IE
0x00f4: 0x0407, # CYRILLIC CAPITAL LETTER YI
0x00f5: 0x0457, # CYRILLIC SMALL LETTER YI
0x00f6: 0x040e, # CYRILLIC CAPITAL LETTER SHORT U
0x00f7: 0x045e, # CYRILLIC SMALL LETTER SHORT U
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x2219, # BULLET OPERATOR
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x221a, # SQUARE ROOT
0x00fc: 0x2116, # NUMERO SIGN
0x00fd: 0x00a4, # CURRENCY SIGN
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
'\x00' # 0x0000 -> NULL
'\x01' # 0x0001 -> START OF HEADING
'\x02' # 0x0002 -> START OF TEXT
'\x03' # 0x0003 -> END OF TEXT
'\x04' # 0x0004 -> END OF TRANSMISSION
'\x05' # 0x0005 -> ENQUIRY
'\x06' # 0x0006 -> ACKNOWLEDGE
'\x07' # 0x0007 -> BELL
'\x08' # 0x0008 -> BACKSPACE
'\t' # 0x0009 -> HORIZONTAL TABULATION
'\n' # 0x000a -> LINE FEED
'\x0b' # 0x000b -> VERTICAL TABULATION
'\x0c' # 0x000c -> FORM FEED
'\r' # 0x000d -> CARRIAGE RETURN
'\x0e' # 0x000e -> SHIFT OUT
'\x0f' # 0x000f -> SHIFT IN
'\x10' # 0x0010 -> DATA LINK ESCAPE
'\x11' # 0x0011 -> DEVICE CONTROL ONE
'\x12' # 0x0012 -> DEVICE CONTROL TWO
'\x13' # 0x0013 -> DEVICE CONTROL THREE
'\x14' # 0x0014 -> DEVICE CONTROL FOUR
'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x0016 -> SYNCHRONOUS IDLE
'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
'\x18' # 0x0018 -> CANCEL
'\x19' # 0x0019 -> END OF MEDIUM
'\x1a' # 0x001a -> SUBSTITUTE
'\x1b' # 0x001b -> ESCAPE
'\x1c' # 0x001c -> FILE SEPARATOR
'\x1d' # 0x001d -> GROUP SEPARATOR
'\x1e' # 0x001e -> RECORD SEPARATOR
'\x1f' # 0x001f -> UNIT SEPARATOR
' ' # 0x0020 -> SPACE
'!' # 0x0021 -> EXCLAMATION MARK
'"' # 0x0022 -> QUOTATION MARK
'#' # 0x0023 -> NUMBER SIGN
'$' # 0x0024 -> DOLLAR SIGN
'%' # 0x0025 -> PERCENT SIGN
'&' # 0x0026 -> AMPERSAND
"'" # 0x0027 -> APOSTROPHE
'(' # 0x0028 -> LEFT PARENTHESIS
')' # 0x0029 -> RIGHT PARENTHESIS
'*' # 0x002a -> ASTERISK
'+' # 0x002b -> PLUS SIGN
',' # 0x002c -> COMMA
'-' # 0x002d -> HYPHEN-MINUS
'.' # 0x002e -> FULL STOP
'/' # 0x002f -> SOLIDUS
'0' # 0x0030 -> DIGIT ZERO
'1' # 0x0031 -> DIGIT ONE
'2' # 0x0032 -> DIGIT TWO
'3' # 0x0033 -> DIGIT THREE
'4' # 0x0034 -> DIGIT FOUR
'5' # 0x0035 -> DIGIT FIVE
'6' # 0x0036 -> DIGIT SIX
'7' # 0x0037 -> DIGIT SEVEN
'8' # 0x0038 -> DIGIT EIGHT
'9' # 0x0039 -> DIGIT NINE
':' # 0x003a -> COLON
';' # 0x003b -> SEMICOLON
'<' # 0x003c -> LESS-THAN SIGN
'=' # 0x003d -> EQUALS SIGN
'>' # 0x003e -> GREATER-THAN SIGN
'?' # 0x003f -> QUESTION MARK
'@' # 0x0040 -> COMMERCIAL AT
'A' # 0x0041 -> LATIN CAPITAL LETTER A
'B' # 0x0042 -> LATIN CAPITAL LETTER B
'C' # 0x0043 -> LATIN CAPITAL LETTER C
'D' # 0x0044 -> LATIN CAPITAL LETTER D
'E' # 0x0045 -> LATIN CAPITAL LETTER E
'F' # 0x0046 -> LATIN CAPITAL LETTER F
'G' # 0x0047 -> LATIN CAPITAL LETTER G
'H' # 0x0048 -> LATIN CAPITAL LETTER H
'I' # 0x0049 -> LATIN CAPITAL LETTER I
'J' # 0x004a -> LATIN CAPITAL LETTER J
'K' # 0x004b -> LATIN CAPITAL LETTER K
'L' # 0x004c -> LATIN CAPITAL LETTER L
'M' # 0x004d -> LATIN CAPITAL LETTER M
'N' # 0x004e -> LATIN CAPITAL LETTER N
'O' # 0x004f -> LATIN CAPITAL LETTER O
'P' # 0x0050 -> LATIN CAPITAL LETTER P
'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
'R' # 0x0052 -> LATIN CAPITAL LETTER R
'S' # 0x0053 -> LATIN CAPITAL LETTER S
'T' # 0x0054 -> LATIN CAPITAL LETTER T
'U' # 0x0055 -> LATIN CAPITAL LETTER U
'V' # 0x0056 -> LATIN CAPITAL LETTER V
'W' # 0x0057 -> LATIN CAPITAL LETTER W
'X' # 0x0058 -> LATIN CAPITAL LETTER X
'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
'Z' # 0x005a -> LATIN CAPITAL LETTER Z
'[' # 0x005b -> LEFT SQUARE BRACKET
'\\' # 0x005c -> REVERSE SOLIDUS
']' # 0x005d -> RIGHT SQUARE BRACKET
'^' # 0x005e -> CIRCUMFLEX ACCENT
'_' # 0x005f -> LOW LINE
'`' # 0x0060 -> GRAVE ACCENT
'a' # 0x0061 -> LATIN SMALL LETTER A
'b' # 0x0062 -> LATIN SMALL LETTER B
'c' # 0x0063 -> LATIN SMALL LETTER C
'd' # 0x0064 -> LATIN SMALL LETTER D
'e' # 0x0065 -> LATIN SMALL LETTER E
'f' # 0x0066 -> LATIN SMALL LETTER F
'g' # 0x0067 -> LATIN SMALL LETTER G
'h' # 0x0068 -> LATIN SMALL LETTER H
'i' # 0x0069 -> LATIN SMALL LETTER I
'j' # 0x006a -> LATIN SMALL LETTER J
'k' # 0x006b -> LATIN SMALL LETTER K
'l' # 0x006c -> LATIN SMALL LETTER L
'm' # 0x006d -> LATIN SMALL LETTER M
'n' # 0x006e -> LATIN SMALL LETTER N
'o' # 0x006f -> LATIN SMALL LETTER O
'p' # 0x0070 -> LATIN SMALL LETTER P
'q' # 0x0071 -> LATIN SMALL LETTER Q
'r' # 0x0072 -> LATIN SMALL LETTER R
's' # 0x0073 -> LATIN SMALL LETTER S
't' # 0x0074 -> LATIN SMALL LETTER T
'u' # 0x0075 -> LATIN SMALL LETTER U
'v' # 0x0076 -> LATIN SMALL LETTER V
'w' # 0x0077 -> LATIN SMALL LETTER W
'x' # 0x0078 -> LATIN SMALL LETTER X
'y' # 0x0079 -> LATIN SMALL LETTER Y
'z' # 0x007a -> LATIN SMALL LETTER Z
'{' # 0x007b -> LEFT CURLY BRACKET
'|' # 0x007c -> VERTICAL LINE
'}' # 0x007d -> RIGHT CURLY BRACKET
'~' # 0x007e -> TILDE
'\x7f' # 0x007f -> DELETE
'\u0410' # 0x0080 -> CYRILLIC CAPITAL LETTER A
'\u0411' # 0x0081 -> CYRILLIC CAPITAL LETTER BE
'\u0412' # 0x0082 -> CYRILLIC CAPITAL LETTER VE
'\u0413' # 0x0083 -> CYRILLIC CAPITAL LETTER GHE
'\u0414' # 0x0084 -> CYRILLIC CAPITAL LETTER DE
'\u0415' # 0x0085 -> CYRILLIC CAPITAL LETTER IE
'\u0416' # 0x0086 -> CYRILLIC CAPITAL LETTER ZHE
'\u0417' # 0x0087 -> CYRILLIC CAPITAL LETTER ZE
'\u0418' # 0x0088 -> CYRILLIC CAPITAL LETTER I
'\u0419' # 0x0089 -> CYRILLIC CAPITAL LETTER SHORT I
'\u041a' # 0x008a -> CYRILLIC CAPITAL LETTER KA
'\u041b' # 0x008b -> CYRILLIC CAPITAL LETTER EL
'\u041c' # 0x008c -> CYRILLIC CAPITAL LETTER EM
'\u041d' # 0x008d -> CYRILLIC CAPITAL LETTER EN
'\u041e' # 0x008e -> CYRILLIC CAPITAL LETTER O
'\u041f' # 0x008f -> CYRILLIC CAPITAL LETTER PE
'\u0420' # 0x0090 -> CYRILLIC CAPITAL LETTER ER
'\u0421' # 0x0091 -> CYRILLIC CAPITAL LETTER ES
'\u0422' # 0x0092 -> CYRILLIC CAPITAL LETTER TE
'\u0423' # 0x0093 -> CYRILLIC CAPITAL LETTER U
'\u0424' # 0x0094 -> CYRILLIC CAPITAL LETTER EF
'\u0425' # 0x0095 -> CYRILLIC CAPITAL LETTER HA
'\u0426' # 0x0096 -> CYRILLIC CAPITAL LETTER TSE
'\u0427' # 0x0097 -> CYRILLIC CAPITAL LETTER CHE
'\u0428' # 0x0098 -> CYRILLIC CAPITAL LETTER SHA
'\u0429' # 0x0099 -> CYRILLIC CAPITAL LETTER SHCHA
'\u042a' # 0x009a -> CYRILLIC CAPITAL LETTER HARD SIGN
'\u042b' # 0x009b -> CYRILLIC CAPITAL LETTER YERU
'\u042c' # 0x009c -> CYRILLIC CAPITAL LETTER SOFT SIGN
'\u042d' # 0x009d -> CYRILLIC CAPITAL LETTER E
'\u042e' # 0x009e -> CYRILLIC CAPITAL LETTER YU
'\u042f' # 0x009f -> CYRILLIC CAPITAL LETTER YA
'\u0430' # 0x00a0 -> CYRILLIC SMALL LETTER A
'\u0431' # 0x00a1 -> CYRILLIC SMALL LETTER BE
'\u0432' # 0x00a2 -> CYRILLIC SMALL LETTER VE
'\u0433' # 0x00a3 -> CYRILLIC SMALL LETTER GHE
'\u0434' # 0x00a4 -> CYRILLIC SMALL LETTER DE
'\u0435' # 0x00a5 -> CYRILLIC SMALL LETTER IE
'\u0436' # 0x00a6 -> CYRILLIC SMALL LETTER ZHE
'\u0437' # 0x00a7 -> CYRILLIC SMALL LETTER ZE
'\u0438' # 0x00a8 -> CYRILLIC SMALL LETTER I
'\u0439' # 0x00a9 -> CYRILLIC SMALL LETTER SHORT I
'\u043a' # 0x00aa -> CYRILLIC SMALL LETTER KA
'\u043b' # 0x00ab -> CYRILLIC SMALL LETTER EL
'\u043c' # 0x00ac -> CYRILLIC SMALL LETTER EM
'\u043d' # 0x00ad -> CYRILLIC SMALL LETTER EN
'\u043e' # 0x00ae -> CYRILLIC SMALL LETTER O
'\u043f' # 0x00af -> CYRILLIC SMALL LETTER PE
'\u2591' # 0x00b0 -> LIGHT SHADE
'\u2592' # 0x00b1 -> MEDIUM SHADE
'\u2593' # 0x00b2 -> DARK SHADE
'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
'\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
'\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
'\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
'\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
'\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
'\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
'\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
'\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
'\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
'\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
'\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
'\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
'\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
'\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
'\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
'\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
'\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
'\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
'\u2588' # 0x00db -> FULL BLOCK
'\u2584' # 0x00dc -> LOWER HALF BLOCK
'\u258c' # 0x00dd -> LEFT HALF BLOCK
'\u2590' # 0x00de -> RIGHT HALF BLOCK
'\u2580' # 0x00df -> UPPER HALF BLOCK
'\u0440' # 0x00e0 -> CYRILLIC SMALL LETTER ER
'\u0441' # 0x00e1 -> CYRILLIC SMALL LETTER ES
'\u0442' # 0x00e2 -> CYRILLIC SMALL LETTER TE
'\u0443' # 0x00e3 -> CYRILLIC SMALL LETTER U
'\u0444' # 0x00e4 -> CYRILLIC SMALL LETTER EF
'\u0445' # 0x00e5 -> CYRILLIC SMALL LETTER HA
'\u0446' # 0x00e6 -> CYRILLIC SMALL LETTER TSE
'\u0447' # 0x00e7 -> CYRILLIC SMALL LETTER CHE
'\u0448' # 0x00e8 -> CYRILLIC SMALL LETTER SHA
'\u0449' # 0x00e9 -> CYRILLIC SMALL LETTER SHCHA
'\u044a' # 0x00ea -> CYRILLIC SMALL LETTER HARD SIGN
'\u044b' # 0x00eb -> CYRILLIC SMALL LETTER YERU
'\u044c' # 0x00ec -> CYRILLIC SMALL LETTER SOFT SIGN
'\u044d' # 0x00ed -> CYRILLIC SMALL LETTER E
'\u044e' # 0x00ee -> CYRILLIC SMALL LETTER YU
'\u044f' # 0x00ef -> CYRILLIC SMALL LETTER YA
'\u0401' # 0x00f0 -> CYRILLIC CAPITAL LETTER IO
'\u0451' # 0x00f1 -> CYRILLIC SMALL LETTER IO
'\u0404' # 0x00f2 -> CYRILLIC CAPITAL LETTER UKRAINIAN IE
'\u0454' # 0x00f3 -> CYRILLIC SMALL LETTER UKRAINIAN IE
'\u0407' # 0x00f4 -> CYRILLIC CAPITAL LETTER YI
'\u0457' # 0x00f5 -> CYRILLIC SMALL LETTER YI
'\u040e' # 0x00f6 -> CYRILLIC CAPITAL LETTER SHORT U
'\u045e' # 0x00f7 -> CYRILLIC SMALL LETTER SHORT U
'\xb0' # 0x00f8 -> DEGREE SIGN
'\u2219' # 0x00f9 -> BULLET OPERATOR
'\xb7' # 0x00fa -> MIDDLE DOT
'\u221a' # 0x00fb -> SQUARE ROOT
'\u2116' # 0x00fc -> NUMERO SIGN
'\xa4' # 0x00fd -> CURRENCY SIGN
'\u25a0' # 0x00fe -> BLACK SQUARE
'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a4: 0x00fd, # CURRENCY SIGN
0x00b0: 0x00f8, # DEGREE SIGN
0x00b7: 0x00fa, # MIDDLE DOT
0x0401: 0x00f0, # CYRILLIC CAPITAL LETTER IO
0x0404: 0x00f2, # CYRILLIC CAPITAL LETTER UKRAINIAN IE
0x0407: 0x00f4, # CYRILLIC CAPITAL LETTER YI
0x040e: 0x00f6, # CYRILLIC CAPITAL LETTER SHORT U
0x0410: 0x0080, # CYRILLIC CAPITAL LETTER A
0x0411: 0x0081, # CYRILLIC CAPITAL LETTER BE
0x0412: 0x0082, # CYRILLIC CAPITAL LETTER VE
0x0413: 0x0083, # CYRILLIC CAPITAL LETTER GHE
0x0414: 0x0084, # CYRILLIC CAPITAL LETTER DE
0x0415: 0x0085, # CYRILLIC CAPITAL LETTER IE
0x0416: 0x0086, # CYRILLIC CAPITAL LETTER ZHE
0x0417: 0x0087, # CYRILLIC CAPITAL LETTER ZE
0x0418: 0x0088, # CYRILLIC CAPITAL LETTER I
0x0419: 0x0089, # CYRILLIC CAPITAL LETTER SHORT I
0x041a: 0x008a, # CYRILLIC CAPITAL LETTER KA
0x041b: 0x008b, # CYRILLIC CAPITAL LETTER EL
0x041c: 0x008c, # CYRILLIC CAPITAL LETTER EM
0x041d: 0x008d, # CYRILLIC CAPITAL LETTER EN
0x041e: 0x008e, # CYRILLIC CAPITAL LETTER O
0x041f: 0x008f, # CYRILLIC CAPITAL LETTER PE
0x0420: 0x0090, # CYRILLIC CAPITAL LETTER ER
0x0421: 0x0091, # CYRILLIC CAPITAL LETTER ES
0x0422: 0x0092, # CYRILLIC CAPITAL LETTER TE
0x0423: 0x0093, # CYRILLIC CAPITAL LETTER U
0x0424: 0x0094, # CYRILLIC CAPITAL LETTER EF
0x0425: 0x0095, # CYRILLIC CAPITAL LETTER HA
0x0426: 0x0096, # CYRILLIC CAPITAL LETTER TSE
0x0427: 0x0097, # CYRILLIC CAPITAL LETTER CHE
0x0428: 0x0098, # CYRILLIC CAPITAL LETTER SHA
0x0429: 0x0099, # CYRILLIC CAPITAL LETTER SHCHA
0x042a: 0x009a, # CYRILLIC CAPITAL LETTER HARD SIGN
0x042b: 0x009b, # CYRILLIC CAPITAL LETTER YERU
0x042c: 0x009c, # CYRILLIC CAPITAL LETTER SOFT SIGN
0x042d: 0x009d, # CYRILLIC CAPITAL LETTER E
0x042e: 0x009e, # CYRILLIC CAPITAL LETTER YU
0x042f: 0x009f, # CYRILLIC CAPITAL LETTER YA
0x0430: 0x00a0, # CYRILLIC SMALL LETTER A
0x0431: 0x00a1, # CYRILLIC SMALL LETTER BE
0x0432: 0x00a2, # CYRILLIC SMALL LETTER VE
0x0433: 0x00a3, # CYRILLIC SMALL LETTER GHE
0x0434: 0x00a4, # CYRILLIC SMALL LETTER DE
0x0435: 0x00a5, # CYRILLIC SMALL LETTER IE
0x0436: 0x00a6, # CYRILLIC SMALL LETTER ZHE
0x0437: 0x00a7, # CYRILLIC SMALL LETTER ZE
0x0438: 0x00a8, # CYRILLIC SMALL LETTER I
0x0439: 0x00a9, # CYRILLIC SMALL LETTER SHORT I
0x043a: 0x00aa, # CYRILLIC SMALL LETTER KA
0x043b: 0x00ab, # CYRILLIC SMALL LETTER EL
0x043c: 0x00ac, # CYRILLIC SMALL LETTER EM
0x043d: 0x00ad, # CYRILLIC SMALL LETTER EN
0x043e: 0x00ae, # CYRILLIC SMALL LETTER O
0x043f: 0x00af, # CYRILLIC SMALL LETTER PE
0x0440: 0x00e0, # CYRILLIC SMALL LETTER ER
0x0441: 0x00e1, # CYRILLIC SMALL LETTER ES
0x0442: 0x00e2, # CYRILLIC SMALL LETTER TE
0x0443: 0x00e3, # CYRILLIC SMALL LETTER U
0x0444: 0x00e4, # CYRILLIC SMALL LETTER EF
0x0445: 0x00e5, # CYRILLIC SMALL LETTER HA
0x0446: 0x00e6, # CYRILLIC SMALL LETTER TSE
0x0447: 0x00e7, # CYRILLIC SMALL LETTER CHE
0x0448: 0x00e8, # CYRILLIC SMALL LETTER SHA
0x0449: 0x00e9, # CYRILLIC SMALL LETTER SHCHA
0x044a: 0x00ea, # CYRILLIC SMALL LETTER HARD SIGN
0x044b: 0x00eb, # CYRILLIC SMALL LETTER YERU
0x044c: 0x00ec, # CYRILLIC SMALL LETTER SOFT SIGN
0x044d: 0x00ed, # CYRILLIC SMALL LETTER E
0x044e: 0x00ee, # CYRILLIC SMALL LETTER YU
0x044f: 0x00ef, # CYRILLIC SMALL LETTER YA
0x0451: 0x00f1, # CYRILLIC SMALL LETTER IO
0x0454: 0x00f3, # CYRILLIC SMALL LETTER UKRAINIAN IE
0x0457: 0x00f5, # CYRILLIC SMALL LETTER YI
0x045e: 0x00f7, # CYRILLIC SMALL LETTER SHORT U
0x2116: 0x00fc, # NUMERO SIGN
0x2219: 0x00f9, # BULLET OPERATOR
0x221a: 0x00fb, # SQUARE ROOT
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x258c: 0x00dd, # LEFT HALF BLOCK
0x2590: 0x00de, # RIGHT HALF BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
| gpl-2.0 | 270,120,870,210,631,000 | 48.277937 | 97 | 0.608385 | false |
oblitum/ycmd | cpp/ycm/tests/gmock/scripts/upload_gmock.py | 770 | 2833 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""upload_gmock.py v0.1.0 -- uploads a Google Mock patch for review.
This simple wrapper passes all command line flags and
--cc=googlemock@googlegroups.com to upload.py.
USAGE: upload_gmock.py [options for upload.py]
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import sys
CC_FLAG = '--cc='
GMOCK_GROUP = 'googlemock@googlegroups.com'
def main():
# Finds the path to upload.py, assuming it is in the same directory
# as this file.
my_dir = os.path.dirname(os.path.abspath(__file__))
upload_py_path = os.path.join(my_dir, 'upload.py')
# Adds Google Mock discussion group to the cc line if it's not there
# already.
upload_py_argv = [upload_py_path]
found_cc_flag = False
for arg in sys.argv[1:]:
if arg.startswith(CC_FLAG):
found_cc_flag = True
cc_line = arg[len(CC_FLAG):]
cc_list = [addr for addr in cc_line.split(',') if addr]
if GMOCK_GROUP not in cc_list:
cc_list.append(GMOCK_GROUP)
upload_py_argv.append(CC_FLAG + ','.join(cc_list))
else:
upload_py_argv.append(arg)
if not found_cc_flag:
upload_py_argv.append(CC_FLAG + GMOCK_GROUP)
# Invokes upload.py with the modified command line flags.
os.execv(upload_py_path, upload_py_argv)
if __name__ == '__main__':
main()
| gpl-3.0 | 4,210,096,880,699,330,600 | 35.320513 | 72 | 0.726085 | false |
BT-ojossen/stock-logistics-workflow | __unported__/stock_sale_filters/stock.py | 33 | 2395 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Alexandre Fayolle
# Copyright 2012 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv.orm import Model
from openerp.osv import fields
class stock_picking(Model):
_inherit = 'stock.picking'
_columns = {
'carrier_partner_id': fields.related('carrier_id', 'partner_id',
type='many2one',
relation='res.partner',
string='Carrier Name',
readonly=True,
help="Name of the carrier partner"),
'sale_shop_id': fields.related('sale_id', 'shop_id',
type='many2one',
relation='sale.shop',
string='Shop',
readonly=True,
help='The shop from which the sale order for the picking was issued')
}
class sale_order(Model):
_inherit = 'sale.order'
_columns = {
'carrier_partner_id': fields.related('carrier_id', 'partner_id',
type='many2one',
relation='res.partner',
string='Carrier Name',
readonly=True,
help="Name of the carrier partner")
}
| agpl-3.0 | 6,392,168,610,957,687,000 | 45.057692 | 108 | 0.463466 | false |
richardnpaul/FWL-Website | lib/python2.7/site-packages/django/contrib/auth/tests/management.py | 97 | 9156 | from __future__ import unicode_literals
from datetime import date
from django.contrib.auth import models, management
from django.contrib.auth.management import create_permissions
from django.contrib.auth.management.commands import changepassword
from django.contrib.auth.models import User
from django.contrib.auth.tests import CustomUser
from django.contrib.auth.tests.utils import skipIfCustomUser
from django.core.management import call_command
from django.core.management.base import CommandError
from django.core.management.validation import get_validation_errors
from django.db.models.loading import get_app
from django.test import TestCase
from django.test.utils import override_settings
from django.utils import six
from django.utils.six import StringIO
@skipIfCustomUser
class GetDefaultUsernameTestCase(TestCase):
def setUp(self):
self.old_get_system_username = management.get_system_username
def tearDown(self):
management.get_system_username = self.old_get_system_username
def test_actual_implementation(self):
self.assertIsInstance(management.get_system_username(), six.text_type)
def test_simple(self):
management.get_system_username = lambda: 'joe'
self.assertEqual(management.get_default_username(), 'joe')
def test_existing(self):
models.User.objects.create(username='joe')
management.get_system_username = lambda: 'joe'
self.assertEqual(management.get_default_username(), '')
self.assertEqual(
management.get_default_username(check_db=False), 'joe')
def test_i18n(self):
# 'Julia' with accented 'u':
management.get_system_username = lambda: 'J\xfalia'
self.assertEqual(management.get_default_username(), 'julia')
@skipIfCustomUser
class ChangepasswordManagementCommandTestCase(TestCase):
def setUp(self):
self.user = models.User.objects.create_user(username='joe', password='qwerty')
self.stdout = StringIO()
self.stderr = StringIO()
def tearDown(self):
self.stdout.close()
self.stderr.close()
def test_that_changepassword_command_changes_joes_password(self):
"Executing the changepassword management command should change joe's password"
self.assertTrue(self.user.check_password('qwerty'))
command = changepassword.Command()
command._get_pass = lambda *args: 'not qwerty'
command.execute("joe", stdout=self.stdout)
command_output = self.stdout.getvalue().strip()
self.assertEqual(command_output, "Changing password for user 'joe'\nPassword changed successfully for user 'joe'")
self.assertTrue(models.User.objects.get(username="joe").check_password("not qwerty"))
def test_that_max_tries_exits_1(self):
"""
        A CommandError should be thrown by handle() if the user enters
mismatched passwords three times.
"""
command = changepassword.Command()
command._get_pass = lambda *args: args or 'foo'
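        # The stub returns 'foo' for the first prompt (called with no args) and
        # the non-empty args tuple for the confirmation prompt, so the two
        # entries never match and all three attempts are exhausted.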
with self.assertRaises(CommandError):
command.execute("joe", stdout=self.stdout, stderr=self.stderr)
@skipIfCustomUser
class CreatesuperuserManagementCommandTestCase(TestCase):
def test_createsuperuser(self):
"Check the operation of the createsuperuser management command"
# We can use the management command to create a superuser
new_io = StringIO()
call_command("createsuperuser",
interactive=False,
username="joe",
email="[email protected]",
stdout=new_io
)
command_output = new_io.getvalue().strip()
self.assertEqual(command_output, 'Superuser created successfully.')
u = User.objects.get(username="joe")
self.assertEqual(u.email, '[email protected]')
# created password should be unusable
self.assertFalse(u.has_usable_password())
def test_verbosity_zero(self):
        # We can suppress output from the management command
new_io = StringIO()
call_command("createsuperuser",
interactive=False,
username="joe2",
email="[email protected]",
verbosity=0,
stdout=new_io
)
command_output = new_io.getvalue().strip()
self.assertEqual(command_output, '')
u = User.objects.get(username="joe2")
self.assertEqual(u.email, '[email protected]')
self.assertFalse(u.has_usable_password())
def test_email_in_username(self):
new_io = StringIO()
call_command("createsuperuser",
interactive=False,
username="[email protected]",
email="[email protected]",
stdout=new_io
)
u = User._default_manager.get(username="[email protected]")
self.assertEqual(u.email, '[email protected]')
self.assertFalse(u.has_usable_password())
@override_settings(AUTH_USER_MODEL='auth.CustomUser')
def test_swappable_user(self):
"A superuser can be created when a custom User model is in use"
# We can use the management command to create a superuser
# We skip validation because the temporary substitution of the
# swappable User model messes with validation.
new_io = StringIO()
call_command("createsuperuser",
interactive=False,
email="[email protected]",
date_of_birth="1976-04-01",
stdout=new_io,
skip_validation=True
)
command_output = new_io.getvalue().strip()
self.assertEqual(command_output, 'Superuser created successfully.')
u = CustomUser._default_manager.get(email="[email protected]")
self.assertEqual(u.date_of_birth, date(1976, 4, 1))
# created password should be unusable
self.assertFalse(u.has_usable_password())
@override_settings(AUTH_USER_MODEL='auth.CustomUser')
def test_swappable_user_missing_required_field(self):
"A Custom superuser won't be created when a required field isn't provided"
# We can use the management command to create a superuser
# We skip validation because the temporary substitution of the
# swappable User model messes with validation.
new_io = StringIO()
with self.assertRaises(CommandError):
call_command("createsuperuser",
interactive=False,
username="[email protected]",
stdout=new_io,
stderr=new_io,
skip_validation=True
)
self.assertEqual(CustomUser._default_manager.count(), 0)
class CustomUserModelValidationTestCase(TestCase):
@override_settings(AUTH_USER_MODEL='auth.CustomUserBadRequiredFields')
def test_username_not_in_required_fields(self):
"USERNAME_FIELD should not appear in REQUIRED_FIELDS."
new_io = StringIO()
get_validation_errors(new_io, get_app('auth'))
self.assertIn("The field named as the USERNAME_FIELD should not be included in REQUIRED_FIELDS on a swappable User model.", new_io.getvalue())
@override_settings(AUTH_USER_MODEL='auth.CustomUserNonUniqueUsername')
def test_username_non_unique(self):
"A non-unique USERNAME_FIELD should raise a model validation error."
new_io = StringIO()
get_validation_errors(new_io, get_app('auth'))
self.assertIn("The USERNAME_FIELD must be unique. Add unique=True to the field parameters.", new_io.getvalue())
class PermissionDuplicationTestCase(TestCase):
def setUp(self):
self._original_permissions = models.Permission._meta.permissions[:]
def tearDown(self):
models.Permission._meta.permissions = self._original_permissions
def test_duplicated_permissions(self):
"""
        Test that a proper error message is shown when trying to create
        duplicate permissions.
"""
# check duplicated default permission
models.Permission._meta.permissions = [
('change_permission', 'Can edit permission (duplicate)')]
six.assertRaisesRegex(self, CommandError,
"The permission codename 'change_permission' clashes with a "
"builtin permission for model 'auth.Permission'.",
create_permissions, models, [], verbosity=0)
# check duplicated custom permissions
models.Permission._meta.permissions = [
('my_custom_permission', 'Some permission'),
('other_one', 'Some other permission'),
('my_custom_permission', 'Some permission with duplicate permission code'),
]
six.assertRaisesRegex(self, CommandError,
"The permission codename 'my_custom_permission' is duplicated for model "
"'auth.Permission'.",
create_permissions, models, [], verbosity=0)
# should not raise anything
models.Permission._meta.permissions = [
('my_custom_permission', 'Some permission'),
('other_one', 'Some other permission'),
]
create_permissions(models, [], verbosity=0)
| gpl-3.0 | -1,999,815,977,508,504,300 | 39.157895 | 150 | 0.663281 | false |
aventuri/opencaster | code/libs/dvbobjects/dvbobjects/PSI/MGT.py | 5 | 2405 | #! /usr/bin/env python
# This file is part of the dvbobjects library.
#
# Copyright 2010-2013 Lorenzo Pallara [email protected]
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import string
from dvbobjects.MPEG.Section import Section
from dvbobjects.utils import *
######################################################################
class master_guide_section(Section):
table_id = 0xC7
section_max_size = 4096
def pack_section_body(self):
# pack tables_loop
tl_bytes = string.join(
map(lambda x: x.pack(),
self.tables_loop),
"")
# pack descriptors_loop
dl_bytes = string.join(
map(lambda x: x.pack(),
self.descriptors_loop),
"")
self.table_id_extension = 0
self.private_indicator = 1
fmt = "!BH%dsH%ds" % (len(tl_bytes), len(dl_bytes))
return pack(fmt,
self.ATSC_protocol_version,
len(self.tables_loop),
tl_bytes,
0xF000 | (len(dl_bytes) & 0x0FFF),
dl_bytes,
)
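    # Body layout sketch (illustrative): ATSC_protocol_version (1 byte),
    # tables_defined (2 bytes), the packed table loop, then
    # 0xF000 | descriptor_loop_length (2 bytes) followed by the packed
    # descriptor loop -- matching the "!BH%dsH%ds" struct format above.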
######################################################################
class table_loop_item(DVBobject):
def pack(self):
# pack transport_descriptor_loop
dl_bytes = string.join(
map(lambda x: x.pack(),
self.descriptors_loop),
"")
fmt = "!HHBLH%ds" % len(dl_bytes)
return pack(fmt,
self.table_type,
0xE000 | (self.table_type_pid & 0x1FFF),
0xE0 | (self.table_type_version_number & 0x1F),
self.number_bytes,
0xF000 | (len(dl_bytes) & 0x0FFF),
dl_bytes,
)
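    # Entry layout sketch (illustrative): table_type (2 bytes),
    # 0xE000 | table_type_PID (2 bytes), 0xE0 | version_number (1 byte),
    # number_bytes (4 bytes), 0xF000 | descriptor_loop_length (2 bytes),
    # then the packed descriptor loop -- matching "!HHBLH%ds" above.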
| gpl-2.0 | 4,272,831,636,192,524,000 | 30.233766 | 78 | 0.564241 | false |
dcroc16/skunk_works | google_appengine/lib/django-1.5/tests/regressiontests/generic_views/dates.py | 50 | 31477 | from __future__ import absolute_import
import time
import datetime
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase
from django.test.utils import override_settings
from django.utils import timezone
from django.utils.unittest import skipUnless
from .models import Book, BookSigning
TZ_SUPPORT = hasattr(time, 'tzset')
# On OSes that don't provide tzset (Windows), we can't set the timezone
# in which the program runs. As a consequence, we must skip tests that
# don't enforce a specific timezone (with timezone.override or equivalent),
# or attempt to interpret naive datetimes in the default timezone.
requires_tz_support = skipUnless(TZ_SUPPORT,
"This test relies on the ability to run a program in an arbitrary "
"time zone, but your operating system isn't able to do that.")
def _make_books(n, base_date):
for i in range(n):
b = Book.objects.create(
name='Book %d' % i,
slug='book-%d' % i,
pages=100+i,
pubdate=base_date - datetime.timedelta(days=i))
class ArchiveIndexViewTests(TestCase):
fixtures = ['generic-views-test-data.json']
urls = 'regressiontests.generic_views.urls'
def test_archive_view(self):
res = self.client.get('/dates/books/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(Book.objects.dates('pubdate', 'year', 'DESC')))
self.assertEqual(list(res.context['latest']), list(Book.objects.all()))
self.assertTemplateUsed(res, 'generic_views/book_archive.html')
def test_archive_view_context_object_name(self):
res = self.client.get('/dates/books/context_object_name/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(Book.objects.dates('pubdate', 'year', 'DESC')))
self.assertEqual(list(res.context['thingies']), list(Book.objects.all()))
self.assertFalse('latest' in res.context)
self.assertTemplateUsed(res, 'generic_views/book_archive.html')
def test_empty_archive_view(self):
Book.objects.all().delete()
res = self.client.get('/dates/books/')
self.assertEqual(res.status_code, 404)
def test_allow_empty_archive_view(self):
Book.objects.all().delete()
res = self.client.get('/dates/books/allow_empty/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), [])
self.assertTemplateUsed(res, 'generic_views/book_archive.html')
def test_archive_view_template(self):
res = self.client.get('/dates/books/template_name/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(Book.objects.dates('pubdate', 'year', 'DESC')))
self.assertEqual(list(res.context['latest']), list(Book.objects.all()))
self.assertTemplateUsed(res, 'generic_views/list.html')
def test_archive_view_template_suffix(self):
res = self.client.get('/dates/books/template_name_suffix/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(Book.objects.dates('pubdate', 'year', 'DESC')))
self.assertEqual(list(res.context['latest']), list(Book.objects.all()))
self.assertTemplateUsed(res, 'generic_views/book_detail.html')
def test_archive_view_invalid(self):
self.assertRaises(ImproperlyConfigured, self.client.get, '/dates/books/invalid/')
def test_archive_view_by_month(self):
res = self.client.get('/dates/books/by_month/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(Book.objects.dates('pubdate', 'month', 'DESC')))
def test_paginated_archive_view(self):
_make_books(20, base_date=datetime.date.today())
res = self.client.get('/dates/books/paginated/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(Book.objects.dates('pubdate', 'year', 'DESC')))
self.assertEqual(list(res.context['latest']), list(Book.objects.all()[0:10]))
self.assertTemplateUsed(res, 'generic_views/book_archive.html')
res = self.client.get('/dates/books/paginated/?page=2')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['page_obj'].number, 2)
self.assertEqual(list(res.context['latest']), list(Book.objects.all()[10:20]))
def test_paginated_archive_view_does_not_load_entire_table(self):
# Regression test for #18087
_make_books(20, base_date=datetime.date.today())
# 1 query for years list + 1 query for books
with self.assertNumQueries(2):
self.client.get('/dates/books/')
# same as above + 1 query to test if books exist + 1 query to count them
with self.assertNumQueries(4):
self.client.get('/dates/books/paginated/')
def test_no_duplicate_query(self):
# Regression test for #18354
with self.assertNumQueries(2):
self.client.get('/dates/books/reverse/')
def test_datetime_archive_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0))
res = self.client.get('/dates/booksignings/')
self.assertEqual(res.status_code, 200)
@requires_tz_support
@override_settings(USE_TZ=True, TIME_ZONE='Africa/Nairobi')
def test_aware_datetime_archive_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0, tzinfo=timezone.utc))
res = self.client.get('/dates/booksignings/')
self.assertEqual(res.status_code, 200)
def test_date_list_order(self):
"""date_list should be sorted descending in index"""
_make_books(5, base_date=datetime.date(2011, 12, 25))
res = self.client.get('/dates/books/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(reversed(sorted(res.context['date_list']))))
class YearArchiveViewTests(TestCase):
fixtures = ['generic-views-test-data.json']
urls = 'regressiontests.generic_views.urls'
def test_year_view(self):
res = self.client.get('/dates/books/2008/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), [datetime.datetime(2008, 10, 1)])
self.assertEqual(res.context['year'], datetime.date(2008, 1, 1))
self.assertTemplateUsed(res, 'generic_views/book_archive_year.html')
# Since allow_empty=False, next/prev years must be valid (#7164)
self.assertEqual(res.context['next_year'], None)
self.assertEqual(res.context['previous_year'], datetime.date(2006, 1, 1))
def test_year_view_make_object_list(self):
res = self.client.get('/dates/books/2006/make_object_list/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), [datetime.datetime(2006, 5, 1)])
self.assertEqual(list(res.context['book_list']), list(Book.objects.filter(pubdate__year=2006)))
self.assertEqual(list(res.context['object_list']), list(Book.objects.filter(pubdate__year=2006)))
self.assertTemplateUsed(res, 'generic_views/book_archive_year.html')
def test_year_view_empty(self):
res = self.client.get('/dates/books/1999/')
self.assertEqual(res.status_code, 404)
res = self.client.get('/dates/books/1999/allow_empty/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), [])
self.assertEqual(list(res.context['book_list']), [])
# Since allow_empty=True, next/prev are allowed to be empty years (#7164)
self.assertEqual(res.context['next_year'], datetime.date(2000, 1, 1))
self.assertEqual(res.context['previous_year'], datetime.date(1998, 1, 1))
def test_year_view_allow_future(self):
# Create a new book in the future
year = datetime.date.today().year + 1
b = Book.objects.create(name="The New New Testement", pages=600, pubdate=datetime.date(year, 1, 1))
res = self.client.get('/dates/books/%s/' % year)
self.assertEqual(res.status_code, 404)
res = self.client.get('/dates/books/%s/allow_empty/' % year)
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), [])
res = self.client.get('/dates/books/%s/allow_future/' % year)
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), [datetime.datetime(year, 1, 1)])
def test_year_view_paginated(self):
res = self.client.get('/dates/books/2006/paginated/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), list(Book.objects.filter(pubdate__year=2006)))
self.assertEqual(list(res.context['object_list']), list(Book.objects.filter(pubdate__year=2006)))
self.assertTemplateUsed(res, 'generic_views/book_archive_year.html')
def test_year_view_invalid_pattern(self):
res = self.client.get('/dates/books/no_year/')
self.assertEqual(res.status_code, 404)
def test_no_duplicate_query(self):
# Regression test for #18354
with self.assertNumQueries(4):
self.client.get('/dates/books/2008/reverse/')
def test_datetime_year_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0))
res = self.client.get('/dates/booksignings/2008/')
self.assertEqual(res.status_code, 200)
@override_settings(USE_TZ=True, TIME_ZONE='Africa/Nairobi')
def test_aware_datetime_year_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0, tzinfo=timezone.utc))
res = self.client.get('/dates/booksignings/2008/')
self.assertEqual(res.status_code, 200)
def test_date_list_order(self):
"""date_list should be sorted ascending in year view"""
_make_books(10, base_date=datetime.date(2011, 12, 25))
res = self.client.get('/dates/books/2011/')
self.assertEqual(list(res.context['date_list']), list(sorted(res.context['date_list'])))
class MonthArchiveViewTests(TestCase):
fixtures = ['generic-views-test-data.json']
urls = 'regressiontests.generic_views.urls'
def test_month_view(self):
res = self.client.get('/dates/books/2008/oct/')
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/book_archive_month.html')
self.assertEqual(list(res.context['date_list']), [datetime.datetime(2008, 10, 1)])
self.assertEqual(list(res.context['book_list']),
list(Book.objects.filter(pubdate=datetime.date(2008, 10, 1))))
self.assertEqual(res.context['month'], datetime.date(2008, 10, 1))
# Since allow_empty=False, next/prev months must be valid (#7164)
self.assertEqual(res.context['next_month'], None)
self.assertEqual(res.context['previous_month'], datetime.date(2006, 5, 1))
def test_month_view_allow_empty(self):
# allow_empty = False, empty month
res = self.client.get('/dates/books/2000/jan/')
self.assertEqual(res.status_code, 404)
# allow_empty = True, empty month
res = self.client.get('/dates/books/2000/jan/allow_empty/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), [])
self.assertEqual(list(res.context['book_list']), [])
self.assertEqual(res.context['month'], datetime.date(2000, 1, 1))
# Since allow_empty=True, next/prev are allowed to be empty months (#7164)
self.assertEqual(res.context['next_month'], datetime.date(2000, 2, 1))
self.assertEqual(res.context['previous_month'], datetime.date(1999, 12, 1))
# allow_empty but not allow_future: next_month should be empty (#7164)
url = datetime.date.today().strftime('/dates/books/%Y/%b/allow_empty/').lower()
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['next_month'], None)
def test_month_view_allow_future(self):
future = (datetime.date.today() + datetime.timedelta(days=60)).replace(day=1)
urlbit = future.strftime('%Y/%b').lower()
b = Book.objects.create(name="The New New Testement", pages=600, pubdate=future)
# allow_future = False, future month
res = self.client.get('/dates/books/%s/' % urlbit)
self.assertEqual(res.status_code, 404)
# allow_future = True, valid future month
res = self.client.get('/dates/books/%s/allow_future/' % urlbit)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['date_list'][0].date(), b.pubdate)
self.assertEqual(list(res.context['book_list']), [b])
self.assertEqual(res.context['month'], future)
# Since allow_future = True but not allow_empty, next/prev are not
# allowed to be empty months (#7164)
self.assertEqual(res.context['next_month'], None)
self.assertEqual(res.context['previous_month'], datetime.date(2008, 10, 1))
# allow_future, but not allow_empty, with a current month. So next
# should be in the future (yup, #7164, again)
res = self.client.get('/dates/books/2008/oct/allow_future/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['next_month'], future)
self.assertEqual(res.context['previous_month'], datetime.date(2006, 5, 1))
def test_month_view_paginated(self):
res = self.client.get('/dates/books/2008/oct/paginated/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), list(Book.objects.filter(pubdate__year=2008, pubdate__month=10)))
self.assertEqual(list(res.context['object_list']), list(Book.objects.filter(pubdate__year=2008, pubdate__month=10)))
self.assertTemplateUsed(res, 'generic_views/book_archive_month.html')
def test_custom_month_format(self):
res = self.client.get('/dates/books/2008/10/')
self.assertEqual(res.status_code, 200)
def test_month_view_invalid_pattern(self):
res = self.client.get('/dates/books/2007/no_month/')
self.assertEqual(res.status_code, 404)
def test_previous_month_without_content(self):
"Content can exist on any day of the previous month. Refs #14711"
self.pubdate_list = [
datetime.date(2010, month, day)
for month,day in ((9,1), (10,2), (11,3))
]
for pubdate in self.pubdate_list:
name = str(pubdate)
Book.objects.create(name=name, slug=name, pages=100, pubdate=pubdate)
res = self.client.get('/dates/books/2010/nov/allow_empty/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['previous_month'], datetime.date(2010,10,1))
# The following test demonstrates the bug
res = self.client.get('/dates/books/2010/nov/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['previous_month'], datetime.date(2010,10,1))
# The bug does not occur here because a Book with pubdate of Sep 1 exists
res = self.client.get('/dates/books/2010/oct/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['previous_month'], datetime.date(2010,9,1))
def test_datetime_month_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 2, 1, 12, 0))
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0))
BookSigning.objects.create(event_date=datetime.datetime(2008, 6, 3, 12, 0))
res = self.client.get('/dates/booksignings/2008/apr/')
self.assertEqual(res.status_code, 200)
@override_settings(USE_TZ=True, TIME_ZONE='Africa/Nairobi')
def test_aware_datetime_month_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 2, 1, 12, 0, tzinfo=timezone.utc))
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0, tzinfo=timezone.utc))
BookSigning.objects.create(event_date=datetime.datetime(2008, 6, 3, 12, 0, tzinfo=timezone.utc))
res = self.client.get('/dates/booksignings/2008/apr/')
self.assertEqual(res.status_code, 200)
def test_date_list_order(self):
"""date_list should be sorted ascending in month view"""
_make_books(10, base_date=datetime.date(2011, 12, 25))
res = self.client.get('/dates/books/2011/dec/')
self.assertEqual(list(res.context['date_list']), list(sorted(res.context['date_list'])))
class WeekArchiveViewTests(TestCase):
fixtures = ['generic-views-test-data.json']
urls = 'regressiontests.generic_views.urls'
def test_week_view(self):
res = self.client.get('/dates/books/2008/week/39/')
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/book_archive_week.html')
self.assertEqual(res.context['book_list'][0], Book.objects.get(pubdate=datetime.date(2008, 10, 1)))
self.assertEqual(res.context['week'], datetime.date(2008, 9, 28))
# Since allow_empty=False, next/prev weeks must be valid
self.assertEqual(res.context['next_week'], None)
self.assertEqual(res.context['previous_week'], datetime.date(2006, 4, 30))
def test_week_view_allow_empty(self):
# allow_empty = False, empty week
res = self.client.get('/dates/books/2008/week/12/')
self.assertEqual(res.status_code, 404)
        # allow_empty = True, empty week
res = self.client.get('/dates/books/2008/week/12/allow_empty/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), [])
self.assertEqual(res.context['week'], datetime.date(2008, 3, 23))
# Since allow_empty=True, next/prev are allowed to be empty weeks
self.assertEqual(res.context['next_week'], datetime.date(2008, 3, 30))
self.assertEqual(res.context['previous_week'], datetime.date(2008, 3, 16))
# allow_empty but not allow_future: next_week should be empty
url = datetime.date.today().strftime('/dates/books/%Y/week/%U/allow_empty/').lower()
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['next_week'], None)
def test_week_view_allow_future(self):
# January 7th always falls in week 1, given Python's definition of week numbers
future = datetime.date(datetime.date.today().year + 1, 1, 7)
future_sunday = future - datetime.timedelta(days=(future.weekday() + 1) % 7)
b = Book.objects.create(name="The New New Testement", pages=600, pubdate=future)
res = self.client.get('/dates/books/%s/week/1/' % future.year)
self.assertEqual(res.status_code, 404)
res = self.client.get('/dates/books/%s/week/1/allow_future/' % future.year)
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), [b])
self.assertEqual(res.context['week'], future_sunday)
# Since allow_future = True but not allow_empty, next/prev are not
# allowed to be empty weeks
self.assertEqual(res.context['next_week'], None)
self.assertEqual(res.context['previous_week'], datetime.date(2008, 9, 28))
# allow_future, but not allow_empty, with a current week. So next
# should be in the future
res = self.client.get('/dates/books/2008/week/39/allow_future/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['next_week'], future_sunday)
self.assertEqual(res.context['previous_week'], datetime.date(2006, 4, 30))
def test_week_view_paginated(self):
week_start = datetime.date(2008, 9, 28)
week_end = week_start + datetime.timedelta(days=7)
res = self.client.get('/dates/books/2008/week/39/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), list(Book.objects.filter(pubdate__gte=week_start, pubdate__lt=week_end)))
self.assertEqual(list(res.context['object_list']), list(Book.objects.filter(pubdate__gte=week_start, pubdate__lt=week_end)))
self.assertTemplateUsed(res, 'generic_views/book_archive_week.html')
def test_week_view_invalid_pattern(self):
res = self.client.get('/dates/books/2007/week/no_week/')
self.assertEqual(res.status_code, 404)
def test_week_start_Monday(self):
# Regression for #14752
res = self.client.get('/dates/books/2008/week/39/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['week'], datetime.date(2008, 9, 28))
res = self.client.get('/dates/books/2008/week/39/monday/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['week'], datetime.date(2008, 9, 29))
def test_datetime_week_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0))
res = self.client.get('/dates/booksignings/2008/week/13/')
self.assertEqual(res.status_code, 200)
@override_settings(USE_TZ=True, TIME_ZONE='Africa/Nairobi')
def test_aware_datetime_week_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0, tzinfo=timezone.utc))
res = self.client.get('/dates/booksignings/2008/week/13/')
self.assertEqual(res.status_code, 200)
class DayArchiveViewTests(TestCase):
fixtures = ['generic-views-test-data.json']
urls = 'regressiontests.generic_views.urls'
def test_day_view(self):
res = self.client.get('/dates/books/2008/oct/01/')
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/book_archive_day.html')
self.assertEqual(list(res.context['book_list']),
list(Book.objects.filter(pubdate=datetime.date(2008, 10, 1))))
self.assertEqual(res.context['day'], datetime.date(2008, 10, 1))
# Since allow_empty=False, next/prev days must be valid.
self.assertEqual(res.context['next_day'], None)
self.assertEqual(res.context['previous_day'], datetime.date(2006, 5, 1))
def test_day_view_allow_empty(self):
        # allow_empty = False, empty day
res = self.client.get('/dates/books/2000/jan/1/')
self.assertEqual(res.status_code, 404)
        # allow_empty = True, empty day
res = self.client.get('/dates/books/2000/jan/1/allow_empty/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), [])
self.assertEqual(res.context['day'], datetime.date(2000, 1, 1))
        # Since it's allow_empty, next/prev are allowed to be empty days (#7164)
self.assertEqual(res.context['next_day'], datetime.date(2000, 1, 2))
self.assertEqual(res.context['previous_day'], datetime.date(1999, 12, 31))
        # allow_empty but not allow_future: next_day should be empty (#7164)
url = datetime.date.today().strftime('/dates/books/%Y/%b/%d/allow_empty/').lower()
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['next_day'], None)
def test_day_view_allow_future(self):
future = (datetime.date.today() + datetime.timedelta(days=60))
urlbit = future.strftime('%Y/%b/%d').lower()
b = Book.objects.create(name="The New New Testement", pages=600, pubdate=future)
        # allow_future = False, future day
res = self.client.get('/dates/books/%s/' % urlbit)
self.assertEqual(res.status_code, 404)
        # allow_future = True, valid future day
res = self.client.get('/dates/books/%s/allow_future/' % urlbit)
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), [b])
self.assertEqual(res.context['day'], future)
# allow_future but not allow_empty, next/prev must be valid
self.assertEqual(res.context['next_day'], None)
self.assertEqual(res.context['previous_day'], datetime.date(2008, 10, 1))
        # allow_future, but not allow_empty, with a current day.
res = self.client.get('/dates/books/2008/oct/01/allow_future/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['next_day'], future)
self.assertEqual(res.context['previous_day'], datetime.date(2006, 5, 1))
# allow_future for yesterday, next_day is today (#17192)
today = datetime.date.today()
yesterday = today - datetime.timedelta(days=1)
res = self.client.get('/dates/books/%s/allow_empty_and_future/'
% yesterday.strftime('%Y/%b/%d').lower())
self.assertEqual(res.context['next_day'], today)
def test_day_view_paginated(self):
res = self.client.get('/dates/books/2008/oct/1/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), list(Book.objects.filter(pubdate__year=2008, pubdate__month=10, pubdate__day=1)))
self.assertEqual(list(res.context['object_list']), list(Book.objects.filter(pubdate__year=2008, pubdate__month=10, pubdate__day=1)))
self.assertTemplateUsed(res, 'generic_views/book_archive_day.html')
def test_next_prev_context(self):
res = self.client.get('/dates/books/2008/oct/01/')
self.assertEqual(res.content, b"Archive for Oct. 1, 2008. Previous day is May 1, 2006")
def test_custom_month_format(self):
res = self.client.get('/dates/books/2008/10/01/')
self.assertEqual(res.status_code, 200)
def test_day_view_invalid_pattern(self):
res = self.client.get('/dates/books/2007/oct/no_day/')
self.assertEqual(res.status_code, 404)
def test_today_view(self):
res = self.client.get('/dates/books/today/')
self.assertEqual(res.status_code, 404)
res = self.client.get('/dates/books/today/allow_empty/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['day'], datetime.date.today())
def test_datetime_day_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0))
res = self.client.get('/dates/booksignings/2008/apr/2/')
self.assertEqual(res.status_code, 200)
@requires_tz_support
@override_settings(USE_TZ=True, TIME_ZONE='Africa/Nairobi')
def test_aware_datetime_day_view(self):
bs = BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0, tzinfo=timezone.utc))
res = self.client.get('/dates/booksignings/2008/apr/2/')
self.assertEqual(res.status_code, 200)
# 2008-04-02T00:00:00+03:00 (beginning of day) > 2008-04-01T22:00:00+00:00 (book signing event date)
bs.event_date = datetime.datetime(2008, 4, 1, 22, 0, tzinfo=timezone.utc)
bs.save()
res = self.client.get('/dates/booksignings/2008/apr/2/')
self.assertEqual(res.status_code, 200)
# 2008-04-03T00:00:00+03:00 (end of day) > 2008-04-02T22:00:00+00:00 (book signing event date)
bs.event_date = datetime.datetime(2008, 4, 2, 22, 0, tzinfo=timezone.utc)
bs.save()
res = self.client.get('/dates/booksignings/2008/apr/2/')
self.assertEqual(res.status_code, 404)
class DateDetailViewTests(TestCase):
fixtures = ['generic-views-test-data.json']
urls = 'regressiontests.generic_views.urls'
def test_date_detail_by_pk(self):
res = self.client.get('/dates/books/2008/oct/01/1/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], Book.objects.get(pk=1))
self.assertEqual(res.context['book'], Book.objects.get(pk=1))
self.assertTemplateUsed(res, 'generic_views/book_detail.html')
def test_date_detail_by_slug(self):
res = self.client.get('/dates/books/2006/may/01/byslug/dreaming-in-code/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['book'], Book.objects.get(slug='dreaming-in-code'))
def test_date_detail_custom_month_format(self):
res = self.client.get('/dates/books/2008/10/01/1/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['book'], Book.objects.get(pk=1))
def test_date_detail_allow_future(self):
future = (datetime.date.today() + datetime.timedelta(days=60))
urlbit = future.strftime('%Y/%b/%d').lower()
b = Book.objects.create(name="The New New Testement", slug="new-new", pages=600, pubdate=future)
res = self.client.get('/dates/books/%s/new-new/' % urlbit)
self.assertEqual(res.status_code, 404)
res = self.client.get('/dates/books/%s/%s/allow_future/' % (urlbit, b.id))
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['book'], b)
self.assertTemplateUsed(res, 'generic_views/book_detail.html')
def test_invalid_url(self):
self.assertRaises(AttributeError, self.client.get, "/dates/books/2008/oct/01/nopk/")
def test_get_object_custom_queryset(self):
"""
Ensure that custom querysets are used when provided to
BaseDateDetailView.get_object()
Refs #16918.
"""
res = self.client.get(
'/dates/books/get_object_custom_queryset/2006/may/01/2/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], Book.objects.get(pk=2))
self.assertEqual(res.context['book'], Book.objects.get(pk=2))
self.assertTemplateUsed(res, 'generic_views/book_detail.html')
res = self.client.get(
'/dates/books/get_object_custom_queryset/2008/oct/01/1/')
self.assertEqual(res.status_code, 404)
def test_datetime_date_detail(self):
bs = BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0))
res = self.client.get('/dates/booksignings/2008/apr/2/%d/' % bs.pk)
self.assertEqual(res.status_code, 200)
@requires_tz_support
@override_settings(USE_TZ=True, TIME_ZONE='Africa/Nairobi')
def test_aware_datetime_date_detail(self):
bs = BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0, tzinfo=timezone.utc))
res = self.client.get('/dates/booksignings/2008/apr/2/%d/' % bs.pk)
self.assertEqual(res.status_code, 200)
# 2008-04-02T00:00:00+03:00 (beginning of day) > 2008-04-01T22:00:00+00:00 (book signing event date)
bs.event_date = datetime.datetime(2008, 4, 1, 22, 0, tzinfo=timezone.utc)
bs.save()
res = self.client.get('/dates/booksignings/2008/apr/2/%d/' % bs.pk)
self.assertEqual(res.status_code, 200)
# 2008-04-03T00:00:00+03:00 (end of day) > 2008-04-02T22:00:00+00:00 (book signing event date)
bs.event_date = datetime.datetime(2008, 4, 2, 22, 0, tzinfo=timezone.utc)
bs.save()
res = self.client.get('/dates/booksignings/2008/apr/2/%d/' % bs.pk)
self.assertEqual(res.status_code, 404)
| mit | -1,653,736,007,509,047,800 | 48.570079 | 140 | 0.654796 | false |
hpi-xnor/BMXNet | python/mxnet/ndarray/_internal.py | 34 | 1986 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wildcard-import, unused-import
"""NDArray namespace used to register internal functions."""
import os as _os
import sys as _sys
import numpy as np
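# Prefer the compiled Cython bindings when available (unless MXNET_ENABLE_CYTHON=0);
# otherwise fall back to the pure-ctypes implementation, except when
# MXNET_ENFORCE_CYTHON=1, which turns the missing Cython module into an error.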
try:
if int(_os.environ.get("MXNET_ENABLE_CYTHON", True)) == 0:
from .._ctypes.ndarray import NDArrayBase, CachedOp
from .._ctypes.ndarray import _set_ndarray_class, _imperative_invoke
elif _sys.version_info >= (3, 0):
from .._cy3.ndarray import NDArrayBase, CachedOp
from .._cy3.ndarray import _set_ndarray_class, _imperative_invoke
else:
from .._cy2.ndarray import NDArrayBase, CachedOp
from .._cy2.ndarray import _set_ndarray_class, _imperative_invoke
except ImportError:
if int(_os.environ.get("MXNET_ENFORCE_CYTHON", False)) != 0:
raise ImportError("Cython Module cannot be loaded but MXNET_ENFORCE_CYTHON=1")
from .._ctypes.ndarray import NDArrayBase, CachedOp
from .._ctypes.ndarray import _set_ndarray_class, _imperative_invoke
from ..base import _Null
try:
from .gen__internal import * # pylint: disable=unused-wildcard-import
except ImportError:
pass
__all__ = ['NDArrayBase', 'CachedOp', '_imperative_invoke', '_set_ndarray_class']
| apache-2.0 | 1,769,698,280,044,095,700 | 41.255319 | 86 | 0.726586 | false |
ilexius/odoo | openerp/addons/test_impex/tests/test_load.py | 7 | 44231 | # -*- coding: utf-8 -*-
import json
import pkgutil
import unittest
import openerp.modules.registry
import openerp
from openerp.tests import common
from openerp.tools.misc import mute_logger
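# Test helpers: `message` builds an expected entry of load()'s 'messages' list,
# `moreaction` the "See all possible values" action dict used as its moreinfo.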
def message(msg, type='error', from_=0, to_=0, record=0, field='value', **kwargs):
return dict(kwargs,
type=type, rows={'from': from_, 'to': to_}, record=record,
field=field, message=msg)
def moreaction(**kwargs):
return dict(kwargs,
type='ir.actions.act_window',
target='new',
view_mode='tree,form',
view_type='form',
views=[(False, 'tree'), (False, 'form')],
help=u"See all possible values")
def values(seq, field='value'):
return [item[field] for item in seq]
class ImporterCase(common.TransactionCase):
model_name = False
def __init__(self, *args, **kwargs):
super(ImporterCase, self).__init__(*args, **kwargs)
self.model = None
def setUp(self):
super(ImporterCase, self).setUp()
self.model = self.registry(self.model_name)
self.registry('ir.model.data').clear_caches()
def import_(self, fields, rows, context=None):
return self.model.load(
self.cr, openerp.SUPERUSER_ID, fields, rows, context=context)
def read(self, fields=('value',), domain=(), context=None):
return self.model.read(
self.cr, openerp.SUPERUSER_ID,
self.model.search(self.cr, openerp.SUPERUSER_ID, domain, context=context),
fields=fields, context=context)
def browse(self, domain=(), context=None):
return self.model.browse(
self.cr, openerp.SUPERUSER_ID,
self.model.search(self.cr, openerp.SUPERUSER_ID, domain, context=context),
context=context)
def xid(self, record):
ModelData = self.registry('ir.model.data')
ids = ModelData.search(
self.cr, openerp.SUPERUSER_ID,
[('model', '=', record._name), ('res_id', '=', record.id)])
if ids:
d = ModelData.read(
self.cr, openerp.SUPERUSER_ID, ids, ['name', 'module'])[0]
if d['module']:
return '%s.%s' % (d['module'], d['name'])
return d['name']
name = record.name_get()[0][1]
# fix dotted name_get results, otherwise xid lookups blow up
name = name.replace('.', '-')
ModelData.create(self.cr, openerp.SUPERUSER_ID, {
'name': name,
'model': record._name,
'res_id': record.id,
'module': '__test__'
})
return '__test__.' + name
def add_translations(self, name, type, code, *tnx):
self.registry('res.lang').load_lang(self.cr, openerp.SUPERUSER_ID, code)
Translations = self.registry('ir.translation')
for source, value in tnx:
Translations.create(self.cr, openerp.SUPERUSER_ID, {
'name': name,
'lang': code,
'type': type,
'src': source,
'value': value,
'state': 'translated',
})
class test_ids_stuff(ImporterCase):
model_name = 'export.integer'
def test_create_with_id(self):
result = self.import_(['.id', 'value'], [['42', '36']])
self.assertIs(result['ids'], False)
self.assertEqual(result['messages'], [{
'type': 'error',
'rows': {'from': 0, 'to': 0},
'record': 0,
'field': '.id',
'message': u"Unknown database identifier '42'",
}])
def test_create_with_xid(self):
result = self.import_(['id', 'value'], [['somexmlid', '42']])
self.assertEqual(len(result['ids']), 1)
self.assertFalse(result['messages'])
self.assertEqual(
'somexmlid',
self.xid(self.browse()[0]))
def test_update_with_id(self):
id = self.model.create(self.cr, openerp.SUPERUSER_ID, {'value': 36})
self.assertEqual(
36,
self.model.browse(self.cr, openerp.SUPERUSER_ID, id).value)
result = self.import_(['.id', 'value'], [[str(id), '42']])
self.assertEqual(len(result['ids']), 1)
self.assertFalse(result['messages'])
self.assertEqual(
[42], # updated value to imported
values(self.read()))
def test_update_with_xid(self):
self.import_(['id', 'value'], [['somexmlid', '36']])
self.assertEqual([36], values(self.read()))
self.import_(['id', 'value'], [['somexmlid', '1234567']])
self.assertEqual([1234567], values(self.read()))
class test_boolean_field(ImporterCase):
model_name = 'export.boolean'
def test_empty(self):
self.assertEqual(
self.import_(['value'], []),
{'ids': [], 'messages': []})
def test_exported(self):
result = self.import_(['value'], [['False'], ['True'], ])
self.assertEqual(len(result['ids']), 2)
self.assertFalse(result['messages'])
records = self.read()
self.assertEqual([
False,
True,
], values(records))
def test_falses(self):
for lang, source, value in [('fr_FR', 'no', u'non'),
('de_DE', 'no', u'nein'),
('ru_RU', 'no', u'нет'),
('nl_BE', 'false', u'vals'),
('lt_LT', 'false', u'klaidingas')]:
self.add_translations('test_import.py', 'code', lang, (source, value))
falses = [[u'0'], [u'no'], [u'false'], [u'FALSE'], [u''],
[u'non'], # no, fr
[u'nein'], # no, de
[u'нет'], # no, ru
[u'vals'], # false, nl
[u'klaidingas'], # false, lt,
]
result = self.import_(['value'], falses)
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), len(falses))
self.assertEqual([False] * len(falses), values(self.read()))
def test_trues(self):
trues = [['None'], ['nil'], ['()'], ['f'], ['#f'],
# Problem: OpenOffice (and probably excel) output localized booleans
['VRAI'], ['ok'], ['true'], ['yes'], ['1'], ]
result = self.import_(['value'], trues)
self.assertEqual(len(result['ids']), 10)
self.assertEqual(result['messages'], [
message(u"Unknown value '%s' for boolean field 'unknown', assuming 'yes'" % v[0],
moreinfo=u"Use '1' for yes and '0' for no",
type='warning', from_=i, to_=i, record=i)
for i, v in enumerate(trues)
if v[0] not in ('true', 'yes', '1')
])
self.assertEqual(
[True] * 10,
values(self.read()))
class test_integer_field(ImporterCase):
model_name = 'export.integer'
def test_none(self):
self.assertEqual(
self.import_(['value'], []),
{'ids': [], 'messages': []})
def test_empty(self):
result = self.import_(['value'], [['']])
self.assertEqual(len(result['ids']), 1)
self.assertFalse(result['messages'])
self.assertEqual(
[False],
values(self.read()))
def test_zero(self):
result = self.import_(['value'], [['0']])
self.assertEqual(len(result['ids']), 1)
self.assertFalse(result['messages'])
result = self.import_(['value'], [['-0']])
self.assertEqual(len(result['ids']), 1)
self.assertFalse(result['messages'])
self.assertEqual([False, False], values(self.read()))
def test_positives(self):
result = self.import_(['value'], [
['1'],
['42'],
[str(2**31-1)],
['12345678']
])
self.assertEqual(len(result['ids']), 4)
self.assertFalse(result['messages'])
self.assertEqual([
1, 42, 2**31-1, 12345678
], values(self.read()))
def test_negatives(self):
result = self.import_(['value'], [
['-1'],
['-42'],
[str(-(2**31 - 1))],
[str(-(2**31))],
['-12345678']
])
self.assertEqual(len(result['ids']), 5)
self.assertFalse(result['messages'])
self.assertEqual([
-1, -42, -(2**31 - 1), -(2**31), -12345678
], values(self.read()))
@mute_logger('openerp.sql_db', 'openerp.models')
def test_out_of_range(self):
result = self.import_(['value'], [[str(2**31)]])
self.assertIs(result['ids'], False)
self.assertEqual(result['messages'], [{
'type': 'error',
'rows': {'from': 0, 'to': 0},
'record': 0,
'message': "integer out of range\n"
}])
result = self.import_(['value'], [[str(-2**32)]])
self.assertIs(result['ids'], False)
self.assertEqual(result['messages'], [{
'type': 'error',
'rows': {'from': 0, 'to': 0},
'record': 0,
'message': "integer out of range\n"
}])
def test_nonsense(self):
result = self.import_(['value'], [['zorglub']])
self.assertIs(result['ids'], False)
self.assertEqual(result['messages'], [{
'type': 'error',
'rows': {'from': 0, 'to': 0},
'record': 0,
'field': 'value',
'message': u"'zorglub' does not seem to be an integer for field 'unknown'",
}])
class test_float_field(ImporterCase):
model_name = 'export.float'
def test_none(self):
self.assertEqual(
self.import_(['value'], []),
{'ids': [], 'messages': []})
def test_empty(self):
result = self.import_(['value'], [['']])
self.assertEqual(len(result['ids']), 1)
self.assertFalse(result['messages'])
self.assertEqual(
[False],
values(self.read()))
def test_zero(self):
result = self.import_(['value'], [['0']])
self.assertEqual(len(result['ids']), 1)
self.assertFalse(result['messages'])
result = self.import_(['value'], [['-0']])
self.assertEqual(len(result['ids']), 1)
self.assertFalse(result['messages'])
self.assertEqual([False, False], values(self.read()))
def test_positives(self):
result = self.import_(['value'], [
['1'],
['42'],
[str(2**31-1)],
['12345678'],
[str(2**33)],
['0.000001'],
])
self.assertEqual(len(result['ids']), 6)
self.assertFalse(result['messages'])
self.assertEqual([
1, 42, 2**31-1, 12345678, 2.0**33, .000001
], values(self.read()))
def test_negatives(self):
result = self.import_(['value'], [
['-1'],
['-42'],
[str(-2**31 + 1)],
[str(-2**31)],
['-12345678'],
[str(-2**33)],
['-0.000001'],
])
self.assertEqual(len(result['ids']), 7)
self.assertFalse(result['messages'])
self.assertEqual([
-1, -42, -(2**31 - 1), -(2**31), -12345678, -2.0**33, -.000001
], values(self.read()))
def test_nonsense(self):
result = self.import_(['value'], [['foobar']])
self.assertIs(result['ids'], False)
self.assertEqual(result['messages'], [
message(u"'foobar' does not seem to be a number for field 'unknown'")])
class test_string_field(ImporterCase):
model_name = 'export.string.bounded'
def test_empty(self):
result = self.import_(['value'], [['']])
self.assertEqual(len(result['ids']), 1)
self.assertFalse(result['messages'])
self.assertEqual([False], values(self.read()))
def test_imported(self):
result = self.import_(['value'], [
[u'foobar'],
[u'foobarbaz'],
[u'Með suð í eyrum við spilum endalaust'],
[u"People 'get' types. They use them all the time. Telling "
u"someone he can't pound a nail with a banana doesn't much "
u"surprise him."]
])
self.assertEqual(len(result['ids']), 4)
self.assertFalse(result['messages'])
self.assertEqual([
u"foobar",
u"foobarbaz",
u"Með suð í eyrum ",
u"People 'get' typ",
], values(self.read()))
class test_unbound_string_field(ImporterCase):
model_name = 'export.string'
def test_imported(self):
result = self.import_(['value'], [
[u'í dag viðrar vel til loftárása'],
# ackbar.jpg
[u"If they ask you about fun, you tell them – fun is a filthy"
u" parasite"]
])
self.assertEqual(len(result['ids']), 2)
self.assertFalse(result['messages'])
self.assertEqual([
u"í dag viðrar vel til loftárása",
u"If they ask you about fun, you tell them – fun is a filthy parasite"
], values(self.read()))
class test_required_string_field(ImporterCase):
model_name = 'export.string.required'
@mute_logger('openerp.sql_db', 'openerp.models')
def test_empty(self):
result = self.import_(['value'], [[]])
self.assertEqual(result['messages'], [message(
u"Missing required value for the field 'unknown' (value)")])
self.assertIs(result['ids'], False)
@mute_logger('openerp.sql_db', 'openerp.models')
def test_not_provided(self):
result = self.import_(['const'], [['12']])
self.assertEqual(result['messages'], [message(
u"Missing required value for the field 'unknown' (value)")])
self.assertIs(result['ids'], False)
class test_text(ImporterCase):
model_name = 'export.text'
def test_empty(self):
result = self.import_(['value'], [['']])
self.assertEqual(len(result['ids']), 1)
self.assertFalse(result['messages'])
self.assertEqual([False], values(self.read()))
def test_imported(self):
s = (u"Breiðskífa er notað um útgefna hljómplötu sem inniheldur "
u"stúdíóupptökur frá einum flytjanda. Breiðskífur eru oftast "
u"milli 25-80 mínútur og er lengd þeirra oft miðuð við 33⅓ "
u"snúninga 12 tommu vínylplötur (sem geta verið allt að 30 mín "
u"hvor hlið).\n\nBreiðskífur eru stundum tvöfaldar og eru þær þá"
u" gefnar út á tveimur geisladiskum eða tveimur vínylplötum.")
result = self.import_(['value'], [[s]])
self.assertEqual(len(result['ids']), 1)
self.assertFalse(result['messages'])
self.assertEqual([s], values(self.read()))
class test_selection(ImporterCase):
model_name = 'export.selection'
translations_fr = [
("Foo", "tete"),
("Bar", "titi"),
("Qux", "toto"),
]
def test_imported(self):
result = self.import_(['value'], [
['Qux'],
['Bar'],
['Foo'],
['2'],
])
self.assertEqual(len(result['ids']), 4)
self.assertFalse(result['messages'])
self.assertEqual([3, 2, 1, 2], values(self.read()))
def test_imported_translated(self):
self.add_translations(
'export.selection,value', 'selection', 'fr_FR', *self.translations_fr)
result = self.import_(['value'], [
['toto'],
['tete'],
['titi'],
], context={'lang': 'fr_FR'})
self.assertEqual(len(result['ids']), 3)
self.assertFalse(result['messages'])
self.assertEqual([3, 1, 2], values(self.read()))
result = self.import_(['value'], [['Foo']], context={'lang': 'fr_FR'})
self.assertEqual(len(result['ids']), 1)
self.assertFalse(result['messages'])
def test_invalid(self):
result = self.import_(['value'], [['Baz']])
self.assertIs(result['ids'], False)
self.assertEqual(result['messages'], [message(
u"Value 'Baz' not found in selection field 'unknown'",
moreinfo="Foo Bar Qux 4".split())])
result = self.import_(['value'], [[42]])
self.assertIs(result['ids'], False)
self.assertEqual(result['messages'], [message(
u"Value '42' not found in selection field 'unknown'",
moreinfo="Foo Bar Qux 4".split())])
class test_selection_with_default(ImporterCase):
model_name = 'export.selection.withdefault'
def test_empty(self):
""" Empty cells should set corresponding field to False
"""
result = self.import_(['value'], [['']])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
self.assertEqual(
values(self.read()),
[False])
def test_default(self):
""" Non-provided cells should set corresponding field to default
"""
result = self.import_(['const'], [['42']])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
self.assertEqual(
values(self.read()),
[2])
class test_selection_function(ImporterCase):
model_name = 'export.selection.function'
translations_fr = [
("Corge", "toto"),
("Grault", "titi"),
("Wheee", "tete"),
("Moog", "tutu"),
]
def test_imported(self):
""" import uses fields_get, so translates import label (may or may not
be good news) *and* serializes the selection function to reverse it:
import does not actually know that the selection field uses a function
"""
# NOTE: conflict between a value and a label => pick first
result = self.import_(['value'], [
['3'],
["Grault"],
])
self.assertEqual(len(result['ids']), 2)
self.assertFalse(result['messages'])
self.assertEqual(
[3, 1],
values(self.read()))
def test_translated(self):
""" Expects output of selection function returns translated labels
"""
self.add_translations(
'export.selection,value', 'selection', 'fr_FR', *self.translations_fr)
result = self.import_(['value'], [
['titi'],
['tete'],
], context={'lang': 'fr_FR'})
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 2)
self.assertEqual(values(self.read()), [1, 2])
result = self.import_(['value'], [['Wheee']], context={'lang': 'fr_FR'})
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
class test_m2o(ImporterCase):
model_name = 'export.many2one'
def test_by_name(self):
# create integer objects
integer_id1 = self.registry('export.integer').create(
self.cr, openerp.SUPERUSER_ID, {'value': 42})
integer_id2 = self.registry('export.integer').create(
self.cr, openerp.SUPERUSER_ID, {'value': 36})
# get its name
name1 = dict(self.registry('export.integer').name_get(
self.cr, openerp.SUPERUSER_ID,[integer_id1]))[integer_id1]
name2 = dict(self.registry('export.integer').name_get(
self.cr, openerp.SUPERUSER_ID,[integer_id2]))[integer_id2]
result = self.import_(['value'], [
# import by name_get
[name1],
[name1],
[name2],
])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 3)
# correct ids assigned to corresponding records
self.assertEqual([
(integer_id1, name1),
(integer_id1, name1),
(integer_id2, name2),],
values(self.read()))
def test_by_xid(self):
ExportInteger = self.registry('export.integer')
integer_id = ExportInteger.create(
self.cr, openerp.SUPERUSER_ID, {'value': 42})
xid = self.xid(ExportInteger.browse(
self.cr, openerp.SUPERUSER_ID, [integer_id])[0])
result = self.import_(['value/id'], [[xid]])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
b = self.browse()
self.assertEqual(42, b[0].value.value)
def test_by_id(self):
integer_id = self.registry('export.integer').create(
self.cr, openerp.SUPERUSER_ID, {'value': 42})
result = self.import_(['value/.id'], [[integer_id]])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
b = self.browse()
self.assertEqual(42, b[0].value.value)
def test_by_names(self):
integer_id1 = self.registry('export.integer').create(
self.cr, openerp.SUPERUSER_ID, {'value': 42})
integer_id2 = self.registry('export.integer').create(
self.cr, openerp.SUPERUSER_ID, {'value': 42})
name1 = dict(self.registry('export.integer').name_get(
self.cr, openerp.SUPERUSER_ID,[integer_id1]))[integer_id1]
name2 = dict(self.registry('export.integer').name_get(
self.cr, openerp.SUPERUSER_ID,[integer_id2]))[integer_id2]
# names should be the same
self.assertEqual(name1, name2)
result = self.import_(['value'], [[name2]])
self.assertEqual(
result['messages'],
[message(u"Found multiple matches for field 'unknown' (2 matches)",
type='warning')])
self.assertEqual(len(result['ids']), 1)
self.assertEqual([
(integer_id1, name1)
], values(self.read()))
def test_fail_by_implicit_id(self):
""" Can't implicitly import records by id
"""
# create integer objects
integer_id1 = self.registry('export.integer').create(
self.cr, openerp.SUPERUSER_ID, {'value': 42})
integer_id2 = self.registry('export.integer').create(
self.cr, openerp.SUPERUSER_ID, {'value': 36})
# Because name_search all the things. Fallback schmallback
result = self.import_(['value'], [
# import by id, without specifying it
[integer_id1],
[integer_id2],
[integer_id1],
])
self.assertEqual(result['messages'], [
message(u"No matching record found for name '%s' in field 'unknown'" % id,
from_=index, to_=index, record=index,
moreinfo=moreaction(res_model='export.integer'))
for index, id in enumerate([integer_id1, integer_id2, integer_id1])])
self.assertIs(result['ids'], False)
@mute_logger('openerp.sql_db')
def test_fail_id_mistype(self):
result = self.import_(['value/.id'], [["foo"]])
self.assertEqual(result['messages'], [
message(u"Invalid database id 'foo' for the field 'unknown'",
moreinfo=moreaction(res_model='ir.model.data',
domain=[('model','=','export.integer')]))
])
self.assertIs(result['ids'], False)
def test_sub_field(self):
""" Does not implicitly create the record, does not warn that you can't
import m2o subfields (at all)...
"""
result = self.import_(['value/value'], [['42']])
self.assertEqual(result['messages'], [
message(u"Can not create Many-To-One records indirectly, import "
u"the field separately")])
self.assertIs(result['ids'], False)
def test_fail_noids(self):
result = self.import_(['value'], [['nameisnoexist:3']])
self.assertEqual(result['messages'], [message(
u"No matching record found for name 'nameisnoexist:3' "
u"in field 'unknown'", moreinfo=moreaction(
res_model='export.integer'))])
self.assertIs(result['ids'], False)
result = self.import_(['value/id'], [['noxidhere']])
self.assertEqual(result['messages'], [message(
u"No matching record found for external id 'noxidhere' "
u"in field 'unknown'", moreinfo=moreaction(
res_model='ir.model.data', domain=[('model','=','export.integer')]))])
self.assertIs(result['ids'], False)
result = self.import_(['value/.id'], [['66']])
self.assertEqual(result['messages'], [message(
u"No matching record found for database id '66' "
u"in field 'unknown'", moreinfo=moreaction(
res_model='ir.model.data', domain=[('model','=','export.integer')]))])
self.assertIs(result['ids'], False)
def test_fail_multiple(self):
result = self.import_(
['value', 'value/id'],
[['somename', 'somexid']])
self.assertEqual(result['messages'], [message(
u"Ambiguous specification for field 'unknown', only provide one of "
u"name, external id or database id")])
self.assertIs(result['ids'], False)
class test_m2m(ImporterCase):
model_name = 'export.many2many'
# apparently, one and only thing which works is a
# csv_internal_sep-separated list of ids, xids, or names (depending if
# m2m/.id, m2m/id or m2m[/anythingelse]
def test_ids(self):
id1 = self.registry('export.many2many.other').create(
self.cr, openerp.SUPERUSER_ID, {'value': 3, 'str': 'record0'})
id2 = self.registry('export.many2many.other').create(
self.cr, openerp.SUPERUSER_ID, {'value': 44, 'str': 'record1'})
id3 = self.registry('export.many2many.other').create(
self.cr, openerp.SUPERUSER_ID, {'value': 84, 'str': 'record2'})
id4 = self.registry('export.many2many.other').create(
self.cr, openerp.SUPERUSER_ID, {'value': 9, 'str': 'record3'})
id5 = self.registry('export.many2many.other').create(
self.cr, openerp.SUPERUSER_ID, {'value': 99, 'str': 'record4'})
result = self.import_(['value/.id'], [
['%d,%d' % (id1, id2)],
['%d,%d,%d' % (id1, id3, id4)],
['%d,%d,%d' % (id1, id2, id3)],
['%d' % id5]
])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 4)
ids = lambda records: [record.id for record in records]
b = self.browse()
self.assertEqual(ids(b[0].value), [id1, id2])
self.assertEqual(values(b[0].value), [3, 44])
self.assertEqual(ids(b[2].value), [id1, id2, id3])
self.assertEqual(values(b[2].value), [3, 44, 84])
def test_noids(self):
result = self.import_(['value/.id'], [['42']])
self.assertEqual(result['messages'], [message(
u"No matching record found for database id '42' in field "
u"'unknown'", moreinfo=moreaction(
res_model='ir.model.data', domain=[('model','=','export.many2many.other')]))])
self.assertIs(result['ids'], False)
def test_xids(self):
M2O_o = self.registry('export.many2many.other')
id1 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 3, 'str': 'record0'})
id2 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 44, 'str': 'record1'})
id3 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 84, 'str': 'record2'})
id4 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 9, 'str': 'record3'})
records = M2O_o.browse(self.cr, openerp.SUPERUSER_ID, [id1, id2, id3, id4])
result = self.import_(['value/id'], [
['%s,%s' % (self.xid(records[0]), self.xid(records[1]))],
['%s' % self.xid(records[3])],
['%s,%s' % (self.xid(records[2]), self.xid(records[1]))],
])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 3)
b = self.browse()
self.assertEqual(values(b[0].value), [3, 44])
self.assertEqual(values(b[2].value), [44, 84])
def test_noxids(self):
result = self.import_(['value/id'], [['noxidforthat']])
self.assertEqual(result['messages'], [message(
u"No matching record found for external id 'noxidforthat' in field"
u" 'unknown'", moreinfo=moreaction(
res_model='ir.model.data', domain=[('model','=','export.many2many.other')]))])
self.assertIs(result['ids'], False)
def test_names(self):
M2O_o = self.registry('export.many2many.other')
id1 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 3, 'str': 'record0'})
id2 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 44, 'str': 'record1'})
id3 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 84, 'str': 'record2'})
id4 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 9, 'str': 'record3'})
records = M2O_o.browse(self.cr, openerp.SUPERUSER_ID, [id1, id2, id3, id4])
name = lambda record: record.name_get()[0][1]
result = self.import_(['value'], [
['%s,%s' % (name(records[1]), name(records[2]))],
['%s,%s,%s' % (name(records[0]), name(records[1]), name(records[2]))],
['%s,%s' % (name(records[0]), name(records[3]))],
])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 3)
b = self.browse()
self.assertEqual(values(b[1].value), [3, 44, 84])
self.assertEqual(values(b[2].value), [3, 9])
def test_nonames(self):
result = self.import_(['value'], [['wherethem2mhavenonames']])
self.assertEqual(result['messages'], [message(
u"No matching record found for name 'wherethem2mhavenonames' in "
u"field 'unknown'", moreinfo=moreaction(
res_model='export.many2many.other'))])
self.assertIs(result['ids'], False)
def test_import_to_existing(self):
M2O_o = self.registry('export.many2many.other')
id1 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 3, 'str': 'record0'})
id2 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 44, 'str': 'record1'})
id3 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 84, 'str': 'record2'})
id4 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 9, 'str': 'record3'})
xid = 'myxid'
result = self.import_(['id', 'value/.id'], [[xid, '%d,%d' % (id1, id2)]])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
result = self.import_(['id', 'value/.id'], [[xid, '%d,%d' % (id3, id4)]])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
b = self.browse()
self.assertEqual(len(b), 1)
# TODO: replacement of existing m2m values is correct?
self.assertEqual(values(b[0].value), [84, 9])
class test_o2m(ImporterCase):
model_name = 'export.one2many'
def test_name_get(self):
s = u'Java is a DSL for taking large XML files and converting them ' \
u'to stack traces'
result = self.import_(
['const', 'value'],
[['5', s]])
self.assertEqual(result['messages'], [message(
u"No matching record found for name '%s' in field 'unknown'" % s,
moreinfo=moreaction(res_model='export.one2many.child'))])
self.assertIs(result['ids'], False)
def test_single(self):
result = self.import_(['const', 'value/value'], [
['5', '63']
])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
(b,) = self.browse()
self.assertEqual(b.const, 5)
self.assertEqual(values(b.value), [63])
def test_multicore(self):
result = self.import_(['const', 'value/value'], [
['5', '63'],
['6', '64'],
])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 2)
b1, b2 = self.browse()
self.assertEqual(b1.const, 5)
self.assertEqual(values(b1.value), [63])
self.assertEqual(b2.const, 6)
self.assertEqual(values(b2.value), [64])
def test_multisub(self):
result = self.import_(['const', 'value/value'], [
['5', '63'],
['', '64'],
['', '65'],
['', '66'],
])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
(b,) = self.browse()
self.assertEqual(values(b.value), [63, 64, 65, 66])
def test_multi_subfields(self):
result = self.import_(['value/str', 'const', 'value/value'], [
['this', '5', '63'],
['is', '', '64'],
['the', '', '65'],
['rhythm', '', '66'],
])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
(b,) = self.browse()
self.assertEqual(values(b.value), [63, 64, 65, 66])
self.assertEqual(
values(b.value, 'str'),
'this is the rhythm'.split())
def test_link_inline(self):
""" m2m-style specification for o2ms
"""
id1 = self.registry('export.one2many.child').create(self.cr, openerp.SUPERUSER_ID, {
'str': 'Bf', 'value': 109
})
id2 = self.registry('export.one2many.child').create(self.cr, openerp.SUPERUSER_ID, {
'str': 'Me', 'value': 262
})
result = self.import_(['const', 'value/.id'], [
['42', '%d,%d' % (id1, id2)]
])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
[b] = self.browse()
self.assertEqual(b.const, 42)
# automatically forces link between core record and o2ms
self.assertEqual(values(b.value), [109, 262])
self.assertEqual(values(b.value, field='parent_id'), [b, b])
def test_link(self):
""" O2M relating to an existing record (update) force a LINK_TO as well
"""
O2M = self.registry('export.one2many.child')
id1 = O2M.create(self.cr, openerp.SUPERUSER_ID, {
'str': 'Bf', 'value': 109
})
id2 = O2M.create(self.cr, openerp.SUPERUSER_ID, {
'str': 'Me', 'value': 262
})
result = self.import_(['const', 'value/.id'], [
['42', str(id1)],
['', str(id2)],
])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
[b] = self.browse()
self.assertEqual(b.const, 42)
# automatically forces link between core record and o2ms
self.assertEqual(values(b.value), [109, 262])
self.assertEqual(values(b.value, field='parent_id'), [b, b])
def test_link_2(self):
O2M_c = self.registry('export.one2many.child')
id1 = O2M_c.create(self.cr, openerp.SUPERUSER_ID, {
'str': 'Bf', 'value': 109
})
id2 = O2M_c.create(self.cr, openerp.SUPERUSER_ID, {
'str': 'Me', 'value': 262
})
result = self.import_(['const', 'value/.id', 'value/value'], [
['42', str(id1), '1'],
['', str(id2), '2'],
])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
[b] = self.browse()
self.assertEqual(b.const, 42)
self.assertEqual(values(b.value), [1, 2])
self.assertEqual(values(b.value, field='parent_id'), [b, b])
class test_o2m_multiple(ImporterCase):
model_name = 'export.one2many.multiple'
def test_multi_mixed(self):
result = self.import_(['const', 'child1/value', 'child2/value'], [
['5', '11', '21'],
['', '12', '22'],
['', '13', '23'],
['', '14', ''],
])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
[b] = self.browse()
self.assertEqual(values(b.child1), [11, 12, 13, 14])
self.assertEqual(values(b.child2), [21, 22, 23])
def test_multi(self):
result = self.import_(['const', 'child1/value', 'child2/value'], [
['5', '11', '21'],
['', '12', ''],
['', '13', ''],
['', '14', ''],
['', '', '22'],
['', '', '23'],
])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
[b] = self.browse()
self.assertEqual(values(b.child1), [11, 12, 13, 14])
self.assertEqual(values(b.child2), [21, 22, 23])
def test_multi_fullsplit(self):
result = self.import_(['const', 'child1/value', 'child2/value'], [
['5', '11', ''],
['', '12', ''],
['', '13', ''],
['', '14', ''],
['', '', '21'],
['', '', '22'],
['', '', '23'],
])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
[b] = self.browse()
self.assertEqual(b.const, 5)
self.assertEqual(values(b.child1), [11, 12, 13, 14])
self.assertEqual(values(b.child2), [21, 22, 23])
class test_realworld(common.TransactionCase):
def test_bigfile(self):
data = json.loads(pkgutil.get_data(self.__module__, 'contacts_big.json'))
result = self.registry('res.partner').load(
self.cr, openerp.SUPERUSER_ID,
['name', 'mobile', 'email', 'image'],
data)
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), len(data))
def test_backlink(self):
data = json.loads(pkgutil.get_data(self.__module__, 'contacts.json'))
result = self.registry('res.partner').load(
self.cr, openerp.SUPERUSER_ID,
["name", "type", "street", "city", "country_id", "category_id",
"supplier", "customer", "is_company", "parent_id"],
data)
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), len(data))
def test_recursive_o2m(self):
""" The content of the o2m field's dict needs to go through conversion
as it may be composed of convertables or other relational fields
"""
self.registry('ir.model.data').clear_caches()
Model = self.registry('export.one2many.recursive')
result = Model.load(self.cr, openerp.SUPERUSER_ID,
['value', 'child/const', 'child/child1/str', 'child/child2/value'],
[
['4', '42', 'foo', '55'],
['', '43', 'bar', '56'],
['', '', 'baz', ''],
['', '55', 'qux', '57'],
['5', '99', 'wheee', ''],
['', '98', '', '12'],
],
context=None)
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 2)
b = Model.browse(self.cr, openerp.SUPERUSER_ID, result['ids'], context=None)
self.assertEqual((b[0].value, b[1].value), (4, 5))
self.assertEqual([child.str for child in b[0].child[1].child1],
['bar', 'baz'])
self.assertFalse(len(b[1].child[1].child1))
self.assertEqual([child.value for child in b[1].child[1].child2],
[12])
class test_date(ImporterCase):
model_name = 'export.date'
def test_empty(self):
self.assertEqual(
self.import_(['value'], []),
{'ids': [], 'messages': []})
def test_basic(self):
result = self.import_(['value'], [['2012-02-03']])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
def test_invalid(self):
result = self.import_(['value'], [['not really a date']])
self.assertEqual(result['messages'], [
message(u"'not really a date' does not seem to be a valid date "
u"for field 'unknown'",
moreinfo=u"Use the format '2012-12-31'")])
self.assertIs(result['ids'], False)
class test_datetime(ImporterCase):
model_name = 'export.datetime'
def test_empty(self):
self.assertEqual(
self.import_(['value'], []),
{'ids': [], 'messages': []})
def test_basic(self):
result = self.import_(['value'], [['2012-02-03 11:11:11']])
self.assertFalse(result['messages'])
self.assertEqual(len(result['ids']), 1)
def test_invalid(self):
result = self.import_(['value'], [['not really a datetime']])
self.assertEqual(result['messages'], [
message(u"'not really a datetime' does not seem to be a valid "
u"datetime for field 'unknown'",
moreinfo=u"Use the format '2012-12-31 23:59:59'")])
self.assertIs(result['ids'], False)
def test_checktz1(self):
""" Imported date should be interpreted as being in the tz provided by
the context
"""
# write dummy tz in user (Asia/Hovd UTC+0700), should be superseded by
# context
self.registry('res.users').write(
self.cr, openerp.SUPERUSER_ID, [openerp.SUPERUSER_ID],
{'tz': 'Asia/Hovd'})
# UTC+1400
result = self.import_(
['value'], [['2012-02-03 11:11:11']], {'tz': 'Pacific/Kiritimati'})
self.assertFalse(result['messages'])
self.assertEqual(
values(self.read(domain=[('id', 'in', result['ids'])])),
['2012-02-02 21:11:11'])
# UTC-0930
result = self.import_(
['value'], [['2012-02-03 11:11:11']], {'tz': 'Pacific/Marquesas'})
self.assertFalse(result['messages'])
self.assertEqual(
values(self.read(domain=[('id', 'in', result['ids'])])),
['2012-02-03 20:41:11'])
def test_usertz(self):
""" If the context does not hold a timezone, the importing user's tz
should be used
"""
# UTC +1000
self.registry('res.users').write(
self.cr, openerp.SUPERUSER_ID, [openerp.SUPERUSER_ID],
{'tz': 'Asia/Yakutsk'})
result = self.import_(
['value'], [['2012-02-03 11:11:11']])
self.assertFalse(result['messages'])
self.assertEqual(
values(self.read(domain=[('id', 'in', result['ids'])])),
['2012-02-03 01:11:11'])
def test_notz(self):
""" If there is no tz either in the context or on the user, falls back
to UTC
"""
self.registry('res.users').write(
self.cr, openerp.SUPERUSER_ID, [openerp.SUPERUSER_ID],
{'tz': False})
result = self.import_(['value'], [['2012-02-03 11:11:11']])
self.assertFalse(result['messages'])
self.assertEqual(
values(self.read(domain=[('id', 'in', result['ids'])])),
['2012-02-03 11:11:11'])
class test_unique(ImporterCase):
model_name = 'export.unique'
@mute_logger('openerp.sql_db')
def test_unique(self):
result = self.import_(['value'], [
['1'],
['1'],
['2'],
['3'],
['3'],
])
self.assertFalse(result['ids'])
self.assertEqual(result['messages'], [
dict(message=u"The value for the field 'value' already exists. "
u"This might be 'unknown' in the current model, "
u"or a field of the same name in an o2m.",
type='error', rows={'from': 1, 'to': 1},
record=1, field='value'),
dict(message=u"The value for the field 'value' already exists. "
u"This might be 'unknown' in the current model, "
u"or a field of the same name in an o2m.",
type='error', rows={'from': 4, 'to': 4},
record=4, field='value'),
])
| gpl-3.0 | -1,911,940,414,141,114,400 | 36.91073 | 94 | 0.530816 | false |
vtemian/uni-west | second_year/os/exams/os_sched.py | 1 | 1793 | import Queue
import copy
fd = open('processes.txt')
processes = []
endTime = 0
time = 0
for line in fd:
tempProc = line.split(" ")
tempProc[0] = int(tempProc[0])
tempProc[1] = int(tempProc[1])
tempProc.append(0)
tempProc.append(0)
tempProc.append(0)
process = (arrival, burst, tw, tr, visited) = tempProc
processes.append(process)
for process in processes:
#print("Arrival {}; Burst {}".format(process[0],process[1]))
endTime += int(process[1])
pass
backupProcesses = copy.deepcopy(processes)
def getProcessesFifo(q, ps, time):
for process in ps:
if process[0] <= time and int(process[4]) == 0:
process[4] = 1
q.append(process)
q.sort(key=lambda tup: tup[0])
return q
def computeTr(ps):
for process in ps:
process[3] = process[1] + process[2]
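# FIFO scheduling: dispatch processes strictly in arrival order; a process's
# waiting time (tw) is its dispatch time minus its arrival time.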
def fifo(ps):
time = -1
q = []
while len(q) == 0:
time +=1
q = getProcessesFifo(q, ps, time)
while time < endTime:
q = getProcessesFifo(q, ps, time)
process = q.pop(0)
process[2] = time - process[0]
time += process[1]
computeTr(ps)
print "Fifo"
print "Arr Burst Tw Tr"
for process in ps:
print("{} {} {} {}".format(process[0],process[1],process[2],process[3]))
def sjf(ps):
time = -1
q = []
while len(q) == 0:
time +=1
q = getProcessesFifo(q, ps, time)
while time < endTime:
q = getProcessesFifo(q, ps, time)
q.sort(key=lambda tup: tup[1])
process = q.pop(0)
process[2] = time - process[0]
time += process[1]
computeTr(ps)
print "SJF"
print "Arr Burst Tw Tr"
for process in ps:
print("{} {} {} {}".format(process[0],process[1],process[2],process[3]))
fifo(processes)
processes = copy.deepcopy(backupProcesses)
sjf(processes)
processes = copy.deepcopy(backupProcesses)
| apache-2.0 | -3,198,944,461,955,452,400 | 20.987179 | 77 | 0.616843 | false |
google-code/betsynetpdf | sumatrapdf/scripts/util.py | 4 | 21807 | import os
import re
import subprocess
import sys
import hashlib
import string
import time
import types
import zipfile
import bz2
import shutil
def log(s):
print(s)
sys.stdout.flush()
def strip_empty_lines(s):
s = s.replace("\r\n", "\n")
lines = [l.strip() for l in s.split("\n") if len(l.strip()) > 0]
return string.join(lines, "\n")
def trim_str(s):
if len(s) < 75:
return (s, False)
# we don't want to trim if adding "..." would make it bigger than original
if len(s) < 78:
return (s, False)
return (s[:75], True)
def test_for_flag(args, arg, has_data=False):
if arg not in args:
if not has_data:
return False
for argx in args:
if argx.startswith(arg + "="):
args.remove(argx)
return argx[len(arg) + 1:]
return None
if not has_data:
args.remove(arg)
return True
idx = args.index(arg)
if idx == len(args) - 1:
return None
data = args[idx + 1]
args.pop(idx + 1)
args.pop(idx)
return data
def file_sha1(fp):
data = open(fp, "rb").read()
m = hashlib.sha1()
m.update(data)
return m.hexdigest()
def delete_file(path):
if os.path.exists(path):
os.remove(path)
def create_dir(d):
if not os.path.exists(d):
os.makedirs(d)
return d
def verify_path_exists(path):
if not os.path.exists(path):
print("path '%s' doesn't exist" % path)
sys.exit(1)
return path
def verify_started_in_right_directory():
if os.path.exists(os.path.join("scripts", "build.py")):
return
if os.path.exists(os.path.join(os.getcwd(), "scripts", "build.py")):
return
print("This script must be run from top of the source tree")
sys.exit(1)
def subprocess_flags():
# this magic disables the modal dialog that windows shows if the process crashes
# TODO: it doesn't seem to work, maybe because it was actually a crash in a process
# sub-launched from the process I'm launching. I had to manually disable this in
# registry, as per http://stackoverflow.com/questions/396369/how-do-i-disable-the-debug-close-application-dialog-on-windows-vista:
# DWORD HKLM or HKCU\Software\Microsoft\Windows\Windows Error Reporting\DontShowUI = "1"
# DWORD HKLM or HKCU\Software\Microsoft\Windows\Windows Error Reporting\Disabled = "1"
# see: http://msdn.microsoft.com/en-us/library/bb513638.aspx
if sys.platform.startswith("win"):
import ctypes
SEM_NOGPFAULTERRORBOX = 0x0002 # From MSDN
ctypes.windll.kernel32.SetErrorMode(SEM_NOGPFAULTERRORBOX)
return 0x8000000 # win32con.CREATE_NO_WINDOW?
return 0
# Apparently shell argument to Popen it must be False on unix/mac and True
# on windows
def shell_arg():
if os.name == "nt":
return True
return False
# will throw an exception if a command doesn't exist
# otherwise returns a tuple:
# (stdout, stderr, errcode)
def run_cmd(*args):
cmd = " ".join(args)
print("run_cmd: '%s'" % cmd)
cmdproc = subprocess.Popen(args, shell=shell_arg(), stdout=subprocess.PIPE,
stderr=subprocess.PIPE, creationflags=subprocess_flags())
res = cmdproc.communicate()
return (res[0], res[1], cmdproc.returncode)
# like run_cmd() but throws an exception if command returns non-0 error code
def run_cmd_throw(*args):
cmd = " ".join(args)
print("run_cmd_throw: '%s'" % cmd)
cmdproc = subprocess.Popen(args, shell=shell_arg(), stdout=subprocess.PIPE,
stderr=subprocess.PIPE, creationflags=subprocess_flags())
res = cmdproc.communicate()
errcode = cmdproc.returncode
if 0 != errcode:
print("Failed with error code %d" % errcode)
if len(res[0]) > 0:
print("Stdout:\n%s" % res[0])
if len(res[1]) > 0:
print("Stderr:\n%s" % res[1])
raise Exception("'%s' failed with error code %d" % (cmd, errcode))
return (res[0], res[1])
# work-around a problem with running devenv from command-line:
# http://social.msdn.microsoft.com/Forums/en-US/msbuild/thread/9d8b9d4a-c453-4f17-8dc6-838681af90f4
def kill_msbuild():
(stdout, stderr, err) = run_cmd("taskkill", "/F", "/IM", "msbuild.exe")
if err not in (0, 128): # 0 is no error, 128 is 'process not found'
print("err: %d\n%s%s" % (err, stdout, stderr))
print("exiting")
sys.exit(1)
# Parse output of svn info and return revision number indicated by
# "Last Changed Rev" field or, if that doesn't exist, by "Revision" field
def parse_svninfo_out(txt):
ver = re.findall(r'(?m)^Last Changed Rev: (\d+)', txt)
if ver:
return ver[0]
ver = re.findall(r'(?m)^Revision: (\d+)', txt)
if ver:
return ver[0]
raise Exception("parse_svn_info_out() failed to parse '%s'" % txt)
# returns local and latest (on the server) svn versions
def get_svn_versions():
(out, err) = run_cmd_throw("svn", "info")
ver_local = str(parse_svninfo_out(out))
(out, err) = run_cmd_throw("svn", "info",
"https://sumatrapdf.googlecode.com/svn/trunk")
ver_latest = str(parse_svninfo_out(out))
return ver_local, ver_latest
# Given a line in svn info output:
# URL: https://sumatrapdf.googlecode.com/svn/trunk
# return '/trunk' part
def get_svn_branch():
(out, err) = run_cmd_throw("svn", "info")
url = re.findall(r'URL: (.+)', out)[0]
s = "https://sumatrapdf.googlecode.com/svn"
assert url.startswith(s), "'%s' should start with '%s'" % (url, s)
return url[len(s):]
# Parse output of "svn log -r${rev} -v", which looks sth. like this:
#------------------------------------------------------------------------
# r6667 | kkowalczyk | 2012-09-25 22:52:34 -0700 (Tue, 25 Sep 2012) | 1 line
# Changed paths:
# M /trunk/installer-vc2008.vcproj
# D /trunk/src/utils/Http.h
# A /trunk/src/utils/HttpUtil.cpp (from /trunk/src/utils/Http.cpp:6665)
#
# rename Http.[h|cpp] => HttpUtil.[h|cpp]
#------------------------------------------------------------------------
# Returns a tuple:
# (user, comment, modified, added, deleted)
# or None in case this is not a source checkin (but e.g. a wiki page edit)
def parse_svnlog_out(txt):
lines = [l.strip() for l in txt.split("\n")]
# remove empty line at the end
if len(lines) > 1 and len(lines[-1]) == 0:
lines = lines[:-1]
if 1 == len(lines):
return None
if not lines[0].startswith("---"):
print(txt)
print("l: '%s'" % lines[0])
assert lines[0].startswith("----")
if not lines[-1].startswith("---"):
print(txt)
print("l: '%s'" % lines[-1])
assert lines[-1].startswith("----")
user = lines[1].split(" | ")[1]
assert "Changed paths:" == lines[2]
modified = []
added = []
deleted = []
lines = lines[3:]
n = 0
while True:
if 0 == len(lines[n]):
break
s = lines[n]
#print("s: %s" % s)
typ = s[0]
name = s[2:]
assert name[0] == '/'
if typ == 'M':
modified.append(name)
elif typ == 'D':
deleted.append(name)
elif typ == 'A':
added.append(name)
else:
print("typ: %s\n" % typ)
assert False
n += 1
lines = lines[n + 1:-1] # skip the last ----
comment = string.join(lines, "\n")
return (user, comment, modified, added, deleted)
def parse_svnlog_out_test():
s = """------------------------------------------------------------------------
r6667 | kkowalczyk | 2012-09-25 22:52:34 -0700 (Tue, 25 Sep 2012) | 1 line
Changed paths:
M /trunk/src/SumatraPDF.cpp
D /trunk/src/utils/Http.h
A /trunk/src/utils/HttpUtil.h (from /trunk/src/utils/Http.h:6665)
M /trunk/sumatrapdf-vc2012.vcxproj
M /trunk/sumatrapdf-vc2012.vcxproj.filters
rename Http.[h|cpp] => HttpUtil.[h|cpp]
------------------------------------------------------------------------"""
res = parse_svnlog_out(s)
(user, comment, modified, added, deleted) = res
print("User: %s\nComment: %s\nModified: %s\nAdded: %s\nDeleted: %s\n" %
(user, comment, str(modified), str(added), str(deleted)))
assert user == "kkowalczyk"
assert comment == "rename Http.[h|cpp] => HttpUtil.[h|cpp]"
def parse_svnlog_out_test2(startrev=1, endrev=6700):
rev = endrev
while rev >= startrev:
(out, err) = run_cmd_throw("svn", "log", "-r%s" % str(rev), "-v")
res = parse_svnlog_out(out)
print("\nRev: %s" % str(rev))
if None == res:
print("Not svn checkin")
else:
(user, comment, modified, added, deleted) = res
print(
"User: %s\nComment: %s\nModified: %s\nAdded: %s\nDeleted: %s\n" %
(user, comment, str(modified), str(added), str(deleted)))
rev -= 1
# version line is in the format:
# define CURR_VERSION 1.1
def extract_sumatra_version(file_path):
content = open(file_path).read()
ver = re.findall(r'CURR_VERSION (\d+(?:\.\d+)*)', content)[0]
return ver
def file_remove_try_hard(path):
removeRetryCount = 0
while removeRetryCount < 3:
try:
os.remove(path)
return
except:
# try to sleep to make the time for the file not be used anymore
time.sleep(1)
print "exception: n %s, n %s, n %s n when trying to remove file %s" % (sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2], path)
removeRetryCount += 1
def zip_file(dst_zip_file, src_path, in_zip_name=None, compress=True, append=False):
mode = "w"
if append:
mode = "a"
if compress:
zf = zipfile.ZipFile(dst_zip_file, mode, zipfile.ZIP_DEFLATED)
else:
zf = zipfile.ZipFile(dst_zip_file, mode, zipfile.ZIP_STORED)
if in_zip_name is None:
in_zip_name = os.path.basename(src_path)
zf.write(src_path, in_zip_name)
zf.close()
def bz_file_compress(src, dst):
with open(src, "rb") as src_fo:
with bz2.BZ2File(dst, "w", buffering=16 * 1024 * 1024, compresslevel=9) as dst_fo:
shutil.copyfileobj(src_fo, dst_fo, length=1 * 1024 * 1024)
def formatInt(x):
if x < 0:
return '-' + formatInt(-x)
result = ''
while x >= 1000:
x, r = divmod(x, 1000)
result = ".%03d%s" % (r, result)
return "%d%s" % (x, result)
def str2bool(s):
if s.lower() in ("true", "1"):
return True
if s.lower() in ("false", "0"):
return False
assert(False)
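# Simple line-oriented "name: value" (de)serialization helper: the declared
# types in `fields` drive how values read back from disk are coerced, and
# `fields_no_serialize` lists attributes that are kept in memory only.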
class Serializable(object):
def __init__(self, fields, fields_no_serialize, read_from_file=None):
self.fields = fields
self.fields_no_serialize = fields_no_serialize
self.vals = {}
if read_from_file != None:
self.from_s(open(read_from_file, "r").read())
def type_of_field(self, name):
return type(self.fields[name])
def from_s(self, s):
# print(s)
lines = s.split("\n")
for l in lines:
(name, val) = l.split(": ", 1)
tp = self.type_of_field(name)
if tp == types.IntType:
self.vals[name] = int(val)
elif tp == types.LongType:
self.vals[name] = long(val)
elif tp == types.BooleanType:
self.vals[name] = str2bool(val)
elif tp in (types.StringType, types.UnicodeType):
self.vals[name] = val
else:
print(name)
assert(False)
def to_s(self):
res = []
for k, v in self.vals.items():
if k in self.fields_no_serialize:
continue
res.append("%s: %s" % (k, str(v)))
return string.join(res, "\n")
def write_to_file(self, filename):
open(filename, "w").write(self.to_s())
def compat_types(self, tp1, tp2):
if tp1 == tp2:
return True
num_types = (types.IntType, types.LongType)
if tp1 in num_types and tp2 in num_types:
return True
return False
def __setattr__(self, k, v):
if k in self.fields:
if not self.compat_types(type(v), type(self.fields[k])):
print("k='%s', %s != %s (type(v) != type(self.fields[k]))" % (
k, type(v), type(self.fields[k])))
assert type(v) == type(self.fields[k])
self.vals[k] = v
else:
super(Serializable, self).__setattr__(k, v)
def __getattr__(self, k):
if k in self.vals:
return self.vals[k]
if k in self.fields:
return self.fields[k]
return super(Serializable, self).__getattribute__(k)
import smtplib
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
def sendmail(sender, senderpwd, to, subject, body):
# print("sendmail is disabled"); return
mail = MIMEMultipart()
mail['From'] = sender
toHdr = to
if isinstance(toHdr, list):
toHdr = ", ".join(toHdr)
mail['To'] = toHdr
mail['Subject'] = subject
mail.attach(MIMEText(body))
msg = mail.as_string()
# print(msg)
mailServer = smtplib.SMTP("smtp.gmail.com", 587)
mailServer.ehlo()
mailServer.starttls()
mailServer.ehlo()
mailServer.login(sender, senderpwd)
mailServer.sendmail(sender, to, msg)
mailServer.close()
# Some operations, like uploading to s3, require knowing s3 credentials.
# We store all such information that cannot be publicly known in a file
# config.py. This object is just a wrapper that documents the fields
# and provides default values if config.py doesn't exist.
class Config(object):
def __init__(self):
self.aws_access = None
self.aws_secret = None
self.cert_pwd = None
self.trans_ul_secret = None
self.notifier_email = None
self.notifier_email_pwd = None
def GetNotifierEmailAndPwdMustExist(self):
assert(None != self.notifier_email and None != self.notifier_email_pwd)
return (self.notifier_email, self.notifier_email_pwd)
def HasNotifierEmail(self):
return self.notifier_email != None and self.notifier_email_pwd != None
def GetCertPwdMustExist(self):
assert(None != self.cert_pwd)
return self.cert_pwd
def GetTransUploadSecret(self):
assert(None != self.trans_ul_secret)
return self.trans_ul_secret
# TODO: could verify aws creds better i.e. check the lengths
def GetAwsCredsMustExist(self):
assert(None != self.aws_access)
assert(None != self.aws_secret)
return (self.aws_access, self.aws_secret)
def HasAwsCreds(self):
if None is self.aws_access:
return False
if None is self.aws_secret:
return False
return True
g_config = None
def load_config():
global g_config
if g_config != None:
return g_config
c = Config()
try:
import config
c.aws_access = config.aws_access
c.aws_secret = config.aws_secret
c.cert_pwd = config.cert_pwd
c.notifier_email = config.notifier_email
c.notifier_email_pwd = config.notifier_email_pwd
c.trans_ul_secret = config.trans_ul_secret
except:
# it's ok if doesn't exist, we just won't have the config data
print("no config.py!")
g_config = c
return g_config
def test_load_config():
c = load_config()
vals = (c.aws_access, c.aws_secret, c.cert_pwd, c.trans_ul_secret)
print("aws_secret: %s\naws_secret: %s\ncert_pwd: %s\ntrans_ul_secret: %s" %
vals)
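# Helpers for Go gob-style integer encoding: unsigned values <= 0x7f occupy a
# single byte; larger values are written big-endian, preceded by a length byte
# of (256 - byte_count). Signed values are first mapped to unsigned ones
# (negative i -> (~i << 1) | 1, otherwise i << 1) before encoding.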
def gob_uvarint_encode(i):
assert i >= 0
if i <= 0x7f:
return chr(i)
res = ""
while i > 0:
b = i & 0xff
res += chr(b)
i = i >> 8
l = 256 - len(res)
res = res[::-1] # reverse string
return chr(l) + res
def gob_varint_encode(i):
if i < 0:
i = (~i << 1) | 1
else:
i = i << 1
return gob_uvarint_encode(i)
# data generated with UtilTests.cpp (define GEN_PYTHON_TESTS to 1)
def test_gob():
assert gob_varint_encode(0) == chr(0)
assert gob_varint_encode(1) == chr(2)
assert gob_varint_encode(127) == chr(255) + chr(254)
assert gob_varint_encode(128) == chr(254) + chr(1) + chr(0)
assert gob_varint_encode(129) == chr(254) + chr(1) + chr(2)
assert gob_varint_encode(254) == chr(254) + chr(1) + chr(252)
assert gob_varint_encode(255) == chr(254) + chr(1) + chr(254)
assert gob_varint_encode(256) == chr(254) + chr(2) + chr(0)
assert gob_varint_encode(4660) == chr(254) + chr(36) + chr(104)
assert gob_varint_encode(74565) == chr(253) + chr(2) + chr(70) + chr(138)
assert gob_varint_encode(1193046) == chr(253) + \
chr(36) + chr(104) + chr(172)
assert gob_varint_encode(19088743) == chr(252) + \
chr(2) + chr(70) + chr(138) + chr(206)
assert gob_varint_encode(305419896) == chr(252) + \
chr(36) + chr(104) + chr(172) + chr(240)
assert gob_varint_encode(2147483647) == chr(252) + \
chr(255) + chr(255) + chr(255) + chr(254)
assert gob_varint_encode(-1) == chr(1)
assert gob_varint_encode(-2) == chr(3)
assert gob_varint_encode(-255) == chr(254) + chr(1) + chr(253)
assert gob_varint_encode(-256) == chr(254) + chr(1) + chr(255)
assert gob_varint_encode(-257) == chr(254) + chr(2) + chr(1)
assert gob_varint_encode(-4660) == chr(254) + chr(36) + chr(103)
assert gob_varint_encode(-74565) == chr(253) + chr(2) + chr(70) + chr(137)
assert gob_varint_encode(-1193046) == chr(253) + \
chr(36) + chr(104) + chr(171)
assert gob_varint_encode(-1197415) == chr(253) + \
chr(36) + chr(138) + chr(205)
assert gob_varint_encode(-19158648) == chr(252) + \
chr(2) + chr(72) + chr(172) + chr(239)
assert gob_uvarint_encode(0) == chr(0)
assert gob_uvarint_encode(1) == chr(1)
assert gob_uvarint_encode(127) == chr(127)
assert gob_uvarint_encode(128) == chr(255) + chr(128)
assert gob_uvarint_encode(129) == chr(255) + chr(129)
assert gob_uvarint_encode(254) == chr(255) + chr(254)
assert gob_uvarint_encode(255) == chr(255) + chr(255)
assert gob_uvarint_encode(256) == chr(254) + chr(1) + chr(0)
assert gob_uvarint_encode(4660) == chr(254) + chr(18) + chr(52)
assert gob_uvarint_encode(74565) == chr(253) + chr(1) + chr(35) + chr(69)
assert gob_uvarint_encode(1193046) == chr(253) + \
chr(18) + chr(52) + chr(86)
assert gob_uvarint_encode(19088743) == chr(252) + \
chr(1) + chr(35) + chr(69) + chr(103)
assert gob_uvarint_encode(305419896) == chr(252) + \
chr(18) + chr(52) + chr(86) + chr(120)
assert gob_uvarint_encode(2147483647) == chr(252) + \
chr(127) + chr(255) + chr(255) + chr(255)
assert gob_uvarint_encode(2147483648) == chr(252) + \
chr(128) + chr(0) + chr(0) + chr(0)
assert gob_uvarint_encode(2147483649) == chr(252) + \
chr(128) + chr(0) + chr(0) + chr(1)
assert gob_uvarint_encode(4294967294) == chr(252) + \
chr(255) + chr(255) + chr(255) + chr(254)
assert gob_uvarint_encode(4294967295) == chr(252) + \
chr(255) + chr(255) + chr(255) + chr(255)
# for easy generation of the compact form of storing strings in C
class SeqStrings(object):
def __init__(self):
self.strings = {}
self.strings_seq = ""
def get_all(self):
return self.strings_seq + chr(0)
# Note: this only works if strings are ascii, which is the case for us so
# far
def get_all_c_escaped(self):
s = self.get_all()
s = s.replace(chr(0), "\\0")
return '"' + s + '"'
def add(self, s):
self.get_offset(s)
def get_offset(self, s):
if s not in self.strings:
self.strings[s] = len(self.strings_seq)
self.strings_seq = self.strings_seq + s + chr(0)
return self.strings[s]
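# Example: after add("foo") and add("bar"), get_all_c_escaped() returns the C
# literal "foo\0bar\0\0" (each string NUL-terminated, plus a trailing NUL).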
(FMT_NONE, FMT_LEFT, FMT_RIGHT) = (0, 1, 2)
def get_col_fmt(col_fmt, col):
if col >= len(col_fmt):
return FMT_NONE
return col_fmt[col]
def fmt_str(s, max, fmt):
add = max - len(s)
if fmt == FMT_LEFT:
return " " * add + s
elif fmt == FMT_RIGHT:
return s + " " * add
return s
"""
[
["a", "bc", "def"],
["ab", "fabo", "d"]
]
=>
[
["a ", "bc ", "def"],
["ab", "fabo", "d "]
]
"""
def fmt_rows(rows, col_fmt=[]):
col_max_len = {}
for row in rows:
for col in range(len(row)):
el_len = len(row[col])
curr_max = col_max_len.get(col, 0)
if el_len > curr_max:
col_max_len[col] = el_len
res = []
for row in rows:
res_row = []
for col in range(len(row)):
s = fmt_str(row[col], col_max_len[col], get_col_fmt(col_fmt, col))
res_row.append(s)
res.append(res_row)
return res
if __name__ == "__main__":
# parse_svnlog_out_test2()
# test_load_config()
test_gob()
| gpl-3.0 | 5,237,735,412,752,482,000 | 30.891403 | 149 | 0.552988 | false |
ran0101/namebench | libnamebench/config.py | 173 | 11391 | #!/usr/bin/env python
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define and process configuration from command-line or config file."""
__author__ = '[email protected] (Thomas Stromberg)'
import ConfigParser
import csv
import optparse
import os.path
import re
import StringIO
import tempfile
import nb_third_party
# from third_party
import httplib2
import addr_util
import data_sources
import nameserver
import nameserver_list
import sys_nameservers
import util
import version
TRUNK_URL = 'http://namebench.googlecode.com/svn/trunk/'
SETS_TO_TAGS_MAP = {
'system': ['system', 'dhcp'],
'global': ['global', 'preferred'],
'preferred': ['preferred'],
'nearby': ['nearby', 'dhcp', 'internal'],
'all': ['global', 'nearby', 'country', 'system', 'dhcp', 'internal', 'network', 'preferred', 'isp', 'likely-isp'],
'regional': ['internal', 'country', 'nearby', 'network', 'isp', 'likely-isp'],
'isp': ['isp', 'dhcp', 'internal', 'likely-isp'],
'network': ['network', 'internal', 'dhcp'],
}
def ExpandSetsToTags(set_names):
tags = set()
for set_name in set_names:
tags.update(set(SETS_TO_TAGS_MAP.get(set_name, [set_name])))
return tags
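# Example (derived from SETS_TO_TAGS_MAP above):
# ExpandSetsToTags(['isp']) -> set(['isp', 'dhcp', 'internal', 'likely-isp'])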
def GetMergedConfiguration():
"""Get all of our configuration setup."""
options = ParseCommandLineArguments()
return MergeConfigurationFileOptions(options)
def ParseCommandLineArguments(default_config_file='config/namebench.cfg'):
"""Get our option configuration setup.
Args:
default_config_file: path to configuration (may be relative)
Returns:
options: parsed command-line options; extra IP arguments are merged into options.servers and options.tags
"""
ds = data_sources.DataSources()
import_types = ds.ListSourceTypes()
parser = optparse.OptionParser()
parser.add_option('-6', '--ipv6_only', dest='ipv6_only', action='store_true', help='Only include IPv6 name servers')
parser.add_option('-4', '--ipv4_only', dest='ipv4_only', action='store_true', help='Only include IPv4 name servers')
parser.add_option('-b', '--censorship-checks', dest='enable_censorship_checks', action='store_true', help='Enable censorship checks')
parser.add_option('-c', '--country', dest='country', default=None, help='Set country (overrides GeoIP)')
parser.add_option('-H', '--skip-health-checks', dest='skip_health_checks', action='store_true', default=False, help='Skip health checks')
parser.add_option('-G', '--hide_results', dest='hide_results', action='store_true', help='Upload results, but keep them hidden from indexes.')
parser.add_option('-i', '--input', dest='input_source', help=('Import hostnames from an filename or application (%s)' % ', '.join(import_types)))
parser.add_option('-I', '--ips', dest='servers', default=[], help='A list of ips to test (can also be passed as arguments)')
parser.add_option('-j', '--health_threads', dest='health_thread_count', type='int', help='# of health check threads to use')
parser.add_option('-J', '--benchmark_threads', dest='benchmark_thread_count', type='int', help='# of benchmark threads to use')
parser.add_option('-k', '--distance_km', dest='distance', default=1250, help='Distance in km for determining if server is nearby')
parser.add_option('-K', '--overload_distance_km', dest='overload_distance', default=250, help='Like -k, but used if the country already has >350 servers.')
parser.add_option('-m', '--select_mode', dest='select_mode', default='automatic', help='Selection algorithm to use (weighted, random, chunk)')
parser.add_option('-M', '--max_servers_to_check', dest='max_servers_to_check', default=350, help='Maximum number of servers to inspect')
parser.add_option('-n', '--num_servers', dest='num_servers', type='int', help='Number of nameservers to include in test')
parser.add_option('-o', '--output', dest='output_file', default=None, help='Filename to write output to')
parser.add_option('-O', '--csv_output', dest='csv_file', default=None, help='Filename to write query details to (CSV)')
parser.add_option('-p', '--psn') # Silly Mac OS X adding -psn_0_xxxx
parser.add_option('-P', '--ping_timeout', dest='ping_timeout', type='float', help='# of seconds ping requests timeout in.')
parser.add_option('-q', '--query_count', dest='query_count', type='int', help='Number of queries per run.')
parser.add_option('-r', '--runs', dest='run_count', default=1, type='int', help='Number of test runs to perform on each nameserver.')
parser.add_option('-s', '--sets', dest='server_sets', default=[], help='Comma-separated list of sets to test (%s)' % SETS_TO_TAGS_MAP.keys())
parser.add_option('-T', '--template', dest='template', default='html', help='Template to use for output generation (ascii, html, resolv.conf)')
parser.add_option('-U', '--site_url', dest='site_url', help='URL to upload results to (http://namebench.appspot.com/)')
parser.add_option('-u', '--upload_results', dest='upload_results', action='store_true', help='Upload anonymized results to SITE_URL (False)')
parser.add_option('-V', '--invalidate_cache', dest='invalidate_cache', action='store_true', help='Force health cache to be invalidated')
parser.add_option('-w', '--open_webbrowser', dest='open_webbrowser', action='store_true', help='Opens the final report in your browser')
parser.add_option('-x', '--no_gui', dest='no_gui', action='store_true', help='Disable GUI')
parser.add_option('-Y', '--health_timeout', dest='health_timeout', type='float', help='health check timeout (in seconds)')
parser.add_option('-y', '--timeout', dest='timeout', type='float', help='# of seconds general requests timeout in.')
parser.add_option('-z', '--config', dest='config', default=default_config_file, help='Config file to use.')
options, args = parser.parse_args()
if options.server_sets:
if ',' in options.server_sets:
sets = options.server_sets.split(',')
else:
sets = [options.server_sets,]
options.tags = ExpandSetsToTags(sets)
else:
options.tags = set()
if args:
options.servers.extend(addr_util.ExtractIPsFromString(' '.join(args)))
options.tags.add('specified')
return options
def GetNameServerData(filename='config/servers.csv'):
server_file = util.FindDataFile(filename)
ns_data = _ParseNameServerListing(open(server_file))
# Add the system servers for later reference.
for i, ip in enumerate(sys_nameservers.GetCurrentNameServers()):
ns = nameserver.NameServer(ip, name='SYS%s-%s' % (i, ip), system_position=i)
ns_data.append(ns)
for i, ip in enumerate(sys_nameservers.GetAssignedNameServers()):
ns = nameserver.NameServer(ip, name='DHCP%s-%s' % (i, ip), dhcp_position=i)
ns_data.append(ns)
return ns_data
def _ParseNameServerListing(fp):
fields = ['ip', 'tags', 'provider', 'instance', 'hostname', 'location',
'coords', 'asn', 'list_note', 'urls']
reader = csv.DictReader(fp, fieldnames=fields)
ns_data = nameserver_list.NameServers()
for row in reader:
if row['instance']:
name = "%s (%s)" % (row['provider'], row['instance'])
else:
name = row['provider']
if row['coords']:
lat, lon = row['coords'].split(',')
else:
lat = lon = None
as_match = re.match('AS(\d+)(.*)', row['asn'])
if as_match:
asn, network_owner = as_match.groups()
network_owner = network_owner.lstrip(' ').rstrip(' ')
else:
asn = network_owner = None
ns_data.append(nameserver.NameServer(
row['ip'],
name=name,
tags=row['tags'].split(),
provider=row['provider'],
instance=row['instance'],
location=row['location'],
latitude=lat,
longitude=lon,
asn=asn,
hostname=row['hostname'],
network_owner=network_owner
))
return ns_data
def GetSanityChecks():
return GetAutoUpdatingConfigFile('config/sanity_checks.cfg')
def _GetLocalConfig(conf_file):
"""Read a simple local config file."""
local_config = _ReadConfigFile(conf_file)
return _ExpandConfigSections(local_config)
def _ReadConfigFile(conf_file):
"""Read a local config file."""
ref_file = util.FindDataFile(conf_file)
local_config = ConfigParser.ConfigParser()
local_config.read(ref_file)
return local_config
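# Fetch the latest copy of a bundled .cfg file from TRUNK_URL when the local
# file opts in (download_latest=1) and the remote [config] version is newer;
# otherwise fall back to the local copy.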
def GetAutoUpdatingConfigFile(conf_file):
"""Get the latest copy of the config file"""
local_config = _ReadConfigFile(conf_file)
download_latest = int(local_config.get('config', 'download_latest'))
local_version = int(local_config.get('config', 'version'))
if download_latest == 0:
return _ExpandConfigSections(local_config)
h = httplib2.Http(tempfile.gettempdir(), timeout=10)
url = '%s/%s' % (TRUNK_URL, conf_file)
content = None
try:
_, content = h.request(url, 'GET')
remote_config = ConfigParser.ConfigParser()
except:
print '* Unable to fetch remote %s: %s' % (conf_file, util.GetLastExceptionString())
return _ExpandConfigSections(local_config)
if content and '[config]' in content:
fp = StringIO.StringIO(content)
try:
remote_config.readfp(fp)
except:
print '* Unable to read remote %s: %s' % (conf_file, util.GetLastExceptionString())
return _ExpandConfigSections(local_config)
if remote_config and remote_config.has_section('config') and int(remote_config.get('config', 'version')) > local_version:
print '- Using %s' % url
return _ExpandConfigSections(remote_config)
else:
return _ExpandConfigSections(local_config)
def _ExpandConfigSections(config):
return dict([ (y, config.items(y)) for y in config.sections() if y != 'config' ])
def MergeConfigurationFileOptions(options):
"""Process configuration file, merge configuration with OptionParser.
Args:
options: optparse.OptionParser() object
Returns:
options: optparse.OptionParser() object
Raises:
ValueError: If we are unable to find a usable configuration file.
"""
config = ConfigParser.ConfigParser()
full_path = util.FindDataFile(options.config)
config.read(full_path)
if not config or not config.has_section('general'):
raise ValueError('Could not find usable configuration in %s (%s)' % (full_path, options.config))
general = dict(config.items('general'))
# -U implies -u
if options.site_url:
options.upload_results = True
for option in general:
if not getattr(options, option, None):
if 'timeout' in option:
value = float(general[option])
elif 'count' in option or 'num' in option or 'hide' in option:
value = int(general[option])
else:
value = general[option]
setattr(options, option, value)
for key in ('input_file', 'output_file', 'csv_file', 'input_source'):
value = getattr(options, key, None)
if value:
setattr(options, key, os.path.expanduser(value))
# This makes it easier to pass around later. Lazy-hack.
options.version = version.VERSION
return options
| apache-2.0 | 7,441,814,001,223,399,000 | 40.271739 | 157 | 0.680976 | false |
zzcclp/spark | examples/src/main/python/mllib/stratified_sampling_example.py | 27 | 1329 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyspark import SparkContext
if __name__ == "__main__":
sc = SparkContext(appName="StratifiedSamplingExample") # SparkContext
# $example on$
# an RDD of any key value pairs
data = sc.parallelize([(1, 'a'), (1, 'b'), (2, 'c'), (2, 'd'), (2, 'e'), (3, 'f')])
# specify the exact fraction desired from each key as a dictionary
fractions = {1: 0.1, 2: 0.6, 3: 0.3}
approxSample = data.sampleByKey(False, fractions)
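# the first argument (False) disables sampling with replacement; the returned
# sample size per key is only approximately fraction * count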
# $example off$
for each in approxSample.collect():
print(each)
sc.stop()
| apache-2.0 | -536,255,007,076,426,750 | 35.916667 | 87 | 0.700527 | false |
ProfessionalIT/professionalit-webiste | sdk/google_appengine/google/appengine/_internal/django/core/validators.py | 23 | 6691 | import re
import urlparse
from google.appengine._internal.django.core.exceptions import ValidationError
from google.appengine._internal.django.utils.translation import ugettext_lazy as _
from google.appengine._internal.django.utils.encoding import smart_unicode
# These values, if given to validate(), will trigger the self.required check.
EMPTY_VALUES = (None, '', [], (), {})
try:
from google.appengine._internal.django.conf import settings
URL_VALIDATOR_USER_AGENT = settings.URL_VALIDATOR_USER_AGENT
except ImportError:
# It's OK if Django settings aren't configured.
URL_VALIDATOR_USER_AGENT = 'Django (http://www.djangoproject.com/)'
class RegexValidator(object):
regex = ''
message = _(u'Enter a valid value.')
code = 'invalid'
def __init__(self, regex=None, message=None, code=None):
if regex is not None:
self.regex = regex
if message is not None:
self.message = message
if code is not None:
self.code = code
if isinstance(self.regex, basestring):
self.regex = re.compile(regex)
def __call__(self, value):
"""
Validates that the input matches the regular expression.
"""
if not self.regex.search(smart_unicode(value)):
raise ValidationError(self.message, code=self.code)
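# Usage sketch (illustration only, not part of the original module):
# RegexValidator(r'^\d+$')('123') returns silently, while
# RegexValidator(r'^\d+$')('abc') raises ValidationError with code 'invalid'.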
class URLValidator(RegexValidator):
regex = re.compile(
r'^https?://' # http:// or https://
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+[A-Z]{2,6}\.?|' #domain...
r'localhost|' #localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
def __init__(self, verify_exists=False, validator_user_agent=URL_VALIDATOR_USER_AGENT):
super(URLValidator, self).__init__()
self.verify_exists = verify_exists
self.user_agent = validator_user_agent
def __call__(self, value):
try:
super(URLValidator, self).__call__(value)
except ValidationError, e:
# Trivial case failed. Try for possible IDN domain
if value:
value = smart_unicode(value)
scheme, netloc, path, query, fragment = urlparse.urlsplit(value)
try:
netloc = netloc.encode('idna') # IDN -> ACE
except UnicodeError: # invalid domain part
raise e
url = urlparse.urlunsplit((scheme, netloc, path, query, fragment))
super(URLValidator, self).__call__(url)
else:
raise
else:
url = value
if self.verify_exists:
import urllib2
headers = {
"Accept": "text/xml,application/xml,application/xhtml+xml,text/html;q=0.9,text/plain;q=0.8,image/png,*/*;q=0.5",
"Accept-Language": "en-us,en;q=0.5",
"Accept-Charset": "ISO-8859-1,utf-8;q=0.7,*;q=0.7",
"Connection": "close",
"User-Agent": self.user_agent,
}
try:
req = urllib2.Request(url, None, headers)
u = urllib2.urlopen(req)
except ValueError:
raise ValidationError(_(u'Enter a valid URL.'), code='invalid')
except: # urllib2.URLError, httplib.InvalidURL, etc.
raise ValidationError(_(u'This URL appears to be a broken link.'), code='invalid_link')
def validate_integer(value):
try:
int(value)
except (ValueError, TypeError), e:
raise ValidationError('')
class EmailValidator(RegexValidator):
def __call__(self, value):
try:
super(EmailValidator, self).__call__(value)
except ValidationError, e:
# Trivial case failed. Try for possible IDN domain-part
if value and u'@' in value:
parts = value.split(u'@')
domain_part = parts[-1]
try:
parts[-1] = parts[-1].encode('idna')
except UnicodeError:
raise e
super(EmailValidator, self).__call__(u'@'.join(parts))
else:
raise
email_re = re.compile(
r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*" # dot-atom
r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-011\013\014\016-\177])*"' # quoted-string
r')@(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+[A-Z]{2,6}\.?$', re.IGNORECASE) # domain
validate_email = EmailValidator(email_re, _(u'Enter a valid e-mail address.'), 'invalid')
slug_re = re.compile(r'^[-\w]+$')
validate_slug = RegexValidator(slug_re, _(u"Enter a valid 'slug' consisting of letters, numbers, underscores or hyphens."), 'invalid')
ipv4_re = re.compile(r'^(25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}$')
validate_ipv4_address = RegexValidator(ipv4_re, _(u'Enter a valid IPv4 address.'), 'invalid')
comma_separated_int_list_re = re.compile('^[\d,]+$')
validate_comma_separated_integer_list = RegexValidator(comma_separated_int_list_re, _(u'Enter only digits separated by commas.'), 'invalid')
class BaseValidator(object):
compare = lambda self, a, b: a is not b
clean = lambda self, x: x
message = _(u'Ensure this value is %(limit_value)s (it is %(show_value)s).')
code = 'limit_value'
def __init__(self, limit_value):
self.limit_value = limit_value
def __call__(self, value):
cleaned = self.clean(value)
params = {'limit_value': self.limit_value, 'show_value': cleaned}
if self.compare(cleaned, self.limit_value):
raise ValidationError(
self.message % params,
code=self.code,
params=params,
)
class MaxValueValidator(BaseValidator):
compare = lambda self, a, b: a > b
message = _(u'Ensure this value is less than or equal to %(limit_value)s.')
code = 'max_value'
class MinValueValidator(BaseValidator):
compare = lambda self, a, b: a < b
message = _(u'Ensure this value is greater than or equal to %(limit_value)s.')
code = 'min_value'
class MinLengthValidator(BaseValidator):
compare = lambda self, a, b: a < b
clean = lambda self, x: len(x)
message = _(u'Ensure this value has at least %(limit_value)d characters (it has %(show_value)d).')
code = 'min_length'
class MaxLengthValidator(BaseValidator):
compare = lambda self, a, b: a > b
clean = lambda self, x: len(x)
message = _(u'Ensure this value has at most %(limit_value)d characters (it has %(show_value)d).')
code = 'max_length'
| lgpl-3.0 | -5,722,117,972,611,805,000 | 37.901163 | 140 | 0.574055 | false |
certik/sympy-oldcore | sympy/ntheory/partitions_.py | 1 | 1870 |
def npartitions(n):
"""
Calculate the partition function P(n), i.e. the number of ways that
n can be written as a sum of positive integers.
P(n) is computed using a straightforward implementation of the
Hardy-Ramanujan-Rademacher formula, described e.g. at
http://mathworld.wolfram.com/PartitionFunctionP.html
The speed is decent up to n = 10**5 or so. The function has
been tested to give the correct result for n = 10**6.
"""
n = int(n)
if n < 0:
return 0
if n <= 5:
return [1, 1, 2, 3, 5, 7][n]
from sympy.core.numbers import gcd
from sympy.numerics import Float
from sympy.numerics.functions import pi_float, sqrt, exp, log, cos
def frac(x):
return x - int(x)
def D(n, j):
pi = pi_float()
a = sqrt(Float(2)/3) * pi / j
b = Float(n) - Float(1)/24
c = sqrt(b)
expa = exp(a*c)
iexpa = Float(1)/expa
ch = (expa + iexpa)*0.5
sh = (expa - iexpa)*0.5
return sqrt(j) / (2*sqrt(2)*b*pi) * (a*ch-sh/c)
def A(n, j):
if j == 1:
return Float(1)
s = Float(0)
pi = pi_float()
for h in xrange(1, j):
if gcd(h,j) == 1:
s += cos((g(h,j)-2*h*n)*pi/j)
return s
def g(h, j):
if j < 3:
return Float(0)
s = Float(0)
for k in xrange(1, j):
s += k*(frac(h*Float(k)/j)-0.5)
return s
# estimate number of digits in p(n)
pdigits = int((pi_float()*sqrt(2.0*n/3)-log(4*n))/log(10)+1)
Float.store()
Float.setdps(pdigits*1.1 + 10)
s = Float(0)
M = max(6, int(0.24*sqrt(n)+4))
for q in xrange(1, M):
s += A(n,q) * D(n,q)
p = int(s + 0.5)
Float.revert()
return p
| bsd-3-clause | -861,416,958,363,658,400 | 25.101449 | 71 | 0.494118 | false |
noobcoderT/ryu-3.21 | ryu/app/rest_conf_switch.py | 22 | 5715 | # Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2012 Isaku Yamahata <yamahata at private email ne jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module provides a set of REST APIs for switch configuration.
- Per-switch Key-Value store
Used by OpenStack Ryu agent.
"""
import httplib
import json
import logging
from webob import Response
from ryu.app.wsgi import ControllerBase
from ryu.base import app_manager
from ryu.controller import conf_switch
from ryu.lib import dpid as dpid_lib
# REST API for switch configuration
#
# get all the switches
# GET /v1.0/conf/switches
#
# get all the configuration keys of a switch
# GET /v1.0/conf/switches/<dpid>
#
# delete all the configuration of a switch
# DELETE /v1.0/conf/switches/<dpid>
#
# set the <key> configuration of a switch
# PUT /v1.0/conf/switches/<dpid>/<key>
#
# get the <key> configuration of a switch
# GET /v1.0/conf/switches/<dpid>/<key>
#
# delete the <key> configuration of a switch
# DELETE /v1.0/conf/switches/<dpid>/<key>
#
# where
# <dpid>: datapath id in 16 hex
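# example (assumes ryu's default wsgi port 8080 and a JSON-encoded body):
# curl -X PUT -d '"somevalue"' \
# http://localhost:8080/v1.0/conf/switches/0000000000000001/some_key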
class ConfSwitchController(ControllerBase):
def __init__(self, req, link, data, **config):
super(ConfSwitchController, self).__init__(req, link, data, **config)
self.conf_switch = data
def list_switches(self, _req, **_kwargs):
dpids = self.conf_switch.dpids()
body = json.dumps([dpid_lib.dpid_to_str(dpid) for dpid in dpids])
return Response(content_type='application/json', body=body)
@staticmethod
def _do_switch(dpid, func, ret_func):
dpid = dpid_lib.str_to_dpid(dpid)
try:
ret = func(dpid)
except KeyError:
return Response(status=httplib.NOT_FOUND,
body='no dpid is found %s' %
dpid_lib.dpid_to_str(dpid))
return ret_func(ret)
def delete_switch(self, _req, dpid, **_kwargs):
def _delete_switch(dpid):
self.conf_switch.del_dpid(dpid)
return None
def _ret(_ret):
return Response(status=httplib.ACCEPTED)
return self._do_switch(dpid, _delete_switch, _ret)
def list_keys(self, _req, dpid, **_kwargs):
def _list_keys(dpid):
return self.conf_switch.keys(dpid)
def _ret(keys):
body = json.dumps(keys)
return Response(content_type='application/json', body=body)
return self._do_switch(dpid, _list_keys, _ret)
@staticmethod
def _do_key(dpid, key, func, ret_func):
dpid = dpid_lib.str_to_dpid(dpid)
try:
ret = func(dpid, key)
except KeyError:
return Response(status=httplib.NOT_FOUND,
body='no dpid/key is found %s %s' %
(dpid_lib.dpid_to_str(dpid), key))
return ret_func(ret)
def set_key(self, req, dpid, key, **_kwargs):
def _set_val(dpid, key):
val = json.loads(req.body)
self.conf_switch.set_key(dpid, key, val)
return None
def _ret(_ret):
return Response(status=httplib.CREATED)
return self._do_key(dpid, key, _set_val, _ret)
def get_key(self, _req, dpid, key, **_kwargs):
def _get_key(dpid, key):
return self.conf_switch.get_key(dpid, key)
def _ret(val):
return Response(content_type='application/json',
body=json.dumps(val))
return self._do_key(dpid, key, _get_key, _ret)
def delete_key(self, _req, dpid, key, **_kwargs):
def _delete_key(dpid, key):
self.conf_switch.del_key(dpid, key)
return None
def _ret(_ret):
return Response()
return self._do_key(dpid, key, _delete_key, _ret)
class ConfSwitchAPI(app_manager.RyuApp):
_CONTEXTS = {
'conf_switch': conf_switch.ConfSwitchSet,
}
def __init__(self, *args, **kwargs):
super(ConfSwitchAPI, self).__init__(*args, **kwargs)
self.conf_switch = kwargs['conf_switch']
wsgi = kwargs['wsgi']
mapper = wsgi.mapper
controller = ConfSwitchController
wsgi.registory[controller.__name__] = self.conf_switch
route_name = 'conf_switch'
uri = '/v1.0/conf/switches'
mapper.connect(route_name, uri, controller=controller,
action='list_switches',
conditions=dict(method=['GET']))
uri += '/{dpid}'
requirements = {'dpid': dpid_lib.DPID_PATTERN}
s = mapper.submapper(controller=controller, requirements=requirements)
s.connect(route_name, uri, action='delete_switch',
conditions=dict(method=['DELETE']))
s.connect(route_name, uri, action='list_keys',
conditions=dict(method=['GET']))
uri += '/{key}'
s.connect(route_name, uri, action='set_key',
conditions=dict(method=['PUT']))
s.connect(route_name, uri, action='get_key',
conditions=dict(method=['GET']))
s.connect(route_name, uri, action='delete_key',
conditions=dict(method=['DELETE']))
| apache-2.0 | 6,416,562,680,468,467,000 | 31.288136 | 78 | 0.605249 | false |
kawasaki2013/python-for-android-x86 | python3-alpha/python3-src/Lib/idlelib/GrepDialog.py | 49 | 4062 | import os
import fnmatch
import sys
from tkinter import *
from idlelib import SearchEngine
from idlelib.SearchDialogBase import SearchDialogBase
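# Module-level entry point: a single GrepDialog is cached per root window on
# the shared SearchEngine instance and re-opened with the current selection
# as the search phrase.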
def grep(text, io=None, flist=None):
root = text._root()
engine = SearchEngine.get(root)
if not hasattr(engine, "_grepdialog"):
engine._grepdialog = GrepDialog(root, engine, flist)
dialog = engine._grepdialog
searchphrase = text.get("sel.first", "sel.last")
dialog.open(text, searchphrase, io)
class GrepDialog(SearchDialogBase):
title = "Find in Files Dialog"
icon = "Grep"
needwrapbutton = 0
def __init__(self, root, engine, flist):
SearchDialogBase.__init__(self, root, engine)
self.flist = flist
self.globvar = StringVar(root)
self.recvar = BooleanVar(root)
def open(self, text, searchphrase, io=None):
SearchDialogBase.open(self, text, searchphrase)
if io:
path = io.filename or ""
else:
path = ""
dir, base = os.path.split(path)
head, tail = os.path.splitext(base)
if not tail:
tail = ".py"
self.globvar.set(os.path.join(dir, "*" + tail))
def create_entries(self):
SearchDialogBase.create_entries(self)
self.globent = self.make_entry("In files:", self.globvar)
def create_other_buttons(self):
f = self.make_frame()
btn = Checkbutton(f, anchor="w",
variable=self.recvar,
text="Recurse down subdirectories")
btn.pack(side="top", fill="both")
btn.select()
def create_command_buttons(self):
SearchDialogBase.create_command_buttons(self)
self.make_button("Search Files", self.default_command, 1)
def default_command(self, event=None):
prog = self.engine.getprog()
if not prog:
return
path = self.globvar.get()
if not path:
self.top.bell()
return
from idlelib.OutputWindow import OutputWindow
save = sys.stdout
try:
sys.stdout = OutputWindow(self.flist)
self.grep_it(prog, path)
finally:
sys.stdout = save
def grep_it(self, prog, path):
dir, base = os.path.split(path)
list = self.findfiles(dir, base, self.recvar.get())
list.sort()
self.close()
pat = self.engine.getpat()
print("Searching %r in %s ..." % (pat, path))
hits = 0
for fn in list:
try:
f = open(fn)
except IOError as msg:
print(msg)
continue
lineno = 0
while 1:
block = f.readlines(100000)
if not block:
break
for line in block:
lineno = lineno + 1
if line[-1:] == '\n':
line = line[:-1]
if prog.search(line):
sys.stdout.write("%s: %s: %s\n" % (fn, lineno, line))
hits = hits + 1
if hits:
if hits == 1:
s = ""
else:
s = "s"
print("Found", hits, "hit%s." % s)
print("(Hint: right-click to open locations.)")
else:
print("No hits.")
def findfiles(self, dir, base, rec):
try:
names = os.listdir(dir or os.curdir)
except os.error as msg:
print(msg)
return []
list = []
subdirs = []
for name in names:
fn = os.path.join(dir, name)
if os.path.isdir(fn):
subdirs.append(fn)
else:
if fnmatch.fnmatch(name, base):
list.append(fn)
if rec:
for subdir in subdirs:
list.extend(self.findfiles(subdir, base, rec))
return list
def close(self, event=None):
if self.top:
self.top.grab_release()
self.top.withdraw()
| apache-2.0 | -2,802,053,775,348,888,000 | 29.541353 | 77 | 0.513294 | false |
OCA/hr | hr_employee_display_own_info/tests/test_employee_display_own_info.py | 1 | 1081 | # Copyright 2017-2019 Onestein (<https://www.onestein.eu>)
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from odoo.tests.common import TransactionCase
class TestEmployeeDisplayOwnInfo(TransactionCase):
def setUp(self):
super(TestEmployeeDisplayOwnInfo, self).setUp()
self.user_test = self.env.ref('base.user_demo')
self.employee = self.env['hr.employee'].create({
'name': 'Employee',
})
def test_01(self):
self.assertFalse(self.user_test.has_group('hr.group_hr_user'))
self.assertFalse(
self.employee.sudo(self.user_test).employee_display_personal_data)
def test_02(self):
self.assertTrue(self.env.user.has_group('hr.group_hr_user'))
self.assertTrue(self.employee.employee_display_personal_data)
def test_03(self):
self.employee.write({'user_id': self.user_test.id})
self.assertFalse(self.user_test.has_group('hr.group_hr_user'))
self.assertTrue(
self.employee.sudo(self.user_test).employee_display_personal_data)
| agpl-3.0 | 6,524,081,741,879,936,000 | 35.033333 | 78 | 0.6642 | false |
genesi/u-boot-upstream | tools/patman/gitutil.py | 7 | 13538 | # Copyright (c) 2011 The Chromium OS Authors.
#
# See file CREDITS for list of people who contributed to this
# project.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
#
import command
import re
import os
import series
import settings
import subprocess
import sys
import terminal
def CountCommitsToBranch():
"""Returns number of commits between HEAD and the tracking branch.
This looks back to the tracking branch and works out the number of commits
since then.
Return:
Number of patches that exist on top of the branch
"""
pipe = [['git', 'log', '--no-color', '--oneline', '@{upstream}..'],
['wc', '-l']]
stdout = command.RunPipe(pipe, capture=True, oneline=True)
patch_count = int(stdout)
return patch_count
def CreatePatches(start, count, series):
"""Create a series of patches from the top of the current branch.
The patch files are written to the current directory using
git format-patch.
Args:
start: Commit to start from: 0=HEAD, 1=next one, etc.
count: number of commits to include
Return:
Filename of cover letter
List of filenames of patch files
"""
if series.get('version'):
version = '%s ' % series['version']
cmd = ['git', 'format-patch', '-M', '--signoff']
if series.get('cover'):
cmd.append('--cover-letter')
prefix = series.GetPatchPrefix()
if prefix:
cmd += ['--subject-prefix=%s' % prefix]
cmd += ['HEAD~%d..HEAD~%d' % (start + count, start)]
stdout = command.RunList(cmd)
files = stdout.splitlines()
# We have an extra file if there is a cover letter
if series.get('cover'):
return files[0], files[1:]
else:
return None, files
def ApplyPatch(verbose, fname):
"""Apply a patch with git am to test it
TODO: Convert these to use command, with stderr option
Args:
fname: filename of patch file to apply
"""
cmd = ['git', 'am', fname]
pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = pipe.communicate()
re_error = re.compile('^error: patch failed: (.+):(\d+)')
for line in stderr.splitlines():
if verbose:
print line
match = re_error.match(line)
if match:
print GetWarningMsg('warning', match.group(1), int(match.group(2)),
'Patch failed')
return pipe.returncode == 0, stdout
def ApplyPatches(verbose, args, start_point):
"""Apply the patches with git am to make sure all is well
Args:
verbose: Print out 'git am' output verbatim
args: List of patch files to apply
start_point: Number of commits back from HEAD to start applying.
Normally this is len(args), but it can be larger if a start
offset was given.
"""
error_count = 0
col = terminal.Color()
# Figure out our current position
cmd = ['git', 'name-rev', 'HEAD', '--name-only']
pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE)
stdout, stderr = pipe.communicate()
if pipe.returncode:
str = 'Could not find current commit name'
print col.Color(col.RED, str)
print stdout
return False
old_head = stdout.splitlines()[0]
# Checkout the required start point
cmd = ['git', 'checkout', 'HEAD~%d' % start_point]
pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = pipe.communicate()
if pipe.returncode:
str = 'Could not move to commit before patch series'
print col.Color(col.RED, str)
print stdout, stderr
return False
# Apply all the patches
for fname in args:
ok, stdout = ApplyPatch(verbose, fname)
if not ok:
print col.Color(col.RED, 'git am returned errors for %s: will '
'skip this patch' % fname)
if verbose:
print stdout
error_count += 1
cmd = ['git', 'am', '--skip']
pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE)
stdout, stderr = pipe.communicate()
if pipe.returncode != 0:
print col.Color(col.RED, 'Unable to skip patch! Aborting...')
print stdout
break
# Return to our previous position
cmd = ['git', 'checkout', old_head]
pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = pipe.communicate()
if pipe.returncode:
print col.Color(col.RED, 'Could not move back to head commit')
print stdout, stderr
return error_count == 0
def BuildEmailList(in_list, tag=None, alias=None):
"""Build a list of email addresses based on an input list.
Takes a list of email addresses and aliases, and turns this into a list
of only email addresses, by resolving any aliases that are present.
If the tag is given, then each email address is prepended with this
tag and a space. If the tag starts with a minus sign (indicating a
command line parameter) then the email address is quoted.
Args:
in_list: List of aliases/email addresses
tag: Text to put before each address
Returns:
List of email addresses
>>> alias = {}
>>> alias['fred'] = ['[email protected]']
>>> alias['john'] = ['[email protected]']
>>> alias['mary'] = ['Mary Poppins <[email protected]>']
>>> alias['boys'] = ['fred', ' john']
>>> alias['all'] = ['fred ', 'john', ' mary ']
>>> BuildEmailList(['john', 'mary'], None, alias)
['[email protected]', 'Mary Poppins <[email protected]>']
>>> BuildEmailList(['john', 'mary'], '--to', alias)
['--to "[email protected]"', \
'--to "Mary Poppins <[email protected]>"']
>>> BuildEmailList(['john', 'mary'], 'Cc', alias)
['Cc [email protected]', 'Cc Mary Poppins <[email protected]>']
"""
quote = '"' if tag and tag[0] == '-' else ''
raw = []
for item in in_list:
raw += LookupEmail(item, alias)
result = []
for item in raw:
if not item in result:
result.append(item)
if tag:
return ['%s %s%s%s' % (tag, quote, email, quote) for email in result]
return result
def EmailPatches(series, cover_fname, args, dry_run, cc_fname,
self_only=False, alias=None):
"""Email a patch series.
Args:
series: Series object containing destination info
cover_fname: filename of cover letter
args: list of filenames of patch files
dry_run: Just return the command that would be run
cc_fname: Filename of Cc file for per-commit Cc
self_only: True to just email to yourself as a test
Returns:
Git command that was/would be run
# For the duration of this doctest pretend that we ran patman with ./patman
>>> _old_argv0 = sys.argv[0]
>>> sys.argv[0] = './patman'
>>> alias = {}
>>> alias['fred'] = ['[email protected]']
>>> alias['john'] = ['[email protected]']
>>> alias['mary'] = ['[email protected]']
>>> alias['boys'] = ['fred', ' john']
>>> alias['all'] = ['fred ', 'john', ' mary ']
>>> alias[os.getenv('USER')] = ['[email protected]']
>>> series = series.Series()
>>> series.to = ['fred']
>>> series.cc = ['mary']
>>> EmailPatches(series, 'cover', ['p1', 'p2'], True, 'cc-fname', False, \
alias)
'git send-email --annotate --to "[email protected]" --cc \
"[email protected]" --cc-cmd "./patman --cc-cmd cc-fname" cover p1 p2'
>>> EmailPatches(series, None, ['p1'], True, 'cc-fname', False, alias)
'git send-email --annotate --to "[email protected]" --cc \
"[email protected]" --cc-cmd "./patman --cc-cmd cc-fname" p1'
>>> series.cc = ['all']
>>> EmailPatches(series, 'cover', ['p1', 'p2'], True, 'cc-fname', True, \
alias)
'git send-email --annotate --to "[email protected]" --cc-cmd "./patman \
--cc-cmd cc-fname" cover p1 p2'
>>> EmailPatches(series, 'cover', ['p1', 'p2'], True, 'cc-fname', False, \
alias)
'git send-email --annotate --to "[email protected]" --cc \
"[email protected]" --cc "[email protected]" --cc \
"[email protected]" --cc-cmd "./patman --cc-cmd cc-fname" cover p1 p2'
# Restore argv[0] since we clobbered it.
>>> sys.argv[0] = _old_argv0
"""
to = BuildEmailList(series.get('to'), '--to', alias)
if not to:
print ("No recipient, please add something like this to a commit\n"
"Series-to: Fred Bloggs <[email protected]>")
return
cc = BuildEmailList(series.get('cc'), '--cc', alias)
if self_only:
to = BuildEmailList([os.getenv('USER')], '--to', alias)
cc = []
cmd = ['git', 'send-email', '--annotate']
cmd += to
cmd += cc
cmd += ['--cc-cmd', '"%s --cc-cmd %s"' % (sys.argv[0], cc_fname)]
if cover_fname:
cmd.append(cover_fname)
cmd += args
str = ' '.join(cmd)
if not dry_run:
os.system(str)
return str
def LookupEmail(lookup_name, alias=None, level=0):
"""If an email address is an alias, look it up and return the full name
TODO: Why not just use git's own alias feature?
Args:
lookup_name: Alias or email address to look up
Returns:
tuple:
list containing a list of email addresses
Raises:
OSError if a recursive alias reference was found
ValueError if an alias was not found
>>> alias = {}
>>> alias['fred'] = ['[email protected]']
>>> alias['john'] = ['[email protected]']
>>> alias['mary'] = ['[email protected]']
>>> alias['boys'] = ['fred', ' john', '[email protected]']
>>> alias['all'] = ['fred ', 'john', ' mary ']
>>> alias['loop'] = ['other', 'john', ' mary ']
>>> alias['other'] = ['loop', 'john', ' mary ']
>>> LookupEmail('mary', alias)
['[email protected]']
>>> LookupEmail('[email protected]', alias)
['[email protected]']
>>> LookupEmail('boys', alias)
['[email protected]', '[email protected]']
>>> LookupEmail('all', alias)
['[email protected]', '[email protected]', '[email protected]']
>>> LookupEmail('odd', alias)
Traceback (most recent call last):
...
ValueError: Alias 'odd' not found
>>> LookupEmail('loop', alias)
Traceback (most recent call last):
...
OSError: Recursive email alias at 'other'
"""
if not alias:
alias = settings.alias
lookup_name = lookup_name.strip()
if '@' in lookup_name: # Perhaps a real email address
return [lookup_name]
lookup_name = lookup_name.lower()
if level > 10:
raise OSError, "Recursive email alias at '%s'" % lookup_name
out_list = []
if lookup_name:
if not lookup_name in alias:
raise ValueError, "Alias '%s' not found" % lookup_name
for item in alias[lookup_name]:
todo = LookupEmail(item, alias, level + 1)
for new_item in todo:
if not new_item in out_list:
out_list.append(new_item)
#print "No match for alias '%s'" % lookup_name
return out_list
def GetTopLevel():
"""Return name of top-level directory for this git repo.
Returns:
Full path to git top-level directory
This test makes sure that we are running tests in the right subdir
>>> os.path.realpath(os.path.dirname(__file__)) == \
os.path.join(GetTopLevel(), 'tools', 'patman')
True
"""
return command.OutputOneLine('git', 'rev-parse', '--show-toplevel')
def GetAliasFile():
"""Gets the name of the git alias file.
Returns:
Filename of git alias file, or None if none
"""
fname = command.OutputOneLine('git', 'config', 'sendemail.aliasesfile')
if fname:
fname = os.path.join(GetTopLevel(), fname.strip())
return fname
def GetDefaultUserName():
"""Gets the user.name from .gitconfig file.
Returns:
User name found in .gitconfig file, or None if none
"""
uname = command.OutputOneLine('git', 'config', '--global', 'user.name')
return uname
def GetDefaultUserEmail():
"""Gets the user.email from the global .gitconfig file.
Returns:
User's email found in .gitconfig file, or None if none
"""
uemail = command.OutputOneLine('git', 'config', '--global', 'user.email')
return uemail
def Setup():
"""Set up git utils, by reading the alias files."""
# Check for a git alias file also
alias_fname = GetAliasFile()
if alias_fname:
settings.ReadGitAliases(alias_fname)
if __name__ == "__main__":
import doctest
doctest.testmod()
| gpl-2.0 | 4,558,747,888,253,173,000 | 33.273418 | 80 | 0.603117 | false |
webcomics/dosage | dosagelib/plugins/namirdeiter.py | 1 | 2179 | # SPDX-License-Identifier: MIT
# Copyright (C) 2019-2020 Tobias Gruetzmacher
# Copyright (C) 2019-2020 Daniel Ring
from .common import _ParserScraper
class NamirDeiter(_ParserScraper):
imageSearch = '//img[contains(@src, "comics/")]'
prevSearch = ('//a[@rel="prev"]',
'//a[./img[contains(@src, "previous")]]',
'//a[contains(text(), "Previous")]')
def __init__(self, name, baseUrl, first=None, last=None):
if name == 'NamirDeiter':
super(NamirDeiter, self).__init__(name)
else:
super(NamirDeiter, self).__init__('NamirDeiter/' + name)
self.url = 'https://' + baseUrl + '/'
self.stripUrl = self.url + 'comics/index.php?date=%s'
if first:
self.firstStripUrl = self.stripUrl % first
else:
self.firstStripUrl = self.url + 'comics/'
if last:
self.url = self.stripUrl % last
self.endOfLife = True
def link_modifier(self, fromurl, tourl):
# Links are often absolute and keep jumping between http and https
return tourl.replace('http:', 'https:').replace('/www.', '/')
@classmethod
def getmodules(cls):
return (
cls('ApartmentForTwo', 'apartmentfor2.com'),
cls('NamirDeiter', 'namirdeiter.com', last='20150410'),
cls('NicoleAndDerek', 'nicoleandderek.com'),
cls('OneHundredPercentCat', 'ndunlimited.com/100cat', last='20121001'),
cls('SpareParts', 'sparepartscomics.com', first='20031022', last='20080331'),
cls('TheNDU', 'thendu.com'),
cls('WonderKittens', 'wonderkittens.com'),
cls('YouSayItFirst', 'yousayitfirst.com', first='20040220', last='20130125'),
)
class UnlikeMinerva(_ParserScraper):
name = 'NamirDeiter/UnlikeMinerva'
baseUrl = 'https://unlikeminerva.com/archive/index.php'
stripUrl = baseUrl + '?week=%s'
url = stripUrl % '127'
firstStripUrl = stripUrl % '26'
imageSearch = '//img[contains(@src, "archive/")]'
prevSearch = '//a[./img[contains(@src, "previous")]]'
multipleImagesPerStrip = True
endOfLife = True
| mit | 4,637,421,516,255,357,000 | 36.568966 | 89 | 0.591556 | false |
weaver-viii/h2o-3 | py2/h2o_cmd.py | 20 | 16497 |
import h2o_nodes
from h2o_test import dump_json, verboseprint
import h2o_util
import h2o_print as h2p
from h2o_test import OutputObj
#************************************************************************
def runStoreView(node=None, **kwargs):
print "FIX! disabling runStoreView for now"
return {}
if not node: node = h2o_nodes.nodes[0]
print "\nStoreView:"
# FIX! are there keys other than frames and models
a = node.frames(**kwargs)
# print "storeview frames:", dump_json(a)
frameList = [af['key']['name'] for af in a['frames']]
for f in frameList:
print "frame:", f
print "# of frames:", len(frameList)
b = node.models()
# print "storeview models:", dump_json(b)
modelList = [bm['key'] for bm in b['models']]
for m in modelList:
print "model:", m
print "# of models:", len(modelList)
return {'keys': frameList + modelList}
#************************************************************************
def runExec(node=None, **kwargs):
if not node: node = h2o_nodes.nodes[0]
a = node.rapids(**kwargs)
return a
def runInspect(node=None, key=None, verbose=False, **kwargs):
if not key: raise Exception('No key for Inspect')
if not node: node = h2o_nodes.nodes[0]
a = node.frames(key, **kwargs)
if verbose:
print "inspect of %s:" % key, dump_json(a)
return a
#************************************************************************
def infoFromParse(parse):
if not parse:
raise Exception("parse is empty for infoFromParse")
# assumes just one result from Frames
if 'frames' not in parse:
raise Exception("infoFromParse expects parse= param from parse result: %s" % parse)
if len(parse['frames'])!=1:
raise Exception("infoFromParse expects parse= param from parse result: %s " % parse['frames'])
# it it index[0] or key '0' in a dictionary?
frame = parse['frames'][0]
# need more info about this dataset for debug
numCols = len(frame['columns'])
numRows = frame['rows']
key_name = frame['frame_id']['name']
return numRows, numCols, key_name
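# Illustrative sketch (assumed shape, inferred from the accesses above): the
# parse result passed to this helper looks roughly like
#
#   parse = {'frames': [{'frame_id': {'name': 'cars.hex'},
#                        'rows': 150,
#                        'columns': [{...}, {...}, {...}]}]}
#   numRows, numCols, key_name = infoFromParse(parse)  # -> (150, 3, 'cars.hex')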
#************************************************************************
# make this be the basic way to get numRows, numCols
def infoFromInspect(inspect):
if not inspect:
raise Exception("inspect is empty for infoFromInspect")
# assumes just one result from Frames
if 'frames' not in inspect:
raise Exception("infoFromInspect expects inspect= param from Frames result (single): %s" % inspect)
if len(inspect['frames'])!=1:
raise Exception("infoFromInspect expects inspect= param from Frames result (single): %s " % inspect['frames'])
# it it index[0] or key '0' in a dictionary?
frame = inspect['frames'][0]
# need more info about this dataset for debug
columns = frame['columns']
key_name = frame['frame_id']['name']
missingList = []
labelList = []
typeList = []
for i, colDict in enumerate(columns): # columns is a list
if 'missing_count' not in colDict:
# debug
print "\ncolDict"
for k in colDict:
print " key: %s" % k
# data
# domain
# string_data
# type
# label
# percentiles
# precision
# mins
# maxs
# mean
# histogram_base
# histogram_bins
# histogram_stride
# zero_count
# missing_count
# positive_infinity_count
# negative_infinity_count
# __meta
mins = colDict['mins']
maxs = colDict['maxs']
missing = colDict['missing_count']
label = colDict['label']
stype = colDict['type']
missingList.append(missing)
labelList.append(label)
typeList.append(stype)
if missing!=0:
print "%s: col: %s %s, missing: %d" % (key_name, i, label, missing)
print "inspect typeList:", typeList
# make missingList empty if all 0's
if sum(missingList)==0:
missingList = []
# no type per col in inspect2
numCols = len(frame['columns'])
numRows = frame['rows']
print "\n%s numRows: %s, numCols: %s" % (key_name, numRows, numCols)
return missingList, labelList, numRows, numCols
#************************************************************************
# does all columns unless you specify column index.
# only will return first or specified column
def runSummary(node=None, key=None, column=None, expected=None, maxDelta=None, noPrint=False, **kwargs):
if not key: raise Exception('No key for Summary')
if not node: node = h2o_nodes.nodes[0]
# return node.summary(key, **kwargs)
i = InspectObj(key=key)
# just so I don't have to change names below
missingList = i.missingList
labelList = i.labelList
numRows = i.numRows
numCols = i.numCols
print "labelList:", labelList
assert labelList is not None
# doesn't take indices? only column labels?
# return first column, unless specified
if not (column is None or isinstance(column, (basestring, int))):
raise Exception("column param should be string or integer index or None %s %s" % (type(column), column))
# either return the first col, or the col indentified by label. the column identifed could be string or index?
if column is None: # means the summary json when we ask for col 0, will be what we return (do all though)
colNameToDo = labelList
colIndexToDo = range(len(labelList))
elif isinstance(column, int):
colNameToDo = [labelList[column]]
colIndexToDo = [column]
elif isinstance(column, basestring):
colNameToDo = [column]
if column not in labelList:
raise Exception("% not in labellist: %s" % (column, labellist))
colIndexToDo = [labelList.index(column)]
else:
raise Exception("wrong type %s for column %s" % (type(column), column))
# we get the first column as result after walking across all, if no column parameter
desiredResult = None
for (colIndex, colName) in zip(colIndexToDo, colNameToDo):
print "doing summary on %s %s" % (colIndex, colName)
# ugly looking up the colIndex
co = SummaryObj(key=key, colIndex=colIndex, colName=colName)
if not desiredResult:
desiredResult = co
if not noPrint:
for k,v in co:
# only print [0] of mins and maxs because of the e308 values when they don't have dataset values
if k=='mins' or k=='maxs':
print "%s[0]" % k, v[0]
else:
print k, v
if expected is not None:
print "len(co.histogram_bins):", len(co.histogram_bins)
print "co.label:", co.label, "mean (2 places):", h2o_util.twoDecimals(co.mean)
# what is precision. -1?
print "co.label:", co.label, "std dev. (2 places):", h2o_util.twoDecimals(co.sigma)
# print "FIX! hacking the co.percentiles because it's short by two"
# if co.percentiles:
# percentiles = [0] + co.percentiles + [0]
# else:
# percentiles = None
percentiles = co.percentiles
assert len(co.percentiles) == len(co.default_percentiles)
# the thresholds h2o used, should match what we expected
# expected = [0] * 5
# Fix. doesn't check for expected = 0?
# max of one bin
if maxDelta is None:
maxDelta = (co.maxs[0] - co.mins[0])/1000
if expected[0]: h2o_util.assertApproxEqual(co.mins[0], expected[0], tol=maxDelta,
msg='min is not approx. expected')
if expected[1]: h2o_util.assertApproxEqual(percentiles[2], expected[1], tol=maxDelta,
msg='25th percentile is not approx. expected')
if expected[2]: h2o_util.assertApproxEqual(percentiles[4], expected[2], tol=maxDelta,
msg='50th percentile (median) is not approx. expected')
if expected[3]: h2o_util.assertApproxEqual(percentiles[6], expected[3], tol=maxDelta,
msg='75th percentile is not approx. expected')
if expected[4]: h2o_util.assertApproxEqual(co.maxs[0], expected[4], tol=maxDelta,
msg='max is not approx. expected')
# figure out the expected max error
# use this for comparing to sklearn/sort
MAX_QBINS = 1000
if expected[0] and expected[4]:
expectedRange = expected[4] - expected[0]
# because of floor and ceil effects due we potentially lose 2 bins (worst case)
# the extra bin for the max value, is an extra bin..ignore
expectedBin = expectedRange/(MAX_QBINS-2)
maxErr = expectedBin # should we have some fuzz for fp?
else:
print "Test won't calculate max expected error"
maxErr = 0
pt = h2o_util.twoDecimals(percentiles)
# only look at [0] for now...bit e308 numbers if unpopulated due to not enough unique values in dataset column
mx = h2o_util.twoDecimals(co.maxs[0])
mn = h2o_util.twoDecimals(co.mins[0])
print "co.label:", co.label, "co.percentiles (2 places):", pt
print "co.default_percentiles:", co.default_percentiles
print "co.label:", co.label, "co.maxs: (2 places):", mx
print "co.label:", co.label, "co.mins: (2 places):", mn
# FIX! why would percentiles be None? enums?
if pt is None:
compareActual = mn, [None] * 3, mx
else:
compareActual = mn, pt[2], pt[4], pt[6], mx
h2p.green_print("actual min/25/50/75/max co.label:", co.label, "(2 places):", compareActual)
h2p.green_print("expected min/25/50/75/max co.label:", co.label, "(2 places):", expected)
return desiredResult
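# Minimal usage sketch (key, column and expected values are hypothetical):
# summarize one column and check min/25th/50th/75th/max against expectations
# with a tolerance of roughly one histogram bin.
#
#   co = runSummary(key='iris.hex', column='sepal_len',
#                   expected=[4.3, 5.1, 5.8, 6.4, 7.9], maxDelta=0.1)
#   print co.label, co.mean, co.maxs[0]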
# this parses the json object returned for one col from runSummary...returns an OutputObj object
# summaryResult = h2o_cmd.runSummary(key=hex_key, column=0)
# co = h2o_cmd.infoFromSummary(summaryResult)
# print co.label
# legacy
def infoFromSummary(summaryResult, column=None):
return SummaryObj(summaryResult, column=column)
class ParseObj(OutputObj):
# the most basic thing is that the data frame has the # of rows and cols we expected
# embed that checking here, so every test doesn't have to
def __init__(self, parseResult, expectedNumRows=None, expectedNumCols=None, noPrint=False, **kwargs):
super(ParseObj, self).__init__(parseResult['frames'][0], "Parse", noPrint=noPrint)
# add my stuff
self.numRows, self.numCols, self.parse_key = infoFromParse(parseResult)
# h2o_import.py does this for test support
if 'python_elapsed' in parseResult:
self.python_elapsed = parseResult['python_elapsed']
if expectedNumRows is not None:
assert self.numRows == expectedNumRows, "%s %s" % (self.numRows, expectedNumRows)
if expectedNumCols is not None:
assert self.numCols == expectedNumCols, "%s %s" % (self.numCols, expectedNumCols)
print "ParseObj created for:", self.parse_key # vars(self)
# Let's experiment with creating new objects that are an api I control for generic operations (Inspect)
class InspectObj(OutputObj):
# the most basic thing is that the data frame has the # of rows and cols we expected
# embed that checking here, so every test doesn't have to
def __init__(self, key,
expectedNumRows=None, expectedNumCols=None, expectedMissingList=None, expectedLabelList=None,
noPrint=False, **kwargs):
inspectResult = runInspect(key=key)
super(InspectObj, self).__init__(inspectResult['frames'][0], "Inspect", noPrint=noPrint)
# add my stuff
self.missingList, self.labelList, self.numRows, self.numCols = infoFromInspect(inspectResult)
if expectedNumRows is not None:
assert self.numRows == expectedNumRows, "%s %s" % (self.numRows, expectedNumRows)
if expectedNumCols is not None:
assert self.numCols == expectedNumCols, "%s %s" % (self.numCols, expectedNumCols)
if expectedMissingList is not None:
assert self.missingList == expectedMissingList, "%s %s" % (self.MissingList, expectedMissingList)
if expectedLabelList is not None:
assert self.labelList == expectedLabelList, "%s %s" % (self.labelList, expectedLabelList)
print "InspectObj created for:", key #, vars(self)
class SummaryObj(OutputObj):
    def check(self,
            expectedNumRows=None, expectedNumCols=None,
            expectedLabel=None, expectedType=None, expectedMissing=None, expectedDomain=None, expectedBinsSum=None,
            noPrint=False, **kwargs):
        # Compare this column's attributes against any expected values that were given.
        if expectedNumRows is not None:
            assert self.rows == expectedNumRows, "%s %s" % (self.rows, expectedNumRows)
        if expectedLabel is not None:
            assert self.label == expectedLabel, "%s %s" % (self.label, expectedLabel)
        if expectedType is not None:
            assert self.type == expectedType, "%s %s" % (self.type, expectedType)
        if expectedMissing is not None:
            assert self.missing_count == expectedMissing, "%s %s" % (self.missing_count, expectedMissing)
        if expectedDomain is not None:
            assert self.domain == expectedDomain, "%s %s" % (self.domain, expectedDomain)
        if expectedBinsSum is not None:
            # treat expectedBinsSum as the expected sum of the histogram bins
            bins_sum = sum(self.histogram_bins)
            assert bins_sum == expectedBinsSum, "%s %s" % (bins_sum, expectedBinsSum)
# column is column name?
def __init__(self, key, colIndex, colName,
expectedNumRows=None, expectedNumCols=None,
expectedLabel=None, expectedType=None, expectedMissing=None, expectedDomain=None, expectedBinsSum=None,
noPrint=False, timeoutSecs=30, **kwargs):
# we need both colInndex and colName for doing Summary efficiently
# ugly.
assert colIndex is not None
assert colName is not None
summaryResult = h2o_nodes.nodes[0].summary(key=key, column=colName, timeoutSecs=timeoutSecs, **kwargs)
# this should be the same for all the cols? Or does the checksum change?
frame = summaryResult['frames'][0]
default_percentiles = frame['default_percentiles']
checksum = frame['checksum']
rows = frame['rows']
# assert colIndex < len(frame['columns']), "You're asking for colIndex %s but there are only %s. " % \
# (colIndex, len(frame['columns']))
# coJson = frame['columns'][colIndex]
# is it always 0 now? the one I asked for ?
coJson = frame['columns'][0]
assert checksum !=0 and checksum is not None
assert rows!=0 and rows is not None
# FIX! why is frame['key'] = None here?
# assert frame['key'] == key, "%s %s" % (frame['key'], key)
super(SummaryObj, self).__init__(coJson, "Summary for %s" % colName, noPrint=noPrint)
# how are enums binned. Stride of 1? (what about domain values)
# touch all
# print "vars", vars(self)
coList = [
len(self.data),
self.domain,
self.string_data,
self.type,
self.label,
self.percentiles,
self.precision,
self.mins,
self.maxs,
self.mean,
self.histogram_base,
len(self.histogram_bins),
self.histogram_stride,
self.zero_count,
self.missing_count,
self.positive_infinity_count,
self.negative_infinity_count,
]
assert self.label==colName, "%s You must have told me the wrong colName %s for the given colIndex %s" % \
(self.label, colName, colIndex)
print "you can look at this attributes in the returned object (which is OutputObj if you assigned to 'co')"
for k,v in self:
print "%s" % k,
# hack these into the column object from the full summary
self.default_percentiles = default_percentiles
self.checksum = checksum
self.rows = rows
print "\nSummaryObj for", key, "for colName", colName, "colIndex:", colIndex
print "SummaryObj created for:", key # vars(self)
# now do the assertion checks
self.check(expectedNumRows, expectedNumCols,
expectedLabel, expectedType, expectedMissing, expectedDomain, expectedBinsSum,
noPrint=noPrint, **kwargs)
| apache-2.0 | -2,744,904,721,334,919,000 | 39.633005 | 122 | 0.600048 | false |
ingted/voltdb | tests/test_apps/csvbenchmark/csvbenchmark.py | 7 | 15913 | #!/usr/bin/env python
# This file is part of VoltDB.
# Copyright (C) 2008-2015 VoltDB Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
from optparse import OptionParser
from random import randint
import os
import sys
import re
from numpy import *
import random
from subprocess import Popen,PIPE
import shlex
import datetime
from voltdbclient import FastSerializer, VoltProcedure
import time
CSVLOADER = "bin/csvloader"
#SQLCMD = "$VOLTDB_HOME/bin/sqlcmd --servers=%s" % servers
# declare cases impmeneted and the data generator code
CASES = {
"narrow_short_noix" : "data_narrow_short",
"narrow_short_ix" : "data_narrow_short",
"narrow_short_cmpix" : "data_narrow_short",
"narrow_short_hasview" : "data_narrow_short",
"narrow_long_noix" : "data_narrow_long",
"narrow_long_ix" : "data_narrow_long",
"narrow_long_cmpix" : "data_narrow_long",
"narrow_long_hasview" : "data_narrow_long",
"generic_noix" : "data_generic",
"generic_ix" : "data_generic",
"replicated_pk" : "data_replicated_pk",
}
def list_cases():
print "List of implemented csvloader cases:\n"
for k in sorted(CASES.keys()):
print "\t%s" % k
# build the reference character set
# user all possible unicode-16 codes (first code page 0000-ffff)
UNICODE_CHARSET = ""
#for c in range(32,64*1024):
for c in range(32,127):
# 0-31 control chars
# 34 "
# 36 $
# 37 %
# 38 &
# 39 '
# 44 , reserved as field separator
# 91 [
# 92 \ just avoid it
# 93 ]
# 94 ^ quote reserved for loader
# 95 _ 37 % for LIKE % bbi escape doesn't work
# 96 `
# 123 {
# 124 | reserved as field separator
# 125 }
# 126 ~
# 127 DLE
if not (c==44 or c==127):
UNICODE_CHARSET += unichr(c)
ESCAPE_CHAR="\\"
QUOTE_CHAR="\""
UNICODE_CHARSET_MINUS_QUOTE_CHAR = UNICODE_CHARSET.replace(QUOTE_CHAR, "")
UNICODE_CHARSET_MINUS_WHITESPACE_CHARS = UNICODE_CHARSET.replace(" \t\n","")
NUMERIC_CHARSET="0123456789"
# XXX not yet handling leading/trailing zeroes and many other
# cases which are useful in testing, but this is not a test it is a benchmark.
def gentext(size):
r = ''.join(random.sample(UNICODE_CHARSET, len(UNICODE_CHARSET)))
s = r * int(size/len(r)) + r[:size%len(r)]
m = re.match(r'(.*)([ \t\n]+)$', s)
if m:
s = m.group(1) + ''.join(random.sample(UNICODE_CHARSET_MINUS_WHITESPACE_CHARS, len(m.group(2))))
s = s.replace(QUOTE_CHAR, QUOTE_CHAR+QUOTE_CHAR)[:size]
if (len(s) == 1 and s[0] == QUOTE_CHAR) or (len(s) > 1 and s[-1] == QUOTE_CHAR and s[-2] != QUOTE_CHAR):
s = s[:-1] + random.choice(UNICODE_CHARSET_MINUS_QUOTE_CHAR)
assert len(s) == size
return QUOTE_CHAR + s[:size] + QUOTE_CHAR
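# Behaviour sketch (output below is illustrative, not an actual run): gentext
# returns a double-quoted CSV field whose escaped payload is exactly `size`
# characters, with embedded quote characters doubled so csvloader reads it as
# a single field.
#
#   gentext(5)   # e.g. '"aB0c9"'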
def genfixeddecimalstr(size=38, precision=12, signed=True):
# voltdb decimal is 16-byte with fixed scale of 12 and precision of 38
p = -1*precision
r = ''.join(random.sample(NUMERIC_CHARSET, len(NUMERIC_CHARSET)))
r = r * int(size/len(r)) + r[:size%len(r)]
if (p>0):
r = r[:p] + '.' + r[p:]
if signed:
        r = random.choice(["-", "+", ""]) + r
return r
def gencurrency(size=16, precision=4):
c = genfixeddecimalstr(size, precision)
curr = re.match(r'^0*(\d+\.*\d+)0*$', c)
print curr.group(1)
return curr.group(1)
def genint(size):
if size == 1:
return randint(-2**7+1, 2**7-1)
elif size == 2:
return randint(-2**15+1, 2**15-1)
elif size == 4:
return randint(-2**31+1, 2**31-1)
elif size == 8:
return randint(-2**63+1, 2**63-1)
else:
raise RuntimeError ("invalid size for integer %d" % size)
def gennumsequence(__seq):
# pass in a list of on one number
assert (isinstance(__seq, list) and len(__seq) == 1)
__seq[0] += 1
return __seq[0]
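# Usage sketch: the one-element list acts as a mutable counter owned by the
# caller (see data_replicated_pk below), so repeated calls yield 1, 2, 3, ...
#
#   myseq = [0]
#   gennumsequence(myseq)   # -> 1
#   gennumsequence(myseq)   # -> 2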
def gentimestamp():
return datetime.datetime.today().strftime('"%Y-%m-%d %H:%M:%S"')
def gendouble():
return random.random() * genint(4)
def run_readlines(cmd):
fd = os.popen(cmd)
result = fd.read()
#print result
fd.close()
return result
def run_csvloader(schema, data_file):
rowcount = options.ROW_COUNT
elapsed_results = []
parsing_results = []
loading_results = []
for I in range(0, options.TRIES):
home = os.getenv("VOLTDB_HOME")
before_row_count = get_table_row_count(schema)
cmd = "%s --servers=%s" % (os.path.join(home, CSVLOADER), ','.join(options.servers))
if options.csvoptions:
cmd += " -o " + ",".join(options.csvoptions)
cmd += " %s -f %s" % (schema, data_file)
if options.VERBOSE:
print "starting csvloader with command: " + cmd
start_time = time.time()
p = Popen(shlex.split(cmd), stdout=PIPE, stderr=PIPE)
(stdout, stderr) = p.communicate()
run_time = time.time() - start_time
stdout_lines = stdout.split('\n')
if options.VERBOSE:
for l in stdout_lines:
print '[csvloader stdout] ' + l
rc = p.returncode
actual_row_count = get_table_row_count(schema)
if rc != 0:
print "CSVLoader failed with rc %d" % rc
for l in stderr.split('\n'):
print '[csvloader stderr] ' + l
raise RuntimeError ("CSV Loader failed")
# XXX seems that csvloader doesnt always returncode nonzero if it fails to load rows
m = re.search(r'^Read (\d+) rows from file and successfully inserted (\d+) rows \(final\)$',
stdout, flags=re.M)
if m is None or int(m.group(1)) != rowcount or m.group(1) != m.group(2):
raise RuntimeError ("CSV Loader failed to load all rows")
if int(before_row_count) + rowcount != int(actual_row_count):
raise RuntimeError ("Actual table row count was not as expected exp:%d act:%d" % (rowcount,actual_row_count))
elapsed_results.append(float(run_time))
def analyze_results(perf_results):
#print "raw perf_results: %s" % perf_results
pr = sorted(perf_results)[1:-1]
if len(pr) == 0:
pr = perf_results
return (average(pr), std(pr))
avg, stddev = analyze_results(elapsed_results)
print "statistics for %s execution time avg: %f stddev: %f rows/sec: %f rows: %d file size: %d tries: %d" %\
(schema, avg, stddev, rowcount/avg, rowcount, os.path.getsize(data_file), options.TRIES)
if options.statsfile:
with open(options.statsfile, "a") as sf:
# report duration in milliseconds for stats collector
print >>sf, "%s,%f,%d,0,0,0,0,0,0,0,0,0,0" % (schema, avg*1000.0, rowcount)
return (rowcount, avg, stddev)
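# Invocation sketch (schema name is one of the CASES keys): load a generated
# data file and get back (row count, average elapsed seconds, std deviation).
#
#   rows, avg, stddev = run_csvloader('narrow_short_noix',
#                                     get_datafile_path('narrow_short'))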
def get_table_row_count(table_name):
host = random.choice(options.servers)
pyclient = FastSerializer(host=host, port=21212)
count = VoltProcedure(pyclient, '@AdHoc', [FastSerializer.VOLTTYPE_STRING])
resp = count.call(['select count(*) from %s' % table_name], timeout=360)
if resp.status != 1 or len(resp.tables[0].tuples) != 1:
print "Unexpected response to count query from host %s: %s" % (host, resp)
raise RuntimeError()
__tuples = resp.tables[0].tuples[0]
result = __tuples[0]
print "count query returned: %s" % result
return result
def get_datafile_path(case):
return os.path.join(DATA_DIR, "csvbench_%s_%d.dat" % (case, options.ROW_COUNT))
def get_filesize(file):
return int(run_readlines("wc -l %s" % file).split(' ')[0])
def list_callback (option, opt, value, parser):
"""split the list of strings and store it in the parser options """
setattr(parser.values, option.dest, value.split(','))
def parse_cmdline():
global options, args, DATA_DIR
usage = "usage: %prog [options] path-to-loadfiles"
parser = OptionParser()
parser.add_option ("-s", "--servers",
type = "string",
action = "callback", callback = list_callback,
default=["localhost"],
help ="list of servers")
# WNG Don't run more than one case at a time in apprunner if collecting stats
parser.add_option ("-c", "--case",
type = "string",
action = "callback", callback = list_callback,
default=None,
help ="comma separate list of cases to run")
parser.add_option ("-n", "--rows",
type = "int",
dest = "ROW_COUNT",
default = 100000,
help ="number of rows to test")
parser.add_option ("-r", "--regeneratedata",
dest = "REGENERATE",
action="store_true", default=False,
help ="regenerate the data'")
parser.add_option ("-t", "--tries",
type = "int",
dest = "TRIES",
default = 1,
help ="number of time to run the test case and average the performance results")
parser.add_option ("-o", "--csvoptions",
type = "string",
action = "callback", callback = list_callback,
default=None,
help ="comma separated list of options to be passed to the csvloader")
parser.add_option ("-v", "--verbose",
dest = "VERBOSE",
action="store_true", default=False,
help ="print csv output'")
parser.add_option ("-l", "--list",
dest = "LIST",
action="store_true", default=False,
help ="list cases supported and exit'")
parser.add_option ("--statsfile",
type = "string",
dest = "statsfile",
default=None,
help ="file to write statistics for apprunner")
(options, args) = parser.parse_args()
if options.LIST:
list_cases()
sys.exit(0)
if len(args) < 1:
print "ERROR load file directory not specified"
sys.exit(1)
DATA_DIR = args[0]
if not os.path.isdir(DATA_DIR):
print "ERROR load file directory does not exist, or is not a directory"
sys.exit(1)
if options.statsfile:
f = open(options.statsfile, 'w')
        f.close()
def data_narrow_short(rebuild=False):
data_file = get_datafile_path("narrow_short")
if rebuild or not os.path.exists(data_file):
with open(data_file, "w") as f:
for I in range(0, options.ROW_COUNT):
print >>f, "%d,%d,%d,%d,%s" % (I, genint(2), genint(1), genint(8), gentext(60))
print "data file %s was written" % data_file
return data_file
def data_narrow_long(rebuild=False):
data_file = get_datafile_path("narrow_long")
if rebuild or not os.path.exists(data_file):
with open(data_file, "w") as f:
for I in range(0, options.ROW_COUNT):
print >>f, "%d,%d,%d,%d,%s" % (I, randint(-32766,32767),randint(-127,127),randint(-2**63,2**63),gentext(512))
print "data file %s was written" % data_file
return data_file
def data_generic(rebuild=False):
"""
a integer NOT NULL
, b tinyint
, c smallint
, d varchar(1)
, e timestamp
, f timestamp
, h varchar(60)
, i varchar(60)
, j varchar(60)
, k varchar(1024)
, l varchar(1024)
, m varchar(1024)
, n double
, o bigint
, p varchar(1)
, r bigint
a integer NOT NULL
, b tinyint
, c smallint
, d varchar(1)
, e timestamp
, f timestamp
, h varchar(60)
, i varchar(60)
, j varchar(60)
, k varchar(1024)
, l varchar(1024)
, m varchar(1024)
, n float
, o bigint
, p varchar(1)
, r bigint
, s decimal(32,4)
, t decimal(32,4)
, u decimal(32,4)
"""
case = "generic"
data_file = get_datafile_path(case)
if rebuild or not os.path.exists(data_file) or get_filesize(data_file) != options.ROW_COUNT:
with open(data_file, "w") as f:
for I in range(0, options.ROW_COUNT):
print >>f, "%d,%d,%d,%s,%s,%s,%s,%s,%s,%s,%s,%s,%f,%d,%s,%d" \
% ( I,
genint(1),
genint(2),
gentext(1),
gentimestamp(),
gentimestamp(),
gentext(60),
gentext(60),
gentext(60),
gentext(1024),
gentext(1024),
gentext(1024),
gendouble(),
genint(8),
gentext(1),
genint(8)
)
print "data file %s was written" % data_file
return data_file
def case_generic_noix():
schema = "generic_noix"
data_file = data_generic(False)
run_csvloader(schema, data_file)
def data_replicated_pk(rebuild=False):
data_file = get_datafile_path("replicated_pk")
if rebuild or not os.path.exists(data_file) or get_filesize(data_file) != options.ROW_COUNT:
myseq = [0]
with open(data_file, "w") as f:
for I in range(0, options.ROW_COUNT):
print >>f, "%d,%s,%s,%s,%s,%s" % (gennumsequence(myseq),
gentext(60),
gentext(1024),
gentimestamp(),
gentext(30),
genfixeddecimalstr(size=1, precision=0, signed=False)
)
print "data file %s was written" % data_file
return data_file
parse_cmdline()
cases = options.case or CASES.keys()
for schema in cases:
if schema not in CASES:
print "ERROR unknown case: %s" % c
print list_cases()
sys.exit(1)
data_file = globals()[CASES[schema]](options.REGENERATE)
run_csvloader(schema, data_file)
| agpl-3.0 | -1,589,348,484,211,064,300 | 36.179907 | 125 | 0.539873 | false |
mat650/metagoofil | hachoir_parser/container/realmedia.py | 95 | 6851 | """
RealMedia (.rm) parser
Author: Mike Melanson
Creation date: 15 december 2006
References:
- http://wiki.multimedia.cx/index.php?title=RealMedia
- Appendix E: RealMedia File Format (RMFF) Reference
https://common.helixcommunity.org/nonav/2003/HCS_SDK_r5/htmfiles/rmff.htm
Samples:
- http://samples.mplayerhq.hu/real/
"""
from hachoir_parser import Parser
from hachoir_core.field import (FieldSet,
UInt16, UInt32, Bit, RawBits,
RawBytes, String, PascalString8, PascalString16)
from hachoir_core.text_handler import textHandler, hexadecimal
from hachoir_core.endian import BIG_ENDIAN
def parseHeader(self):
yield UInt32(self, "filever", "File version")
yield UInt32(self, "numheaders", "number of headers")
def parseFileProperties(self):
yield UInt32(self, "max_bit_rate", "Maximum bit rate")
yield UInt32(self, "avg_bit_rate", "Average bit rate")
yield UInt32(self, "max_pkt_size", "Size of largest data packet")
yield UInt32(self, "avg_pkt_size", "Size of average data packet")
yield UInt32(self, "num_pkts", "Number of data packets")
yield UInt32(self, "duration", "File duration in milliseconds")
yield UInt32(self, "preroll", "Suggested preroll in milliseconds")
yield textHandler(UInt32(self, "index_offset", "Absolute offset of first index chunk"), hexadecimal)
yield textHandler(UInt32(self, "data_offset", "Absolute offset of first data chunk"), hexadecimal)
yield UInt16(self, "stream_count", "Number of streams in the file")
yield RawBits(self, "reserved", 13)
yield Bit(self, "is_live", "Whether file is a live broadcast")
yield Bit(self, "is_perfect_play", "Whether PerfectPlay can be used")
yield Bit(self, "is_saveable", "Whether file can be saved")
def parseContentDescription(self):
yield PascalString16(self, "title", charset="ISO-8859-1", strip=" \0")
yield PascalString16(self, "author", charset="ISO-8859-1", strip=" \0")
yield PascalString16(self, "copyright", charset="ISO-8859-1", strip=" \0")
yield PascalString16(self, "comment", charset="ISO-8859-1", strip=" \0")
class NameValueProperty(FieldSet):
def __init__(self, *args):
FieldSet.__init__(self, *args)
self._size = self["size"].value * 8
def createFields(self):
yield UInt32(self, "size")
yield UInt16(self, "obj_version")
yield PascalString8(self, "name", charset="ASCII")
yield UInt32(self, "type")
yield PascalString16(self, "value", charset="ISO-8859-1", strip=" \0")
class LogicalFileInfo(FieldSet):
def createFields(self):
yield UInt32(self, "size")
yield UInt16(self, "obj_version")
yield UInt16(self, "nb_physical_stream")
for index in xrange(self["nb_physical_stream"].value):
yield UInt16(self, "physical_stream[]")
for index in xrange(self["nb_physical_stream"].value):
yield UInt16(self, "data_offset[]")
yield UInt16(self, "nb_rule")
for index in xrange(self["nb_rule"].value):
yield UInt16(self, "rule[]")
yield UInt16(self, "nb_prop")
for index in xrange(self["nb_prop"].value):
yield NameValueProperty(self, "prop[]")
def parseMediaPropertiesHeader(self):
yield UInt16(self, "stream_number", "Stream number")
yield UInt32(self, "max_bit_rate", "Maximum bit rate")
yield UInt32(self, "avg_bit_rate", "Average bit rate")
yield UInt32(self, "max_pkt_size", "Size of largest data packet")
yield UInt32(self, "avg_pkt_size", "Size of average data packet")
yield UInt32(self, "stream_start", "Stream start offset in milliseconds")
yield UInt32(self, "preroll", "Preroll in milliseconds")
yield UInt32(self, "duration", "Stream duration in milliseconds")
yield PascalString8(self, "desc", "Stream description", charset="ISO-8859-1")
yield PascalString8(self, "mime_type", "MIME type string", charset="ASCII")
yield UInt32(self, "specific_size", "Size of type-specific data")
size = self['specific_size'].value
if size:
if self["mime_type"].value == "logical-fileinfo":
yield LogicalFileInfo(self, "file_info", size=size*8)
else:
yield RawBytes(self, "specific", size, "Type-specific data")
class Chunk(FieldSet):
tag_info = {
".RMF": ("header", parseHeader),
"PROP": ("file_prop", parseFileProperties),
"CONT": ("content_desc", parseContentDescription),
"MDPR": ("stream_prop[]", parseMediaPropertiesHeader),
"DATA": ("data[]", None),
"INDX": ("file_index[]", None)
}
def createValueFunc(self):
return self.value_func(self)
def __init__(self, parent, name, description=None):
FieldSet.__init__(self, parent, name, description)
self._size = (self["size"].value) * 8
tag = self["tag"].value
if tag in self.tag_info:
self._name, self.parse_func = self.tag_info[tag]
else:
self._description = ""
self.parse_func = None
def createFields(self):
yield String(self, "tag", 4, "Chunk FourCC", charset="ASCII")
yield UInt32(self, "size", "Chunk Size")
yield UInt16(self, "version", "Chunk Version")
if self.parse_func:
for field in self.parse_func(self):
yield field
else:
size = (self.size - self.current_size) // 8
if size:
yield RawBytes(self, "raw", size)
def createDescription(self):
return "Chunk: %s" % self["tag"].display
class RealMediaFile(Parser):
MAGIC = '.RMF\0\0\0\x12\0\1' # (magic, size=18, version=1)
PARSER_TAGS = {
"id": "real_media",
"category": "container",
"file_ext": ("rm",),
"mime": (
u"video/x-pn-realvideo",
u"audio/x-pn-realaudio",
u"audio/x-pn-realaudio-plugin",
u"audio/x-real-audio",
u"application/vnd.rn-realmedia"),
"min_size": len(MAGIC)*8, # just the identifier
"magic": ((MAGIC, 0),),
"description": u"RealMedia (rm) Container File",
}
endian = BIG_ENDIAN
def validate(self):
if self.stream.readBytes(0, 4) != '.RMF':
return "Invalid magic"
if self["header/size"].value != 18:
return "Invalid header size"
if self["header/version"].value not in (0, 1):
return "Unknown file format version (%s)" % self["header/version"].value
return True
def createFields(self):
while not self.eof:
yield Chunk(self, "chunk")
def createMimeType(self):
for prop in self.array("stream_prop"):
if prop["mime_type"].value == "video/x-pn-realvideo":
return u"video/x-pn-realvideo"
return u"audio/x-pn-realaudio"
| gpl-2.0 | -1,140,659,469,835,173,000 | 38.831395 | 104 | 0.62604 | false |
ovnicraft/odoo | addons/association/__openerp__.py | 260 | 1700 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Associations Management',
'version': '0.1',
'category': 'Specific Industry Applications',
'description': """
This module is to configure modules related to an association.
==============================================================
It installs the profile for associations to manage events, registrations, memberships,
membership products (schemes).
""",
'author': 'OpenERP SA',
'depends': ['base_setup', 'membership', 'event'],
'data': ['security/ir.model.access.csv', 'profile_association.xml'],
'demo': [],
'installable': True,
'auto_install': False,
'website': 'https://www.odoo.com'
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 2,436,279,413,439,626,000 | 39.47619 | 87 | 0.601765 | false |
partofthething/home-assistant | homeassistant/components/alpha_vantage/sensor.py | 16 | 6966 | """Stock market information from Alpha Vantage."""
from datetime import timedelta
import logging
from alpha_vantage.foreignexchange import ForeignExchange
from alpha_vantage.timeseries import TimeSeries
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import ATTR_ATTRIBUTION, CONF_API_KEY, CONF_CURRENCY, CONF_NAME
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
ATTR_CLOSE = "close"
ATTR_HIGH = "high"
ATTR_LOW = "low"
ATTRIBUTION = "Stock market information provided by Alpha Vantage"
CONF_FOREIGN_EXCHANGE = "foreign_exchange"
CONF_FROM = "from"
CONF_SYMBOL = "symbol"
CONF_SYMBOLS = "symbols"
CONF_TO = "to"
ICONS = {
"BTC": "mdi:currency-btc",
"EUR": "mdi:currency-eur",
"GBP": "mdi:currency-gbp",
"INR": "mdi:currency-inr",
"RUB": "mdi:currency-rub",
"TRY": "mdi:currency-try",
"USD": "mdi:currency-usd",
}
SCAN_INTERVAL = timedelta(minutes=5)
SYMBOL_SCHEMA = vol.Schema(
{
vol.Required(CONF_SYMBOL): cv.string,
vol.Optional(CONF_CURRENCY): cv.string,
vol.Optional(CONF_NAME): cv.string,
}
)
CURRENCY_SCHEMA = vol.Schema(
{
vol.Required(CONF_FROM): cv.string,
vol.Required(CONF_TO): cv.string,
vol.Optional(CONF_NAME): cv.string,
}
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_API_KEY): cv.string,
vol.Optional(CONF_FOREIGN_EXCHANGE): vol.All(cv.ensure_list, [CURRENCY_SCHEMA]),
vol.Optional(CONF_SYMBOLS): vol.All(cv.ensure_list, [SYMBOL_SCHEMA]),
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Alpha Vantage sensor."""
api_key = config[CONF_API_KEY]
symbols = config.get(CONF_SYMBOLS, [])
conversions = config.get(CONF_FOREIGN_EXCHANGE, [])
if not symbols and not conversions:
msg = "No symbols or currencies configured."
hass.components.persistent_notification.create(msg, "Sensor alpha_vantage")
_LOGGER.warning(msg)
return
timeseries = TimeSeries(key=api_key)
dev = []
for symbol in symbols:
try:
_LOGGER.debug("Configuring timeseries for symbols: %s", symbol[CONF_SYMBOL])
timeseries.get_intraday(symbol[CONF_SYMBOL])
except ValueError:
_LOGGER.error("API Key is not valid or symbol '%s' not known", symbol)
dev.append(AlphaVantageSensor(timeseries, symbol))
forex = ForeignExchange(key=api_key)
for conversion in conversions:
from_cur = conversion.get(CONF_FROM)
to_cur = conversion.get(CONF_TO)
try:
_LOGGER.debug("Configuring forex %s - %s", from_cur, to_cur)
forex.get_currency_exchange_rate(from_currency=from_cur, to_currency=to_cur)
except ValueError as error:
_LOGGER.error(
"API Key is not valid or currencies '%s'/'%s' not known",
from_cur,
to_cur,
)
_LOGGER.debug(str(error))
dev.append(AlphaVantageForeignExchange(forex, conversion))
add_entities(dev, True)
_LOGGER.debug("Setup completed")
class AlphaVantageSensor(Entity):
"""Representation of a Alpha Vantage sensor."""
def __init__(self, timeseries, symbol):
"""Initialize the sensor."""
self._symbol = symbol[CONF_SYMBOL]
self._name = symbol.get(CONF_NAME, self._symbol)
self._timeseries = timeseries
self.values = None
self._unit_of_measurement = symbol.get(CONF_CURRENCY, self._symbol)
self._icon = ICONS.get(symbol.get(CONF_CURRENCY, "USD"))
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
@property
def state(self):
"""Return the state of the sensor."""
return self.values["1. open"]
@property
def device_state_attributes(self):
"""Return the state attributes."""
if self.values is not None:
return {
ATTR_ATTRIBUTION: ATTRIBUTION,
ATTR_CLOSE: self.values["4. close"],
ATTR_HIGH: self.values["2. high"],
ATTR_LOW: self.values["3. low"],
}
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return self._icon
def update(self):
"""Get the latest data and updates the states."""
_LOGGER.debug("Requesting new data for symbol %s", self._symbol)
all_values, _ = self._timeseries.get_intraday(self._symbol)
self.values = next(iter(all_values.values()))
_LOGGER.debug("Received new values for symbol %s", self._symbol)
class AlphaVantageForeignExchange(Entity):
"""Sensor for foreign exchange rates."""
def __init__(self, foreign_exchange, config):
"""Initialize the sensor."""
self._foreign_exchange = foreign_exchange
self._from_currency = config[CONF_FROM]
self._to_currency = config[CONF_TO]
if CONF_NAME in config:
self._name = config.get(CONF_NAME)
else:
self._name = f"{self._to_currency}/{self._from_currency}"
self._unit_of_measurement = self._to_currency
        self._icon = ICONS.get(self._from_currency, ICONS["USD"])
self.values = None
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
@property
def state(self):
"""Return the state of the sensor."""
return round(float(self.values["5. Exchange Rate"]), 4)
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return self._icon
@property
def device_state_attributes(self):
"""Return the state attributes."""
if self.values is not None:
return {
ATTR_ATTRIBUTION: ATTRIBUTION,
CONF_FROM: self._from_currency,
CONF_TO: self._to_currency,
}
def update(self):
"""Get the latest data and updates the states."""
_LOGGER.debug(
"Requesting new data for forex %s - %s",
self._from_currency,
self._to_currency,
)
self.values, _ = self._foreign_exchange.get_currency_exchange_rate(
from_currency=self._from_currency, to_currency=self._to_currency
)
_LOGGER.debug(
"Received new data for forex %s - %s",
self._from_currency,
self._to_currency,
)
| mit | 3,556,419,044,797,914,600 | 30.808219 | 88 | 0.61269 | false |
stamhe/zulip | zerver/lib/response.py | 124 | 1316 | from __future__ import absolute_import
from django.http import HttpResponse, HttpResponseNotAllowed
import ujson
class HttpResponseUnauthorized(HttpResponse):
status_code = 401
def __init__(self, realm):
HttpResponse.__init__(self)
self["WWW-Authenticate"] = 'Basic realm="%s"' % (realm,)
def json_unauthorized(message):
resp = HttpResponseUnauthorized("zulip")
resp.content = ujson.dumps({"result": "error",
"msg": message}) + "\n"
return resp
def json_method_not_allowed(methods):
resp = HttpResponseNotAllowed(methods)
resp.content = ujson.dumps({"result": "error",
"msg": "Method Not Allowed",
"allowed_methods": methods})
return resp
def json_response(res_type="success", msg="", data={}, status=200):
content = {"result": res_type, "msg": msg}
content.update(data)
return HttpResponse(content=ujson.dumps(content) + "\n",
content_type='application/json', status=status)
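# Usage sketch (payloads are illustrative): the helpers below build on
# json_response, e.g.
#
#   json_success({"messages": []})     # 200 {"result": "success", "msg": "", "messages": []}
#   json_error("Invalid stream name")  # 400 {"result": "error", "msg": "Invalid stream name"}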
def json_success(data={}):
return json_response(data=data)
def json_error(msg, data={}, status=400):
return json_response(res_type="error", msg=msg, data=data, status=status)
def json_unhandled_exception():
return json_response(res_type="error", msg="Internal server error", status=500)
| apache-2.0 | 3,456,960,120,447,376,400 | 32.74359 | 83 | 0.653495 | false |
ojengwa/flask-oauthlib | example/linkedin.py | 16 | 2007 | from flask import Flask, redirect, url_for, session, request, jsonify
from flask_oauthlib.client import OAuth
app = Flask(__name__)
app.debug = True
app.secret_key = 'development'
oauth = OAuth(app)
linkedin = oauth.remote_app(
'linkedin',
consumer_key='k8fhkgkkqzub',
consumer_secret='ZZtLETQOQYNDjMrz',
request_token_params={
'scope': 'r_basicprofile',
'state': 'RandomString',
},
base_url='https://api.linkedin.com/v1/',
request_token_url=None,
access_token_method='POST',
access_token_url='https://www.linkedin.com/uas/oauth2/accessToken',
authorize_url='https://www.linkedin.com/uas/oauth2/authorization',
)
@app.route('/')
def index():
if 'linkedin_token' in session:
me = linkedin.get('people/~')
return jsonify(me.data)
return redirect(url_for('login'))
@app.route('/login')
def login():
return linkedin.authorize(callback=url_for('authorized', _external=True))
@app.route('/logout')
def logout():
session.pop('linkedin_token', None)
return redirect(url_for('index'))
@app.route('/login/authorized')
def authorized():
resp = linkedin.authorized_response()
if resp is None:
return 'Access denied: reason=%s error=%s' % (
request.args['error_reason'],
request.args['error_description']
)
session['linkedin_token'] = (resp['access_token'], '')
me = linkedin.get('people/~')
return jsonify(me.data)
@linkedin.tokengetter
def get_linkedin_oauth_token():
return session.get('linkedin_token')
def change_linkedin_query(uri, headers, body):
auth = headers.pop('Authorization')
headers['x-li-format'] = 'json'
if auth:
auth = auth.replace('Bearer', '').strip()
if '?' in uri:
uri += '&oauth2_access_token=' + auth
else:
uri += '?oauth2_access_token=' + auth
return uri, headers, body
linkedin.pre_request = change_linkedin_query
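# Effect sketch (token value is illustrative): the pre_request hook rewrites
# each API call before it is sent, roughly
#
#   uri, headers, body = change_linkedin_query(
#       'https://api.linkedin.com/v1/people/~',
#       {'Authorization': 'Bearer TOKEN'}, None)
#   # uri -> 'https://api.linkedin.com/v1/people/~?oauth2_access_token=TOKEN'
#   # headers now carry 'x-li-format': 'json' instead of Authorization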
if __name__ == '__main__':
app.run()
| bsd-3-clause | 7,344,566,381,991,752,000 | 24.730769 | 77 | 0.63129 | false |
anilmuthineni/tensorflow | tensorflow/python/kernel_tests/transpose_op_test.py | 29 | 9886 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for Transpose op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.platform import test
class TransposeTest(test.TestCase):
def _np_transpose(self, x, perm):
ret = np.copy(x)
ret = ret.transpose(perm)
return ret
def _compareCpu(self, x, p):
np_ans = self._np_transpose(x, p)
with self.test_session(use_gpu=False):
inx = ops.convert_to_tensor(x)
y = array_ops.transpose(inx, p)
tf_ans = y.eval()
self.assertAllEqual(np_ans, tf_ans)
self.assertShapeEqual(np_ans, y)
jacob_t = None
# Gradient check on CPU.
xs = list(np.shape(x))
ys = list(np.shape(tf_ans))
if x.dtype == np.float32:
jacob_t, jacob_n = gradient_checker.compute_gradient(inx, xs, y, ys, x,
1e-2)
self.assertAllClose(jacob_t, jacob_n, 1e-3, 1e-3)
elif x.dtype == np.float64:
jacob_t, jacob_n = gradient_checker.compute_gradient(inx, xs, y, ys, x,
1e-2)
self.assertAllClose(jacob_t, jacob_n, 1e-6, 1e-6)
return tf_ans, jacob_t
def _compareGpu(self, x, p):
np_ans = self._np_transpose(x, p)
with self.test_session(use_gpu=True):
inx = ops.convert_to_tensor(x)
y = array_ops.transpose(inx, p)
tf_ans = y.eval()
self.assertAllEqual(np_ans, tf_ans)
self.assertShapeEqual(np_ans, y)
jacob_t = None
# Gradient check on GPU.
xs = list(np.shape(x))
ys = list(np.shape(tf_ans))
if x.dtype == np.float32:
jacob_t, jacob_n = gradient_checker.compute_gradient(inx, xs, y, ys, x,
1e-2)
self.assertAllClose(jacob_t, jacob_n, 1e-3, 1e-3)
elif x.dtype == np.float64:
jacob_t, jacob_n = gradient_checker.compute_gradient(inx, xs, y, ys, x,
1e-2)
self.assertAllClose(jacob_t, jacob_n, 1e-6, 1e-6)
return tf_ans, jacob_t
def _compare(self, x, use_gpu=False):
n = np.ndim(x)
# generate all permutations of [0, 1, ... n-1] in random order.
all_perm = np.random.permutation(
[p for p in itertools.permutations(range(n))]).astype(np.int32)
for p in all_perm[0:2]:
self._compareCpu(x, p)
if use_gpu:
self._compareGpu(x, p)
def _compare_cpu_gpu(self, x):
n = np.ndim(x)
# generate all permutation of [0, 1, ... n-1] in random order,
# choose the first two.
perms = itertools.permutations(range(n))
for _ in range(2):
p = np.random.permutation(next(perms)).astype(np.int32)
tf_a_cpu, tf_g_cpu = self._compareCpu(x, p)
tf_a_gpu, tf_g_gpu = self._compareGpu(x, p)
assert tf_g_cpu is not None
assert tf_g_gpu is not None
if x.dtype == np.float32:
self.assertAllClose(tf_a_cpu, tf_a_gpu, 1e-3, 1e-3)
self.assertAllClose(tf_g_cpu, tf_g_gpu, 1e-3, 1e-3)
elif x.dtype == np.float64:
self.assertAllClose(tf_a_cpu, tf_a_gpu, 1e-6, 1e-6)
self.assertAllClose(tf_g_cpu, tf_g_gpu, 1e-6, 1e-6)
def _testBoth(self, x):
self._compare(x, use_gpu=False)
self._compare(x, use_gpu=True)
def testRank1(self):
self._compareCpu(np.arange(0., 2), [0])
def test1D(self):
vector = np.arange(0, 2).reshape((1, 1, 1, 2, 1))
self._compare(vector, use_gpu=False)
self._compare(vector, use_gpu=True)
def testNop(self):
self._compareCpu(np.arange(0, 6).reshape([3, 2]).astype(np.float32), [0, 1])
def testSimple(self):
self._compareCpu(
np.arange(0, 8).reshape([2, 4]).astype(np.float32),
np.array([1, 0]).astype(np.int32))
def testHalf(self):
self._compare(np.arange(0, 21).reshape([3, 7]).astype(np.float16))
self._compare(np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.float16))
self._compare(
np.arange(0, 16).reshape([1, 2, 1, 2, 1, 2, 1, 2]).astype(np.float16))
def testFloat(self):
self._compare_cpu_gpu(np.arange(0, 21).reshape([3, 7]).astype(np.float32))
self._compare_cpu_gpu(
np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.float32))
self._compare_cpu_gpu(
np.arange(0, 16).reshape([1, 2, 1, 2, 1, 2, 1, 2]).astype(np.float32))
def testDouble(self):
self._compare_cpu_gpu(np.arange(0, 21).reshape([3, 7]).astype(np.float64))
self._compare_cpu_gpu(
np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.float64))
self._compare_cpu_gpu(
np.arange(0, 16).reshape([1, 2, 1, 2, 1, 2, 1, 2]).astype(np.float64))
def testComplex64(self):
self._testBoth(
np.complex(1, 2) *
np.arange(0, 21).reshape([3, 7]).astype(np.complex64))
self._testBoth(
np.complex(1, 2) *
np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.complex64))
self._testBoth(
np.complex(1, 2) *
np.arange(0, 1260).reshape([2, 3, 5, 7, 2, 3]).astype(np.complex64))
def testComplex128(self):
self._testBoth(
np.complex(1, 2) *
np.arange(0, 21).reshape([3, 7]).astype(np.complex128))
self._testBoth(
np.complex(1, 2) *
np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.complex128))
self._testBoth(
np.complex(1, 2) *
np.arange(0, 1260).reshape([2, 3, 5, 7, 2, 3]).astype(np.complex128))
def testInt8(self):
self._testBoth(np.arange(0, 21).reshape([3, 7]).astype(np.int8))
self._testBoth(np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.int8))
self._testBoth(
np.arange(0, 1260).reshape([2, 3, 5, 7, 2, 3]).astype(np.int8))
def testInt16(self):
self._testBoth(np.arange(0, 21).reshape([3, 7]).astype(np.int16))
self._testBoth(np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.int16))
self._testBoth(
np.arange(0, 1260).reshape([2, 3, 5, 7, 2, 3]).astype(np.int16))
def testInt32(self):
self._testBoth(np.arange(0, 21).reshape([3, 7]).astype(np.int32))
self._testBoth(np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.int32))
self._testBoth(
np.arange(0, 1260).reshape([2, 3, 5, 7, 2, 3]).astype(np.int32))
def testInt64(self):
self._testBoth(np.arange(0, 21).reshape([3, 7]).astype(np.int64))
self._testBoth(np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.int64))
self._testBoth(
np.arange(0, 1260).reshape([2, 3, 5, 7, 2, 3]).astype(np.int64))
def testTranspose2DAuto(self):
x_np = [[1, 2, 3], [4, 5, 6]]
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
x_tf = array_ops.transpose(x_np).eval()
self.assertAllEqual(x_tf, [[1, 4], [2, 5], [3, 6]])
def testTransposeShapes(self):
self.assertEqual(
[],
array_ops.transpose(array_ops.placeholder(
dtypes.int32, shape=[])).get_shape().dims)
self.assertEqual(
[100],
array_ops.transpose(array_ops.placeholder(
dtypes.int32, shape=[100])).get_shape().dims)
self.assertEqual(
[37, 100],
array_ops.transpose(
array_ops.placeholder(
dtypes.int32, shape=[100, 37])).get_shape().dims)
self.assertEqual(
[100, 37],
array_ops.transpose(
array_ops.placeholder(
dtypes.int32, shape=[100, 37]), [0, 1]).get_shape().dims)
self.assertEqual(
[15, 37, 100],
array_ops.transpose(
array_ops.placeholder(
dtypes.int32, shape=[100, 37, 15])).get_shape().dims)
self.assertEqual(
[15, 100, 37],
array_ops.transpose(
array_ops.placeholder(
dtypes.int32, shape=[100, 37, 15]), [2, 0, 1]).get_shape().dims)
self.assertEqual(
tensor_shape.TensorShape(None),
array_ops.transpose(array_ops.placeholder(dtypes.int32)).get_shape())
def testNullTensor(self):
with self.test_session():
x = constant_op.constant([], dtype=dtypes.float32, shape=[1, 4, 0])
xt = array_ops.transpose(x, [0, 2, 1]).eval()
self.assertAllEqual(xt.shape, (1, 0, 4))
def _testError(self, x, p, err):
with self.test_session():
with self.assertRaisesOpError(err):
array_ops.transpose(x, p).eval()
def testError(self):
with self.assertRaises(ValueError):
array_ops.transpose(
np.arange(0., 30).reshape([2, 3, 5]), [[0, 1], [2, 3]])
self._testError(
np.arange(0., 2**11).reshape([2] * 11), np.arange(11),
"not implemented")
with self.assertRaises(ValueError):
array_ops.transpose(np.arange(0., 30).reshape([2, 3, 5]), [0, 1, 3])
self._testError(
np.arange(0., 30).reshape([2, 3, 5]), [0, 1, 1], "2 is missing")
if __name__ == "__main__":
test.main()
| apache-2.0 | 1,598,801,942,631,539,000 | 35.750929 | 80 | 0.589521 | false |
portante/sosreport | sos/plugins/psacct.py | 1 | 1824 | ### This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
from sos.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin
class Psacct(Plugin):
"""Process accounting related information
"""
option_list = [("all", "collect all process accounting files",
"slow", False)]
packages = [ "psacct" ]
class RedHatPsacct(Psacct, RedHatPlugin):
"""Process accounting related information for RedHat based distributions
"""
plugin_name = "psacct"
packages = [ "psacct" ]
def setup(self):
super(RedHatPsacct, self).setup()
self.add_copy_spec("/var/account/pacct")
if self.get_option("all"):
self.add_copy_spec("/var/account/pacct*.gz")
class DebianPsacct(Psacct, DebianPlugin, UbuntuPlugin):
"""Process accounting related information for Debian based distributions
"""
plugin_name = "acct"
packages = [ "acct" ]
def setup(self):
super(DebianPsacct, self).setup()
self.add_copy_specs(["/var/log/account/pacct", "/etc/default/acct"])
if self.get_option("all"):
self.add_copy_spec("/var/log/account/pacct*.gz")
# vim: et ts=4 sw=4
| gpl-2.0 | -1,659,996,354,937,482,800 | 33.415094 | 76 | 0.678728 | false |
hakonsbm/nest-simulator | pynest/nest/tests/test_connect_array_fixed_indegree.py | 2 | 3322 | # -*- coding: utf-8 -*-
#
# test_connect_array_fixed_indegree.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Tests of connection with rule fixed_indegree
and parameter arrays in syn_spec
"""
import unittest
import nest
import numpy
@nest.ll_api.check_stack
class ConnectArrayFixedIndegreeTestCase(unittest.TestCase):
"""Tests of connections with fixed indegree and parameter arrays"""
def test_Connect_Array_Fixed_Indegree(self):
"""Tests of connections with fixed indegree and parameter arrays"""
N = 20 # number of neurons in each subnet
K = 5 # number of connections per neuron
############################################
# test with connection rule fixed_indegree
############################################
nest.ResetKernel()
net1 = nest.Create('iaf_psc_alpha', N) # creates source subnet
net2 = nest.Create('iaf_psc_alpha', N) # creates target subnet
Warr = [[y*K+x for x in range(K)] for y in range(N)] # weight array
Darr = [[y*K+x + 1 for x in range(K)] for y in range(N)] # delay array
# synapses and connection dictionaries
syn_dict = {'model': 'static_synapse', 'weight': Warr, 'delay': Darr}
conn_dict = {'rule': 'fixed_indegree', 'indegree': K}
# connects source to target subnet
nest.Connect(net1, net2, conn_spec=conn_dict, syn_spec=syn_dict)
for i in range(N): # loop on all neurons of target subnet
# gets all connections to the target neuron
conns = nest.GetConnections(target=net2[i:i+1])
Warr1 = [] # creates empty weight array
# loop on synapses that connect to target neuron
for j in range(len(conns)):
c = conns[j:j+1]
w = nest.GetStatus(c, 'weight')[0] # gets synaptic weight
d = nest.GetStatus(c, 'delay')[0] # gets synaptic delay
self.assertTrue(d - w == 1) # checks that delay = weight + 1
Warr1.append(w) # appends w to Warr1
self.assertTrue(len(Warr1) == K) # checks the size of Warr1
Warr1.sort() # sorts the elements of Warr1
# get row of original weight array, sort it
# and compare it with Warr1
Warr2 = sorted(Warr[i])
for k in range(K):
self.assertTrue(Warr1[k]-Warr2[k] == 0.0)
def suite():
suite = unittest.makeSuite(ConnectArrayFixedIndegreeTestCase, 'test')
return suite
def run():
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite())
if __name__ == "__main__":
run()
| gpl-2.0 | -5,572,829,488,600,662,000 | 32.897959 | 79 | 0.611078 | false |
txemi/ansible | lib/ansible/modules/system/gconftool2.py | 22 | 9101 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Kenneth D. Evensen <[email protected]>
#
# This file is part of Ansible (sort of)
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
module: gconftool2
author:
- "Kenneth D. Evensen (@kevensen)"
short_description: Edit GNOME Configurations
description:
- This module allows for the manipulation of GNOME 2 Configuration via
gconftool-2. Please see the gconftool-2(1) man pages for more details.
version_added: "2.3"
options:
key:
required: true
description:
- A GConf preference key is an element in the GConf repository
that corresponds to an application preference. See man gconftool-2(1)
value:
required: false
description:
- Preference keys typically have simple values such as strings,
integers, or lists of strings and integers. This is ignored if the state
is "get". See man gconftool-2(1)
value_type:
required: false
choices:
- int
- bool
- float
- string
description:
- The type of value being set. This is ignored if the state is "get".
state:
required: true
choices:
- get
- present
- absent
description:
- The action to take upon the key/value.
config_source:
required: false
description:
- Specify a configuration source to use rather than the default path.
See man gconftool-2(1)
direct:
required: false
choices: [ "yes", "no" ]
default: no
description:
- Access the config database directly, bypassing server. If direct is
specified then the config_source must be specified as well.
See man gconftool-2(1)
"""
EXAMPLES = """
- name: Change the widget font to "Serif 12"
gconftool2:
key: "/desktop/gnome/interface/font_name"
value_type: "string"
value: "Serif 12"
"""
RETURN = '''
key:
description: The key specified in the module parameters
returned: success
type: string
sample: "/desktop/gnome/interface/font_name"
value_type:
description: The type of the value that was changed
returned: success
type: string
sample: "string"
value:
description: The value of the preference key after executing the module
returned: success
type: string
sample: "Serif 12"
...
'''
from ansible.module_utils.basic import AnsibleModule, BOOLEANS_TRUE
from ansible.module_utils.pycompat24 import get_exception
import subprocess
class GConf2Preference(object):
def __init__(self, ansible, key, value_type, value,
direct=False, config_source=""):
self.ansible = ansible
self.key = key
self.value_type = value_type
self.value = value
self.config_source = config_source
self.direct = direct
def value_already_set(self):
return False
def call(self, call_type):
config_source = ''
direct = ''
changed = False
out = ''
# If the configuration source is different from the default, create
# the argument
if self.config_source is not None and len(self.config_source) > 0:
config_source = "--config-source " + self.config_source
# If direct is true, create the argument
if self.direct:
direct = "--direct"
# Execute the call
try:
# If the call is "get", then we don't need as many parameters and
# we can ignore some
if call_type == 'get':
process = subprocess.Popen(["gconftool-2 --get " + self.key],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=True)
# Otherwise, we will use all relevant parameters
else:
process = subprocess.Popen(["gconftool-2 " + direct + " " +
config_source + " --type " +
self.value_type + " --" +
call_type + " " + self.key + " " +
self.value], stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=True)
# In either case, we will capture the output
out = process.stdout.read()
err = process.stderr.read()
if len(err) > 0:
self.ansible.fail_json(msg='gconftool-2 failed with error: %s'
% (str(err)))
else:
changed = True
except OSError:
            self.ansible.fail_json(msg='gconftool-2 failed with an exception')
return changed, out.rstrip()
def main():
# Setup the Ansible module
module = AnsibleModule(
argument_spec=dict(
key=dict(required=True, default=None, type='str'),
value_type=dict(required=False,
choices=['int', 'bool',
'float', 'string'],
type='str'),
value=dict(required=False, default=None,
type='str'),
state=dict(required=True, default=None,
choices=['present', 'get', 'absent'],
type='str'),
direct=dict(required=False,
default=False, type='bool'),
config_source=dict(required=False,
default=None, type='str')
),
supports_check_mode=True
)
state_values = {"present": "set", "absent": "unset", "get": "get"}
direct = False
# Assign module values to dictionary values
key = module.params['key']
value_type = module.params['value_type']
    if module.params['value'] is None:
        value = None
    elif module.params['value'].lower() == "true":
        value = "true"
    elif module.params['value'].lower() == "false":
        value = "false"
    else:
        value = module.params['value']
state = state_values[module.params['state']]
if module.params['direct'] in BOOLEANS_TRUE:
direct = True
config_source = module.params['config_source']
# Initialize some variables for later
change = False
new_value = ''
if state != "get":
if value is None or value == "":
module.fail_json(msg='State %s requires "value" to be set'
% str(state))
elif value_type is None or value_type == "":
module.fail_json(msg='State %s requires "value_type" to be set'
% str(state))
if direct and config_source is None:
module.fail_json(msg='If "direct" is "yes" then the ' +
'"config_source" must be specified')
elif not direct and config_source is not None:
module.fail_json(msg='If the "config_source" is specified ' +
'then "direct" must be "yes"')
# Create a gconf2 preference
gconf_pref = GConf2Preference(module, key, value_type,
value, direct, config_source)
# Now we get the current value
_, current_value = gconf_pref.call("get")
# Check if the current value equals the value we want to set. If not, make
# a change
if current_value != value:
        # If check mode, we know a change would have occurred.
if module.check_mode:
# So we will set the change to True
change = True
# And set the new_value to the value that would have been set
new_value = value
# If not check mode make the change.
else:
change, new_value = gconf_pref.call(state)
# If the value we want to set is the same as the current_value, we will
# set the new_value to the current_value for reporting
else:
new_value = current_value
facts = {}
facts['gconftool2'] = {'changed': change, 'key': key,
'value_type': value_type, 'new_value': new_value,
'previous_value': current_value,
'playbook_value': module.params['value']}
module.exit_json(changed=change, ansible_facts=facts)
if __name__ == '__main__':
main()
| gpl-3.0 | -516,726,721,405,931,140 | 33.736641 | 79 | 0.564773 | false |
leilihh/novaha | nova/db/sqlalchemy/utils.py | 9 | 23587 | # Copyright (c) 2013 Boris Pavlovic ([email protected]).
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from migrate.changeset import UniqueConstraint, ForeignKeyConstraint
from sqlalchemy import Boolean
from sqlalchemy import CheckConstraint
from sqlalchemy import Column
from sqlalchemy.engine import reflection
from sqlalchemy.exc import OperationalError
from sqlalchemy.exc import ProgrammingError
from sqlalchemy.ext.compiler import compiles
from sqlalchemy import func
from sqlalchemy import Index
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import schema
from sqlalchemy.sql.expression import literal_column
from sqlalchemy.sql.expression import UpdateBase
from sqlalchemy.sql import select
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy.types import NullType
from nova.db.sqlalchemy import api as db
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
LOG = logging.getLogger(__name__)
def get_table(engine, name):
"""Returns an sqlalchemy table dynamically from db.
Needed because the models don't work for us in migrations
as models will be far out of sync with the current data.
"""
metadata = MetaData()
metadata.bind = engine
return Table(name, metadata, autoload=True)
class InsertFromSelect(UpdateBase):
def __init__(self, table, select):
self.table = table
self.select = select
@compiles(InsertFromSelect)
def visit_insert_from_select(element, compiler, **kw):
return "INSERT INTO %s %s" % (
compiler.process(element.table, asfrom=True),
compiler.process(element.select))
class DeleteFromSelect(UpdateBase):
def __init__(self, table, select, column):
self.table = table
self.select = select
self.column = column
# NOTE(guochbo): some versions of MySQL don't yet support subqueries with
# 'LIMIT & IN/ALL/ANY/SOME'. We need to work around this with a nested select.
@compiles(DeleteFromSelect)
def visit_delete_from_select(element, compiler, **kw):
return "DELETE FROM %s WHERE %s in (SELECT T1.%s FROM (%s) as T1)" % (
compiler.process(element.table, asfrom=True),
compiler.process(element.column),
element.column.name,
compiler.process(element.select))
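# Illustrative sketch, not part of the original module: DeleteFromSelect is
# typically used to purge a bounded batch of rows; the table and column names
# below are assumptions.
def _example_purge_batch(migrate_engine, table, batch_size=100):
    # Select at most batch_size ids of soft-deleted rows, then delete them
    # through the nested-select workaround above.
    query = select([table.c.id]).where(table.c.deleted != 0).limit(batch_size)
    return migrate_engine.execute(DeleteFromSelect(table, query, table.c.id))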
def _get_not_supported_column(col_name_col_instance, column_name):
try:
column = col_name_col_instance[column_name]
except Exception:
msg = _("Please specify column %s in col_name_col_instance "
"param. It is required because column has unsupported "
"type by sqlite).")
raise exception.NovaException(msg % column_name)
if not isinstance(column, Column):
msg = _("col_name_col_instance param has wrong type of "
"column instance for column %s It should be instance "
"of sqlalchemy.Column.")
raise exception.NovaException(msg % column_name)
return column
def _get_unique_constraints_in_sqlite(migrate_engine, table_name):
regexp = "CONSTRAINT (\w+) UNIQUE \(([^\)]+)\)"
meta = MetaData(bind=migrate_engine)
table = Table(table_name, meta, autoload=True)
sql_data = migrate_engine.execute(
"""
SELECT sql
FROM
sqlite_master
WHERE
type = 'table' AND
name = :table_name;
""",
table_name=table_name
).fetchone()[0]
uniques = set([
schema.UniqueConstraint(
*[getattr(table.c, c.strip(' "'))
for c in cols.split(",")], name=name
)
for name, cols in re.findall(regexp, sql_data)
])
return uniques
def _drop_unique_constraint_in_sqlite(migrate_engine, table_name, uc_name,
**col_name_col_instance):
insp = reflection.Inspector.from_engine(migrate_engine)
meta = MetaData(bind=migrate_engine)
table = Table(table_name, meta, autoload=True)
columns = []
for column in table.columns:
if isinstance(column.type, NullType):
new_column = _get_not_supported_column(col_name_col_instance,
column.name)
columns.append(new_column)
else:
columns.append(column.copy())
uniques = _get_unique_constraints_in_sqlite(migrate_engine, table_name)
table.constraints.update(uniques)
constraints = [constraint for constraint in table.constraints
if not constraint.name == uc_name and
not isinstance(constraint, schema.ForeignKeyConstraint)]
new_table = Table(table_name + "__tmp__", meta, *(columns + constraints))
new_table.create()
indexes = []
for index in insp.get_indexes(table_name):
column_names = [new_table.c[c] for c in index['column_names']]
indexes.append(Index(index["name"],
*column_names,
unique=index["unique"]))
f_keys = []
for fk in insp.get_foreign_keys(table_name):
refcolumns = [fk['referred_table'] + '.' + col
for col in fk['referred_columns']]
f_keys.append(ForeignKeyConstraint(fk['constrained_columns'],
refcolumns, table=new_table, name=fk['name']))
ins = InsertFromSelect(new_table, table.select())
migrate_engine.execute(ins)
table.drop()
[index.create(migrate_engine) for index in indexes]
for fkey in f_keys:
fkey.create()
new_table.rename(table_name)
def drop_unique_constraint(migrate_engine, table_name, uc_name, *columns,
**col_name_col_instance):
"""This method drops UC from table and works for mysql, postgresql and
sqlite. In mysql and postgresql we are able to use "alter table"
construction. In sqlite is only one way to drop UC:
1) Create new table with same columns, indexes and constraints
(except one that we want to drop).
2) Copy data from old table to new.
3) Drop old table.
4) Rename new table to the name of old table.
:param migrate_engine: sqlalchemy engine
:param table_name: name of table that contains uniq constraint.
:param uc_name: name of uniq constraint that will be dropped.
:param columns: columns that are in uniq constraint.
:param col_name_col_instance: contains pair column_name=column_instance.
column_instance is instance of Column. These params
are required only for columns that have unsupported
types by sqlite. For example BigInteger.
"""
if migrate_engine.name == "sqlite":
_drop_unique_constraint_in_sqlite(migrate_engine, table_name, uc_name,
**col_name_col_instance)
else:
meta = MetaData()
meta.bind = migrate_engine
t = Table(table_name, meta, autoload=True)
uc = UniqueConstraint(*columns, table=t, name=uc_name)
uc.drop()
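# Illustrative usage sketch, not part of the original module; the table,
# constraint and column names here are hypothetical.
def _example_drop_uc(migrate_engine):
    drop_unique_constraint(migrate_engine, 'instance_types',
                           'uniq_instance_types0_name', 'name')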
def drop_old_duplicate_entries_from_table(migrate_engine, table_name,
use_soft_delete, *uc_column_names):
"""This method is used to drop all old rows that have the same values for
    columns in uc_column_names.
"""
meta = MetaData()
meta.bind = migrate_engine
table = Table(table_name, meta, autoload=True)
columns_for_group_by = [table.c[name] for name in uc_column_names]
columns_for_select = [func.max(table.c.id)]
columns_for_select.extend(list(columns_for_group_by))
duplicated_rows_select = select(columns_for_select,
group_by=columns_for_group_by,
having=func.count(table.c.id) > 1)
for row in migrate_engine.execute(duplicated_rows_select):
# NOTE(boris-42): Do not remove row that has the biggest ID.
delete_condition = table.c.id != row[0]
for name in uc_column_names:
delete_condition &= table.c[name] == row[name]
rows_to_delete_select = select([table.c.id]).where(delete_condition)
for row in migrate_engine.execute(rows_to_delete_select).fetchall():
LOG.info(_("Deleted duplicated row with id: %(id)s from table: "
"%(table)s") % dict(id=row[0], table=table_name))
if use_soft_delete:
delete_statement = table.update().\
where(delete_condition).\
values({
'deleted': literal_column('id'),
'updated_at': literal_column('updated_at'),
'deleted_at': timeutils.utcnow()
})
else:
delete_statement = table.delete().where(delete_condition)
migrate_engine.execute(delete_statement)
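# Illustrative usage sketch, not part of the original module; the table and
# column names are hypothetical. Soft-deletes every duplicate except the row
# with the highest id.
def _example_dedupe(migrate_engine):
    drop_old_duplicate_entries_from_table(migrate_engine, 'key_pairs', True,
                                          'user_id', 'name')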
def check_shadow_table(migrate_engine, table_name):
"""This method checks that table with ``table_name`` and
corresponding shadow table have same columns.
"""
meta = MetaData()
meta.bind = migrate_engine
table = Table(table_name, meta, autoload=True)
shadow_table = Table(db._SHADOW_TABLE_PREFIX + table_name, meta,
autoload=True)
columns = dict([(c.name, c) for c in table.columns])
shadow_columns = dict([(c.name, c) for c in shadow_table.columns])
for name, column in columns.iteritems():
if name not in shadow_columns:
raise exception.NovaException(
_("Missing column %(table)s.%(column)s in shadow table")
% {'column': name, 'table': shadow_table.name})
shadow_column = shadow_columns[name]
if not isinstance(shadow_column.type, type(column.type)):
raise exception.NovaException(
_("Different types in %(table)s.%(column)s and shadow table: "
"%(c_type)s %(shadow_c_type)s")
% {'column': name, 'table': table.name,
'c_type': column.type,
'shadow_c_type': shadow_column.type})
for name, column in shadow_columns.iteritems():
if name not in columns:
raise exception.NovaException(
_("Extra column %(table)s.%(column)s in shadow table")
% {'column': name, 'table': shadow_table.name})
return True
def create_shadow_table(migrate_engine, table_name=None, table=None,
**col_name_col_instance):
"""This method create shadow table for table with name ``table_name``
or table instance ``table``.
:param table_name: Autoload table with this name and create shadow table
:param table: Autoloaded table, so just create corresponding shadow table.
:param col_name_col_instance: contains pair column_name=column_instance.
column_instance is instance of Column. These params
are required only for columns that have unsupported
types by sqlite. For example BigInteger.
:returns: The created shadow_table object.
"""
meta = MetaData(bind=migrate_engine)
if table_name is None and table is None:
raise exception.NovaException(_("Specify `table_name` or `table` "
"param"))
if not (table_name is None or table is None):
raise exception.NovaException(_("Specify only one param `table_name` "
"`table`"))
if table is None:
table = Table(table_name, meta, autoload=True)
columns = []
for column in table.columns:
if isinstance(column.type, NullType):
new_column = _get_not_supported_column(col_name_col_instance,
column.name)
columns.append(new_column)
else:
columns.append(column.copy())
shadow_table_name = db._SHADOW_TABLE_PREFIX + table.name
shadow_table = Table(shadow_table_name, meta, *columns,
mysql_engine='InnoDB')
try:
shadow_table.create()
return shadow_table
except (OperationalError, ProgrammingError):
LOG.info(repr(shadow_table))
LOG.exception(_('Exception while creating table.'))
raise exception.ShadowTableExists(name=shadow_table_name)
except Exception:
LOG.info(repr(shadow_table))
LOG.exception(_('Exception while creating table.'))
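# Illustrative usage sketch, not part of the original module: creating a
# shadow table while supplying an explicit Column for a field whose type
# sqlite reflects as NullType. The table/column names are hypothetical.
def _example_create_shadow(migrate_engine):
    from sqlalchemy import BigInteger
    return create_shadow_table(migrate_engine, table_name='reservations',
                               usage_id=Column('usage_id', BigInteger()))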
def _get_default_deleted_value(table):
if isinstance(table.c.id.type, Integer):
return 0
if isinstance(table.c.id.type, String):
return ""
raise exception.NovaException(_("Unsupported id columns type"))
def _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes):
table = get_table(migrate_engine, table_name)
insp = reflection.Inspector.from_engine(migrate_engine)
real_indexes = insp.get_indexes(table_name)
existing_index_names = dict([(index['name'], index['column_names'])
for index in real_indexes])
# NOTE(boris-42): Restore indexes on `deleted` column
for index in indexes:
if 'deleted' not in index['column_names']:
continue
name = index['name']
if name in existing_index_names:
column_names = [table.c[c] for c in existing_index_names[name]]
old_index = Index(name, *column_names, unique=index["unique"])
old_index.drop(migrate_engine)
column_names = [table.c[c] for c in index['column_names']]
new_index = Index(index["name"], *column_names, unique=index["unique"])
new_index.create(migrate_engine)
def change_deleted_column_type_to_boolean(migrate_engine, table_name,
**col_name_col_instance):
if migrate_engine.name == "sqlite":
return _change_deleted_column_type_to_boolean_sqlite(migrate_engine,
table_name,
**col_name_col_instance)
insp = reflection.Inspector.from_engine(migrate_engine)
indexes = insp.get_indexes(table_name)
table = get_table(migrate_engine, table_name)
old_deleted = Column('old_deleted', Boolean, default=False)
old_deleted.create(table, populate_default=False)
table.update().\
where(table.c.deleted == table.c.id).\
values(old_deleted=True).\
execute()
table.c.deleted.drop()
table.c.old_deleted.alter(name="deleted")
_restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes)
def _change_deleted_column_type_to_boolean_sqlite(migrate_engine, table_name,
**col_name_col_instance):
insp = reflection.Inspector.from_engine(migrate_engine)
table = get_table(migrate_engine, table_name)
columns = []
for column in table.columns:
column_copy = None
if column.name != "deleted":
if isinstance(column.type, NullType):
column_copy = _get_not_supported_column(col_name_col_instance,
column.name)
else:
column_copy = column.copy()
else:
column_copy = Column('deleted', Boolean, default=0)
columns.append(column_copy)
constraints = [constraint.copy() for constraint in table.constraints]
meta = MetaData(bind=migrate_engine)
new_table = Table(table_name + "__tmp__", meta,
*(columns + constraints))
new_table.create()
indexes = []
for index in insp.get_indexes(table_name):
column_names = [new_table.c[c] for c in index['column_names']]
indexes.append(Index(index["name"], *column_names,
unique=index["unique"]))
c_select = []
for c in table.c:
if c.name != "deleted":
c_select.append(c)
else:
c_select.append(table.c.deleted == table.c.id)
ins = InsertFromSelect(new_table, select(c_select))
migrate_engine.execute(ins)
table.drop()
[index.create(migrate_engine) for index in indexes]
new_table.rename(table_name)
new_table.update().\
where(new_table.c.deleted == new_table.c.id).\
values(deleted=True).\
execute()
def change_deleted_column_type_to_id_type(migrate_engine, table_name,
**col_name_col_instance):
if migrate_engine.name == "sqlite":
return _change_deleted_column_type_to_id_type_sqlite(migrate_engine,
table_name,
**col_name_col_instance)
insp = reflection.Inspector.from_engine(migrate_engine)
indexes = insp.get_indexes(table_name)
table = get_table(migrate_engine, table_name)
new_deleted = Column('new_deleted', table.c.id.type,
default=_get_default_deleted_value(table))
new_deleted.create(table, populate_default=True)
table.update().\
where(table.c.deleted == True).\
values(new_deleted=table.c.id).\
execute()
table.c.deleted.drop()
table.c.new_deleted.alter(name="deleted")
_restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes)
def _change_deleted_column_type_to_id_type_sqlite(migrate_engine, table_name,
**col_name_col_instance):
    # NOTE(boris-42): sqlalchemy-migrate can't drop a column with check
# constraints in sqlite DB and our `deleted` column has
# 2 check constraints. So there is only one way to remove
# these constraints:
# 1) Create new table with the same columns, constraints
# and indexes. (except deleted column).
# 2) Copy all data from old to new table.
# 3) Drop old table.
# 4) Rename new table to old table name.
insp = reflection.Inspector.from_engine(migrate_engine)
meta = MetaData(bind=migrate_engine)
table = Table(table_name, meta, autoload=True)
default_deleted_value = _get_default_deleted_value(table)
columns = []
for column in table.columns:
column_copy = None
if column.name != "deleted":
if isinstance(column.type, NullType):
column_copy = _get_not_supported_column(col_name_col_instance,
column.name)
else:
column_copy = column.copy()
else:
column_copy = Column('deleted', table.c.id.type,
default=default_deleted_value)
columns.append(column_copy)
def is_deleted_column_constraint(constraint):
        # NOTE(boris-42): There is no other way to check whether a
        # CheckConstraint is associated with the deleted column.
if not isinstance(constraint, CheckConstraint):
return False
sqltext = str(constraint.sqltext)
# NOTE(I159): when the type of column `deleted` is changed from boolean
# to int, the corresponding CHECK constraint is dropped too. But
# starting from SQLAlchemy version 0.8.3, those CHECK constraints
# aren't dropped anymore. So despite the fact that column deleted is
# of type int now, we still restrict its values to be either 0 or 1.
constraint_markers = (
"deleted in (0, 1)",
"deleted IN (:deleted_1, :deleted_2)",
"deleted IN (:param_1, :param_2)"
)
return any(sqltext.endswith(marker) for marker in constraint_markers)
constraints = []
for constraint in table.constraints:
if not is_deleted_column_constraint(constraint):
constraints.append(constraint.copy())
new_table = Table(table_name + "__tmp__", meta,
*(columns + constraints))
new_table.create()
indexes = []
for index in insp.get_indexes(table_name):
column_names = [new_table.c[c] for c in index['column_names']]
indexes.append(Index(index["name"], *column_names,
unique=index["unique"]))
ins = InsertFromSelect(new_table, table.select())
migrate_engine.execute(ins)
table.drop()
[index.create(migrate_engine) for index in indexes]
new_table.rename(table_name)
new_table.update().\
where(new_table.c.deleted == True).\
values(deleted=new_table.c.id).\
execute()
# NOTE(boris-42): Fix value of deleted column: False -> "" or 0.
new_table.update().\
where(new_table.c.deleted == False).\
values(deleted=default_deleted_value).\
execute()
def _index_exists(migrate_engine, table_name, index_name):
inspector = reflection.Inspector.from_engine(migrate_engine)
indexes = inspector.get_indexes(table_name)
index_names = [index['name'] for index in indexes]
return index_name in index_names
def _add_index(migrate_engine, table, index_name, idx_columns):
index = Index(
index_name, *[getattr(table.c, col) for col in idx_columns]
)
index.create()
def _drop_index(migrate_engine, table, index_name, idx_columns):
if _index_exists(migrate_engine, table.name, index_name):
index = Index(
index_name, *[getattr(table.c, col) for col in idx_columns]
)
index.drop()
def _change_index_columns(migrate_engine, table, index_name,
new_columns, old_columns):
_drop_index(migrate_engine, table, index_name, old_columns)
_add_index(migrate_engine, table, index_name, new_columns)
def modify_indexes(migrate_engine, data, upgrade=True):
if migrate_engine.name == 'sqlite':
return
meta = MetaData()
meta.bind = migrate_engine
for table_name, indexes in data.iteritems():
table = Table(table_name, meta, autoload=True)
for index_name, old_columns, new_columns in indexes:
if not upgrade:
new_columns, old_columns = old_columns, new_columns
if migrate_engine.name == 'postgresql':
if upgrade:
_add_index(migrate_engine, table, index_name, new_columns)
else:
_drop_index(migrate_engine, table, index_name, old_columns)
elif migrate_engine.name == 'mysql':
_change_index_columns(migrate_engine, table, index_name,
new_columns, old_columns)
else:
raise ValueError('Unsupported DB %s' % migrate_engine.name)
| apache-2.0 | 8,441,293,079,857,176,000 | 37.730706 | 79 | 0.604358 | false |
JCA-Developpement/Odoo | addons/hr_payroll/__init__.py | 433 | 1137 | #-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_payroll
import report
import wizard
import res_config
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -333,697,164,099,535,800 | 38.206897 | 80 | 0.62533 | false |
openDAQ/easydaq | easydaq/config.py | 1 | 2487 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'daqcontrol/config.ui'
#
# Created by: PyQt5 UI code generator 5.9
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(240, 120)
MainWindow.setMinimumSize(QtCore.QSize(240, 120))
MainWindow.setMaximumSize(QtCore.QSize(250, 120))
self.verticalLayout = QtWidgets.QVBoxLayout(MainWindow)
self.verticalLayout.setObjectName("verticalLayout")
self.gridLayout = QtWidgets.QGridLayout()
self.gridLayout.setObjectName("gridLayout")
self.label = QtWidgets.QLabel(MainWindow)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label.sizePolicy().hasHeightForWidth())
self.label.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label.setFont(font)
self.label.setObjectName("label")
self.gridLayout.addWidget(self.label, 0, 0, 1, 1)
self.connectButton = QtWidgets.QPushButton(MainWindow)
self.connectButton.setMinimumSize(QtCore.QSize(70, 27))
self.connectButton.setMaximumSize(QtCore.QSize(70, 27))
self.connectButton.setObjectName("connectButton")
self.gridLayout.addWidget(self.connectButton, 2, 1, 1, 1)
self.cbport = QtWidgets.QComboBox(MainWindow)
self.cbport.setObjectName("cbport")
self.gridLayout.addWidget(self.cbport, 1, 0, 1, 2)
self.verticalLayout.addLayout(self.gridLayout)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "Configuration"))
self.label.setText(_translate("MainWindow", "Select Serial Port: "))
self.connectButton.setText(_translate("MainWindow", "Connect"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QDialog()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
| lgpl-3.0 | -1,450,624,204,759,073,000 | 39.770492 | 100 | 0.695617 | false |
bsc-renewit/d2.2 | monitoringFramework/gmetric.py | 1 | 3582 |
#!/usr/bin/env python
# To change this license header, choose License Headers in Project Properties.
# To change this template file, choose Tools | Templates
# and open the template in the editor.
__author__="mcanuto"
__date__ ="$Feb 13, 2014 6:03:13 PM$"
from xdrlib import Packer, Unpacker
import socket
slope_str2int = {'zero':0,
'positive':1,
'negative':2,
'both':3,
'unspecified':4}
# could be autogenerated from previous but whatever
slope_int2str = {0: 'zero',
1: 'positive',
2: 'negative',
3: 'both',
4: 'unspecified'}
class Gmetric:
"""
Class to send gmetric/gmond 2.X packets
Thread safe
"""
type = ('', 'string', 'uint16', 'int16', 'uint32', 'int32', 'float',
'double', 'timestamp')
protocol = ('udp', 'multicast')
def __init__(self, host, port, protocol):
if protocol not in self.protocol:
raise ValueError("Protocol must be one of: " + str(self.protocol))
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
if protocol == 'multicast':
self.socket.setsockopt(socket.IPPROTO_IP,
socket.IP_MULTICAST_TTL, 20)
self.hostport = (host, int(port))
#self.socket.connect(self.hostport)
def send(self, NAME, VAL, TYPE='', UNITS='', SLOPE='both',
TMAX=60, DMAX=0, GROUP="", SPOOF=""):
if SLOPE not in slope_str2int:
raise ValueError("Slope must be one of: " + str(self.slope.keys()))
if TYPE not in self.type:
raise ValueError("Type must be one of: " + str(self.type))
if len(NAME) == 0:
raise ValueError("Name must be non-empty")
( meta_msg, data_msg ) = gmetric_write(NAME, VAL, TYPE, UNITS, SLOPE, TMAX, DMAX, GROUP, SPOOF)
#print data_msg
self.socket.sendto(meta_msg, self.hostport)
self.socket.sendto(data_msg, self.hostport)
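# Illustrative usage sketch, not part of the original module. The multicast
# address/port below are the conventional ganglia defaults and are only an
# assumption here.
def _example_send_metric():
    g = Gmetric("239.2.11.71", 8649, "multicast")
    g.send("cpu_temperature", 42.5, TYPE="float", UNITS="Celsius",
           SLOPE="both", TMAX=60, DMAX=0, GROUP="sensors")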
def gmetric_write(NAME, VAL, TYPE, UNITS, SLOPE, TMAX, DMAX, GROUP, SPOOF):
"""
Arguments are in all upper-case to match XML
"""
packer = Packer()
HOSTNAME="test"
if SPOOF == "":
SPOOFENABLED=0
else :
SPOOFENABLED=1
# Meta data about a metric
packer.pack_int(128)
if SPOOFENABLED == 1:
packer.pack_string(SPOOF)
else:
packer.pack_string(HOSTNAME)
packer.pack_string(NAME)
packer.pack_int(SPOOFENABLED)
packer.pack_string(TYPE)
packer.pack_string(NAME)
packer.pack_string(UNITS)
packer.pack_int(slope_str2int[SLOPE]) # map slope string to int
packer.pack_uint(int(TMAX))
packer.pack_uint(int(DMAX))
# Magic number. Indicates number of entries to follow. Put in 1 for GROUP
if GROUP == "":
packer.pack_int(0)
else:
packer.pack_int(1)
packer.pack_string("GROUP")
packer.pack_string(GROUP)
# Actual data sent in a separate packet
data = Packer()
data.pack_int(128+5)
if SPOOFENABLED == 1:
data.pack_string(SPOOF)
else:
data.pack_string(HOSTNAME)
data.pack_string(NAME)
data.pack_int(SPOOFENABLED)
data.pack_string("%s")
data.pack_string(str(VAL))
return ( packer.get_buffer() , data.get_buffer() )
class GmetricConf:
def __init__(self, host, port, protocol, slope, spoof):
self.host = host
self.port = port
self.protocol = protocol
self.slope = slope
self.spoof = spoof
| apache-2.0 | -6,962,524,313,266,357,000 | 28.603306 | 104 | 0.584869 | false |
aerospike/aerospike-client-python | test/old_tests/_test_remove_bin.py | 1 | 12571 | # -*- coding: utf-8 -*-
import pytest
import sys
from .test_base_class import TestBaseClass
from aerospike import exception as e
aerospike = pytest.importorskip("aerospike")
try:
import aerospike
except:
print("Please install aerospike python client.")
sys.exit(1)
class TestRemovebin(object):
def setup_class(cls):
"""
Setup class.
"""
hostlist, user, password = TestBaseClass.get_hosts()
config = {'hosts': hostlist}
if user is None and password is None:
TestRemovebin.client = aerospike.client(config).connect()
else:
TestRemovebin.client = aerospike.client(config).connect(user,
password)
def teardown_class(cls):
TestRemovebin.client.close()
def setup_method(self, method):
"""
Setup method.
"""
for i in range(5):
key = ('test', 'demo', i)
rec = {'name': 'name%s' % (str(i)), 'age': i}
TestRemovebin.client.put(key, rec)
def teardown_method(self, method):
"""
        Teardown method.
"""
for i in range(5):
key = ('test', 'demo', i)
            try:
                (key, _, _) = TestRemovebin.client.get(key)
                TestRemovebin.client.remove(key)
            except e.RecordNotFound:
                pass
def test_remove_bin_with_no_parameters(self):
"""
Invoke remove_bin() without any mandatory parameters.
"""
with pytest.raises(TypeError) as typeError:
TestRemovebin.client.remove_bin()
assert "argument 'key' (pos 1)" in str(
typeError.value)
def test_remove_bin_with_correct_parameters(self):
"""
Invoke remove_bin() with correct parameters
"""
key = ('test', 'demo', 1)
TestRemovebin.client.remove_bin(key, ["age"])
(key, _, bins) = TestRemovebin.client.get(key)
assert bins == {'name': 'name1'}
def test_remove_bin_with_correct_policy(self):
"""
Invoke remove_bin() with correct policy
"""
key = ('test', 'demo', 1)
policy = {'timeout': 1000}
TestRemovebin.client.remove_bin(key, ["age"], {}, policy)
(key, _, bins) = TestRemovebin.client.get(key)
assert bins == {'name': 'name1'}
def test_remove_bin_with_policy_send_gen_ignore(self):
"""
Invoke remove_bin() with policy send
"""
key = ('test', 'demo', 1)
policy = {
'timeout': 1000,
'retry': aerospike.POLICY_RETRY_ONCE,
'key': aerospike.POLICY_KEY_SEND,
'gen': aerospike.POLICY_GEN_IGNORE
}
meta = {'gen': 2, 'ttl': 1000}
TestRemovebin.client.remove_bin(key, ["age"], meta, policy)
(key, meta, bins) = TestRemovebin.client.get(key)
assert bins == {'name': 'name1'}
assert key == ('test', 'demo', None, bytearray(
b'\xb7\xf4\xb88\x89\xe2\xdag\xdeh>\x1d\xf6\x91\x9a\x1e\xac\xc4F\xc8')
)
def test_remove_bin_with_policy_send_gen_eq_positive(self):
"""
Invoke remove_bin() with policy gen eq less
"""
key = ('test', 'demo', 1)
policy = {
'timeout': 1000,
'retry': aerospike.POLICY_RETRY_ONCE,
'key': aerospike.POLICY_KEY_SEND,
'gen': aerospike.POLICY_GEN_EQ
}
(key, meta) = TestRemovebin.client.exists(key)
gen = meta['gen']
meta = {'gen': gen, 'ttl': 1000}
TestRemovebin.client.remove_bin(key, ["age"], meta, policy)
(key, meta, bins) = TestRemovebin.client.get(key)
assert bins == {'name': 'name1'}
assert key == ('test', 'demo', None, bytearray(
b'\xb7\xf4\xb88\x89\xe2\xdag\xdeh>\x1d\xf6\x91\x9a\x1e\xac\xc4F\xc8')
)
def test_remove_bin_with_policy_send_gen_eq_not_equal(self):
"""
Invoke remove_bin() with policy gen eq not equal
"""
key = ('test', 'demo', 1)
policy = {
'timeout': 1000,
'retry': aerospike.POLICY_RETRY_ONCE,
'key': aerospike.POLICY_KEY_SEND,
'gen': aerospike.POLICY_GEN_EQ
}
(key, meta) = TestRemovebin.client.exists(key)
gen = meta['gen']
meta = {'gen': gen + 5, 'ttl': 1000}
try:
TestRemovebin.client.remove_bin(key, ["age"], meta, policy)
except e.RecordGenerationError as exception:
assert exception.code == 3
assert exception.msg == "AEROSPIKE_ERR_RECORD_GENERATION"
(key, meta, bins) = TestRemovebin.client.get(key)
assert bins == {'age': 1, 'name': 'name1'}
assert key == ('test', 'demo', None, bytearray(
b'\xb7\xf4\xb88\x89\xe2\xdag\xdeh>\x1d\xf6\x91\x9a\x1e\xac\xc4F\xc8')
)
def test_remove_bin_with_policy_send_gen_GT_lesser(self):
"""
Invoke remove_bin() with policy gen GT lesser
"""
key = ('test', 'demo', 1)
policy = {
'timeout': 1000,
'retry': aerospike.POLICY_RETRY_ONCE,
'key': aerospike.POLICY_KEY_SEND,
'gen': aerospike.POLICY_GEN_GT
}
(key, meta) = TestRemovebin.client.exists(key)
gen = meta['gen']
meta = {'gen': gen, 'ttl': 1000}
try:
TestRemovebin.client.remove_bin(key, ["age"], meta, policy)
except e.RecordGenerationError as exception:
assert exception.code == 3
assert exception.msg == "AEROSPIKE_ERR_RECORD_GENERATION"
(key, meta, bins) = TestRemovebin.client.get(key)
assert bins == {'age': 1, 'name': 'name1'}
assert key == ('test', 'demo', None, bytearray(
b'\xb7\xf4\xb88\x89\xe2\xdag\xdeh>\x1d\xf6\x91\x9a\x1e\xac\xc4F\xc8')
)
def test_remove_bin_with_policy_send_gen_GT_positive(self):
"""
Invoke remove_bin() with policy gen GT positive
"""
key = ('test', 'demo', 1)
policy = {
'timeout': 1000,
'retry': aerospike.POLICY_RETRY_ONCE,
'key': aerospike.POLICY_KEY_SEND,
'gen': aerospike.POLICY_GEN_GT
}
(key, meta) = TestRemovebin.client.exists(key)
gen = meta['gen']
meta = {'gen': gen + 5, 'ttl': 1000}
TestRemovebin.client.remove_bin(key, ["age"], meta, policy)
(key, meta, bins) = TestRemovebin.client.get(key)
assert bins == {'name': 'name1'}
assert key == ('test', 'demo', None, bytearray(
b'\xb7\xf4\xb88\x89\xe2\xdag\xdeh>\x1d\xf6\x91\x9a\x1e\xac\xc4F\xc8')
)
def test_remove_bin_with_policy_key_digest(self):
"""
Invoke remove_bin() with policy key digest
"""
key = ('test', 'demo', None, bytearray("asd;as[d'as;djk;uyfl",
"utf-8"))
rec = {'age': 1, 'name': 'name1'}
TestRemovebin.client.put(key, rec)
policy = {'timeout': 1000, 'key': aerospike.POLICY_KEY_DIGEST}
TestRemovebin.client.remove_bin(key, ["age"], {}, policy)
(key, _, bins) = TestRemovebin.client.get(key)
assert bins == {'name': 'name1'}
assert key == ('test', 'demo', None,
bytearray(b"asd;as[d\'as;djk;uyfl"))
TestRemovebin.client.remove(key)
def test_remove_bin_with_incorrect_policy(self):
"""
Invoke remove_bin() with incorrect policy
"""
key = ('test', 'demo', 1)
policy = {
'timeout': 0.5
}
try:
TestRemovebin.client.remove_bin(key, ["age"], {}, policy)
except e.ClientError as exception:
assert exception.code == -1
assert exception.msg == "Incorrect policy"
def test_remove_bin_with_nonexistent_key(self):
"""
Invoke remove_bin() with non-existent key
"""
key = ('test', 'demo', "non-existent")
status = TestRemovebin.client.remove_bin(key, ["age"])
assert status == 0
def test_remove_bin_with_nonexistent_bin(self):
"""
Invoke remove_bin() with non-existent bin
"""
key = ('test', 'demo', 1)
status = TestRemovebin.client.remove_bin(key, ["non-existent"])
assert status == 0
def test_remove_bin_with_single_bin_in_a_record(self):
"""
Invoke remove_bin() with policy key digest
"""
key = ('test', 'demo', "single-bin")
try:
TestRemovebin.client.remove(key)
except:
pass
rec = {'name': 'single'}
TestRemovebin.client.put(key, rec)
policy = {'timeout': 1000}
TestRemovebin.client.remove_bin(key, ["name"], {}, policy)
_, _, bins = TestRemovebin.client.get(key)
assert bins is None
def test_remove_bin_with_extra_parameter(self):
"""
Invoke remove_bin() with extra parameter.
"""
key = ('test', 'demo', 1)
policy = {'timeout': 1000}
with pytest.raises(TypeError) as typeError:
TestRemovebin.client.remove_bin(key, ["age"], {}, policy, "")
assert "remove_bin() takes at most 4 arguments (5 given)" in str(
typeError.value)
def test_remove_bin_key_is_none(self):
"""
Invoke remove_bin() with key is none
"""
try:
TestRemovebin.client.remove_bin(None, ["age"])
except e.ParamError as exception:
assert exception.code == -2
assert exception.msg == "key is invalid"
def test_remove_bin_bin_is_none(self):
"""
Invoke remove_bin() with bin is none
"""
key = ('test', 'demo', 1)
try:
TestRemovebin.client.remove_bin(key, None)
except e.ParamError as exception:
assert exception.code == -2
assert exception.msg == "Bins should be a list"
def test_remove_bin_no_bin(self):
"""
Invoke remove_bin() no bin
"""
key = ('test', 'demo', 1)
try:
TestRemovebin.client.remove_bin(key, [])
(key, _, bins) = TestRemovebin.client.get(key)
assert bins == {'name': 'name1', 'age': 1}
except e.InvalidRequest:
pass
def test_remove_bin_all_bins(self):
"""
Invoke remove_bin() all bins
"""
key = ('test', 'demo', 1)
TestRemovebin.client.remove_bin(key, ["name", "age"])
try:
(key, _, _) = TestRemovebin.client.get(key)
except e.RecordNotFound as exception:
assert exception.code == 2
for i in range(5):
key = ('test', 'demo', i)
rec = {
'name': 'name%s' % (str(i)),
'age': i
}
TestRemovebin.client.put(key, rec)
def test_remove_bin_with_unicode_binname(self):
"""
Invoke remove_bin() with unicode bin name
"""
key = ('test', 'demo', 2)
TestRemovebin.client.remove_bin(key, [u"name"])
(key, _, bins) = TestRemovebin.client.get(key)
assert bins == {'age': 2}
key = ('test', 'demo', 3)
TestRemovebin.client.remove_bin(key, [u"name", "age"])
try:
(key, _, bins) = TestRemovebin.client.get(key)
except e.RecordNotFound as exception:
assert exception.code == 2
key = ('test', 'demo', 4)
TestRemovebin.client.remove_bin(key, ["name", u"age"])
try:
(key, _, bins) = TestRemovebin.client.get(key)
except e.RecordNotFound as exception:
assert exception.code == 2
for i in range(5):
key = ('test', 'demo', i)
rec = {
'name': 'name%s' % (str(i)),
'age': i
}
TestRemovebin.client.put(key, rec)
def test_remove_bin_with_correct_parameters_without_connection(self):
"""
Invoke remove_bin() with correct parameters without connection
"""
config = {'hosts': [('127.0.0.1', 3000)]}
client1 = aerospike.client(config)
key = ('test', 'demo', 1)
try:
client1.remove_bin(key, ["age"])
except e.ClusterError as exception:
assert exception.code == 11
assert exception.msg == 'No connection to aerospike cluster'
| apache-2.0 | 5,883,646,650,752,111,000 | 29.886978 | 81 | 0.52637 | false |
bob-white/UnityIronPythonConsole | Assets/IronPythonConsole/Plugins/Lib/email/generator.py | 106 | 13930 | # Copyright (C) 2001-2010 Python Software Foundation
# Contact: [email protected]
"""Classes to generate plain text from a message object tree."""
__all__ = ['Generator', 'DecodedGenerator']
import re
import sys
import time
import random
import warnings
from cStringIO import StringIO
from email.header import Header
UNDERSCORE = '_'
NL = '\n'
fcre = re.compile(r'^From ', re.MULTILINE)
def _is8bitstring(s):
if isinstance(s, str):
try:
unicode(s, 'us-ascii')
except UnicodeError:
return True
return False
class Generator:
"""Generates output from a Message object tree.
This basic generator writes the message to the given file object as plain
text.
"""
#
# Public interface
#
def __init__(self, outfp, mangle_from_=True, maxheaderlen=78):
"""Create the generator for message flattening.
outfp is the output file-like object for writing the message to. It
must have a write() method.
Optional mangle_from_ is a flag that, when True (the default), escapes
From_ lines in the body of the message by putting a `>' in front of
them.
Optional maxheaderlen specifies the longest length for a non-continued
header. When a header line is longer (in characters, with tabs
expanded to 8 spaces) than maxheaderlen, the header will split as
defined in the Header class. Set maxheaderlen to zero to disable
header wrapping. The default is 78, as recommended (but not required)
by RFC 2822.
"""
self._fp = outfp
self._mangle_from_ = mangle_from_
self._maxheaderlen = maxheaderlen
def write(self, s):
# Just delegate to the file object
self._fp.write(s)
def flatten(self, msg, unixfrom=False):
"""Print the message object tree rooted at msg to the output file
specified when the Generator instance was created.
unixfrom is a flag that forces the printing of a Unix From_ delimiter
before the first object in the message tree. If the original message
has no From_ delimiter, a `standard' one is crafted. By default, this
is False to inhibit the printing of any From_ delimiter.
Note that for subobjects, no From_ line is printed.
"""
if unixfrom:
ufrom = msg.get_unixfrom()
if not ufrom:
ufrom = 'From nobody ' + time.ctime(time.time())
print >> self._fp, ufrom
self._write(msg)
def clone(self, fp):
"""Clone this generator with the exact same options."""
return self.__class__(fp, self._mangle_from_, self._maxheaderlen)
#
# Protected interface - undocumented ;/
#
def _write(self, msg):
# We can't write the headers yet because of the following scenario:
# say a multipart message includes the boundary string somewhere in
# its body. We'd have to calculate the new boundary /before/ we write
# the headers so that we can write the correct Content-Type:
# parameter.
#
# The way we do this, so as to make the _handle_*() methods simpler,
        # is to cache any subpart writes into a StringIO. Then we write the
# headers and the StringIO contents. That way, subpart handlers can
# Do The Right Thing, and can still modify the Content-Type: header if
# necessary.
oldfp = self._fp
try:
self._fp = sfp = StringIO()
self._dispatch(msg)
finally:
self._fp = oldfp
# Write the headers. First we see if the message object wants to
# handle that itself. If not, we'll do it generically.
meth = getattr(msg, '_write_headers', None)
if meth is None:
self._write_headers(msg)
else:
meth(self)
self._fp.write(sfp.getvalue())
def _dispatch(self, msg):
# Get the Content-Type: for the message, then try to dispatch to
# self._handle_<maintype>_<subtype>(). If there's no handler for the
# full MIME type, then dispatch to self._handle_<maintype>(). If
# that's missing too, then dispatch to self._writeBody().
main = msg.get_content_maintype()
sub = msg.get_content_subtype()
specific = UNDERSCORE.join((main, sub)).replace('-', '_')
meth = getattr(self, '_handle_' + specific, None)
if meth is None:
generic = main.replace('-', '_')
meth = getattr(self, '_handle_' + generic, None)
if meth is None:
meth = self._writeBody
meth(msg)
#
# Default handlers
#
def _write_headers(self, msg):
for h, v in msg.items():
print >> self._fp, '%s:' % h,
if self._maxheaderlen == 0:
# Explicit no-wrapping
print >> self._fp, v
elif isinstance(v, Header):
# Header instances know what to do
print >> self._fp, v.encode()
elif _is8bitstring(v):
# If we have raw 8bit data in a byte string, we have no idea
# what the encoding is. There is no safe way to split this
# string. If it's ascii-subset, then we could do a normal
# ascii split, but if it's multibyte then we could break the
# string. There's no way to know so the least harm seems to
# be to not split the string and risk it being too long.
print >> self._fp, v
else:
# Header's got lots of smarts, so use it. Note that this is
# fundamentally broken though because we lose idempotency when
# the header string is continued with tabs. It will now be
# continued with spaces. This was reversedly broken before we
# fixed bug 1974. Either way, we lose.
print >> self._fp, Header(
v, maxlinelen=self._maxheaderlen, header_name=h).encode()
# A blank line always separates headers from body
print >> self._fp
#
# Handlers for writing types and subtypes
#
def _handle_text(self, msg):
payload = msg.get_payload()
if payload is None:
return
if not isinstance(payload, basestring):
raise TypeError('string payload expected: %s' % type(payload))
if self._mangle_from_:
payload = fcre.sub('>From ', payload)
self._fp.write(payload)
# Default body handler
_writeBody = _handle_text
def _handle_multipart(self, msg):
# The trick here is to write out each part separately, merge them all
# together, and then make sure that the boundary we've chosen isn't
# present in the payload.
msgtexts = []
subparts = msg.get_payload()
if subparts is None:
subparts = []
elif isinstance(subparts, basestring):
# e.g. a non-strict parse of a message with no starting boundary.
self._fp.write(subparts)
return
elif not isinstance(subparts, list):
# Scalar payload
subparts = [subparts]
for part in subparts:
s = StringIO()
g = self.clone(s)
g.flatten(part, unixfrom=False)
msgtexts.append(s.getvalue())
# BAW: What about boundaries that are wrapped in double-quotes?
boundary = msg.get_boundary()
if not boundary:
# Create a boundary that doesn't appear in any of the
# message texts.
alltext = NL.join(msgtexts)
boundary = _make_boundary(alltext)
msg.set_boundary(boundary)
# If there's a preamble, write it out, with a trailing CRLF
if msg.preamble is not None:
print >> self._fp, msg.preamble
# dash-boundary transport-padding CRLF
print >> self._fp, '--' + boundary
# body-part
if msgtexts:
self._fp.write(msgtexts.pop(0))
# *encapsulation
# --> delimiter transport-padding
# --> CRLF body-part
for body_part in msgtexts:
# delimiter transport-padding CRLF
print >> self._fp, '\n--' + boundary
# body-part
self._fp.write(body_part)
# close-delimiter transport-padding
self._fp.write('\n--' + boundary + '--')
if msg.epilogue is not None:
print >> self._fp
self._fp.write(msg.epilogue)
def _handle_multipart_signed(self, msg):
# The contents of signed parts has to stay unmodified in order to keep
# the signature intact per RFC1847 2.1, so we disable header wrapping.
# RDM: This isn't enough to completely preserve the part, but it helps.
old_maxheaderlen = self._maxheaderlen
try:
self._maxheaderlen = 0
self._handle_multipart(msg)
finally:
self._maxheaderlen = old_maxheaderlen
def _handle_message_delivery_status(self, msg):
# We can't just write the headers directly to self's file object
# because this will leave an extra newline between the last header
# block and the boundary. Sigh.
blocks = []
for part in msg.get_payload():
s = StringIO()
g = self.clone(s)
g.flatten(part, unixfrom=False)
text = s.getvalue()
lines = text.split('\n')
# Strip off the unnecessary trailing empty line
if lines and lines[-1] == '':
blocks.append(NL.join(lines[:-1]))
else:
blocks.append(text)
# Now join all the blocks with an empty line. This has the lovely
# effect of separating each block with an empty line, but not adding
# an extra one after the last one.
self._fp.write(NL.join(blocks))
def _handle_message(self, msg):
s = StringIO()
g = self.clone(s)
# The payload of a message/rfc822 part should be a multipart sequence
# of length 1. The zeroth element of the list should be the Message
# object for the subpart. Extract that object, stringify it, and
# write it out.
# Except, it turns out, when it's a string instead, which happens when
# and only when HeaderParser is used on a message of mime type
# message/rfc822. Such messages are generated by, for example,
# Groupwise when forwarding unadorned messages. (Issue 7970.) So
# in that case we just emit the string body.
payload = msg.get_payload()
if isinstance(payload, list):
g.flatten(msg.get_payload(0), unixfrom=False)
payload = s.getvalue()
self._fp.write(payload)
_FMT = '[Non-text (%(type)s) part of message omitted, filename %(filename)s]'
class DecodedGenerator(Generator):
"""Generates a text representation of a message.
Like the Generator base class, except that non-text parts are substituted
with a format string representing the part.
"""
def __init__(self, outfp, mangle_from_=True, maxheaderlen=78, fmt=None):
"""Like Generator.__init__() except that an additional optional
argument is allowed.
Walks through all subparts of a message. If the subpart is of main
type `text', then it prints the decoded payload of the subpart.
Otherwise, fmt is a format string that is used instead of the message
payload. fmt is expanded with the following keywords (in
%(keyword)s format):
type : Full MIME type of the non-text part
maintype : Main MIME type of the non-text part
subtype : Sub-MIME type of the non-text part
filename : Filename of the non-text part
description: Description associated with the non-text part
encoding : Content transfer encoding of the non-text part
The default value for fmt is None, meaning
[Non-text (%(type)s) part of message omitted, filename %(filename)s]
"""
Generator.__init__(self, outfp, mangle_from_, maxheaderlen)
if fmt is None:
self._fmt = _FMT
else:
self._fmt = fmt
def _dispatch(self, msg):
for part in msg.walk():
maintype = part.get_content_maintype()
if maintype == 'text':
print >> self, part.get_payload(decode=True)
elif maintype == 'multipart':
# Just skip this
pass
else:
print >> self, self._fmt % {
'type' : part.get_content_type(),
'maintype' : part.get_content_maintype(),
'subtype' : part.get_content_subtype(),
'filename' : part.get_filename('[no filename]'),
'description': part.get('Content-Description',
'[no description]'),
'encoding' : part.get('Content-Transfer-Encoding',
'[no encoding]'),
}
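# A minimal usage sketch for DecodedGenerator (illustrative only; the message
# object `msg` and the cStringIO import are assumptions, not part of this
# module):
#
#   from cStringIO import StringIO
#   buf = StringIO()
#   gen = DecodedGenerator(buf, fmt='[skipped %(type)s part: %(filename)s]')
#   gen.flatten(msg)        # text parts are decoded and written out,
#   text = buf.getvalue()   # non-text parts are replaced by the fmt string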
# Helper
_width = len(repr(sys.maxint-1))
_fmt = '%%0%dd' % _width
def _make_boundary(text=None):
# Craft a random boundary. If text is given, ensure that the chosen
# boundary doesn't appear in the text.
token = random.randrange(sys.maxint)
boundary = ('=' * 15) + (_fmt % token) + '=='
if text is None:
return boundary
b = boundary
counter = 0
while True:
cre = re.compile('^--' + re.escape(b) + '(--)?$', re.MULTILINE)
if not cre.search(text):
break
b = boundary + '.' + str(counter)
counter += 1
return b
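# Illustrative note on _make_boundary (not part of the module): the returned
# value is guaranteed not to appear as a '--boundary' or '--boundary--' line
# in `text`; if the random candidate does collide, a '.<counter>' suffix is
# appended until a unique value is found. Typical (assumed) usage mirrors
# _handle_multipart above:
#
#   boundary = _make_boundary(alltext)   # alltext: joined flattened subparts
#   msg.set_boundary(boundary)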
| mpl-2.0 | -4,094,764,582,836,201,500 | 36.956403 | 79 | 0.580546 | false |
ThomasJunk/ringo | ringo/tests/functional/test_forms.py | 4 | 3977 | #!/usr/bin/env python
# encoding: utf-8
import pytest
from pytest_ringo import login, transaction_begin, transaction_rollback
class TestList:
def test_GET(self, app):
login(app, "admin", "secret")
app.get("/forms/list")
class TestRead:
# FIXME: There is currently no form in the database () <2016-02-08 10:30>
@pytest.mark.xfail
def test_GET(self, app):
login(app, "admin", "secret")
app.get("/forms/read/1")
class TestCreate:
def test_GET(self, app):
login(app, "admin", "secret")
app.get("/forms/create")
@pytest.mark.xfail
def test_POST(self, app):
login(app, "admin", "secret")
transaction_begin(app)
values = {"title": "test", "definiton": '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>'}
app.post("/forms/create", params=values, status=302)
transaction_rollback(app)
@pytest.mark.xfail
def test_POST_missing_title(self, app):
login(app, "admin", "secret")
transaction_begin(app)
values = {"title": "", "definiton": '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>'}
app.post("/forms/create", params=values, status=200)
transaction_rollback(app)
@pytest.mark.xfail
def test_POST_missing_definition(self, app):
login(app, "admin", "secret")
transaction_begin(app)
values = {"title": "test", "definiton": ''}
app.post("/forms/create", params=values, status=200)
transaction_rollback(app)
class TestUpdate:
# FIXME: There is currently no form in the database () <2016-02-08 10:30>
@pytest.mark.xfail
def test_update(self, app):
login(app, "admin", "secret")
app.get("/forms/update/1")
# FIXME: There is currently no form in the database () <2016-02-08 10:30>
@pytest.mark.xfail
def test_update_POST(self, app):
login(app, "admin", "secret")
transaction_begin(app)
values = {"title": "test", "definiton": '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>'}
app.post("/forms/update/1", params=values, status=302)
transaction_rollback(app)
# FIXME: There is currently no form in the database () <2016-02-08 10:30>
@pytest.mark.xfail
def test_update_POST_missing_title(self, app):
login(app, "admin", "secret")
transaction_begin(app)
values = {"title": "", "definiton": '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>'}
app.post("/forms/update/1", params=values, status=200)
transaction_rollback(app)
# FIXME: There is currently no form in the database () <2016-02-08 10:30>
@pytest.mark.xfail
def test_update_POST_missing_defintion(self, app):
login(app, "admin", "secret")
transaction_begin(app)
values = {"title": "test", "definiton": ''}
app.post("/forms/update/1", params=values, status=200)
transaction_rollback(app)
class TestDelete:
# FIXME: There is currently no form in the database () <2016-02-08 10:30>
@pytest.mark.xfail
def test_delete(self, app):
login(app, "admin", "secret")
transaction_begin(app)
app.get("/forms/delete/2")
transaction_rollback(app)
# FIXME: There is currently no form in the database () <2016-02-08 10:30>
@pytest.mark.xfail
def test_delete_POST_confirm_yes(self, app):
login(app, "admin", "secret")
transaction_begin(app)
values = {"confirmed": 1}
app.post("/forms/delete/2", params=values, status=302)
transaction_rollback(app)
# FIXME: There is currently no form in the database () <2016-02-08 10:30>
@pytest.mark.xfail
def test_delete_POST_admin_confirm_yes(self, app):
login(app, "admin", "secret")
transaction_begin(app)
values = {"confirmed": 1}
app.post("/forms/delete/1", params=values, status=302)
transaction_rollback(app)
| gpl-2.0 | 7,994,911,757,015,152,000 | 33.284483 | 106 | 0.611516 | false |
memsharded/conan | conans/test/functional/scm/workflows/test_scm_subfolder.py | 1 | 4596 | # coding=utf-8
import os
import textwrap
import unittest
from nose.plugins.attrib import attr
from conans.test.functional.scm.workflows.common import TestWorkflow
from conans.test.utils.tools import SVNLocalRepoTestCase
from conans.test.utils.tools import TestClient, create_local_git_repo
class SCMSubfolder(TestWorkflow):
""" The conanfile.py is in a subfolder inside the package,
also using subfolder for repo checkout
"""
path_to_conanfile = "cc" # It
path_from_conanfile_to_root = ".."
scm_subfolder = "scm_subfolder"
@attr("svn")
class SVNConanfileInRepoRootTest(SCMSubfolder, SVNLocalRepoTestCase):
""" Test SCM url='auto' with SVN, it can only work if conanfile is in the root of the repo
In this case, it is exactly the same to have the url="auto" or to implement a custom
get_remote_url function with the following behavior because the SVN class will be
created in the conanfile.py directory by default:
def get_remote_url():
here = os.path.dirname(__file__)
svn = tools.SVN(os.path.join(here, "."))
return svn.get_remote_url()
"""
extra_header = textwrap.dedent("""\
def get_remote_url():
here = os.path.dirname(__file__)
svn = tools.SVN(os.path.join(here, "%s"))
return svn.get_remote_url()
""" % SCMSubfolder.path_from_conanfile_to_root)
conanfile = SCMSubfolder.conanfile_base.format(extra_header=extra_header,
type="svn",
url="get_remote_url()",
scm_subfolder=SCMSubfolder.scm_subfolder)
def setUp(self):
self.lib1_ref = "lib1/version@user/channel"
files = self.get_files(subfolder='lib1', conanfile=self.conanfile, lib_ref=self.lib1_ref)
self.url, _ = self.create_project(files=files)
# Local workflow
def test_local_root_folder(self):
t = TestClient(path_with_spaces=False)
t.runner("svn co {}/lib1 .".format(self.url), cwd=t.current_folder)
self._run_local_test(t, t.current_folder, self.path_to_conanfile)
def test_local_monorepo(self):
t = TestClient(path_with_spaces=False)
t.runner("svn co {} .".format(self.url), cwd=t.current_folder)
self._run_local_test(t, t.current_folder, os.path.join("lib1", self.path_to_conanfile))
def test_local_monorepo_chdir(self):
t = TestClient(path_with_spaces=False)
t.runner("svn co {} .".format(self.url), cwd=t.current_folder)
self._run_local_test(t, os.path.join(t.current_folder, "lib1"), self.path_to_conanfile)
# Cache workflow
def test_remote_root_folder(self):
t = TestClient(path_with_spaces=False)
t.runner("svn co {}/lib1 .".format(self.url), cwd=t.current_folder)
self._run_remote_test(t, t.current_folder, self.path_to_conanfile)
def test_remote_monorepo(self):
t = TestClient(path_with_spaces=False)
t.runner("svn co {} .".format(self.url), cwd=t.current_folder)
self._run_remote_test(t, t.current_folder, os.path.join("lib1", self.path_to_conanfile))
def test_remote_monorepo_chdir(self):
t = TestClient(path_with_spaces=False)
t.runner("svn co {} .".format(self.url), cwd=t.current_folder)
self._run_remote_test(t, os.path.join(t.current_folder, "lib1"), self.path_to_conanfile)
class GitConanfileInRepoRootTest(SCMSubfolder, unittest.TestCase):
conanfile = SCMSubfolder.conanfile_base.format(extra_header="",
type="git",
url="\"auto\"",
scm_subfolder=SCMSubfolder.scm_subfolder)
def setUp(self):
self.lib1_ref = "lib1/version@user/channel"
files = self.get_files(subfolder=".", conanfile=self.conanfile, lib_ref=self.lib1_ref)
self.url, _ = create_local_git_repo(files=files)
# Local workflow
def test_local_root_folder(self):
t = TestClient(path_with_spaces=False)
t.runner('git clone "{}" .'.format(self.url), cwd=t.current_folder)
self._run_local_test(t, t.current_folder, self.path_to_conanfile)
# Cache workflow
def test_remote_root_folder(self):
t = TestClient(path_with_spaces=False)
t.runner('git clone "{}" .'.format(self.url), cwd=t.current_folder)
self._run_remote_test(t, t.current_folder, self.path_to_conanfile)
| mit | -2,842,551,029,878,063,600 | 40.405405 | 97 | 0.617711 | false |
bimbam23/tools-iuc | tools/resize_coordinate_window/resize_coordinate_window.py | 6 | 3693 | from __future__ import print_function
import argparse
import fileinput
import sys
# Maximum value of a signed 32 bit integer (2**31 - 1).
MAX_CHROM_LEN = 2147483647
def stop_err(msg):
sys.stderr.write(msg)
sys.exit(1)
parser = argparse.ArgumentParser()
parser.add_argument('--input', dest='input', help="Input dataset")
parser.add_argument('--start_coordinate', dest='start_coordinate', type=int, help='Chromosome start coordinate, either 0 or 1.')
parser.add_argument('--subtract_from_start', dest='subtract_from_start', type=int, help='Distance to subtract from start.')
parser.add_argument('--add_to_end', dest='add_to_end', type=int, help='Distance to add to end.')
parser.add_argument('--extend_existing', dest='extend_existing', help='Extend existing start/end instead of from computed midpoint.')
parser.add_argument('--chrom_len_file', dest='chrom_len_file', help="File names of .len files for chromosome lengths")
parser.add_argument('--region_boundaries', dest='region_boundaries', help="Option for handling region boundaries")
parser.add_argument('--output', dest='output', help="Output dataset")
args = parser.parse_args()
extend_existing = args.extend_existing == 'existing'
out = open(args.output, 'wb')
chrom_start = int(args.start_coordinate)
chrom_lens = dict()
# Determine the length of each chromosome and add it to the chrom_lens dictionary.
len_file_missing = False
len_file_error = None
len_file = fileinput.FileInput(args.chrom_len_file)
try:
for line in len_file:
fields = line.split("\t")
chrom_lens[fields[0]] = int(fields[1])
except Exception as e:
len_file_error = str(e)
with open(args.input) as fhi:
for line in fhi:
if line.startswith('#'):
# Skip comments.
continue
items = line.split('\t')
if len(items) != 9:
# Skip invalid gff data.
continue
chrom = items[0]
start = int(items[3])
end = int(items[4])
if extend_existing:
new_start = start - args.subtract_from_start
new_end = end + args.add_to_end
else:
midpoint = (start + end) // 2
new_start = midpoint - args.subtract_from_start
new_end = midpoint + args.add_to_end
# Check start boundary.
if new_start < chrom_start:
if args.region_boundaries == 'discard':
continue
elif args.region_boundaries == 'limit':
new_start = chrom_start
elif args.region_boundaries == 'error':
out.close()
stop_err('Requested expansion places region beyond chromosome start boundary of %d.' % chrom_start)
# Check end boundary.
chrom_len = chrom_lens.get(chrom, None)
if chrom_len is None:
len_file_missing = True
chrom_len = MAX_CHROM_LEN
if new_end > chrom_len:
if args.region_boundaries == 'discard':
continue
elif args.region_boundaries == 'limit':
new_end = chrom_len
elif args.region_boundaries == 'error':
out.close()
stop_err('Requested expansion places region beyond chromosome end boundary of %d.' % chrom_len)
new_line = '\t'.join([chrom, items[1], items[2], str(new_start), str(new_end), items[5], items[6], items[7], items[8]])
out.write(new_line)
out.close()
if len_file_error is not None:
print("All chrom lengths set to %d, error in chrom len file: %s" % (MAX_CHROM_LEN, len_file_error))
if len_file_missing:
print("All chrom lengths set to %d, chrom len files are not installed." % MAX_CHROM_LEN)
| mit | -7,648,661,794,467,845,000 | 39.582418 | 133 | 0.62632 | false |
mihailignatenko/erp | addons/l10n_uy/__openerp__.py | 260 | 1807 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2011 Openerp.uy <[email protected]>
#    OpenERP Localization Project for Uruguay
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Uruguay - Chart of Accounts',
'version': '0.1',
'author': 'Uruguay l10n Team & Guillem Barba',
'category': 'Localization/Account Charts',
'website': 'https://launchpad.net/openerp-uruguay',
'description': """
General Chart of Accounts.
==========================
    Provides templates for the Chart of Accounts and Taxes for Uruguay.
""",
'license': 'AGPL-3',
'depends': ['account'],
'data': [
'account_types.xml',
'taxes_code_template.xml',
'account_chart_template.xml',
'taxes_template.xml',
'l10n_uy_wizard.xml',
],
'demo': [],
'auto_install': False,
'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 3,590,585,674,305,178,600 | 34.411765 | 78 | 0.594131 | false |
zhangxq5012/sky_engine | build/android/pylib/perf/surface_stats_collector_unittest.py | 99 | 2384 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittests for SurfaceStatsCollector."""
# pylint: disable=W0212
import unittest
from pylib.perf.surface_stats_collector import SurfaceStatsCollector
class TestSurfaceStatsCollector(unittest.TestCase):
@staticmethod
def _CreateUniformTimestamps(base, num, delta):
return [base + i * delta for i in range(1, num + 1)]
@staticmethod
def _CreateDictionaryFromResults(results):
dictionary = {}
for result in results:
dictionary[result.name] = result
return dictionary
def setUp(self):
self.refresh_period = 0.1
def testOneFrameDelta(self):
timestamps = self._CreateUniformTimestamps(0, 10, self.refresh_period)
results = self._CreateDictionaryFromResults(
SurfaceStatsCollector._CalculateResults(
self.refresh_period, timestamps, ''))
self.assertEquals(results['avg_surface_fps'].value,
int(round(1 / self.refresh_period)))
self.assertEquals(results['jank_count'].value, 0)
self.assertEquals(results['max_frame_delay'].value, 1)
self.assertEquals(len(results['frame_lengths'].value), len(timestamps) - 1)
def testAllFramesTooShort(self):
timestamps = self._CreateUniformTimestamps(0, 10, self.refresh_period / 100)
self.assertRaises(Exception,
SurfaceStatsCollector._CalculateResults,
[self.refresh_period, timestamps, ''])
def testSomeFramesTooShort(self):
timestamps = self._CreateUniformTimestamps(0, 5, self.refresh_period)
# The following timestamps should be skipped.
timestamps += self._CreateUniformTimestamps(timestamps[4],
5,
self.refresh_period / 100)
timestamps += self._CreateUniformTimestamps(timestamps[4],
5,
self.refresh_period)
results = self._CreateDictionaryFromResults(
SurfaceStatsCollector._CalculateResults(
self.refresh_period, timestamps, ''))
self.assertEquals(len(results['frame_lengths'].value), 9)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | -8,193,354,568,199,450,000 | 36.25 | 80 | 0.636745 | false |
schwartzmx/ansible-modules-extras | notification/flowdock.py | 55 | 6057 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2013 Matt Coddington <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: flowdock
version_added: "1.2"
author: Matt Coddington
short_description: Send a message to a flowdock
description:
- Send a message to a flowdock team inbox or chat using the push API (see https://www.flowdock.com/api/team-inbox and https://www.flowdock.com/api/chat)
options:
token:
description:
- API token.
required: true
type:
description:
- Whether to post to 'inbox' or 'chat'
required: true
choices: [ "inbox", "chat" ]
msg:
description:
- Content of the message
required: true
tags:
description:
- tags of the message, separated by commas
required: false
external_user_name:
description:
- (chat only - required) Name of the "user" sending the message
required: false
from_address:
description:
- (inbox only - required) Email address of the message sender
required: false
source:
description:
- (inbox only - required) Human readable identifier of the application that uses the Flowdock API
required: false
subject:
description:
- (inbox only - required) Subject line of the message
required: false
from_name:
description:
- (inbox only) Name of the message sender
required: false
reply_to:
description:
- (inbox only) Email address for replies
required: false
project:
description:
- (inbox only) Human readable identifier for more detailed message categorization
required: false
link:
description:
- (inbox only) Link associated with the message. This will be used to link the message subject in Team Inbox.
required: false
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
version_added: 1.5.1
# informational: requirements for nodes
requirements: [ urllib, urllib2 ]
'''
EXAMPLES = '''
- flowdock: type=inbox
token=AAAAAA
[email protected]
source='my cool app'
msg='test from ansible'
subject='test subject'
- flowdock: type=chat
token=AAAAAA
external_user_name=testuser
msg='test from ansible'
tags=tag1,tag2,tag3
'''
# ===========================================
# Module execution.
#
def main():
module = AnsibleModule(
argument_spec=dict(
token=dict(required=True),
msg=dict(required=True),
type=dict(required=True, choices=["inbox","chat"]),
external_user_name=dict(required=False),
from_address=dict(required=False),
source=dict(required=False),
subject=dict(required=False),
from_name=dict(required=False),
reply_to=dict(required=False),
project=dict(required=False),
tags=dict(required=False),
link=dict(required=False),
validate_certs = dict(default='yes', type='bool'),
),
supports_check_mode=True
)
type = module.params["type"]
token = module.params["token"]
if type == 'inbox':
url = "https://api.flowdock.com/v1/messages/team_inbox/%s" % (token)
else:
url = "https://api.flowdock.com/v1/messages/chat/%s" % (token)
params = {}
# required params
params['content'] = module.params["msg"]
# required params for the 'chat' type
if module.params['external_user_name']:
if type == 'inbox':
module.fail_json(msg="external_user_name is not valid for the 'inbox' type")
else:
params['external_user_name'] = module.params["external_user_name"]
elif type == 'chat':
module.fail_json(msg="%s is required for the 'inbox' type" % item)
# required params for the 'inbox' type
for item in [ 'from_address', 'source', 'subject' ]:
if module.params[item]:
if type == 'chat':
module.fail_json(msg="%s is not valid for the 'chat' type" % item)
else:
params[item] = module.params[item]
elif type == 'inbox':
module.fail_json(msg="%s is required for the 'inbox' type" % item)
# optional params
if module.params["tags"]:
params['tags'] = module.params["tags"]
# optional params for the 'inbox' type
for item in [ 'from_name', 'reply_to', 'project', 'link' ]:
if module.params[item]:
if type == 'chat':
module.fail_json(msg="%s is not valid for the 'chat' type" % item)
else:
params[item] = module.params[item]
# If we're in check mode, just exit pretending like we succeeded
if module.check_mode:
module.exit_json(changed=False)
# Send the data to Flowdock
data = urllib.urlencode(params)
response, info = fetch_url(module, url, data=data)
if info['status'] != 200:
module.fail_json(msg="unable to send msg: %s" % info['msg'])
module.exit_json(changed=True, msg=module.params["msg"])
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
main()
| gpl-3.0 | 7,757,007,189,330,942,000 | 30.546875 | 155 | 0.626548 | false |
TheJJ100100/bedrock | bedrock/base/tests/test_accepted_locales.py | 6 | 3508 | import os
import shutil
from django.conf import settings
import test_utils
from bedrock.settings.base import get_dev_languages, path
class AcceptedLocalesTest(test_utils.TestCase):
"""Test lazy evaluation of locale related settings.
Verify that some localization-related settings are lazily evaluated based
on the current value of the DEV variable. Depending on the value,
DEV_LANGUAGES or PROD_LANGUAGES should be used.
"""
locale = path('locale')
locale_bkp = path('locale_bkp')
@classmethod
def setup_class(cls):
"""Create a directory structure for locale/.
Back up the existing project/locale/ directory and create the following
hierarchy in its place:
- project/locale/en-US/LC_MESSAGES
- project/locale/fr/LC_MESSAGES
- project/locale/templates/LC_MESSAGES
- project/locale/empty_file
Also, set PROD_LANGUAGES to ('en-US',).
"""
if os.path.exists(cls.locale_bkp):
raise Exception('A backup of locale/ exists at %s which might '
'mean that previous tests didn\'t end cleanly. '
'Skipping the test suite.' % cls.locale_bkp)
cls.DEV = settings.DEV
cls.PROD_LANGUAGES = settings.PROD_LANGUAGES
cls.DEV_LANGUAGES = settings.DEV_LANGUAGES
settings.PROD_LANGUAGES = ('en-US',)
if os.path.exists(cls.locale):
shutil.move(cls.locale, cls.locale_bkp)
else:
cls.locale_bkp = None
for loc in ('en-US', 'fr', 'templates'):
os.makedirs(os.path.join(cls.locale, loc, 'LC_MESSAGES'))
open(os.path.join(cls.locale, 'empty_file'), 'w').close()
@classmethod
def teardown_class(cls):
"""Remove the testing locale/ dir and bring back the backup."""
settings.DEV = cls.DEV
settings.PROD_LANGUAGES = cls.PROD_LANGUAGES
settings.DEV_LANGUAGES = cls.DEV_LANGUAGES
shutil.rmtree(cls.locale)
if cls.locale_bkp:
shutil.move(cls.locale_bkp, cls.locale)
def test_build_dev_languages(self):
"""Test that the list of dev locales is built properly.
On dev instances, the list of accepted locales should correspond to
the per-locale directories in locale/.
"""
settings.DEV = True
langs = get_dev_languages()
assert langs == ['en-US', 'fr'] or langs == ['fr', 'en-US'], (
'DEV_LANGUAGES do not correspond to the contents of locale/.')
def test_dev_languages(self):
"""Test the accepted locales on dev instances.
On dev instances, allow locales defined in DEV_LANGUAGES.
"""
settings.DEV = True
# simulate the successful result of the DEV_LANGUAGES list
# comprehension defined in settings.
settings.DEV_LANGUAGES = ['en-US', 'fr']
assert settings.LANGUAGE_URL_MAP == {'en-us': 'en-US', 'fr': 'fr'}, \
('DEV is True, but DEV_LANGUAGES are not used to define the '
'allowed locales.')
def test_prod_languages(self):
"""Test the accepted locales on prod instances.
On stage/prod instances, allow locales defined in PROD_LANGUAGES.
"""
settings.DEV = False
assert settings.LANGUAGE_URL_MAP == {'en-us': 'en-US'}, (
'DEV is False, but PROD_LANGUAGES are not used to define the '
'allowed locales.')
| mpl-2.0 | -1,075,531,728,976,025,900 | 34.795918 | 79 | 0.611174 | false |
skg-net/ansible | test/runner/lib/docker_util.py | 17 | 5836 | """Functions for accessing docker via the docker cli."""
from __future__ import absolute_import, print_function
import json
import os
import time
from lib.executor import (
SubprocessError,
)
from lib.util import (
ApplicationError,
run_command,
common_environment,
display,
find_executable,
)
from lib.config import (
EnvironmentConfig,
)
BUFFER_SIZE = 256 * 256
def docker_available():
"""
:rtype: bool
"""
return find_executable('docker', required=False)
def get_docker_container_id():
"""
:rtype: str | None
"""
path = '/proc/self/cgroup'
if not os.path.exists(path):
return None
with open(path) as cgroup_fd:
contents = cgroup_fd.read()
paths = [line.split(':')[2] for line in contents.splitlines()]
container_ids = set(path.split('/')[2] for path in paths if path.startswith('/docker/'))
if not container_ids:
return None
if len(container_ids) == 1:
return container_ids.pop()
raise ApplicationError('Found multiple container_id candidates: %s\n%s' % (sorted(container_ids), contents))
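# Illustrative /proc/self/cgroup line that get_docker_container_id() expects
# (the subsystem name and hex id below are made-up examples):
#
#   12:devices:/docker/3f4e8a...<64 hex chars>
#
# Splitting on ':' keeps field 2 ('/docker/<id>'); splitting that on '/' keeps
# element 2, the container id.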
def get_docker_container_ip(args, container_id):
"""
:type args: EnvironmentConfig
:type container_id: str
:rtype: str
"""
results = docker_inspect(args, container_id)
ipaddress = results[0]['NetworkSettings']['IPAddress']
return ipaddress
def docker_pull(args, image):
"""
:type args: EnvironmentConfig
:type image: str
"""
if not args.docker_pull:
display.warning('Skipping docker pull for "%s". Image may be out-of-date.' % image)
return
for _ in range(1, 10):
try:
docker_command(args, ['pull', image])
return
except SubprocessError:
display.warning('Failed to pull docker image "%s". Waiting a few seconds before trying again.' % image)
time.sleep(3)
raise ApplicationError('Failed to pull docker image "%s".' % image)
def docker_put(args, container_id, src, dst):
"""
:type args: EnvironmentConfig
:type container_id: str
:type src: str
:type dst: str
"""
# avoid 'docker cp' due to a bug which causes 'docker rm' to fail
with open(src, 'rb') as src_fd:
docker_exec(args, container_id, ['dd', 'of=%s' % dst, 'bs=%s' % BUFFER_SIZE],
options=['-i'], stdin=src_fd, capture=True)
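    # Shell equivalent of the dd-based workaround above (illustrative only,
    # with placeholder paths; BUFFER_SIZE is 256 * 256 = 65536 bytes):
    #
    #   docker exec -i <container_id> dd of=<dst> bs=65536 < <src>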
def docker_get(args, container_id, src, dst):
"""
:type args: EnvironmentConfig
:type container_id: str
:type src: str
:type dst: str
"""
# avoid 'docker cp' due to a bug which causes 'docker rm' to fail
with open(dst, 'wb') as dst_fd:
docker_exec(args, container_id, ['dd', 'if=%s' % src, 'bs=%s' % BUFFER_SIZE],
options=['-i'], stdout=dst_fd, capture=True)
def docker_run(args, image, options, cmd=None):
"""
:type args: EnvironmentConfig
:type image: str
:type options: list[str] | None
:type cmd: list[str] | None
:rtype: str | None, str | None
"""
if not options:
options = []
if not cmd:
cmd = []
for _ in range(1, 3):
try:
return docker_command(args, ['run'] + options + [image] + cmd, capture=True)
except SubprocessError as ex:
display.error(ex)
display.warning('Failed to run docker image "%s". Waiting a few seconds before trying again.' % image)
time.sleep(3)
raise ApplicationError('Failed to run docker image "%s".' % image)
def docker_rm(args, container_id):
"""
:type args: EnvironmentConfig
:type container_id: str
"""
docker_command(args, ['rm', '-f', container_id], capture=True)
def docker_inspect(args, container_id):
"""
:type args: EnvironmentConfig
:type container_id: str
:rtype: list[dict]
"""
if args.explain:
return []
try:
stdout, _ = docker_command(args, ['inspect', container_id], capture=True)
return json.loads(stdout)
except SubprocessError as ex:
try:
return json.loads(ex.stdout)
except:
raise ex # pylint: disable=locally-disabled, raising-bad-type
def docker_network_inspect(args, network):
"""
:type args: EnvironmentConfig
:type network: str
:rtype: list[dict]
"""
if args.explain:
return []
try:
stdout, _ = docker_command(args, ['network', 'inspect', network], capture=True)
return json.loads(stdout)
except SubprocessError as ex:
try:
return json.loads(ex.stdout)
except:
raise ex # pylint: disable=locally-disabled, raising-bad-type
def docker_exec(args, container_id, cmd, options=None, capture=False, stdin=None, stdout=None):
"""
:type args: EnvironmentConfig
:type container_id: str
:type cmd: list[str]
:type options: list[str] | None
:type capture: bool
:type stdin: file | None
:type stdout: file | None
:rtype: str | None, str | None
"""
if not options:
options = []
return docker_command(args, ['exec'] + options + [container_id] + cmd, capture=capture, stdin=stdin, stdout=stdout)
def docker_command(args, cmd, capture=False, stdin=None, stdout=None):
"""
:type args: EnvironmentConfig
:type cmd: list[str]
:type capture: bool
:type stdin: file | None
:type stdout: file | None
:rtype: str | None, str | None
"""
env = docker_environment()
return run_command(args, ['docker'] + cmd, env=env, capture=capture, stdin=stdin, stdout=stdout)
def docker_environment():
"""
:rtype: dict[str, str]
"""
env = common_environment()
env.update(dict((key, os.environ[key]) for key in os.environ if key.startswith('DOCKER_')))
return env
| gpl-3.0 | -3,049,441,002,144,460,300 | 25.170404 | 119 | 0.605552 | false |
AustereCuriosity/numpy | numpy/lib/tests/test_nanfunctions.py | 10 | 32613 | from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
from numpy.testing import (
run_module_suite, TestCase, assert_, assert_equal, assert_almost_equal,
assert_no_warnings, assert_raises, assert_array_equal, suppress_warnings
)
# Test data
_ndat = np.array([[0.6244, np.nan, 0.2692, 0.0116, np.nan, 0.1170],
[0.5351, -0.9403, np.nan, 0.2100, 0.4759, 0.2833],
[np.nan, np.nan, np.nan, 0.1042, np.nan, -0.5954],
[0.1610, np.nan, np.nan, 0.1859, 0.3146, np.nan]])
# Rows of _ndat with nans removed
_rdat = [np.array([0.6244, 0.2692, 0.0116, 0.1170]),
np.array([0.5351, -0.9403, 0.2100, 0.4759, 0.2833]),
np.array([0.1042, -0.5954]),
np.array([0.1610, 0.1859, 0.3146])]
# Rows of _ndat with nans converted to ones
_ndat_ones = np.array([[0.6244, 1.0, 0.2692, 0.0116, 1.0, 0.1170],
[0.5351, -0.9403, 1.0, 0.2100, 0.4759, 0.2833],
[1.0, 1.0, 1.0, 0.1042, 1.0, -0.5954],
[0.1610, 1.0, 1.0, 0.1859, 0.3146, 1.0]])
# Rows of _ndat with nans converted to zeros
_ndat_zeros = np.array([[0.6244, 0.0, 0.2692, 0.0116, 0.0, 0.1170],
[0.5351, -0.9403, 0.0, 0.2100, 0.4759, 0.2833],
[0.0, 0.0, 0.0, 0.1042, 0.0, -0.5954],
[0.1610, 0.0, 0.0, 0.1859, 0.3146, 0.0]])
class TestNanFunctions_MinMax(TestCase):
nanfuncs = [np.nanmin, np.nanmax]
stdfuncs = [np.min, np.max]
def test_mutation(self):
# Check that passed array is not modified.
ndat = _ndat.copy()
for f in self.nanfuncs:
f(ndat)
assert_equal(ndat, _ndat)
def test_keepdims(self):
mat = np.eye(3)
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
for axis in [None, 0, 1]:
tgt = rf(mat, axis=axis, keepdims=True)
res = nf(mat, axis=axis, keepdims=True)
assert_(res.ndim == tgt.ndim)
def test_out(self):
mat = np.eye(3)
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
resout = np.zeros(3)
tgt = rf(mat, axis=1)
res = nf(mat, axis=1, out=resout)
assert_almost_equal(res, resout)
assert_almost_equal(res, tgt)
def test_dtype_from_input(self):
codes = 'efdgFDG'
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
for c in codes:
mat = np.eye(3, dtype=c)
tgt = rf(mat, axis=1).dtype.type
res = nf(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
tgt = rf(mat, axis=None).dtype.type
res = nf(mat, axis=None).dtype.type
assert_(res is tgt)
def test_result_values(self):
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
tgt = [rf(d) for d in _rdat]
res = nf(_ndat, axis=1)
assert_almost_equal(res, tgt)
def test_allnans(self):
mat = np.array([np.nan]*9).reshape(3, 3)
for f in self.nanfuncs:
for axis in [None, 0, 1]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(f(mat, axis=axis)).all())
assert_(len(w) == 1, 'no warning raised')
assert_(issubclass(w[0].category, RuntimeWarning))
# Check scalars
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(f(np.nan)))
assert_(len(w) == 1, 'no warning raised')
assert_(issubclass(w[0].category, RuntimeWarning))
def test_masked(self):
mat = np.ma.fix_invalid(_ndat)
msk = mat._mask.copy()
for f in [np.nanmin]:
res = f(mat, axis=1)
tgt = f(_ndat, axis=1)
assert_equal(res, tgt)
assert_equal(mat._mask, msk)
assert_(not np.isinf(mat).any())
def test_scalar(self):
for f in self.nanfuncs:
assert_(f(0.) == 0.)
def test_matrices(self):
# Check that it works and that type and
# shape are preserved
mat = np.matrix(np.eye(3))
for f in self.nanfuncs:
res = f(mat, axis=0)
assert_(isinstance(res, np.matrix))
assert_(res.shape == (1, 3))
res = f(mat, axis=1)
assert_(isinstance(res, np.matrix))
assert_(res.shape == (3, 1))
res = f(mat)
assert_(np.isscalar(res))
# check that rows of nan are dealt with for subclasses (#4628)
mat[1] = np.nan
for f in self.nanfuncs:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
res = f(mat, axis=0)
assert_(isinstance(res, np.matrix))
assert_(not np.any(np.isnan(res)))
assert_(len(w) == 0)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
res = f(mat, axis=1)
assert_(isinstance(res, np.matrix))
assert_(np.isnan(res[1, 0]) and not np.isnan(res[0, 0])
and not np.isnan(res[2, 0]))
assert_(len(w) == 1, 'no warning raised')
assert_(issubclass(w[0].category, RuntimeWarning))
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
res = f(mat)
assert_(np.isscalar(res))
assert_(res != np.nan)
assert_(len(w) == 0)
class TestNanFunctions_ArgminArgmax(TestCase):
nanfuncs = [np.nanargmin, np.nanargmax]
def test_mutation(self):
# Check that passed array is not modified.
ndat = _ndat.copy()
for f in self.nanfuncs:
f(ndat)
assert_equal(ndat, _ndat)
def test_result_values(self):
for f, fcmp in zip(self.nanfuncs, [np.greater, np.less]):
for row in _ndat:
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "invalid value encountered in")
ind = f(row)
val = row[ind]
# comparing with NaN is tricky as the result
# is always false except for NaN != NaN
assert_(not np.isnan(val))
assert_(not fcmp(val, row).any())
assert_(not np.equal(val, row[:ind]).any())
def test_allnans(self):
mat = np.array([np.nan]*9).reshape(3, 3)
for f in self.nanfuncs:
for axis in [None, 0, 1]:
assert_raises(ValueError, f, mat, axis=axis)
assert_raises(ValueError, f, np.nan)
def test_empty(self):
mat = np.zeros((0, 3))
for f in self.nanfuncs:
for axis in [0, None]:
assert_raises(ValueError, f, mat, axis=axis)
for axis in [1]:
res = f(mat, axis=axis)
assert_equal(res, np.zeros(0))
def test_scalar(self):
for f in self.nanfuncs:
assert_(f(0.) == 0.)
def test_matrices(self):
# Check that it works and that type and
# shape are preserved
mat = np.matrix(np.eye(3))
for f in self.nanfuncs:
res = f(mat, axis=0)
assert_(isinstance(res, np.matrix))
assert_(res.shape == (1, 3))
res = f(mat, axis=1)
assert_(isinstance(res, np.matrix))
assert_(res.shape == (3, 1))
res = f(mat)
assert_(np.isscalar(res))
class TestNanFunctions_IntTypes(TestCase):
int_types = (np.int8, np.int16, np.int32, np.int64, np.uint8,
np.uint16, np.uint32, np.uint64)
mat = np.array([127, 39, 93, 87, 46])
def integer_arrays(self):
for dtype in self.int_types:
yield self.mat.astype(dtype)
def test_nanmin(self):
tgt = np.min(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nanmin(mat), tgt)
def test_nanmax(self):
tgt = np.max(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nanmax(mat), tgt)
def test_nanargmin(self):
tgt = np.argmin(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nanargmin(mat), tgt)
def test_nanargmax(self):
tgt = np.argmax(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nanargmax(mat), tgt)
def test_nansum(self):
tgt = np.sum(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nansum(mat), tgt)
def test_nanprod(self):
tgt = np.prod(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nanprod(mat), tgt)
def test_nancumsum(self):
tgt = np.cumsum(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nancumsum(mat), tgt)
def test_nancumprod(self):
tgt = np.cumprod(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nancumprod(mat), tgt)
def test_nanmean(self):
tgt = np.mean(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nanmean(mat), tgt)
def test_nanvar(self):
tgt = np.var(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nanvar(mat), tgt)
        tgt = np.var(self.mat, ddof=1)
for mat in self.integer_arrays():
assert_equal(np.nanvar(mat, ddof=1), tgt)
def test_nanstd(self):
tgt = np.std(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nanstd(mat), tgt)
tgt = np.std(self.mat, ddof=1)
for mat in self.integer_arrays():
assert_equal(np.nanstd(mat, ddof=1), tgt)
class SharedNanFunctionsTestsMixin(object):
def test_mutation(self):
# Check that passed array is not modified.
ndat = _ndat.copy()
for f in self.nanfuncs:
f(ndat)
assert_equal(ndat, _ndat)
def test_keepdims(self):
mat = np.eye(3)
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
for axis in [None, 0, 1]:
tgt = rf(mat, axis=axis, keepdims=True)
res = nf(mat, axis=axis, keepdims=True)
assert_(res.ndim == tgt.ndim)
def test_out(self):
mat = np.eye(3)
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
resout = np.zeros(3)
tgt = rf(mat, axis=1)
res = nf(mat, axis=1, out=resout)
assert_almost_equal(res, resout)
assert_almost_equal(res, tgt)
def test_dtype_from_dtype(self):
mat = np.eye(3)
codes = 'efdgFDG'
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
for c in codes:
with suppress_warnings() as sup:
if nf in {np.nanstd, np.nanvar} and c in 'FDG':
# Giving the warning is a small bug, see gh-8000
sup.filter(np.ComplexWarning)
tgt = rf(mat, dtype=np.dtype(c), axis=1).dtype.type
res = nf(mat, dtype=np.dtype(c), axis=1).dtype.type
assert_(res is tgt)
# scalar case
tgt = rf(mat, dtype=np.dtype(c), axis=None).dtype.type
res = nf(mat, dtype=np.dtype(c), axis=None).dtype.type
assert_(res is tgt)
def test_dtype_from_char(self):
mat = np.eye(3)
codes = 'efdgFDG'
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
for c in codes:
with suppress_warnings() as sup:
if nf in {np.nanstd, np.nanvar} and c in 'FDG':
# Giving the warning is a small bug, see gh-8000
sup.filter(np.ComplexWarning)
tgt = rf(mat, dtype=c, axis=1).dtype.type
res = nf(mat, dtype=c, axis=1).dtype.type
assert_(res is tgt)
# scalar case
tgt = rf(mat, dtype=c, axis=None).dtype.type
res = nf(mat, dtype=c, axis=None).dtype.type
assert_(res is tgt)
def test_dtype_from_input(self):
codes = 'efdgFDG'
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
for c in codes:
mat = np.eye(3, dtype=c)
tgt = rf(mat, axis=1).dtype.type
res = nf(mat, axis=1).dtype.type
assert_(res is tgt, "res %s, tgt %s" % (res, tgt))
# scalar case
tgt = rf(mat, axis=None).dtype.type
res = nf(mat, axis=None).dtype.type
assert_(res is tgt)
def test_result_values(self):
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
tgt = [rf(d) for d in _rdat]
res = nf(_ndat, axis=1)
assert_almost_equal(res, tgt)
def test_scalar(self):
for f in self.nanfuncs:
assert_(f(0.) == 0.)
def test_matrices(self):
# Check that it works and that type and
# shape are preserved
mat = np.matrix(np.eye(3))
for f in self.nanfuncs:
res = f(mat, axis=0)
assert_(isinstance(res, np.matrix))
assert_(res.shape == (1, 3))
res = f(mat, axis=1)
assert_(isinstance(res, np.matrix))
assert_(res.shape == (3, 1))
res = f(mat)
assert_(np.isscalar(res))
class TestNanFunctions_SumProd(TestCase, SharedNanFunctionsTestsMixin):
nanfuncs = [np.nansum, np.nanprod]
stdfuncs = [np.sum, np.prod]
def test_allnans(self):
# Check for FutureWarning
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
res = np.nansum([np.nan]*3, axis=None)
assert_(res == 0, 'result is not 0')
assert_(len(w) == 0, 'warning raised')
# Check scalar
res = np.nansum(np.nan)
assert_(res == 0, 'result is not 0')
assert_(len(w) == 0, 'warning raised')
# Check there is no warning for not all-nan
np.nansum([0]*3, axis=None)
assert_(len(w) == 0, 'unwanted warning raised')
def test_empty(self):
for f, tgt_value in zip([np.nansum, np.nanprod], [0, 1]):
mat = np.zeros((0, 3))
tgt = [tgt_value]*3
res = f(mat, axis=0)
assert_equal(res, tgt)
tgt = []
res = f(mat, axis=1)
assert_equal(res, tgt)
tgt = tgt_value
res = f(mat, axis=None)
assert_equal(res, tgt)
class TestNanFunctions_CumSumProd(TestCase, SharedNanFunctionsTestsMixin):
nanfuncs = [np.nancumsum, np.nancumprod]
stdfuncs = [np.cumsum, np.cumprod]
def test_allnans(self):
for f, tgt_value in zip(self.nanfuncs, [0, 1]):
# Unlike other nan-functions, sum/prod/cumsum/cumprod don't warn on all nan input
with assert_no_warnings():
res = f([np.nan]*3, axis=None)
tgt = tgt_value*np.ones((3))
assert_(np.array_equal(res, tgt), 'result is not %s * np.ones((3))' % (tgt_value))
# Check scalar
res = f(np.nan)
tgt = tgt_value*np.ones((1))
assert_(np.array_equal(res, tgt), 'result is not %s * np.ones((1))' % (tgt_value))
# Check there is no warning for not all-nan
f([0]*3, axis=None)
def test_empty(self):
for f, tgt_value in zip(self.nanfuncs, [0, 1]):
mat = np.zeros((0, 3))
tgt = tgt_value*np.ones((0, 3))
res = f(mat, axis=0)
assert_equal(res, tgt)
tgt = mat
res = f(mat, axis=1)
assert_equal(res, tgt)
tgt = np.zeros((0))
res = f(mat, axis=None)
assert_equal(res, tgt)
def test_keepdims(self):
for f, g in zip(self.nanfuncs, self.stdfuncs):
mat = np.eye(3)
for axis in [None, 0, 1]:
tgt = f(mat, axis=axis, out=None)
res = g(mat, axis=axis, out=None)
assert_(res.ndim == tgt.ndim)
for f in self.nanfuncs:
d = np.ones((3, 5, 7, 11))
# Randomly set some elements to NaN:
rs = np.random.RandomState(0)
d[rs.rand(*d.shape) < 0.5] = np.nan
res = f(d, axis=None)
assert_equal(res.shape, (1155,))
for axis in np.arange(4):
res = f(d, axis=axis)
assert_equal(res.shape, (3, 5, 7, 11))
def test_matrices(self):
# Check that it works and that type and
# shape are preserved
mat = np.matrix(np.eye(3))
for f in self.nanfuncs:
for axis in np.arange(2):
res = f(mat, axis=axis)
assert_(isinstance(res, np.matrix))
assert_(res.shape == (3, 3))
res = f(mat)
assert_(res.shape == (1, 3*3))
def test_result_values(self):
for axis in (-2, -1, 0, 1, None):
tgt = np.cumprod(_ndat_ones, axis=axis)
res = np.nancumprod(_ndat, axis=axis)
assert_almost_equal(res, tgt)
tgt = np.cumsum(_ndat_zeros,axis=axis)
res = np.nancumsum(_ndat, axis=axis)
assert_almost_equal(res, tgt)
def test_out(self):
mat = np.eye(3)
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
resout = np.eye(3)
for axis in (-2, -1, 0, 1):
tgt = rf(mat, axis=axis)
res = nf(mat, axis=axis, out=resout)
assert_almost_equal(res, resout)
assert_almost_equal(res, tgt)
class TestNanFunctions_MeanVarStd(TestCase, SharedNanFunctionsTestsMixin):
nanfuncs = [np.nanmean, np.nanvar, np.nanstd]
stdfuncs = [np.mean, np.var, np.std]
def test_dtype_error(self):
for f in self.nanfuncs:
for dtype in [np.bool_, np.int_, np.object_]:
assert_raises(TypeError, f, _ndat, axis=1, dtype=dtype)
def test_out_dtype_error(self):
for f in self.nanfuncs:
for dtype in [np.bool_, np.int_, np.object_]:
out = np.empty(_ndat.shape[0], dtype=dtype)
assert_raises(TypeError, f, _ndat, axis=1, out=out)
def test_ddof(self):
nanfuncs = [np.nanvar, np.nanstd]
stdfuncs = [np.var, np.std]
for nf, rf in zip(nanfuncs, stdfuncs):
for ddof in [0, 1]:
tgt = [rf(d, ddof=ddof) for d in _rdat]
res = nf(_ndat, axis=1, ddof=ddof)
assert_almost_equal(res, tgt)
def test_ddof_too_big(self):
nanfuncs = [np.nanvar, np.nanstd]
stdfuncs = [np.var, np.std]
dsize = [len(d) for d in _rdat]
for nf, rf in zip(nanfuncs, stdfuncs):
for ddof in range(5):
with suppress_warnings() as sup:
sup.record(RuntimeWarning)
sup.filter(np.ComplexWarning)
tgt = [ddof >= d for d in dsize]
res = nf(_ndat, axis=1, ddof=ddof)
assert_equal(np.isnan(res), tgt)
if any(tgt):
assert_(len(sup.log) == 1)
else:
assert_(len(sup.log) == 0)
def test_allnans(self):
mat = np.array([np.nan]*9).reshape(3, 3)
for f in self.nanfuncs:
for axis in [None, 0, 1]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(f(mat, axis=axis)).all())
assert_(len(w) == 1)
assert_(issubclass(w[0].category, RuntimeWarning))
# Check scalar
assert_(np.isnan(f(np.nan)))
assert_(len(w) == 2)
assert_(issubclass(w[0].category, RuntimeWarning))
def test_empty(self):
mat = np.zeros((0, 3))
for f in self.nanfuncs:
for axis in [0, None]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(f(mat, axis=axis)).all())
assert_(len(w) == 1)
assert_(issubclass(w[0].category, RuntimeWarning))
for axis in [1]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_equal(f(mat, axis=axis), np.zeros([]))
assert_(len(w) == 0)
class TestNanFunctions_Median(TestCase):
def test_mutation(self):
# Check that passed array is not modified.
ndat = _ndat.copy()
np.nanmedian(ndat)
assert_equal(ndat, _ndat)
def test_keepdims(self):
mat = np.eye(3)
for axis in [None, 0, 1]:
tgt = np.median(mat, axis=axis, out=None, overwrite_input=False)
res = np.nanmedian(mat, axis=axis, out=None, overwrite_input=False)
assert_(res.ndim == tgt.ndim)
d = np.ones((3, 5, 7, 11))
# Randomly set some elements to NaN:
w = np.random.random((4, 200)) * np.array(d.shape)[:, None]
w = w.astype(np.intp)
d[tuple(w)] = np.nan
with suppress_warnings() as sup:
sup.filter(RuntimeWarning)
res = np.nanmedian(d, axis=None, keepdims=True)
assert_equal(res.shape, (1, 1, 1, 1))
res = np.nanmedian(d, axis=(0, 1), keepdims=True)
assert_equal(res.shape, (1, 1, 7, 11))
res = np.nanmedian(d, axis=(0, 3), keepdims=True)
assert_equal(res.shape, (1, 5, 7, 1))
res = np.nanmedian(d, axis=(1,), keepdims=True)
assert_equal(res.shape, (3, 1, 7, 11))
res = np.nanmedian(d, axis=(0, 1, 2, 3), keepdims=True)
assert_equal(res.shape, (1, 1, 1, 1))
res = np.nanmedian(d, axis=(0, 1, 3), keepdims=True)
assert_equal(res.shape, (1, 1, 7, 1))
def test_out(self):
mat = np.random.rand(3, 3)
nan_mat = np.insert(mat, [0, 2], np.nan, axis=1)
resout = np.zeros(3)
tgt = np.median(mat, axis=1)
res = np.nanmedian(nan_mat, axis=1, out=resout)
assert_almost_equal(res, resout)
assert_almost_equal(res, tgt)
# 0-d output:
resout = np.zeros(())
tgt = np.median(mat, axis=None)
res = np.nanmedian(nan_mat, axis=None, out=resout)
assert_almost_equal(res, resout)
assert_almost_equal(res, tgt)
res = np.nanmedian(nan_mat, axis=(0, 1), out=resout)
assert_almost_equal(res, resout)
assert_almost_equal(res, tgt)
def test_small_large(self):
# test the small and large code paths, current cutoff 400 elements
for s in [5, 20, 51, 200, 1000]:
d = np.random.randn(4, s)
# Randomly set some elements to NaN:
w = np.random.randint(0, d.size, size=d.size // 5)
d.ravel()[w] = np.nan
d[:,0] = 1. # ensure at least one good value
# use normal median without nans to compare
tgt = []
for x in d:
nonan = np.compress(~np.isnan(x), x)
tgt.append(np.median(nonan, overwrite_input=True))
assert_array_equal(np.nanmedian(d, axis=-1), tgt)
def test_result_values(self):
tgt = [np.median(d) for d in _rdat]
res = np.nanmedian(_ndat, axis=1)
assert_almost_equal(res, tgt)
def test_allnans(self):
mat = np.array([np.nan]*9).reshape(3, 3)
for axis in [None, 0, 1]:
with suppress_warnings() as sup:
sup.record(RuntimeWarning)
assert_(np.isnan(np.nanmedian(mat, axis=axis)).all())
if axis is None:
assert_(len(sup.log) == 1)
else:
assert_(len(sup.log) == 3)
# Check scalar
assert_(np.isnan(np.nanmedian(np.nan)))
if axis is None:
assert_(len(sup.log) == 2)
else:
assert_(len(sup.log) == 4)
def test_empty(self):
mat = np.zeros((0, 3))
for axis in [0, None]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(np.nanmedian(mat, axis=axis)).all())
assert_(len(w) == 1)
assert_(issubclass(w[0].category, RuntimeWarning))
for axis in [1]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_equal(np.nanmedian(mat, axis=axis), np.zeros([]))
assert_(len(w) == 0)
def test_scalar(self):
assert_(np.nanmedian(0.) == 0.)
def test_extended_axis_invalid(self):
d = np.ones((3, 5, 7, 11))
assert_raises(IndexError, np.nanmedian, d, axis=-5)
assert_raises(IndexError, np.nanmedian, d, axis=(0, -5))
assert_raises(IndexError, np.nanmedian, d, axis=4)
assert_raises(IndexError, np.nanmedian, d, axis=(0, 4))
assert_raises(ValueError, np.nanmedian, d, axis=(1, 1))
def test_float_special(self):
with suppress_warnings() as sup:
sup.filter(RuntimeWarning)
a = np.array([[np.inf, np.nan], [np.nan, np.nan]])
assert_equal(np.nanmedian(a, axis=0), [np.inf, np.nan])
assert_equal(np.nanmedian(a, axis=1), [np.inf, np.nan])
assert_equal(np.nanmedian(a), np.inf)
# minimum fill value check
a = np.array([[np.nan, np.nan, np.inf], [np.nan, np.nan, np.inf]])
assert_equal(np.nanmedian(a, axis=1), np.inf)
# no mask path
a = np.array([[np.inf, np.inf], [np.inf, np.inf]])
assert_equal(np.nanmedian(a, axis=1), np.inf)
class TestNanFunctions_Percentile(TestCase):
def test_mutation(self):
# Check that passed array is not modified.
ndat = _ndat.copy()
np.nanpercentile(ndat, 30)
assert_equal(ndat, _ndat)
def test_keepdims(self):
mat = np.eye(3)
for axis in [None, 0, 1]:
tgt = np.percentile(mat, 70, axis=axis, out=None,
overwrite_input=False)
res = np.nanpercentile(mat, 70, axis=axis, out=None,
overwrite_input=False)
assert_(res.ndim == tgt.ndim)
d = np.ones((3, 5, 7, 11))
# Randomly set some elements to NaN:
w = np.random.random((4, 200)) * np.array(d.shape)[:, None]
w = w.astype(np.intp)
d[tuple(w)] = np.nan
with suppress_warnings() as sup:
sup.filter(RuntimeWarning)
res = np.nanpercentile(d, 90, axis=None, keepdims=True)
assert_equal(res.shape, (1, 1, 1, 1))
res = np.nanpercentile(d, 90, axis=(0, 1), keepdims=True)
assert_equal(res.shape, (1, 1, 7, 11))
res = np.nanpercentile(d, 90, axis=(0, 3), keepdims=True)
assert_equal(res.shape, (1, 5, 7, 1))
res = np.nanpercentile(d, 90, axis=(1,), keepdims=True)
assert_equal(res.shape, (3, 1, 7, 11))
res = np.nanpercentile(d, 90, axis=(0, 1, 2, 3), keepdims=True)
assert_equal(res.shape, (1, 1, 1, 1))
res = np.nanpercentile(d, 90, axis=(0, 1, 3), keepdims=True)
assert_equal(res.shape, (1, 1, 7, 1))
def test_out(self):
mat = np.random.rand(3, 3)
nan_mat = np.insert(mat, [0, 2], np.nan, axis=1)
resout = np.zeros(3)
tgt = np.percentile(mat, 42, axis=1)
res = np.nanpercentile(nan_mat, 42, axis=1, out=resout)
assert_almost_equal(res, resout)
assert_almost_equal(res, tgt)
# 0-d output:
resout = np.zeros(())
tgt = np.percentile(mat, 42, axis=None)
res = np.nanpercentile(nan_mat, 42, axis=None, out=resout)
assert_almost_equal(res, resout)
assert_almost_equal(res, tgt)
res = np.nanpercentile(nan_mat, 42, axis=(0, 1), out=resout)
assert_almost_equal(res, resout)
assert_almost_equal(res, tgt)
def test_result_values(self):
tgt = [np.percentile(d, 28) for d in _rdat]
res = np.nanpercentile(_ndat, 28, axis=1)
assert_almost_equal(res, tgt)
# Transpose the array to fit the output convention of numpy.percentile
tgt = np.transpose([np.percentile(d, (28, 98)) for d in _rdat])
res = np.nanpercentile(_ndat, (28, 98), axis=1)
assert_almost_equal(res, tgt)
def test_allnans(self):
mat = np.array([np.nan]*9).reshape(3, 3)
for axis in [None, 0, 1]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(np.nanpercentile(mat, 60, axis=axis)).all())
if axis is None:
assert_(len(w) == 1)
else:
assert_(len(w) == 3)
assert_(issubclass(w[0].category, RuntimeWarning))
# Check scalar
assert_(np.isnan(np.nanpercentile(np.nan, 60)))
if axis is None:
assert_(len(w) == 2)
else:
assert_(len(w) == 4)
assert_(issubclass(w[0].category, RuntimeWarning))
def test_empty(self):
mat = np.zeros((0, 3))
for axis in [0, None]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(np.nanpercentile(mat, 40, axis=axis)).all())
assert_(len(w) == 1)
assert_(issubclass(w[0].category, RuntimeWarning))
for axis in [1]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_equal(np.nanpercentile(mat, 40, axis=axis), np.zeros([]))
assert_(len(w) == 0)
def test_scalar(self):
assert_(np.nanpercentile(0., 100) == 0.)
def test_extended_axis_invalid(self):
d = np.ones((3, 5, 7, 11))
assert_raises(IndexError, np.nanpercentile, d, q=5, axis=-5)
assert_raises(IndexError, np.nanpercentile, d, q=5, axis=(0, -5))
assert_raises(IndexError, np.nanpercentile, d, q=5, axis=4)
assert_raises(IndexError, np.nanpercentile, d, q=5, axis=(0, 4))
assert_raises(ValueError, np.nanpercentile, d, q=5, axis=(1, 1))
def test_multiple_percentiles(self):
perc = [50, 100]
mat = np.ones((4, 3))
nan_mat = np.nan * mat
# For checking consistency in higher dimensional case
large_mat = np.ones((3, 4, 5))
large_mat[:, 0:2:4, :] = 0
large_mat[:, :, 3:] *= 2
for axis in [None, 0, 1]:
for keepdim in [False, True]:
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "All-NaN slice encountered")
val = np.percentile(mat, perc, axis=axis, keepdims=keepdim)
nan_val = np.nanpercentile(nan_mat, perc, axis=axis,
keepdims=keepdim)
assert_equal(nan_val.shape, val.shape)
val = np.percentile(large_mat, perc, axis=axis,
keepdims=keepdim)
nan_val = np.nanpercentile(large_mat, perc, axis=axis,
keepdims=keepdim)
assert_equal(nan_val, val)
megamat = np.ones((3, 4, 5, 6))
assert_equal(np.nanpercentile(megamat, perc, axis=(1, 2)).shape, (2, 3, 6))
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause | -3,814,907,824,342,896,600 | 37.549645 | 98 | 0.514641 | false |
nolanliou/tensorflow | tensorflow/contrib/nn/__init__.py | 56 | 1688 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module for variants of ops in tf.nn.
@@alpha_dropout
@@conv1d_transpose
@@deprecated_flipped_softmax_cross_entropy_with_logits
@@deprecated_flipped_sparse_softmax_cross_entropy_with_logits
@@deprecated_flipped_sigmoid_cross_entropy_with_logits
@@nth_element
@@rank_sampled_softmax_loss
@@sampled_sparse_softmax_loss
@@scaled_softplus
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import
from tensorflow.contrib.nn.python.ops.alpha_dropout import *
from tensorflow.contrib.nn.python.ops.cross_entropy import *
from tensorflow.contrib.nn.python.ops.sampling_ops import *
from tensorflow.contrib.nn.python.ops.scaled_softplus import *
from tensorflow.python.ops.nn_ops import conv1d_transpose
from tensorflow.python.ops.nn_ops import nth_element
# pylint: enable=unused-import,wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__)
| apache-2.0 | 8,201,347,967,109,907,000 | 39.190476 | 80 | 0.75 | false |
zingale/hydro_examples | advection/weno_coefficients.py | 3 | 18621 | import numpy
# Coefficients of order r=2
# On smooth solutions this should converge with order r=3
C_2 = numpy.array([ 1, 2 ]) / 3
a_2 = numpy.array([
[ 3, -1],
[ 1, 1],
]) / 2
sigma_2 = numpy.array([
[
[ 1, 0],
[-2, 1]
],
[
[ 1, 0],
[-2, 1]
]
])
# Coefficients of order r=3
# On smooth solutions this should converge with order r=5
C_3 = numpy.array([ 1, 6, 3 ]) / 10
a_3 = numpy.array([
[ 11, -7, 2],
[ 2, 5, -1],
[ -1, 5, 2],
]) / 6
sigma_3 = numpy.array([
[
[ 10, 0, 0],
[-31, 25, 0],
[ 11, -19, 4]
],
[
[ 4, 0, 0],
[-13, 13, 0],
[ 5, -13, 4]
],
[
[ 4, 0, 0],
[-19, 25, 0],
[ 11, -31, 10]
]
]) / 3
# Coefficients of order r=4
# On smooth solutions this should converge with order r=7
C_4 = numpy.array([ 1, 12, 18, 4 ]) / 35
a_4 = numpy.array([
[ 25, -23, 13, -3],
[ 3, 13, -5, 1],
[ -1, 7, 7, -1],
[ 1, -5, 13, 3],
]) / 12
sigma_4 = numpy.array([
[
[ 2107, 0, 0, 0],
[ -9402, 11003, 0, 0],
[ 7042, -17246, 7043, 0],
[ -1854, 4642, -3882, 547]
],
[
[ 547, 0, 0, 0],
[ -2522, 3443, 0, 0],
[ 1922, -5966, 2843, 0],
[ -494, 1602, -1642, 267]
],
[
[ 267, 0, 0, 0],
[ -1642, 2843, 0, 0],
[ 1602, -5966, 3443, 0],
[ -494, 1922, -2522, 547]
],
[
[ 547, 0, 0, 0],
[ -3882, 7043, 0, 0],
[ 4642, -17246, 11003, 0],
[ -1854, 7042, -9402, 2107]
]
]) / 240
# Coefficients of order r=5
# On smooth solutions this should converge with order r=9
C_5 = numpy.array([ 1, 20, 60, 40, 5 ]) / 126
a_5 = numpy.array([
[ 137, -163, 137, -63, 12],
[ 12, 77, -43, 17, -3],
[ -3, 27, 47, -13, 2],
[ 2, -13, 47, 27, -3],
[ -3, 17, -43, 77, 12],
]) / 60
sigma_5 = numpy.array([
[
[ 107918, 0, 0, 0, 0],
[ -649501, 1020563, 0, 0, 0],
[ 758823, -2462076, 1521393, 0, 0],
[ -411487, 1358458, -1704396, 482963, 0],
[ 86329, -288007, 364863, -208501, 22658]
],
[
[ 22658, 0, 0, 0, 0],
[ -140251, 242723, 0, 0, 0],
[ 165153, -611976, 406293, 0, 0],
[ -88297, 337018, -464976, 138563, 0],
[ 18079, -70237, 99213, -60871, 6908]
],
[
[ 6908, 0, 0, 0, 0],
[ -51001, 104963, 0, 0, 0],
[ 67923, -299076, 231153, 0, 0],
[ -38947, 179098, -299076, 104963, 0],
[ 8209, -38947, 67923, -51001, 6908]
],
[
[ 6908, 0, 0, 0, 0],
[ -60871, 138563, 0, 0, 0],
[ 99213, -464976, 406293, 0, 0],
[ -70237, 337018, -611976, 242723, 0],
[ 18079, -88297, 165153, -140251, 22658]
],
[
[ 22658, 0, 0, 0, 0],
[ -208501, 482963, 0, 0, 0],
[ 364863, -1704396, 1521393, 0, 0],
[ -288007, 1358458, -2462076, 1020563, 0],
[ 86329, -411487, 758823, -649501, 107918]
]
]) / 5040
# Coefficients of order r=6
# On smooth solutions this should converge with order r=11
C_6 = numpy.array([ 1, 30, 150, 200, 75, 6 ]) / 462
a_6 = numpy.array([
[ 147, -213, 237, -163, 62, -10],
[ 10, 87, -63, 37, -13, 2],
[ -2, 22, 57, -23, 7, -1],
[ 1, -8, 37, 37, -8, 1],
[ -1, 7, -23, 57, 22, -2],
[ 2, -13, 37, -63, 87, 10],
]) / 60
sigma_6 = numpy.array([
[
[ 6150211, 0, 0, 0, 0, 0],
[ -47460464, 94851237, 0, 0, 0, 0],
[ 76206736, -311771244, 260445372, 0, 0, 0],
[ -63394124, 262901672, -444003904, 190757572, 0, 0],
[ 27060170, -113206788, 192596472, -166461044, 36480687, 0],
[ -4712740, 19834350, -33918804, 29442256, -12950184, 1152561]
],
[
[ 1152561, 0, 0, 0, 0, 0],
[ -9117992, 19365967, 0, 0, 0, 0],
[ 14742480, -65224244, 56662212, 0, 0, 0],
[ -12183636, 55053752, -97838784, 43093692, 0, 0],
[ 5134574, -23510468, 42405032, -37913324, 8449957, 0],
[ -880548, 4067018, -7408908, 6694608, -3015728, 271779]
],
[
[ 271779, 0, 0, 0, 0, 0],
[ -2380800, 5653317, 0, 0, 0, 0],
[ 4086352, -20427884, 19510972, 0, 0, 0],
[ -3462252, 17905032, -35817664, 17195652, 0, 0],
[ 1458762, -7727988, 15929912, -15880404, 3824847, 0],
[ -245620, 1325006, -2792660, 2863984, -1429976, 139633]
],
[
[ 139633, 0, 0, 0, 0, 0],
[ -1429976, 3824847, 0, 0, 0, 0],
[ 2863984, -15880404, 17195652, 0, 0, 0],
[ -2792660, 15929912, -35817664, 19510972, 0, 0],
[ 1325006, -7727988, 17905032, -20427884, 5653317, 0],
[ -245620, 1458762, -3462252, 4086352, -2380800, 271779]
],
[
[ 271779, 0, 0, 0, 0, 0],
[ -3015728, 8449957, 0, 0, 0, 0],
[ 6694608, -37913324, 43093692, 0, 0, 0],
[ -7408908, 42405032, -97838784, 56662212, 0, 0],
[ 4067018, -23510468, 55053752, -65224244, 19365967, 0],
[ -880548, 5134574, -12183636, 14742480, -9117992, 1152561]
],
[
[ 1152561, 0, 0, 0, 0, 0],
[ -12950184, 36480687, 0, 0, 0, 0],
[ 29442256, -166461044, 190757572, 0, 0, 0],
[ -33918804, 192596472, -444003904, 260445372, 0, 0],
[ 19834350, -113206788, 262901672, -311771244, 94851237, 0],
[ -4712740, 27060170, -63394124, 76206736, -47460464, 6150211]
]
]) / 120960
# Coefficients of order r=7
# On smooth solutions this should converge with order r=13
C_7 = numpy.array([ 1, 42, 315, 700, 525, 126, 7 ]) / 1716
a_7 = numpy.array([
[ 1089, -1851, 2559, -2341, 1334, -430, 60],
[ 60, 669, -591, 459, -241, 74, -10],
[ -10, 130, 459, -241, 109, -31, 4],
[ 4, -38, 214, 319, -101, 25, -3],
[ -3, 25, -101, 319, 214, -38, 4],
[ 4, -31, 109, -241, 459, 130, -10],
[ -10, 74, -241, 459, -591, 669, 60],
]) / 420
sigma_7 = numpy.array([
[
[ 7177657304, 0, 0, 0, 0, 0, 0],
[ -68289277071, 166930543737, 0, 0, 0, 0, 0],
[ 140425750893, -698497961463, 739478564460, 0, 0, 0, 0],
[ -158581758572, 797280592452, -1701893556420, 985137198380, 0, 0, 0],
[ 102951716988, -521329653333, 1119254208255, -1301580166020, 431418789360, 0, 0],
[ -36253275645, 184521097818, -397822832973, 464200620612, -308564463663, 55294430841, 0],
[ 5391528799, -27545885877, 59577262788, -69700128812, 46430779053, -16670007831, 1258225940]
],
[
[ 1258225940, 0, 0, 0, 0, 0, 0],
[ -12223634361, 31090026771, 0, 0, 0, 0, 0],
[ 25299603603, -132164397513, 143344579860, 0, 0, 0, 0],
[ -28498553012, 151212114012, -332861569020, 195601143380, 0, 0, 0],
[ 18375686988, -98508059523, 219064013505, -259838403420, 86959466460, 0, 0],
[ -6414710427, 34632585198, -77574968883, 92646554652, -62392325913, 11250068787, 0],
[ 945155329, -5128661355, 11548158588, -13862429972, 9380155443, -3397272201, 257447084]
],
[
[ 257447084, 0, 0, 0, 0, 0, 0],
[ -2659103847, 7257045753, 0, 0, 0, 0, 0],
[ 5684116173, -32164185663, 36922302360, 0, 0, 0, 0],
[ -6473137292, 37531128132, -88597133220, 54531707180, 0, 0, 0],
[ 4158865908, -24530177853, 59045150655, -74236325220, 25788772260, 0, 0],
[ -1432622085, 8555779674, -20891234853, 26694456132, -18869146983, 3510366201, 0],
[ 206986975, -1247531949, 3078682188, -3982402892, 2854088973, -1077964287, 84070496]
],
[
[ 84070496, 0, 0, 0, 0, 0, 0],
[ -969999969, 2927992563, 0, 0, 0, 0, 0],
[ 2283428883, -14296379553, 18133963560, 0, 0, 0, 0],
[ -2806252532, 18083339772, -47431870620, 32154783380, 0, 0, 0],
[ 1902531828, -12546315963, 33820678305, -47431870620, 18133963560, 0, 0],
[ -676871859, 4550242446, -12546315963, 18083339772, -14296379553, 2927992563, 0],
[ 99022657, -676871859, 1902531828, -2806252532, 2283428883, -969999969, 84070496]
],
[
[ 84070496, 0, 0, 0, 0, 0, 0],
[ -1077964287, 3510366201, 0, 0, 0, 0, 0],
[ 2854088973, -18869146983, 25788772260, 0, 0, 0, 0],
[ -3982402892, 26694456132, -74236325220, 54531707180, 0, 0, 0],
[ 3078682188, -20891234853, 59045150655, -88597133220, 36922302360, 0, 0],
[ -1247531949, 8555779674, -24530177853, 37531128132, -32164185663, 7257045753, 0],
[ 206986975, -1432622085, 4158865908, -6473137292, 5684116173, -2659103847, 257447084]
],
[
[ 257447084, 0, 0, 0, 0, 0, 0],
[ -3397272201, 11250068787, 0, 0, 0, 0, 0],
[ 9380155443, -62392325913, 86959466460, 0, 0, 0, 0],
[ -13862429972, 92646554652, -259838403420, 195601143380, 0, 0, 0],
[ 11548158588, -77574968883, 219064013505, -332861569020, 143344579860, 0, 0],
[ -5128661355, 34632585198, -98508059523, 151212114012, -132164397513, 31090026771, 0],
[ 945155329, -6414710427, 18375686988, -28498553012, 25299603603, -12223634361, 1258225940]
],
[
[ 1258225940, 0, 0, 0, 0, 0, 0],
[ -16670007831, 55294430841, 0, 0, 0, 0, 0],
[ 46430779053, -308564463663, 431418789360, 0, 0, 0, 0],
[ -69700128812, 464200620612, -1301580166020, 985137198380, 0, 0, 0],
[ 59577262788, -397822832973, 1119254208255, -1701893556420, 739478564460, 0, 0],
[ -27545885877, 184521097818, -521329653333, 797280592452, -698497961463, 166930543737, 0],
[ 5391528799, -36253275645, 102951716988, -158581758572, 140425750893, -68289277071, 7177657304]
]
]) / 59875200
C_all = {
2 : C_2,
3 : C_3,
4 : C_4,
5 : C_5,
6 : C_6,
7 : C_7
}
a_all = {
2 : a_2,
3 : a_3,
4 : a_4,
5 : a_5,
6 : a_6,
7 : a_7
}
sigma_all = {
2 : sigma_2,
3 : sigma_3,
4 : sigma_4,
5 : sigma_5,
6 : sigma_6,
7 : sigma_7
}
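# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original coefficient tables).
# The dictionaries above are meant to be consumed by a WENO reconstruction
# routine: sigma_all holds the quadratic-form smoothness indicators, C_all
# the optimal linear weights and a_all the per-stencil reconstruction
# coefficients.  The function below shows how they typically fit together;
# the index convention q[i + k - l] and the epsilon regularisation are
# assumptions for illustration -- see the accompanying advection solver for
# the exact form used in this repository.
def weno_sketch(order, q, epsilon=1.e-16):
    """Reconstruct interface states from the 1D array of cell averages q."""
    C = C_all[order]
    a = a_all[order]
    sigma = sigma_all[order]
    qL = numpy.zeros_like(q)
    for i in range(order, len(q) - order):
        beta = numpy.zeros(order)
        alpha = numpy.zeros(order)
        q_stencils = numpy.zeros(order)
        for k in range(order):
            # smoothness indicator of stencil k (quadratic form in q)
            for l in range(order):
                for m in range(l + 1):
                    beta[k] += sigma[k, l, m] * q[i + k - l] * q[i + k - m]
            # nonlinear weight and candidate reconstruction on stencil k
            alpha[k] = C[k] / (epsilon + beta[k])**2
            for l in range(order):
                q_stencils[k] += a[k, l] * q[i + k - l]
        w = alpha / alpha.sum()
        qL[i] = numpy.dot(w, q_stencils)
    return qL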
| bsd-3-clause | -5,206,133,110,468,352,000 | 61.486577 | 139 | 0.275925 | false |
pombredanne/pants | tests/python/pants_test/backend/codegen/antlr/java/test_antlr_java_gen_integration.py | 8 | 1286 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants_test.pants_run_integration_test import PantsRunIntegrationTest
class AntlrJavaGenIntegrationTest(PantsRunIntegrationTest):
def test_run_antlr3(self):
stdout_data = self.bundle_and_run('examples/src/java/org/pantsbuild/example/antlr3',
'examples.src.java.org.pantsbuild.example.antlr3.antlr3',
bundle_jar_name='antlr3',
args=['7*8'])
self.assertEquals('56.0', stdout_data.rstrip(), msg="got output:{0}".format(stdout_data))
def test_run_antlr4(self):
stdout_data = self.bundle_and_run('examples/src/java/org/pantsbuild/example/antlr4',
'examples.src.java.org.pantsbuild.example.antlr4.antlr4',
bundle_jar_name='antlr4',
args=['7*6'])
self.assertEquals('42.0', stdout_data.rstrip(), msg="got output:{0}".format(stdout_data))
| apache-2.0 | 227,209,065,965,367,940 | 50.44 | 95 | 0.605754 | false |
KeyWeeUsr/kivy | examples/frameworks/twisted/echo_client_app.py | 13 | 2352 | # install_twisted_reactor must be called before importing the reactor
from __future__ import unicode_literals
from kivy.support import install_twisted_reactor
install_twisted_reactor()
# A Simple Client that sends messages to the Echo Server
from twisted.internet import reactor, protocol
class EchoClient(protocol.Protocol):
def connectionMade(self):
self.factory.app.on_connection(self.transport)
def dataReceived(self, data):
self.factory.app.print_message(data.decode('utf-8'))
class EchoClientFactory(protocol.ClientFactory):
protocol = EchoClient
def __init__(self, app):
self.app = app
def startedConnecting(self, connector):
self.app.print_message('Started to connect.')
def clientConnectionLost(self, connector, reason):
self.app.print_message('Lost connection.')
def clientConnectionFailed(self, connector, reason):
self.app.print_message('Connection failed.')
from kivy.app import App
from kivy.uix.label import Label
from kivy.uix.textinput import TextInput
from kivy.uix.boxlayout import BoxLayout
# A simple kivy App, with a textbox to enter messages, and
# a large label to display all the messages received from
# the server
class TwistedClientApp(App):
connection = None
textbox = None
label = None
def build(self):
root = self.setup_gui()
self.connect_to_server()
return root
def setup_gui(self):
self.textbox = TextInput(size_hint_y=.1, multiline=False)
self.textbox.bind(on_text_validate=self.send_message)
self.label = Label(text='connecting...\n')
layout = BoxLayout(orientation='vertical')
layout.add_widget(self.label)
layout.add_widget(self.textbox)
return layout
def connect_to_server(self):
reactor.connectTCP('localhost', 8000, EchoClientFactory(self))
def on_connection(self, connection):
self.print_message("Connected successfully!")
self.connection = connection
def send_message(self, *args):
msg = self.textbox.text
if msg and self.connection:
self.connection.write(msg.encode('utf-8'))
self.textbox.text = ""
def print_message(self, msg):
self.label.text += "{}\n".format(msg)
if __name__ == '__main__':
TwistedClientApp().run()
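# ---------------------------------------------------------------------------
# Companion sketch (not part of the original example): the client above
# expects an echo server listening on localhost:8000.  A minimal,
# hypothetical server using plain Twisted could look like this; run it in a
# separate Python process before starting the Kivy app:
#
#     from twisted.internet import reactor, protocol
#
#     class Echo(protocol.Protocol):
#         def dataReceived(self, data):
#             # send every received message straight back to the client
#             self.transport.write(data)
#
#     factory = protocol.ServerFactory()
#     factory.protocol = Echo
#     reactor.listenTCP(8000, factory)
#     reactor.run()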
| mit | 1,322,365,627,565,985,000 | 27.682927 | 70 | 0.680697 | false |
utessel/edimax | target/linux/x86/image/mkimg_bifferboard.py | 561 | 1265 | #!/usr/bin/env python
"""
Create firmware for 4/8MB Bifferboards, suitable for uploading using
either bb_upload8.py or bb_eth_upload8.py
"""
import struct, sys
# Increase the kmax value if the script gives errors about the kernel being
# too large. You need to set the Biffboot kmax value to the same value you
# use here.
kmax = 0x10
# No need to change this for 4MB devices, it's only used to tell you if
# the firmware is too large!
flash_size = 0x800000
# This is always the same, for 1MB, 4MB and 8MB devices
config_extent = 0x6000
kernel_extent = kmax * 0x10000
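# Worked example of the limits implied by the values above (for reference):
#   kernel_extent             = 0x10 * 0x10000              = 0x100000 (1 MiB)
#   max kernel size           = kernel_extent - config_extent
#                             = 0x100000 - 0x6000            = 0xFA000 bytes
#   max kernel+rootfs payload = flash_size - 0x10000 - config_extent
#                             = 0x800000 - 0x10000 - 0x6000  = 0x7EA000 bytes
# These are exactly the limits the size checks below enforce.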
if __name__ == "__main__":
if len(sys.argv) != 4:
print "usage: mkimg_bifferboard.py <kernel> <rootfs> <output file>"
sys.exit(-1)
bzimage = sys.argv[1]
rootfs = sys.argv[2]
target = sys.argv[3]
# Kernel first
fw = file(bzimage).read()
if len(fw) > (kernel_extent - config_extent):
raise IOError("Kernel too large")
# Pad up to end of kernel partition
while len(fw) < (kernel_extent - config_extent):
fw += "\xff"
fw += file(rootfs).read()
# Check length of total
if len(fw) > (flash_size - 0x10000 - config_extent):
raise IOError("Rootfs too large")
file(target,"wb").write(fw)
print "Firmware written to '%s'" % target
| gpl-2.0 | -3,588,933,269,943,066,600 | 24.3 | 76 | 0.667984 | false |
thaumos/ansible | lib/ansible/modules/storage/netapp/na_ontap_vscan_scanner_pool.py | 21 | 8931 | #!/usr/bin/python
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: na_ontap_vscan_scanner_pool
short_description: NetApp ONTAP Vscan Scanner Pools Configuration.
extends_documentation_fragment:
- netapp.na_ontap
version_added: '2.8'
author: NetApp Ansible Team (@carchi8py) <[email protected]>
description:
- Configure a Vscan Scanner Pool
options:
state:
description:
- Whether a Vscan Scanner pool is present or not
choices: ['present', 'absent']
default: present
vserver:
description:
- the name of the data vserver to use.
required: true
hostnames:
description:
- List of hostnames of Vscan servers which are allowed to connect to Data ONTAP
privileged_users:
description:
- List of privileged usernames. Username must be in the form "domain-name\\user-name"
scanner_pool:
description:
- the name of the virus scanner pool
required: true
scanner_policy:
description:
- The name of the Virus scanner Policy
choices: ['primary', 'secondary', 'idle']
'''
EXAMPLES = """
- name: Create and enable Scanner pool
na_ontap_vscan_scanner_pool:
state: present
username: '{{ netapp_username }}'
password: '{{ netapp_password }}'
hostname: '{{ netapp_hostname }}'
vserver: carchi-vsim2
hostnames: ['name', 'name2']
privileged_users: ['sim.rtp.openeng.netapp.com\\admin', 'sim.rtp.openeng.netapp.com\\carchi']
scanner_pool: Scanner1
scanner_policy: primary
- name: Delete a scanner pool
na_ontap_vscan_scanner_pool:
state: absent
username: '{{ netapp_username }}'
password: '{{ netapp_password }}'
hostname: '{{ netapp_hostname }}'
vserver: carchi-vsim2
scanner_pool: Scanner1
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppOntapVscanScannerPool(object):
def __init__(self):
self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
self.argument_spec.update(dict(
state=dict(choices=['present', 'absent'], default='present'),
vserver=dict(required=True, type='str'),
            hostnames=dict(required=False, type='list'),
privileged_users=dict(required=False, type='list'),
scanner_pool=dict(required=True, type='str'),
scanner_policy=dict(required=False, choices=['primary', 'secondary', 'idle'])
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
supports_check_mode=True
)
parameters = self.module.params
self.hostnames = parameters['hostnames']
self.vserver = parameters['vserver']
self.privileged_users = parameters['privileged_users']
self.scanner_pool = parameters['scanner_pool']
self.state = parameters['state']
self.scanner_policy = parameters['scanner_policy']
if HAS_NETAPP_LIB is False:
self.module.fail_json(msg="the python NetApp-Lib module is required")
else:
self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.vserver)
def create_scanner_pool(self):
"""
Create a Vscan Scanner Pool
:return: nothing
"""
scanner_pool_obj = netapp_utils.zapi.NaElement('vscan-scanner-pool-create')
if self.hostnames:
string_obj = netapp_utils.zapi.NaElement('hostnames')
scanner_pool_obj.add_child_elem(string_obj)
for hostname in self.hostnames:
string_obj.add_new_child('string', hostname)
if self.privileged_users:
users_obj = netapp_utils.zapi.NaElement('privileged-users')
scanner_pool_obj.add_child_elem(users_obj)
for user in self.privileged_users:
users_obj.add_new_child('privileged-user', user)
scanner_pool_obj.add_new_child('scanner-pool', self.scanner_pool)
try:
self.server.invoke_successfully(scanner_pool_obj, True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error creating Vscan Scanner Pool %s: %s' %
(self.scanner_pool, to_native(error)),
exception=traceback.format_exc())
def apply_policy(self):
"""
Apply a Scanner policy to a Scanner pool
:return: nothing
"""
apply_policy_obj = netapp_utils.zapi.NaElement('vscan-scanner-pool-apply-policy')
apply_policy_obj.add_new_child('scanner-policy', self.scanner_policy)
apply_policy_obj.add_new_child('scanner-pool', self.scanner_pool)
try:
self.server.invoke_successfully(apply_policy_obj, True)
except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error applying policy %s to pool %s: %s' %
(self.scanner_policy, self.scanner_pool, to_native(error)),
exception=traceback.format_exc())
def get_scanner_pool(self):
"""
Check to see if a scanner pool exist or not
:return: True if it exist, False if it does not
"""
scanner_pool_obj = netapp_utils.zapi.NaElement('vscan-scanner-pool-get-iter')
scanner_pool_info = netapp_utils.zapi.NaElement('scan-scanner-pool-info')
scanner_pool_info.add_new_child('scanner-pool', self.scanner_pool)
query = netapp_utils.zapi.NaElement('query')
query.add_child_elem(scanner_pool_info)
scanner_pool_obj.add_child_elem(query)
try:
result = self.server.invoke_successfully(scanner_pool_obj, True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error searching for Vscan Scanner Pool %s: %s' %
(self.scanner_pool, to_native(error)),
exception=traceback.format_exc())
if result.get_child_by_name('num-records'):
if result.get_child_by_name('attributes-list').get_child_by_name('vscan-scanner-pool-info').get_child_content(
'scanner-pool') == self.scanner_pool:
return result.get_child_by_name('attributes-list').get_child_by_name('vscan-scanner-pool-info')
return False
return False
def delete_scanner_pool(self):
"""
Delete a Scanner pool
:return: nothing
"""
scanner_pool_obj = netapp_utils.zapi.NaElement('vscan-scanner-pool-delete')
scanner_pool_obj.add_new_child('scanner-pool', self.scanner_pool)
try:
self.server.invoke_successfully(scanner_pool_obj, True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error deleting Vscan Scanner Pool %s: %s' %
(self.scanner_pool, to_native(error)),
exception=traceback.format_exc())
def asup_log_for_cserver(self, event_name):
"""
Fetch admin vserver for the given cluster
        Create an Autosupport log event with the given module name
:param event_name: Name of the event log
:return: None
"""
results = netapp_utils.get_cserver(self.server)
cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
netapp_utils.ems_log_event(event_name, cserver)
def apply(self):
self.asup_log_for_cserver("na_ontap_vscan_scanner_pool")
changed = False
scanner_pool_obj = self.get_scanner_pool()
if self.state == 'present':
if not scanner_pool_obj:
self.create_scanner_pool()
if self.scanner_policy:
self.apply_policy()
changed = True
# apply Scanner policy
if scanner_pool_obj:
if scanner_pool_obj.get_child_content('scanner-policy') != self.scanner_policy:
self.apply_policy()
changed = True
if self.state == 'absent':
if scanner_pool_obj:
self.delete_scanner_pool()
changed = True
self.module.exit_json(changed=changed)
def main():
"""
Execute action from playbook
"""
command = NetAppOntapVscanScannerPool()
command.apply()
if __name__ == '__main__':
main()
| gpl-3.0 | 6,293,197,760,607,675,000 | 36.52521 | 122 | 0.613929 | false |
awkspace/ansible | lib/ansible/modules/network/fortios/fortios_firewall_multicast_address6.py | 24 | 10083 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# The library uses Python logging; you can capture its output if the
# following is set in your Ansible config.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_firewall_multicast_address6
short_description: Configure IPv6 multicast address in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS by
allowing the user to configure firewall feature and multicast_address6 category.
      Examples include all options and need to be adjusted to datasources before usage.
Tested with FOS v6.0.2
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate ip address.
required: true
username:
description:
- FortiOS or FortiGate username.
required: true
password:
description:
- FortiOS or FortiGate password.
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS
protocol
type: bool
default: true
firewall_multicast_address6:
description:
- Configure IPv6 multicast address.
default: null
suboptions:
state:
description:
- Indicates whether to create or remove the object
choices:
- present
- absent
color:
description:
- Color of icon on the GUI.
comment:
description:
- Comment.
ip6:
description:
- "IPv6 address prefix (format: xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx/xxx)."
name:
description:
- IPv6 multicast address name.
required: true
tagging:
description:
- Config object tagging.
suboptions:
category:
description:
- Tag category. Source system.object-tagging.category.
name:
description:
- Tagging entry name.
required: true
tags:
description:
- Tags.
suboptions:
name:
description:
- Tag name. Source system.object-tagging.tags.name.
required: true
visibility:
description:
- Enable/disable visibility of the IPv6 multicast address on the GUI.
choices:
- enable
- disable
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
tasks:
- name: Configure IPv6 multicast address.
fortios_firewall_multicast_address6:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
firewall_multicast_address6:
state: "present"
color: "3"
comment: "Comment."
ip6: "<your_own_value>"
name: "default_name_6"
tagging:
-
category: "<your_own_value> (source system.object-tagging.category)"
name: "default_name_9"
tags:
-
name: "default_name_11 (source system.object-tagging.tags.name)"
visibility: "enable"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
fos = None
def login(data):
host = data['host']
username = data['username']
password = data['password']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password)
def filter_firewall_multicast_address6_data(json):
option_list = ['color', 'comment', 'ip6',
'name', 'tagging', 'visibility']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def firewall_multicast_address6(data, fos):
vdom = data['vdom']
firewall_multicast_address6_data = data['firewall_multicast_address6']
filtered_data = filter_firewall_multicast_address6_data(firewall_multicast_address6_data)
if firewall_multicast_address6_data['state'] == "present":
return fos.set('firewall',
'multicast-address6',
data=filtered_data,
vdom=vdom)
elif firewall_multicast_address6_data['state'] == "absent":
return fos.delete('firewall',
'multicast-address6',
mkey=filtered_data['name'],
vdom=vdom)
def fortios_firewall(data, fos):
login(data)
methodlist = ['firewall_multicast_address6']
for method in methodlist:
if data[method]:
resp = eval(method)(data, fos)
break
fos.logout()
return not resp['status'] == "success", resp['status'] == "success", resp
def main():
fields = {
"host": {"required": True, "type": "str"},
"username": {"required": True, "type": "str"},
"password": {"required": False, "type": "str", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"firewall_multicast_address6": {
"required": False, "type": "dict",
"options": {
"state": {"required": True, "type": "str",
"choices": ["present", "absent"]},
"color": {"required": False, "type": "int"},
"comment": {"required": False, "type": "str"},
"ip6": {"required": False, "type": "str"},
"name": {"required": True, "type": "str"},
"tagging": {"required": False, "type": "list",
"options": {
"category": {"required": False, "type": "str"},
"name": {"required": True, "type": "str"},
"tags": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}}
}},
"visibility": {"required": False, "type": "str",
"choices": ["enable", "disable"]}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
global fos
fos = FortiOSAPI()
is_error, has_changed, result = fortios_firewall(module.params, fos)
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| gpl-3.0 | -2,655,453,827,568,233,000 | 30.216718 | 98 | 0.553308 | false |
shubhdev/openedx | common/djangoapps/student/migrations/0007_convert_to_utf8.py | 188 | 9663 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
if db.backend_name == 'mysql':
db.execute_many("""
ALTER DATABASE CHARACTER SET utf8 COLLATE utf8_general_ci;
ALTER TABLE student_pendingemailchange CONVERT TO CHARACTER SET utf8 COLLATE utf8_general_ci;
ALTER TABLE student_pendingnamechange CONVERT TO CHARACTER SET utf8 COLLATE utf8_general_ci;
ALTER TABLE student_usertestgroup CONVERT TO CHARACTER SET utf8 COLLATE utf8_general_ci;
ALTER TABLE student_usertestgroup_users CONVERT TO CHARACTER SET utf8 COLLATE utf8_general_ci;
""")
def backwards(self, orm):
# Although this migration can't be undone, it is okay for it to be run backwards because it doesn't add/remove any fields
pass
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'student.pendingemailchange': {
'Meta': {'object_name': 'PendingEmailChange'},
'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_email': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.pendingnamechange': {
'Meta': {'object_name': 'PendingNameChange'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'rationale': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.registration': {
'Meta': {'object_name': 'Registration', 'db_table': "'auth_registration'"},
'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.userprofile': {
'Meta': {'object_name': 'UserProfile', 'db_table': "'auth_userprofile'"},
'courseware': ('django.db.models.fields.CharField', [], {'default': "'course.xml'", 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'meta': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"})
},
'student.usertestgroup': {
'Meta': {'object_name': 'UserTestGroup'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'db_index': 'True', 'symmetrical': 'False'})
}
}
complete_apps = ['student']
| agpl-3.0 | -6,087,782,319,955,499,000 | 76.304 | 182 | 0.561834 | false |
luisgg/iteexe | twisted/internet/gtk2reactor.py | 14 | 9981 | # Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
This module provides support for Twisted to interact with the glib/gtk2 mainloop.
In order to use this support, simply do the following::
| from twisted.internet import gtk2reactor
| gtk2reactor.install()
Then use twisted.internet APIs as usual. The other methods here are not
intended to be called directly.
When installing the reactor, you can choose whether to use the glib
event loop or the GTK+ event loop which is based on it but adds GUI
integration.
API Stability: stable
Maintainer: U{Itamar Shtull-Trauring<mailto:[email protected]>}
"""
__all__ = ['install']
# System Imports
import sys
from zope.interface import implements
try:
if not hasattr(sys, 'frozen'):
# Don't want to check this for py2exe
import pygtk
pygtk.require('2.0')
except (ImportError, AttributeError):
pass # maybe we're using pygtk before this hack existed.
import gobject
if hasattr(gobject, "threads_init"):
# recent versions of python-gtk expose this. python-gtk=2.4.1
# (wrapping glib-2.4.7) does. python-gtk=2.0.0 (wrapping
# glib-2.2.3) does not.
gobject.threads_init()
# Twisted Imports
from twisted.python import log, threadable, runtime, failure, components
from twisted.internet.interfaces import IReactorFDSet
# Sibling Imports
from twisted.internet import main, posixbase, error, selectreactor
reads = {}
writes = {}
hasReader = reads.has_key
hasWriter = writes.has_key
# the next callback
_simtag = None
POLL_DISCONNECTED = gobject.IO_HUP | gobject.IO_ERR | gobject.IO_NVAL
# glib's iochannel sources won't tell us about any events that we haven't
# asked for, even if those events aren't sensible inputs to the poll()
# call.
INFLAGS = gobject.IO_IN | POLL_DISCONNECTED
OUTFLAGS = gobject.IO_OUT | POLL_DISCONNECTED
def _our_mainquit():
# XXX: gtk.main_quit() (which is used for crash()) raises an exception if
# gtk.main_level() == 0; however, all the tests freeze if we use this
# function to stop the reactor. what gives? (I believe this may have been
# a stupid mistake where I forgot to import gtk here... I will remove this
# comment if the tests pass)
import gtk
if gtk.main_level():
gtk.main_quit()
class Gtk2Reactor(posixbase.PosixReactorBase):
"""GTK+-2 event loop reactor.
"""
implements(IReactorFDSet)
def __init__(self, useGtk=True):
self.context = gobject.main_context_default()
self.loop = gobject.MainLoop()
posixbase.PosixReactorBase.__init__(self)
# pre 2.3.91 the glib iteration and mainloop functions didn't release
# global interpreter lock, thus breaking thread and signal support.
if (hasattr(gobject, "pygtk_version") and gobject.pygtk_version >= (2, 3, 91)
and not useGtk):
self.__pending = self.context.pending
self.__iteration = self.context.iteration
self.__crash = self.loop.quit
self.__run = self.loop.run
else:
import gtk
self.__pending = gtk.events_pending
self.__iteration = gtk.main_iteration
self.__crash = _our_mainquit
self.__run = gtk.main
# The input_add function in pygtk1 checks for objects with a
# 'fileno' method and, if present, uses the result of that method
# as the input source. The pygtk2 input_add does not do this. The
# function below replicates the pygtk1 functionality.
# In addition, pygtk maps gtk.input_add to _gobject.io_add_watch, and
# g_io_add_watch() takes different condition bitfields than
# gtk_input_add(). We use g_io_add_watch() here in case pygtk fixes this
# bug.
def input_add(self, source, condition, callback):
if hasattr(source, 'fileno'):
# handle python objects
def wrapper(source, condition, real_s=source, real_cb=callback):
return real_cb(real_s, condition)
return gobject.io_add_watch(source.fileno(), condition, wrapper)
else:
return gobject.io_add_watch(source, condition, callback)
def addReader(self, reader):
if not hasReader(reader):
reads[reader] = self.input_add(reader, INFLAGS, self.callback)
def addWriter(self, writer):
if not hasWriter(writer):
writes[writer] = self.input_add(writer, OUTFLAGS, self.callback)
def removeAll(self):
return self._removeAll(reads, writes)
def removeReader(self, reader):
if hasReader(reader):
gobject.source_remove(reads[reader])
del reads[reader]
def removeWriter(self, writer):
if hasWriter(writer):
gobject.source_remove(writes[writer])
del writes[writer]
doIterationTimer = None
def doIterationTimeout(self, *args):
self.doIterationTimer = None
return 0 # auto-remove
def doIteration(self, delay):
# flush some pending events, return if there was something to do
# don't use the usual "while self.context.pending(): self.context.iteration()"
# idiom because lots of IO (in particular test_tcp's
# ProperlyCloseFilesTestCase) can keep us from ever exiting.
log.msg(channel='system', event='iteration', reactor=self)
if self.__pending():
self.__iteration(0)
return
# nothing to do, must delay
if delay == 0:
return # shouldn't delay, so just return
self.doIterationTimer = gobject.timeout_add(int(delay * 1000),
self.doIterationTimeout)
# This will either wake up from IO or from a timeout.
self.__iteration(1) # block
# note: with the .simulate timer below, delays > 0.1 will always be
# woken up by the .simulate timer
if self.doIterationTimer:
# if woken by IO, need to cancel the timer
gobject.source_remove(self.doIterationTimer)
self.doIterationTimer = None
def crash(self):
self.__crash()
def run(self, installSignalHandlers=1):
self.startRunning(installSignalHandlers=installSignalHandlers)
gobject.timeout_add(0, self.simulate)
self.__run()
def _doReadOrWrite(self, source, condition, faildict={
error.ConnectionDone: failure.Failure(error.ConnectionDone()),
error.ConnectionLost: failure.Failure(error.ConnectionLost()),
}):
why = None
didRead = None
if condition & POLL_DISCONNECTED and \
not (condition & gobject.IO_IN):
why = main.CONNECTION_LOST
else:
try:
if condition & gobject.IO_IN:
why = source.doRead()
didRead = source.doRead
if not why and condition & gobject.IO_OUT:
# if doRead caused connectionLost, don't call doWrite
# if doRead is doWrite, don't call it again.
if not source.disconnected and source.doWrite != didRead:
why = source.doWrite()
didRead = source.doWrite # if failed it was in write
except:
why = sys.exc_info()[1]
log.msg('Error In %s' % source)
log.deferr()
if why:
self._disconnectSelectable(source, why, didRead == source.doRead)
def callback(self, source, condition):
log.callWithLogger(source, self._doReadOrWrite, source, condition)
self.simulate() # fire Twisted timers
return 1 # 1=don't auto-remove the source
def simulate(self):
"""Run simulation loops and reschedule callbacks.
"""
global _simtag
if _simtag is not None:
gobject.source_remove(_simtag)
self.runUntilCurrent()
timeout = min(self.timeout(), 0.1)
if timeout is None:
timeout = 0.1
# grumble
_simtag = gobject.timeout_add(int(timeout * 1010), self.simulate)
components.backwardsCompatImplements(Gtk2Reactor)
class PortableGtkReactor(selectreactor.SelectReactor):
"""Reactor that works on Windows.
input_add is not supported on GTK+ for Win32, apparently.
"""
def crash(self):
import gtk
# mainquit is deprecated in newer versions
if hasattr(gtk, 'main_quit'):
gtk.main_quit()
else:
gtk.mainquit()
def run(self, installSignalHandlers=1):
import gtk
self.startRunning(installSignalHandlers=installSignalHandlers)
self.simulate()
# mainloop is deprecated in newer versions
if hasattr(gtk, 'main'):
gtk.main()
else:
gtk.mainloop()
def simulate(self):
"""Run simulation loops and reschedule callbacks.
"""
global _simtag
if _simtag is not None:
gobject.source_remove(_simtag)
self.iterate()
timeout = min(self.timeout(), 0.1)
if timeout is None:
timeout = 0.1
# grumble
_simtag = gobject.timeout_add(int(timeout * 1010), self.simulate)
def install(useGtk=True):
"""Configure the twisted mainloop to be run inside the gtk mainloop.
@param useGtk: should glib rather than GTK+ event loop be
used (this will be slightly faster but does not support GUI).
"""
reactor = Gtk2Reactor(useGtk)
from twisted.internet.main import installReactor
installReactor(reactor)
return reactor
def portableInstall(useGtk=True):
"""Configure the twisted mainloop to be run inside the gtk mainloop.
"""
reactor = PortableGtkReactor()
from twisted.internet.main import installReactor
installReactor(reactor)
return reactor
if runtime.platform.getType() != 'posix':
install = portableInstall
| gpl-2.0 | -5,488,067,095,562,070,000 | 34.144366 | 86 | 0.634105 | false |
flavour/tldrmp | modules/s3cfg.py | 1 | 73402 | # -*- coding: utf-8 -*-
""" Deployment Settings
@requires: U{B{I{gluon}} <http://web2py.com>}
@copyright: 2009-2013 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ["S3Config"]
try:
# Python 2.7
from collections import OrderedDict
except:
# Python 2.6
from gluon.contrib.simplejson.ordered_dict import OrderedDict
from gluon import current, URL, TR, TD
from gluon.storage import Storage
class S3Config(Storage):
"""
Deployment Settings Helper Class
"""
def __init__(self):
self.auth = Storage()
self.auth.email_domains = []
self.base = Storage()
self.database = Storage()
# @ToDo: Move to self.ui
self.frontpage = Storage()
self.frontpage.rss = []
self.fin = Storage()
self.L10n = Storage()
self.mail = Storage()
self.msg = Storage()
self.search = Storage()
self.security = Storage()
self.ui = Storage()
self.cap = Storage()
self.gis = Storage()
self.hrm = Storage()
self.inv = Storage()
self.irs = Storage()
self.org = Storage()
self.pr = Storage()
self.proc = Storage()
self.project = Storage()
self.req = Storage()
self.supply = Storage()
self.hms = Storage()
# -------------------------------------------------------------------------
# Template
def get_template(self):
"""
Which deployment template to use for config.py, parser.py, menus.py, etc
http://eden.sahanafoundation.org/wiki/BluePrint/Templates
"""
return self.base.get("template", "default")
def exec_template(self, path):
"""
Execute the template
"""
from gluon.fileutils import read_file
from gluon.restricted import restricted
code = read_file(path)
restricted(code, layer=path)
return
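    # Usage sketch (illustrative, not part of the original file): the values
    # read by the getters in this class are normally assigned in a
    # deployment template's config.py, which exec_template() executes.
    # Attribute names follow the Storage objects created in __init__, e.g.:
    #
    #     settings = current.deployment_settings
    #     settings.base.system_name = T("My Eden Instance")
    #     settings.base.theme = "default"
    #     settings.auth.registration_requires_verification = True
    #     settings.security.policy = 5
    #
    # The exact contents of a real template's config.py will differ; this
    # only illustrates how the getters map onto the settings Storage.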
# -------------------------------------------------------------------------
# Theme
def get_theme(self):
"""
Which templates folder to use for views/layout.html
"""
return self.base.get("theme", "default")
def get_base_xtheme(self):
"""
Whether there is a custom Ext theme or simply use the default xtheme-gray
- specified as <themefolder>/xtheme-<filename>.css
"""
return self.base.get("xtheme", None)
# -------------------------------------------------------------------------
def is_cd_version(self):
"""
Whether we're running from a non-writable CD
"""
return self.base.get("cd_version", False)
# -------------------------------------------------------------------------
def get_google_analytics_tracking_id(self):
"""
Google Analytics Key
"""
return self.base.get("google_analytics_tracking_id", None)
# -------------------------------------------------------------------------
def get_youtube_id(self):
"""
List of YouTube IDs for the /default/video page
"""
return self.base.get("youtube_id", [])
# -------------------------------------------------------------------------
# Authentication settings
def get_auth_hmac_key(self):
"""
            salt used when hashing passwords - normally randomised during 1st run
"""
return self.auth.get("hmac_key", "akeytochange")
def get_auth_password_min_length(self):
"""
To set the Minimum Password Length
"""
return self.auth.get("password_min_length", int(4))
def get_auth_facebook(self):
"""
Read the FaceBook OAuth settings
- if configured, then it is assumed that FaceBook Authentication is enabled
"""
id = self.auth.get("facebook_id", False)
secret = self.auth.get("facebook_secret", False)
if id and secret:
return dict(id=id, secret=secret)
else:
return False
def get_auth_gmail_domains(self):
""" List of domains which can use GMail SMTP for Authentication """
return self.auth.get("gmail_domains", [])
def get_auth_google(self):
"""
Read the Google OAuth settings
- if configured, then it is assumed that Google Authentication is enabled
"""
id = self.auth.get("google_id", False)
secret = self.auth.get("google_secret", False)
if id and secret:
return dict(id=id, secret=secret)
else:
return False
def get_auth_openid(self):
""" Use OpenID for Authentication """
return self.auth.get("openid", False)
def get_security_self_registration(self):
return self.security.get("self_registration", True)
def get_auth_registration_requires_verification(self):
return self.auth.get("registration_requires_verification", False)
def get_auth_registration_requires_approval(self):
return self.auth.get("registration_requires_approval", False)
def get_auth_always_notify_approver(self):
return self.auth.get("always_notify_approver", True)
def get_auth_login_next(self):
""" Which page to go to after login """
return self.auth.get("login_next", URL(c="default", f="index"))
def get_auth_show_link(self):
return self.auth.get("show_link", True)
def get_auth_registration_link_user_to(self):
"""
Link User accounts to none or more of:
* Staff
* Volunteer
* Member
"""
return self.auth.get("registration_link_user_to", None)
def get_auth_registration_link_user_to_default(self):
"""
Link User accounts to none or more of:
* Staff
* Volunteer
* Member
"""
return self.auth.get("registration_link_user_to_default", None)
def get_auth_opt_in_team_list(self):
return self.auth.get("opt_in_team_list", [])
def get_auth_opt_in_to_email(self):
return self.get_auth_opt_in_team_list() != []
def get_auth_opt_in_default(self):
return self.auth.get("opt_in_default", False)
def get_auth_registration_requests_mobile_phone(self):
return self.auth.get("registration_requests_mobile_phone", False)
def get_auth_registration_mobile_phone_mandatory(self):
" Make the selection of Mobile Phone Mandatory during registration "
return self.auth.get("registration_mobile_phone_mandatory", False)
def get_auth_registration_requests_organisation(self):
" Have the registration form request the Organisation "
return self.auth.get("registration_requests_organisation", False)
def get_auth_admin_sees_organisation(self):
" See Organisations in User Admin"
return self.auth.get("admin_sees_organisation",
self.get_auth_registration_requests_organisation())
def get_auth_registration_organisation_required(self):
" Make the selection of Organisation required during registration "
return self.auth.get("registration_organisation_required", False)
def get_auth_registration_organisation_hidden(self):
" Hide the Organisation field in the registration form unless an email is entered which isn't whitelisted "
return self.auth.get("registration_organisation_hidden", False)
def get_auth_registration_organisation_default(self):
" Default the Organisation during registration "
return self.auth.get("registration_organisation_default", None)
def get_auth_registration_organisation_id_default(self):
" Default the Organisation during registration - will return the organisation_id"
name = self.auth.get("registration_organisation_default", None)
if name:
otable = current.s3db.org_organisation
orow = current.db(otable.name == name).select(otable.id).first()
if orow:
organisation_id = orow.id
else:
organisation_id = otable.insert(name = name)
else:
organisation_id = None
return organisation_id
def get_auth_registration_requests_organisation_group(self):
" Have the registration form request the Organisation Group "
return self.auth.get("registration_requests_organisation_group", False)
def get_auth_registration_organisation_group_required(self):
" Make the selection of Organisation Group required during registration "
return self.auth.get("registration_organisation_group_required", False)
def get_auth_registration_requests_site(self):
" Have the registration form request the Site "
return self.auth.get("registration_requests_site", False)
def get_auth_registration_site_required(self):
" Make the selection of site required during registration "
return self.auth.get("registration_site_required", False)
def get_auth_registration_requests_image(self):
""" Have the registration form request an Image """
return self.auth.get("registration_requests_image", False)
def get_auth_registration_pending(self):
""" Message someone gets when they register & they need approving """
message = self.auth.get("registration_pending", None)
if message:
return current.T(message)
approver = self.get_mail_approver()
if "@" in approver:
m = "Registration is still pending approval from Approver (%s) - please wait until confirmation received." % \
approver
else:
m = "Registration is still pending approval from the system administrator - please wait until confirmation received."
return current.T(m)
def get_auth_registration_pending_approval(self):
""" Message someone gets when they register & they need approving """
message = self.auth.get("registration_pending_approval", None)
if message:
return current.T(message)
approver = self.get_mail_approver()
if "@" in approver:
m = "Thank you for validating your email. Your user account is still pending for approval by the system administrator (%s). You will get a notification by email when your account is activated." % \
approver
else:
m = "Thank you for validating your email. Your user account is still pending for approval by the system administrator. You will get a notification by email when your account is activated."
return current.T(m)
def get_auth_registration_roles(self):
"""
A dictionary of realms, with lists of role UUIDs, to assign to newly-registered users
Use key = 0 to have the roles not restricted to a realm
"""
return self.auth.get("registration_roles", [])
def get_auth_terms_of_service(self):
"""
            Force users to accept Terms of Service before Registering an account
- uses <template>/views/tos.html
"""
return self.auth.get("terms_of_service", False)
def get_auth_registration_volunteer(self):
""" Redirect the newly-registered user to their volunteer details page """
return self.auth.get("registration_volunteer", False)
def get_auth_record_approval(self):
""" Use record approval (False by default) """
return self.auth.get("record_approval", False)
def get_auth_record_approval_required_for(self):
""" Which tables record approval is required for """
return self.auth.get("record_approval_required_for", [])
def get_auth_realm_entity(self):
""" Hook to determine the owner entity of a record """
return self.auth.get("realm_entity", None)
def get_auth_person_realm_human_resource_site_then_org(self):
"""
Should we set pr_person.realm_entity to that of
hrm_human_resource.site_id$pe_id
"""
return self.auth.get("person_realm_human_resource_site_then_org", False)
def get_auth_person_realm_member_org(self):
"""
Sets pr_person.realm_entity to
organisation.pe_id of member_member
"""
return self.auth.get("person_realm_member_org", False)
def get_auth_role_modules(self):
"""
            Which modules are included in the Role Manager
- to assign discrete permissions to via UI
"""
T = current.T
return self.auth.get("role_modules", OrderedDict([
("staff", "Staff"),
("vol", "Volunteers"),
("member", "Members"),
("inv", "Warehouses"),
("asset", "Assets"),
("project", "Projects"),
("survey", "Assessments"),
("irs", "Incidents")
]))
def get_auth_access_levels(self):
"""
Access levels for the Role Manager UI
"""
T = current.T
return self.auth.get("access_levels", OrderedDict([
("reader", "Reader"),
("data_entry", "Data Entry"),
("editor", "Editor"),
("super", "Super Editor")
]))
def get_auth_set_presence_on_login(self):
return self.auth.get("set_presence_on_login", False)
def get_auth_ignore_levels_for_presence(self):
return self.auth.get("ignore_levels_for_presence", ["L0"])
def get_auth_create_unknown_locations(self):
return self.auth.get("create_unknown_locations", False)
def get_auth_show_utc_offset(self):
return self.auth.get("show_utc_offset", True)
def get_security_archive_not_delete(self):
return self.security.get("archive_not_delete", True)
def get_security_audit_read(self):
return self.security.get("audit_read", False)
def get_security_audit_write(self):
return self.security.get("audit_write", False)
def get_security_policy(self):
" Default is Simple Security Policy "
return self.security.get("policy", 1)
def get_security_strict_ownership(self):
"""
Ownership-rule for records without owner:
True = not owned by any user (strict ownership, default)
False = owned by any authenticated user
"""
return self.security.get("strict_ownership", True)
def get_security_map(self):
return self.security.get("map", False)
# -------------------------------------------------------------------------
# Base settings
def get_instance_name(self):
"""
Instance Name - for management scripts. e.g. prod or test
"""
return self.base.get("instance_name", "")
def get_system_name(self):
"""
System Name - for the UI & Messaging
"""
return self.base.get("system_name", current.T("Sahana Eden Humanitarian Management Platform"))
def get_system_name_short(self):
"""
System Name (Short Version) - for the UI & Messaging
"""
return self.base.get("system_name_short", "Sahana Eden")
def get_base_debug(self):
"""
Debug mode: Serve CSS/JS in separate uncompressed files
"""
return self.base.get("debug", False)
def get_base_migrate(self):
""" Whether to allow Web2Py to migrate the SQL database to the new structure """
return self.base.get("migrate", True)
def get_base_fake_migrate(self):
""" Whether to have Web2Py create the .table files to match the expected SQL database structure """
return self.base.get("fake_migrate", False)
def get_base_prepopulate(self):
""" Whether to prepopulate the database &, if so, which set of data to use for this """
return self.base.get("prepopulate", 1)
def get_base_guided_tour(self):
""" Whether the guided tours are enabled """
return self.base.get("guided_tour", False)
def get_base_public_url(self):
"""
The Public URL for the site - for use in email links, etc
"""
return self.base.get("public_url", "http://127.0.0.1:8000")
def get_base_cdn(self):
"""
Should we use CDNs (Content Distribution Networks) to serve some common CSS/JS?
"""
return self.base.get("cdn", False)
def get_base_session_memcache(self):
"""
Should we store sessions in a Memcache service to allow sharing
between multiple instances?
"""
return self.base.get("session_memcache", False)
def get_base_solr_url(self):
"""
URL to connect to solr server
"""
return self.base.get("solr_url", False)
def get_import_callback(self, tablename, callback):
"""
Lookup callback to use for imports in the following order:
- custom [create, update]_onxxxx
- default [create, update]_onxxxx
- custom onxxxx
- default onxxxx
NB: Currently only onaccept is actually used
"""
callbacks = self.base.get("import_callbacks", [])
if tablename in callbacks:
callbacks = callbacks[tablename]
if callback in callbacks:
return callbacks[callback]
get_config = current.s3db.get_config
default = get_config(tablename, callback)
if default:
return default
if callback[:2] != "on":
callback = callback[7:]
if callback in callbacks:
return callbacks[callback]
default = get_config(tablename, callback)
if default:
return default
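    # Illustration only (table and function names are hypothetical): a template's
    # models/000_config.py could register callbacks for this lookup, keyed first
    # by tablename and then by callback name:
    #
    #   def org_import_onaccept(form):
    #       # post-process each imported org_organisation record
    #       pass
    #
    #   settings.base.import_callbacks = {
    #       "org_organisation": {"create_onaccept": org_import_onaccept},
    #   }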
# -------------------------------------------------------------------------
# Database settings
def get_database_type(self):
return self.database.get("db_type", "sqlite").lower()
def get_database_string(self):
db_type = self.database.get("db_type", "sqlite").lower()
pool_size = self.database.get("pool_size", 30)
if (db_type == "sqlite"):
db_string = "sqlite://storage.db"
elif (db_type == "mysql"):
db_string = "mysql://%s:%s@%s:%s/%s" % \
(self.database.get("username", "sahana"),
self.database.get("password", "password"),
self.database.get("host", "localhost"),
self.database.get("port", None) or "3306",
self.database.get("database", "sahana"))
elif (db_type == "postgres"):
db_string = "postgres://%s:%s@%s:%s/%s" % \
(self.database.get("username", "sahana"),
self.database.get("password", "password"),
self.database.get("host", "localhost"),
self.database.get("port", None) or "5432",
self.database.get("database", "sahana"))
else:
from gluon import HTTP
raise HTTP(501, body="Database type '%s' not recognised - please correct file models/000_config.py." % db_type)
return (db_string, pool_size)
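    # A minimal sketch (all values are placeholders) of the settings consumed by
    # get_database_string() for a PostgreSQL deployment:
    #
    #   settings.database.db_type = "postgres"
    #   settings.database.host = "localhost"
    #   settings.database.port = 5432
    #   settings.database.database = "sahana"
    #   settings.database.username = "sahana"
    #   settings.database.password = "password"
    #   settings.database.pool_size = 30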
# -------------------------------------------------------------------------
# Finance settings
# @ToDo: Make these customisable per Organisation
# => Move to a Table like hrm_course
def get_fin_currencies(self):
T = current.T
currencies = {
"EUR" :T("Euros"),
"GBP" :T("Great British Pounds"),
"USD" :T("United States Dollars"),
}
return self.fin.get("currencies", currencies)
def get_fin_currency_default(self):
return self.fin.get("currency_default", "USD") # Dollars
def get_fin_currency_writable(self):
return self.fin.get("currency_writable", True)
# -------------------------------------------------------------------------
# GIS (Map) Settings
#
def get_gis_api_bing(self):
""" API key for Bing """
return self.gis.get("api_bing", None)
def get_gis_api_google(self):
"""
API key for Google
- needed for Earth, MapMaker & GeoCoder
- defaults to localhost
"""
return self.gis.get("api_google", "ABQIAAAAgB-1pyZu7pKAZrMGv3nksRTpH3CbXHjuCVmaTc5MkkU4wO1RRhQWqp1VGwrG8yPE2KhLCPYhD7itFw")
def get_gis_api_yahoo(self):
"""
API key for Yahoo
- deprecated
"""
return self.gis.get("api_yahoo", None)
def get_gis_building_name(self):
" Display Building Name when selecting Locations "
return self.gis.get("building_name", True)
def get_gis_check_within_parent_boundaries(self):
"""
Whether location Lat/Lons should be within the boundaries of the parent
"""
return self.gis.get("check_within_parent_boundaries", True)
def get_gis_countries(self):
"""
Which country codes should be accessible to the location selector?
"""
return self.gis.get("countries", [])
def get_gis_display_l0(self):
return self.gis.get("display_L0", False)
def get_gis_display_l1(self):
return self.gis.get("display_L1", True)
def get_gis_duplicate_features(self):
"""
Display duplicate features either side of the International date line?
"""
return self.gis.get("duplicate_features", False)
def get_gis_edit_group(self):
" Edit Location Groups "
return self.gis.get("edit_GR", False)
def get_gis_geocode_imported_addresses(self):
" Should Addresses imported from CSV be passed to a Geocoder to try and automate Lat/Lon? "
return self.gis.get("geocode_imported_addresses", False)
def get_gis_geoserver_url(self):
return self.gis.get("geoserver_url", "")
def get_gis_geoserver_username(self):
return self.gis.get("geoserver_username", "admin")
def get_gis_geoserver_password(self):
return self.gis.get("geoserver_password", "")
def get_gis_latlon_selector(self):
" Display Lat/Lon form fields when selecting Locations "
return self.gis.get("latlon_selector", True)
def get_gis_layer_metadata(self):
" Use CMS to provide Metadata on Map Layers "
return self.has_module("cms") and self.gis.get("layer_metadata", False)
def get_gis_layer_properties(self):
" Display Layer Properties Tool above Map's Layer Tree "
return self.gis.get("layer_properties", True)
def get_gis_layer_tree_base(self):
" Display Base Layers folder in the Map's Layer Tree "
return self.gis.get("layer_tree_base", True)
def get_gis_layer_tree_overlays(self):
" Display Overlays folder in the Map's Layer Tree "
return self.gis.get("layer_tree_overlays", True)
def get_gis_layer_tree_expanded(self):
" Display folders in the Map's Layer Tree Open by default "
return self.gis.get("layer_tree_expanded", True)
def get_gis_layer_tree_radio(self):
" Use a radio button for custom folders in the Map's Layer Tree "
return self.gis.get("layer_tree_radio", False)
def get_gis_layers_label(self):
" Label for the Map's Layer Tree "
return self.gis.get("layers_label", "Layers")
def get_gis_map_height(self):
"""
Height of the Embedded Map
            Change this if required for your theme
NB API can override this in specific modules
"""
return self.gis.get("map_height", 600)
def get_gis_map_width(self):
"""
Width of the Embedded Map
            Change this if required for your theme
NB API can override this in specific modules
"""
return self.gis.get("map_width", 1000)
def get_gis_map_selector(self):
" Display a Map-based tool to select Locations "
return self.gis.get("map_selector", True)
def get_gis_marker_max_height(self):
return self.gis.get("marker_max_height", 35)
def get_gis_marker_max_width(self):
return self.gis.get("marker_max_width", 30)
def get_gis_max_features(self):
"""
The maximum number of features to return in a Map Layer
- more than this will prompt the user to zoom in to load the layer
Lower this number to get extra performance from an overloaded server.
"""
return self.gis.get("max_features", 1000)
def get_gis_legend(self):
"""
Should we display a Legend on the Map?
- set to True to show a GeoExt Legend (default)
- set to False to not show a Legend
- set to "float" to use a floating DIV
"""
return self.gis.get("legend", True)
def get_gis_menu(self):
"""
Should we display a menu of GIS configurations?
- set to False to not show the menu (default)
- set to the label to use for the menu to enable it
e.g. T("Events") or T("Regions")
"""
return self.gis.get("menu", False)
def get_gis_mouse_position(self):
"""
What style of Coordinates for the current Mouse Position
should be shown on the Map?
'normal', 'mgrs' or False
"""
return self.gis.get("mouse_position", "normal")
def get_gis_nav_controls(self):
"""
Should the Map Toolbar display Navigation Controls?
"""
return self.gis.get("nav_controls", True)
def get_gis_label_overlays(self):
"""
Label for the Map Overlays in the Layer Tree
"""
return self.gis.get("label_overlays", "Overlays")
def get_gis_overview(self):
"""
Should the Map display an Overview Map?
"""
return self.gis.get("overview", True)
def get_gis_permalink(self):
"""
Should the Map display a Permalink control?
"""
return self.gis.get("permalink", True)
def get_gis_poi_resources(self):
"""
List of resources (tablenames) to import/export as PoIs from Admin Locations
- KML & OpenStreetMap formats
"""
return self.gis.get("poi_resources",
["cr_shelter", "hms_hospital", "org_office"])
def get_gis_postcode_selector(self):
" Display Postcode form field when selecting Locations "
return self.gis.get("postcode_selector", True)
def get_gis_print_service(self):
"""
URL for a Print Service
"""
return self.gis.get("print_service", "")
def get_gis_simplify_tolerance(self):
"""
Default Tolerance for the Simplification of Polygons
- a lower value means less simplification, which is suitable for higher-resolution local activities
- a higher value is suitable for global views
"""
return self.gis.get("simplify_tolerance", 0.01)
def get_gis_scaleline(self):
"""
Should the Map display a ScaleLine control?
"""
return self.gis.get("scaleline", True)
def get_gis_spatialdb(self):
"""
Does the database have Spatial extensions?
"""
db_type = self.get_database_type()
if db_type != "postgres":
# Only Postgres supported currently
return False
else:
return self.gis.get("spatialdb", False)
def get_gis_toolbar(self):
"""
Should the main Map display a Toolbar?
"""
return self.gis.get("toolbar", True)
def get_gis_zoomcontrol(self):
"""
Should the Map display a Zoom control?
"""
return self.gis.get("zoomcontrol", True)
# -------------------------------------------------------------------------
# L10N Settings
def get_L10n_default_language(self):
return self.L10n.get("default_language", "en")
def get_L10n_display_toolbar(self):
return self.L10n.get("display_toolbar", True)
def get_L10n_languages(self):
return self.L10n.get("languages",
OrderedDict([
("ar", "العربية"),
("zh-cn", "中文 (简体)"),
("zh-tw", "中文 (繁體)"),
("en", "English"),
("fr", "Français"),
("de", "Deutsch"),
("el", "ελληνικά"),
("it", "Italiano"),
("ja", "日本語"),
("ko", "한국어"),
("pt", "Português"),
("pt-br", "Português (Brasil)"),
("ru", "русский"),
("es", "Español"),
("tl", "Tagalog"),
("ur", "اردو"),
("vi", "Tiếng Việt"),
]))
def get_L10n_languages_readonly(self):
return self.L10n.get("languages_readonly", True)
def get_L10n_religions(self):
"""
Religions used in Person Registry
@ToDo: find a better code
http://eden.sahanafoundation.org/ticket/594
"""
T = current.T
return self.L10n.get("religions", {
"none":T("none"),
"christian":T("Christian"),
"muslim":T("Muslim"),
"jewish":T("Jewish"),
"buddhist":T("Buddhist"),
"hindu":T("Hindu"),
"bahai":T("Bahai"),
"other":T("other")
})
def get_L10n_date_format(self):
return self.L10n.get("date_format", "%Y-%m-%d")
def get_L10n_time_format(self):
return self.L10n.get("time_format", "%H:%M")
def get_L10n_datetime_separator(self):
return self.L10n.get("datetime_separator", " ")
def get_L10n_datetime_format(self):
return "%s%s%s" % (self.get_L10n_date_format(),
self.get_L10n_datetime_separator(),
self.get_L10n_time_format()
)
def get_L10n_utc_offset(self):
return self.L10n.get("utc_offset", "UTC +0000")
def get_L10n_firstDOW(self):
return self.L10n.get("firstDOW", 1)
def get_L10n_lat_lon_format(self):
"""
This is used to format latitude and longitude fields when they are
displayed by eden. The format string may include the following
placeholders:
            - %d -- Degrees (integer)
- %m -- Minutes (integer)
- %s -- Seconds (double)
- %f -- Degrees in decimal (double)
"""
return self.L10n.get("lat_lon_display_format", "%f")
def get_L10n_default_country_code(self):
""" Default Telephone Country Code """
return self.L10n.get("default_country_code", 1)
def get_L10n_mandatory_lastname(self):
return self.L10n.get("mandatory_lastname", False)
def get_L10n_thousands_separator(self):
return self.L10n.get("thousands_separator", " ")
def get_L10n_thousands_grouping(self):
return self.L10n.get("thousands_grouping", 3)
def get_L10n_decimal_separator(self):
return self.L10n.get("decimal_separator", ",")
def get_L10n_translate_cms_series(self):
"""
Whether to translate CMS Series names
"""
return self.L10n.get("translate_cms_series", False)
def get_L10n_translate_gis_layer(self):
"""
Whether to translate Layer names
"""
return self.L10n.get("translate_gis_layer", False)
def get_L10n_translate_gis_location(self):
"""
Whether to translate Location names
"""
return self.L10n.get("translate_gis_location", False)
def get_L10n_pootle_url(self):
""" URL for Pootle server """
return self.L10n.get("pootle_url", "http://pootle.sahanafoundation.org/")
def get_L10n_pootle_username(self):
""" Username for Pootle server """
return self.L10n.get("pootle_username", False)
def get_L10n_pootle_password(self):
""" Password for Pootle server """
return self.L10n.get("pootle_password", False)
# -------------------------------------------------------------------------
# PDF settings
def get_paper_size(self):
return self.base.get("paper_size", "A4")
def get_pdf_logo(self):
return self.ui.get("pdf_logo", None)
# Optical Character Recognition (OCR)
def get_pdf_excluded_fields(self, resourcename):
excluded_fields_dict = {
"hms_hospital" : [
"hrm_human_resource",
],
"pr_group" : [
"pr_group_membership",
],
}
excluded_fields =\
excluded_fields_dict.get(resourcename, [])
return excluded_fields
# -------------------------------------------------------------------------
# UI Settings
@staticmethod
def default_formstyle(id, label, widget, comment, hidden=False):
"""
Provide the default Sahana Eden Form Style
Label above the Inputs:
http://uxmovement.com/design-articles/faster-with-top-aligned-labels
Things that need to be looked at for custom formstyles:
* subheadings (s3forms.py)
* S3AddPersonWidget (s3widgets.py)
* S3EmbedComponentWidget (s3widgets.py)
"""
row = []
if hidden:
_class = "hide"
else:
_class = ""
# Label on the 1st row
row.append(TR(TD(label, _class="w2p_fl"),
TD(""),
_id=id + "1",
_class=_class))
# Widget & Comment on the 2nd Row
row.append(TR(widget,
TD(comment, _class="w2p_fc"),
_id=id,
_class=_class))
return tuple(row)
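    # A theme can supply its own callable with the same signature; a sketch
    # (hypothetical single-row layout, using the same web2py TR/TD helpers):
    #
    #   def my_formstyle(id, label, widget, comment, hidden=False):
    #       _class = "hide" if hidden else ""
    #       return TR(TD(label, _class="w2p_fl"),
    #                 TD(widget),
    #                 TD(comment, _class="w2p_fc"),
    #                 _id=id, _class=_class)
    #
    #   settings.ui.formstyle = my_formstyle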
def get_ui_formstyle(self):
return self.ui.get("formstyle", self.default_formstyle)
# -------------------------------------------------------------------------
def get_ui_auth_user_represent(self):
"""
Should the auth_user created_by/modified_by be represented by Name or Email?
- defaults to email
"""
return self.ui.get("auth_user_represent", "email")
def get_ui_confirm(self):
"""
For Delete actions
Workaround for this Bug in Selenium with FF4:
http://code.google.com/p/selenium/issues/detail?id=1604
"""
return self.ui.get("confirm", True)
def get_ui_crud_form(self, tablename):
"""
            Get custom crud_forms for different tables
"""
return self.ui.get("crud_form_%s" % tablename, None)
def ui_customize(self, tablename, **attr):
"""
Customize a Controller
"""
customize = self.ui.get("customize_%s" % tablename)
if customize:
return customize(**attr)
else:
return attr
def get_ui_export_formats(self):
"""
Which export formats should we display?
- specify a list of export formats to restrict
"""
return self.ui.get("export_formats",
["have", "kml", "map", "pdf", "rss", "xls", "xml"])
def get_ui_hide_report_filter_options(self):
"""
            Whether to hide the report filter options form (it is shown by default)
"""
return self.ui.get("hide_report_filter_options", False)
def get_ui_hide_report_options(self):
"""
Hide report options form by default
"""
return self.ui.get("hide_report_options", True)
def get_ui_interim_save(self):
""" Render interim-save button in CRUD forms by default """
return self.ui.get("interim_save", False)
def get_ui_label_attachments(self):
"""
Label for attachments tab
"""
return current.T(self.ui.get("label_attachments", "Attachments"))
def get_ui_label_camp(self):
""" 'Camp' instead of 'Shelter'? """
return self.ui.get("camp", False)
def get_ui_label_cluster(self):
""" UN-style deployment? """
return self.ui.get("cluster", False)
def get_ui_label_mobile_phone(self):
"""
Label for the Mobile Phone field
e.g. 'Cell Phone'
"""
return current.T(self.ui.get("label_mobile_phone", "Mobile Phone"))
def get_ui_label_postcode(self):
"""
Label for the Postcode field
e.g. 'ZIP Code'
"""
return current.T(self.ui.get("label_postcode", "Postcode"))
def get_ui_label_read(self):
"""
Label for buttons in list views which lead to a Read-only 'Display' page
"""
return self.ui.get("read_label", "Open")
def get_ui_label_update(self):
"""
Label for buttons in list views which lead to an Editable 'Update' page
"""
return self.ui.get("update_label", "Open")
def get_ui_navigate_away_confirm(self):
return self.ui.get("navigate_away_confirm", True)
def get_ui_search_submit_button(self):
"""
Class for submit buttons in search views
"""
return self.ui.get("search_submit_button", "search-button")
def get_ui_social_buttons(self):
"""
Display social media Buttons in the footer?
- requires support in the Theme
"""
return self.ui.get("social_buttons", False)
def get_ui_summary(self):
"""
Default Summary Page Configuration (can also be
configured per-resource using s3db.configure)
@example:
settings.ui.summary = [
{
"name": "table", # the section name
"label": "Table", # the section label, will
# automatically be translated
"common": False, # show this section on all tabs
"translate": True, # turn automatic label translation on/off
"widgets": [ # list of widgets for this section
{
"method": "datatable", # widget method, either a
# name that resolves into
# a S3Method, or a callable
# to render the widget
"filterable": True, # Whether the widget can
# be filtered by the summary
# filter form
}
]
}
]
"""
return self.ui.get("summary", None)
def get_ui_filter_auto_submit(self):
"""
Time in milliseconds after the last filter option change to
automatically update the filter target(s), set to 0 to disable
"""
return self.ui.get("filter_auto_submit", 800)
def get_ui_report_auto_submit(self):
"""
Time in milliseconds after the last filter option change to
automatically update the filter target(s), set to 0 to disable
"""
return self.ui.get("report_auto_submit", 800)
# =========================================================================
# Messaging
# -------------------------------------------------------------------------
# Mail settings
def get_mail_server(self):
return self.mail.get("server", "127.0.0.1:25")
def get_mail_server_login(self):
return self.mail.get("login", False)
def get_mail_server_tls(self):
"""
Does the Mail Server use TLS?
- default Debian is False
- GMail is True
"""
return self.mail.get("tls", False)
def get_mail_sender(self):
"""
The From Address for all Outbound Emails
"""
return self.mail.get("sender", None)
def get_mail_approver(self):
"""
The default Address to send Requests for New Users to be Approved
OR
UUID of Role of users who should receive Requests for New Users to be Approved
            - unless overridden by per-domain entries in auth_organisation
"""
return self.mail.get("approver", "[email protected]")
def get_mail_limit(self):
"""
A daily limit to the number of messages which can be sent
"""
return self.mail.get("limit", None)
# -------------------------------------------------------------------------
# Parser
def get_msg_parser(self):
"""
Which template folder to use to load parser.py
"""
return self.msg.get("parser", "default")
# -------------------------------------------------------------------------
# Twitter
def get_msg_twitter_oauth_consumer_key(self):
return self.msg.get("twitter_oauth_consumer_key", "")
def get_msg_twitter_oauth_consumer_secret(self):
return self.msg.get("twitter_oauth_consumer_secret", "")
# -------------------------------------------------------------------------
# Notifications
def get_msg_notify_subject(self):
"""
Template for the subject line in update notifications.
Available placeholders:
$S = System Name (long)
$s = System Name (short)
$r = Resource Name
Use {} to separate the placeholder from immediately following
identifier characters (like: ${placeholder}text).
"""
return self.msg.get("notify_subject",
"$s %s: $r" % current.T("Update Notification"))
def get_msg_notify_email_format(self):
"""
The preferred email format for update notifications,
"text" or "html".
"""
return self.msg.get("notify_email_format", "text")
def get_msg_notify_renderer(self):
"""
Custom content renderer function for update notifications,
function()
"""
return self.msg.get("notify_renderer", None)
# -------------------------------------------------------------------------
# Outbox settings
def get_msg_max_send_retries(self):
"""
Maximum number of retries to send a message before
it is regarded as permanently failing; set to None
to retry forever.
"""
return self.msg.get("max_send_retries", 9)
# -------------------------------------------------------------------------
def get_search_max_results(self):
"""
The maximum number of results to return in an Autocomplete Search
- more than this will prompt the user to enter a more exact match
Lower this number to get extra performance from an overloaded server.
"""
return self.search.get("max_results", 200)
# -------------------------------------------------------------------------
# Save Search and Subscription
def get_search_save_widget(self):
"""
Enable the Saved Search widget
"""
return self.search.get("save_widget", True)
# -------------------------------------------------------------------------
# Filter Manager Widget
def get_search_filter_manager(self):
""" Enable the filter manager widget """
return self.search.get("filter_manager", True)
def get_search_filter_manager_allow_delete(self):
""" Allow deletion of saved filters """
return self.search.get("filter_manager_allow_delete", True)
def get_search_filter_manager_save(self):
""" Text for saved filter save-button """
return self.search.get("filter_manager_save", None)
def get_search_filter_manager_update(self):
""" Text for saved filter update-button """
return self.search.get("filter_manager_update", None)
def get_search_filter_manager_delete(self):
""" Text for saved filter delete-button """
return self.search.get("filter_manager_delete", None)
def get_search_filter_manager_load(self):
""" Text for saved filter load-button """
return self.search.get("filter_manager_load", None)
# =========================================================================
# Modules
# -------------------------------------------------------------------------
# CAP
def get_cap_identifier_prefix(self):
"""
Prefix to be prepended to identifiers of CAP alerts
"""
return self.cap.get("identifier_prefix", "")
def get_cap_identifier_suffix(self):
"""
Suffix to be appended to identifiers of CAP alerts
"""
return self.cap.get("identifier_suffix", "")
def get_cap_codes(self):
"""
Default codes for CAP alerts
should return a list of dicts:
[ {"key": "<ValueName>, "value": "<Value>",
"comment": "<Help string>", "mutable": True|False},
...]
"""
return self.cap.get("codes", [])
def get_cap_event_codes(self):
"""
Default alert codes for CAP info segments
should return a list of dicts:
[ {"key": "<ValueName>, "value": "<Value>",
"comment": "<Help string>", "mutable": True|False},
...]
"""
return self.cap.get("event_codes", [])
def get_cap_parameters(self):
"""
Default parameters for CAP info segments
should return a list of dicts:
[ {"key": "<ValueName>, "value": "<Value>",
"comment": "<Help string>", "mutable": True|False},
...]
"""
return self.cap.get("parameters", [])
def get_cap_geocodes(self):
"""
Default geocodes.
should return a list of dicts:
[ {"key": "<ValueName>, "value": "<Value>",
"comment": "<Help string>", "mutable": True|False},
...]
"""
return self.cap.get("geocodes", [])
def get_cap_base64(self):
"""
Should CAP resources be base64 encoded and embedded in the alert message?
"""
return self.cap.get("base64", False)
def get_cap_languages(self):
"""
Languages for CAP info segments.
This gets filled in the drop-down for selecting languages.
These values should conform to RFC 3066.
For a full list of languages and their codes, see:
http://www.i18nguy.com/unicode/language-identifiers.html
"""
return self.cap.get("languages",
OrderedDict([
("ar", "العربية"),
("en", "English"),
("fr", "Français"),
("pt", "Português"),
("ru", "русский"),
("es", "Español")
]))
def get_cap_priorities(self):
"""
Settings for CAP priorities
Should be an ordered dict of the format
OrderedDict([
("<value>, "<Translated title>", <urgency>, <severity>, <certainty>, <color>),
...
]) """
T = current.T
return self.cap.get("priorities", [
("Urgent", T("Urgent"), "Immediate", "Extreme", "Observed", "red"),
("High", T("High"), "Expected", "Severe", "Observed", "orange"),
("Low", T("Low"), "Expected", "Moderate", "Observed", "green")
])
# -------------------------------------------------------------------------
# Human Resource Management
def get_hrm_staff_label(self):
"""
Label for 'Staff'
e.g. 'Contacts'
"""
return current.T(self.hrm.get("staff_label", "Staff"))
def get_hrm_organisation_label(self):
"""
Label for Organisations in Human Resources
"""
return current.T(self.hrm.get("organisation_label", "Organization"))
def get_hrm_email_required(self):
"""
If set to True then Staff & Volunteers require an email address
"""
return self.hrm.get("email_required", True)
def get_hrm_org_required(self):
"""
If set to True then Staff & Volunteers require an Organisation
"""
return self.hrm.get("org_required", True)
def get_hrm_deletable(self):
"""
If set to True then HRM records are deletable rather than just being able to be marked as obsolete
"""
return self.hrm.get("deletable", True)
def get_hrm_filter_certificates(self):
"""
If set to True then Certificates are filtered by (Root) Organisation
& hence certificates from other Organisations cannot be added to an HR's profile (except by Admins)
"""
return self.hrm.get("filter_certificates", False)
def get_hrm_multiple_job_titles(self):
"""
If set to True then HRs can have multiple Job Titles
"""
return self.hrm.get("multi_job_titles", False)
def get_hrm_show_staff(self):
"""
If set to True then show 'Staff' options when HRM enabled
- needs a separate setting as vol requires hrm, but we may only wish to show Volunteers
"""
return self.hrm.get("show_staff", True)
def get_hrm_skill_types(self):
"""
If set to True then Skill Types are exposed to the UI
            - each skill_type needs its own set of competency levels
If set to False then Skill Types are hidden from the UI
- all skills use the same skill_type & hence the same set of competency levels
"""
return self.hrm.get("skill_types", False)
def get_hrm_staff_experience(self):
"""
Whether to use Experience for Staff &, if so, which table to use
- options are: False, "experience"
"""
return self.hrm.get("staff_experience", "experience")
def get_hrm_vol_experience(self):
"""
Whether to use Experience for Volunteers &, if so, which table to use
- options are: False, "experience", "programme" or "both"
"""
return self.hrm.get("vol_experience", "programme")
def get_hrm_show_organisation(self):
"""
Whether Human Resource representations should include the Organisation
"""
return self.hrm.get("show_organisation", False)
def get_hrm_teams(self):
"""
Whether Human Resources should use Teams
& what to call them
"""
return self.hrm.get("teams", "Team")
def get_hrm_use_awards(self):
"""
Whether Volunteers should use Awards
"""
return self.hrm.get("use_awards", True)
def get_hrm_use_certificates(self):
"""
Whether Human Resources should use Certificates
"""
return self.hrm.get("use_certificates", True)
def get_hrm_use_credentials(self):
"""
Whether Human Resources should use Credentials
"""
return self.hrm.get("use_credentials", True)
def get_hrm_use_description(self):
"""
Whether Human Resources should use Physical Description
"""
return self.hrm.get("use_description", True)
def get_hrm_use_education(self):
"""
Whether Human Resources should show Education
"""
return self.hrm.get("use_education", False)
def get_hrm_use_id(self):
"""
Whether Human Resources should use Staff ID
"""
return self.hrm.get("use_id", True)
def get_hrm_use_skills(self):
"""
Whether Human Resources should use Skills
"""
return self.hrm.get("use_skills", True)
def get_hrm_use_trainings(self):
"""
Whether Human Resources should use Trainings
"""
return self.hrm.get("use_trainings", True)
# -------------------------------------------------------------------------
# Inventory Management Settings
#
def get_inv_collapse_tabs(self):
return self.inv.get("collapse_tabs", True)
def get_inv_facility_label(self):
return self.inv.get("facility_label", current.T("Warehouse"))
def get_inv_direct_stock_edits(self):
"""
Can Stock levels be adjusted directly?
- defaults to False
"""
return self.inv.get("direct_stock_edits", False)
def get_inv_send_show_mode_of_transport(self):
"""
Show mode of transport on Sent Shipments
"""
return self.inv.get("show_mode_of_transport", False)
def get_inv_send_show_org(self):
"""
Show Organisation on Sent Shipments
"""
return self.inv.get("send_show_org", True)
def get_inv_send_show_time_in(self):
"""
Show Time In on Sent Shipments
"""
return self.inv.get("send_show_time_in", False)
def get_inv_stock_count(self):
"""
Call Stock Adjustments 'Stock Counts'
"""
return self.inv.get("stock_count", True)
def get_inv_track_pack_values(self):
"""
Whether or not Pack values are tracked
"""
return self.inv.get("track_pack_values", True)
def get_inv_item_status(self):
"""
Item Statuses which can also be Sent Shipment Types
"""
T = current.T
return self.inv.get("item_status", {
0: current.messages["NONE"],
1: T("Dump"),
2: T("Sale"),
3: T("Reject"),
4: T("Surplus")
})
def get_inv_shipment_name(self):
"""
Get the name of Shipments
- currently supported options are:
* shipment
* order
"""
return self.inv.get("shipment_name", "shipment")
def get_inv_shipment_types(self):
"""
Shipment types which are common to both Send & Receive
"""
return self.inv.get("shipment_types", {
0 : current.messages["NONE"],
11: current.T("Internal Shipment"),
})
def get_inv_send_types(self):
"""
Shipment types which are just for Send
"""
return self.inv.get("send_types", {
21: current.T("Distribution"),
})
def get_inv_send_type_default(self):
"""
Which Shipment type is default
"""
return self.inv.get("send_type_default", 0)
def get_inv_recv_types(self):
"""
Shipment types which are just for Receive
"""
T = current.T
return self.inv.get("recv_types", {
#31: T("Other Warehouse"), Same as Internal Shipment
32: T("Donation"),
#33: T("Foreign Donation"),
34: T("Purchase"),
})
def get_inv_send_form_name(self):
return self.inv.get("send_form_name", "Waybill")
def get_inv_send_ref_field_name(self):
return self.inv.get("send_ref_field_name", "Waybill Number")
def get_inv_send_shortname(self):
return self.inv.get("send_shortname", "WB")
def get_inv_recv_form_name(self):
return self.inv.get("recv_form_name", "Goods Received Note")
def get_inv_recv_shortname(self):
return self.inv.get("recv_shortname", "GRN")
# -------------------------------------------------------------------------
# IRS
def get_irs_vehicle(self):
"""
Use Vehicles to respond to Incident Reports
"""
return self.irs.get("vehicle", False)
# -------------------------------------------------------------------------
# Organisation
def get_org_autocomplete(self):
"""
Whether organisation_id fields should use an Autocomplete instead of a dropdown
"""
return self.org.get("autocomplete", False)
def get_org_branches(self):
"""
Whether to support Organisation branches or not
"""
return self.org.get("branches", False)
def get_org_regions(self):
"""
Whether to support Organisation regions or not
"""
return self.org.get("regions", False)
def get_org_site_code_len(self):
"""
Length of auto-generated Codes for Facilities (org_site)
"""
return self.org.get("site_code_len", 10)
def get_org_site_label(self):
"""
Label for site_id fields
"""
return current.T(self.org.get("site_label", "Facility"))
def get_org_site_inv_req_tabs(self):
"""
Whether Sites should have Tabs for Inv/Req
"""
return self.org.get("site_inv_req_tabs", True)
def get_org_site_autocomplete(self):
"""
Whether site_id fields should use an Autocomplete instead of a dropdown
"""
return self.org.get("site_autocomplete", False)
def get_org_site_autocomplete_fields(self):
"""
Which extra fields should be returned in S3SiteAutocompleteWidget
"""
return self.org.get("site_autocomplete_fields", ["instance_type"])
def get_org_site_address_autocomplete(self):
"""
Whether site_id Autocomplete fields should search Address fields as well as name
"""
return self.org.get("site_address_autocomplete", False)
def get_org_site_last_contacted(self):
"""
Whether to display the last_contacted field for a Site
"""
return self.org.get("site_last_contacted", False)
def get_org_summary(self):
"""
Whether to use Summary fields for Organisation/Office:
# National/International staff
"""
return self.org.get("summary", False)
def set_org_dependent_field(self,
tablename=None,
fieldname=None,
enable_field=True):
"""
Enables/Disables optional fields according to a user's Organisation
- must specify either field or tablename/fieldname
(e.g. for virtual fields)
"""
enabled = False
dependent_fields = self.org.get("dependent_fields", None)
if dependent_fields:
org_name_list = dependent_fields.get("%s.%s" % (tablename,
fieldname),
None)
if org_name_list:
auth = current.auth
if auth.s3_has_role(auth.get_system_roles().ADMIN):
# Admins see all fields unless disabled for all orgs in this deployment
enabled = True
else:
s3db = current.s3db
otable = s3db.org_organisation
root_org_id = auth.root_org()
root_org = current.db(otable.id == root_org_id).select(otable.name,
limitby=(0, 1),
cache=s3db.cache
).first()
if root_org:
enabled = root_org.name in org_name_list
if enable_field:
field = current.s3db[tablename][fieldname]
field.readable = enabled
field.writable = enabled
return enabled
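    # Illustration only (organisation and field names are hypothetical) of the
    # configuration this helper consumes:
    #
    #   settings.org.dependent_fields = {
    #       "pr_person.middle_name": ["Example National Society"],
    #   }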
# -------------------------------------------------------------------------
# Persons
def get_pr_age_group(self, age):
"""
Function to provide the age group for an age
"""
fn = self.pr.get("age_group", None)
if fn:
group = fn(age)
else:
# Default
if age < 18 :
group = "-17" # "< 18"/" < 18" don't sort correctly
elif age < 25 :
group = "18-24"
elif age < 40:
group = "25-39"
elif age < 60:
group = "40-59"
else:
group = "60+"
return group
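    # A template can replace the default banding with its own callable, e.g.
    # (sketch only):
    #
    #   settings.pr.age_group = lambda age: "minor" if age < 18 else "adult"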
def get_pr_request_dob(self):
""" Include Date of Birth in the AddPersonWidget[2] """
return self.pr.get("request_dob", True)
def get_pr_request_gender(self):
""" Include Gender in the AddPersonWidget[2] """
return self.pr.get("request_gender", True)
def get_pr_request_home_phone(self):
""" Include Home Phone in the AddPersonWidget2 """
return self.pr.get("request_home_phone", False)
def get_pr_select_existing(self):
"""
Whether the AddPersonWidget allows selecting existing PRs
- set to True if Persons can be found in multiple contexts
- set to False if just a single context
"""
return self.pr.get("select_existing", True)
def get_pr_import_update_requires_email(self):
"""
During imports, records are only updated if the import
item contains a (matching) email address
"""
return self.pr.get("import_update_requires_email", True)
# -------------------------------------------------------------------------
# Proc
def get_proc_form_name(self):
return self.proc.get("form_name", "Purchase Order")
def get_proc_shortname(self):
        return self.proc.get("shortname", "PO")
# -------------------------------------------------------------------------
# Projects
def get_project_mode_3w(self):
"""
Enable 3W mode in the projects module
"""
return self.project.get("mode_3w", False)
def get_project_mode_task(self):
"""
Enable Tasks mode in the projects module
"""
return self.project.get("mode_task", False)
def get_project_mode_drr(self):
"""
Enable DRR extensions in the projects module
"""
return self.project.get("mode_drr", False)
def get_project_activities(self):
"""
Use Activities in Projects
"""
return self.project.get("activities", False)
def get_project_codes(self):
"""
Use Codes in Projects
"""
return self.project.get("codes", False)
def get_project_community(self):
"""
Label project_location as 'Community'
"""
return self.project.get("community", False)
#def get_project_locations_from_countries(self):
# """
# Create a project_location for each country that a Project is
# implemented in
# """
# return self.project.get("locations_from_countries", False)
def get_project_milestones(self):
"""
Use Milestones in Projects
"""
return self.project.get("milestones", False)
def get_project_sectors(self):
"""
Use Sectors in Projects
"""
return self.project.get("sectors", True)
def get_project_theme_percentages(self):
"""
Use Theme Percentages in Projects
"""
return self.project.get("theme_percentages", False)
def get_project_multiple_budgets(self):
"""
Use Multiple Budgets in Projects
"""
return self.project.get("multiple_budgets", False)
def get_project_multiple_organisations(self):
"""
Use Multiple Organisations in Projects
"""
return self.project.get("multiple_organisations", False)
def get_project_organisation_roles(self):
T = current.T
return self.project.get("organisation_roles", {
1: T("Lead Implementer"), # T("Host National Society")
2: T("Partner"), # T("Partner National Society")
3: T("Donor"),
#4: T("Customer"), # T("Beneficiary")?
#5: T("Supplier") # T("Beneficiary")?
})
def get_project_organisation_lead_role(self):
return self.project.get("organisation_lead_role", 1)
# -------------------------------------------------------------------------
# Request Settings
def get_req_req_type(self):
"""
The Types of Request which can be made.
Select one or more from:
* People
* Stock
* Other
tbc: Assets, Shelter, Food
"""
return self.req.get("req_type", ["Stock", "People", "Other"])
def get_req_type_inv_label(self):
return current.T(self.req.get("type_inv_label", "Warehouse Stock"))
def get_req_type_hrm_label(self):
return current.T(self.req.get("type_hrm_label", "People"))
def get_req_requester_label(self):
return current.T(self.req.get("requester_label", "Requester"))
def get_req_requester_optional(self):
return self.req.get("requester_optional", False)
def get_req_requester_is_author(self):
"""
Whether the User Account logging the Request is normally the Requester
"""
return self.req.get("requester_is_author", True)
def get_req_requester_from_site(self):
"""
Whether the Requester has to be a staff of the site making the Request
"""
return self.req.get("requester_from_site", False)
def get_req_requester_to_site(self):
"""
Whether to set the Requester as being an HR for the Site if no HR record yet & as Site contact if none yet exists
"""
return self.req.get("requester_to_site", False)
def get_req_date_writable(self):
""" Whether Request Date should be manually editable """
return self.req.get("date_writable", True)
def get_req_status_writable(self):
""" Whether Request Status should be manually editable """
return self.req.get("status_writable", True)
def get_req_item_quantities_writable(self):
""" Whether Item Quantities should be manually editable """
return self.req.get("item_quantities_writable", False)
def get_req_skill_quantities_writable(self):
""" Whether People Quantities should be manually editable """
return self.req.get("skill_quantities_writable", False)
def get_req_multiple_req_items(self):
"""
Can a Request have multiple line items?
- e.g. ICS says that each request should be just for items of a single Type
"""
return self.req.get("multiple_req_items", True)
def get_req_show_quantity_transit(self):
return self.req.get("show_quantity_transit", True)
def get_req_inline_forms(self):
"""
Whether Requests module should use inline forms for Items
"""
return self.req.get("inline_forms", True)
def get_req_prompt_match(self):
"""
Whether a Requester is prompted to match each line item in an Item request
"""
return self.req.get("prompt_match", True)
def get_req_summary(self):
"""
Whether to use Summary Needs for Sites (Office/Facility currently):
"""
return self.req.get("summary", False)
def get_req_use_commit(self):
"""
Whether there is a Commit step in Requests Management
"""
return self.req.get("use_commit", True)
def get_req_commit_value(self):
"""
Whether Donations should have a Value field
"""
return self.req.get("commit_value", False)
def get_req_commit_without_request(self):
"""
Whether to allow Donations to be made without a matching Request
"""
return self.req.get("commit_without_request", False)
def get_req_committer_is_author(self):
""" Whether the User Account logging the Commitment is normally the Committer """
return self.req.get("committer_is_author", True)
def get_req_ask_security(self):
"""
Should Requests ask whether Security is required?
"""
return self.req.get("ask_security", False)
def get_req_ask_transport(self):
"""
Should Requests ask whether Transportation is required?
"""
return self.req.get("ask_transport", False)
def get_req_items_ask_purpose(self):
"""
Should Requests for Items ask for Purpose?
"""
return self.req.get("items_ask_purpose", True)
def get_req_req_crud_strings(self, type = None):
return self.req.get("req_crud_strings") and \
self.req.req_crud_strings.get(type, None)
def get_req_use_req_number(self):
return self.req.get("use_req_number", True)
def get_req_generate_req_number(self):
return self.req.get("generate_req_number", True)
def get_req_form_name(self):
return self.req.get("req_form_name", "Requisition Form")
def get_req_shortname(self):
return self.req.get("req_shortname", "REQ")
def get_req_restrict_on_complete(self):
"""
            Whether to prevent new commitments being added once a Request is Complete.
"""
return self.req.get("req_restrict_on_complete", False)
# -------------------------------------------------------------------------
# Supply
def get_supply_catalog_default(self):
return self.inv.get("catalog_default", "Default")
def get_supply_use_alt_name(self):
return self.supply.get("use_alt_name", True)
# -------------------------------------------------------------------------
# Hospital Registry
def get_hms_track_ctc(self):
return self.hms.get("track_ctc", False)
def get_hms_activity_reports(self):
return self.hms.get("activity_reports", False)
# -------------------------------------------------------------------------
# Active modules list
def has_module(self, module_name):
if not self.modules:
# Provide a minimal list of core modules
_modules = [
"default", # Default
"admin", # Admin
"gis", # GIS
"pr", # Person Registry
"org" # Organization Registry
]
else:
_modules = self.modules
return module_name in _modules
# END =========================================================================
| mit | 6,573,373,098,371,668,000 | 35.155325 | 209 | 0.538139 | false |
jimi-c/ansible | lib/ansible/modules/cloud/ovirt/ovirt_scheduling_policies_facts.py | 2 | 4085 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_scheduling_policies_facts
short_description: Retrieve facts about one or more oVirt scheduling policies
author: "Ondra Machacek (@machacekondra)"
version_added: "2.4"
description:
- "Retrieve facts about one or more oVirt scheduling policies."
notes:
- "This module creates a new top-level C(ovirt_scheduling_policies) fact,
which contains a list of scheduling policies."
options:
id:
description:
- "ID of the scheduling policy."
required: true
name:
description:
- "Name of the scheduling policy, can be used as glob expression."
extends_documentation_fragment: ovirt_facts
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Gather facts about all scheduling policies with name InClusterUpgrade:
- ovirt_scheduling_policies_facts:
name: InClusterUpgrade
- debug:
var: ovirt_scheduling_policies
'''
RETURN = '''
ovirt_scheduling_policies:
description: "List of dictionaries describing the scheduling policies.
Scheduling policies attributes are mapped to dictionary keys,
all scheduling policies attributes can be found at following
url: https://ovirt.example.com/ovirt-engine/api/model#types/scheduling_policy."
returned: On success.
type: list
'''
import fnmatch
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
check_sdk,
create_connection,
get_dict_of_struct,
ovirt_facts_full_argument_spec,
)
def main():
argument_spec = ovirt_facts_full_argument_spec(
id=dict(default=None),
name=dict(default=None),
)
module = AnsibleModule(argument_spec)
check_sdk(module)
try:
auth = module.params.pop('auth')
connection = create_connection(auth)
system_service = connection.system_service()
sched_policies_service = system_service.scheduling_policies_service()
if module.params['name']:
sched_policies = [
e for e in sched_policies_service.list()
if fnmatch.fnmatch(e.name, module.params['name'])
]
elif module.params['id']:
sched_policies = [
sched_policies_service.service(module.params['id']).get()
]
else:
sched_policies = sched_policies_service.list()
module.exit_json(
changed=False,
ansible_facts=dict(
ovirt_scheduling_policies=[
get_dict_of_struct(
struct=c,
connection=connection,
fetch_nested=module.params.get('fetch_nested'),
attributes=module.params.get('nested_attributes'),
) for c in sched_policies
],
),
)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=auth.get('token') is None)
if __name__ == '__main__':
main()
| gpl-3.0 | -6,534,769,872,052,733,000 | 31.165354 | 97 | 0.637209 | false |
nikhilprathapani/python-for-android | python3-alpha/python3-src/Lib/distutils/tests/test_bdist_wininst.py | 53 | 1038 | """Tests for distutils.command.bdist_wininst."""
import unittest
from test.support import run_unittest
from distutils.command.bdist_wininst import bdist_wininst
from distutils.tests import support
class BuildWinInstTestCase(support.TempdirManager,
support.LoggingSilencer,
unittest.TestCase):
def test_get_exe_bytes(self):
# issue5731: command was broken on non-windows platforms
# this test makes sure it works now for every platform
# let's create a command
pkg_pth, dist = self.create_dist()
cmd = bdist_wininst(dist)
cmd.ensure_finalized()
# let's run the code that finds the right wininst*.exe file
# and make sure it finds it and returns its content
# no matter what platform we have
exe_file = cmd.get_exe_bytes()
self.assertTrue(len(exe_file) > 10)
def test_suite():
return unittest.makeSuite(BuildWinInstTestCase)
if __name__ == '__main__':
run_unittest(test_suite())
| apache-2.0 | 7,292,239,186,281,860,000 | 32.483871 | 67 | 0.655106 | false |
shashank971/edx-platform | common/djangoapps/student/management/commands/assigngroups.py | 170 | 3059 | from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
from student.models import UserTestGroup
import random
import sys
import datetime
from textwrap import dedent
import json
from pytz import UTC
def group_from_value(groups, v):
    ''' Given groups: (('a',0.3),('b',0.4),('c',0.3)) and a random value
    v in [0,1], return the associated group (in the above case, return
    'a' if v<0.3, 'b' if 0.3<=v<0.7, and 'c' if v>=0.7).
    '''
sum = 0
for (g, p) in groups:
sum = sum + p
if sum > v:
return g
return g # For round-off errors
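# Example (illustrative): with groups (('a', 0.3), ('b', 0.4), ('c', 0.3)),
# group_from_value(groups, 0.5) returns 'b', since 0.3 <= 0.5 < 0.7.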
class Command(BaseCommand):
help = dedent("""\
Assign users to test groups. Takes a list of groups:
a:0.3,b:0.4,c:0.3 file.txt "Testing something"
Will assign each user to group a, b, or c with
probability 0.3, 0.4, 0.3. Probabilities must
add up to 1.
Will log what happened to file.txt.
""")
def handle(self, *args, **options):
if len(args) != 3:
print "Invalid number of options"
sys.exit(-1)
# Extract groups from string
group_strs = [x.split(':') for x in args[0].split(',')]
groups = [(group, float(value)) for group, value in group_strs]
print "Groups", groups
## Confirm group probabilities add up to 1
total = sum(zip(*groups)[1])
print "Total:", total
if abs(total - 1) > 0.01:
print "Total not 1"
sys.exit(-1)
## Confirm groups don't already exist
for group in dict(groups):
if UserTestGroup.objects.filter(name=group).count() != 0:
print group, "already exists!"
sys.exit(-1)
group_objects = {}
f = open(args[1], "a+")
## Create groups
for group in dict(groups):
utg = UserTestGroup()
utg.name = group
            utg.description = json.dumps({"description": args[2],
                                           "time": datetime.datetime.now(UTC).isoformat()})
group_objects[group] = utg
group_objects[group].save()
## Assign groups
users = list(User.objects.all())
count = 0
for user in users:
if count % 1000 == 0:
print count
count = count + 1
v = random.uniform(0, 1)
group = group_from_value(groups, v)
group_objects[group].users.add(user)
f.write(u"Assigned user {name} ({id}) to {group}\n".format(
name=user.username,
id=user.id,
group=group
).encode('utf-8'))
## Save groups
for group in group_objects:
group_objects[group].save()
f.close()
# python manage.py assigngroups summary_test:0.3,skip_summary_test:0.7 log.txt "Do previews of future materials help?"
# python manage.py assigngroups skip_capacitor:0.3,capacitor:0.7 log.txt "Do we show capacitor in linearity tutorial?"
| agpl-3.0 | -1,424,312,265,535,190,500 | 30.536082 | 118 | 0.55541 | false |
lumig242/Hue-Integration-with-CDAP | desktop/core/ext-py/Django-1.6.10/django/contrib/gis/db/models/sql/compiler.py | 76 | 14014 | import datetime
from django.conf import settings
from django.db.backends.util import truncate_name, typecast_date, typecast_timestamp
from django.db.models.sql import compiler
from django.db.models.sql.constants import MULTI
from django.utils import six
from django.utils.six.moves import zip, zip_longest
from django.utils import timezone
SQLCompiler = compiler.SQLCompiler
class GeoSQLCompiler(compiler.SQLCompiler):
def get_columns(self, with_aliases=False):
"""
Return the list of columns to use in the select statement. If no
columns have been specified, returns all columns relating to fields in
the model.
If 'with_aliases' is true, any column names that are duplicated
(without the table names) are given unique aliases. This is needed in
        some cases to avoid ambiguity with nested queries.
This routine is overridden from Query to handle customized selection of
geometry columns.
"""
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
result = ['(%s) AS %s' % (self.get_extra_select_format(alias) % col[0], qn2(alias))
for alias, col in six.iteritems(self.query.extra_select)]
params = []
aliases = set(self.query.extra_select.keys())
if with_aliases:
col_aliases = aliases.copy()
else:
col_aliases = set()
if self.query.select:
only_load = self.deferred_to_columns()
# This loop customized for GeoQuery.
for col, field in self.query.select:
if isinstance(col, (list, tuple)):
alias, column = col
table = self.query.alias_map[alias].table_name
if table in only_load and column not in only_load[table]:
continue
r = self.get_field_select(field, alias, column)
if with_aliases:
if col[1] in col_aliases:
c_alias = 'Col%d' % len(col_aliases)
result.append('%s AS %s' % (r, c_alias))
aliases.add(c_alias)
col_aliases.add(c_alias)
else:
result.append('%s AS %s' % (r, qn2(col[1])))
aliases.add(r)
col_aliases.add(col[1])
else:
result.append(r)
aliases.add(r)
col_aliases.add(col[1])
else:
col_sql, col_params = col.as_sql(qn, self.connection)
result.append(col_sql)
params.extend(col_params)
if hasattr(col, 'alias'):
aliases.add(col.alias)
col_aliases.add(col.alias)
elif self.query.default_cols:
cols, new_aliases = self.get_default_columns(with_aliases,
col_aliases)
result.extend(cols)
aliases.update(new_aliases)
max_name_length = self.connection.ops.max_name_length()
for alias, aggregate in self.query.aggregate_select.items():
agg_sql, agg_params = aggregate.as_sql(qn, self.connection)
if alias is None:
result.append(agg_sql)
else:
result.append('%s AS %s' % (agg_sql, qn(truncate_name(alias, max_name_length))))
params.extend(agg_params)
# This loop customized for GeoQuery.
for (table, col), field in self.query.related_select_cols:
r = self.get_field_select(field, table, col)
if with_aliases and col in col_aliases:
c_alias = 'Col%d' % len(col_aliases)
result.append('%s AS %s' % (r, c_alias))
aliases.add(c_alias)
col_aliases.add(c_alias)
else:
result.append(r)
aliases.add(r)
col_aliases.add(col)
self._select_aliases = aliases
return result, params
def get_default_columns(self, with_aliases=False, col_aliases=None,
start_alias=None, opts=None, as_pairs=False, from_parent=None):
"""
Computes the default columns for selecting every field in the base
model. Will sometimes be called to pull in related models (e.g. via
select_related), in which case "opts" and "start_alias" will be given
to provide a starting point for the traversal.
Returns a list of strings, quoted appropriately for use in SQL
directly, as well as a set of aliases used in the select statement (if
'as_pairs' is True, returns a list of (alias, col_name) pairs instead
of strings as the first component and None as the second component).
This routine is overridden from Query to handle customized selection of
geometry columns.
"""
result = []
if opts is None:
opts = self.query.get_meta()
aliases = set()
only_load = self.deferred_to_columns()
seen = self.query.included_inherited_models.copy()
if start_alias:
seen[None] = start_alias
for field, model in opts.get_concrete_fields_with_model():
if from_parent and model is not None and issubclass(from_parent, model):
# Avoid loading data for already loaded parents.
continue
alias = self.query.join_parent_model(opts, model, start_alias, seen)
table = self.query.alias_map[alias].table_name
if table in only_load and field.column not in only_load[table]:
continue
if as_pairs:
result.append((alias, field))
aliases.add(alias)
continue
# This part of the function is customized for GeoQuery. We
# see if there was any custom selection specified in the
# dictionary, and set up the selection format appropriately.
field_sel = self.get_field_select(field, alias)
if with_aliases and field.column in col_aliases:
c_alias = 'Col%d' % len(col_aliases)
result.append('%s AS %s' % (field_sel, c_alias))
col_aliases.add(c_alias)
aliases.add(c_alias)
else:
r = field_sel
result.append(r)
aliases.add(r)
if with_aliases:
col_aliases.add(field.column)
return result, aliases
def resolve_columns(self, row, fields=()):
"""
This routine is necessary so that distances and geometries returned
from extra selection SQL get resolved appropriately into Python
objects.
"""
values = []
aliases = list(self.query.extra_select)
# Have to set a starting row number offset that is used for
# determining the correct starting row index -- needed for
# doing pagination with Oracle.
rn_offset = 0
if self.connection.ops.oracle:
if self.query.high_mark is not None or self.query.low_mark: rn_offset = 1
index_start = rn_offset + len(aliases)
# Converting any extra selection values (e.g., geometries and
# distance objects added by GeoQuerySet methods).
values = [self.query.convert_values(v,
self.query.extra_select_fields.get(a, None),
self.connection)
for v, a in zip(row[rn_offset:index_start], aliases)]
if self.connection.ops.oracle or getattr(self.query, 'geo_values', False):
# We resolve the rest of the columns if we're on Oracle or if
# the `geo_values` attribute is defined.
for value, field in zip_longest(row[index_start:], fields):
values.append(self.query.convert_values(value, field, self.connection))
else:
values.extend(row[index_start:])
return tuple(values)
#### Routines unique to GeoQuery ####
def get_extra_select_format(self, alias):
sel_fmt = '%s'
if hasattr(self.query, 'custom_select') and alias in self.query.custom_select:
sel_fmt = sel_fmt % self.query.custom_select[alias]
return sel_fmt
def get_field_select(self, field, alias=None, column=None):
"""
Returns the SELECT SQL string for the given field. Figures out
        if any custom selection SQL is needed for the column. The `alias`
keyword may be used to manually specify the database table where
the column exists, if not in the model associated with this
`GeoQuery`. Similarly, `column` may be used to specify the exact
column name, rather than using the `column` attribute on `field`.
"""
sel_fmt = self.get_select_format(field)
if field in self.query.custom_select:
field_sel = sel_fmt % self.query.custom_select[field]
else:
field_sel = sel_fmt % self._field_column(field, alias, column)
return field_sel
def get_select_format(self, fld):
"""
Returns the selection format string, depending on the requirements
of the spatial backend. For example, Oracle and MySQL require custom
selection formats in order to retrieve geometries in OGC WKT. For all
other fields a simple '%s' format string is returned.
"""
if self.connection.ops.select and hasattr(fld, 'geom_type'):
# This allows operations to be done on fields in the SELECT,
# overriding their values -- used by the Oracle and MySQL
# spatial backends to get database values as WKT, and by the
# `transform` method.
sel_fmt = self.connection.ops.select
# Because WKT doesn't contain spatial reference information,
# the SRID is prefixed to the returned WKT to ensure that the
# transformed geometries have an SRID different than that of the
# field -- this is only used by `transform` for Oracle and
# SpatiaLite backends.
if self.query.transformed_srid and ( self.connection.ops.oracle or
self.connection.ops.spatialite ):
sel_fmt = "'SRID=%d;'||%s" % (self.query.transformed_srid, sel_fmt)
else:
sel_fmt = '%s'
return sel_fmt
# Private API utilities, subject to change.
def _field_column(self, field, table_alias=None, column=None):
"""
Helper function that returns the database column for the given field.
The table and column are returned (quoted) in the proper format, e.g.,
`"geoapp_city"."point"`. If `table_alias` is not specified, the
database table associated with the model of this `GeoQuery` will be
used. If `column` is specified, it will be used instead of the value
in `field.column`.
"""
if table_alias is None: table_alias = self.query.get_meta().db_table
return "%s.%s" % (self.quote_name_unless_alias(table_alias),
self.connection.ops.quote_name(column or field.column))
class SQLInsertCompiler(compiler.SQLInsertCompiler, GeoSQLCompiler):
pass
class SQLDeleteCompiler(compiler.SQLDeleteCompiler, GeoSQLCompiler):
pass
class SQLUpdateCompiler(compiler.SQLUpdateCompiler, GeoSQLCompiler):
pass
class SQLAggregateCompiler(compiler.SQLAggregateCompiler, GeoSQLCompiler):
pass
class SQLDateCompiler(compiler.SQLDateCompiler, GeoSQLCompiler):
"""
This is overridden for GeoDjango to properly cast date columns, since
`GeoQuery.resolve_columns` is used for spatial values.
See #14648, #16757.
"""
def results_iter(self):
if self.connection.ops.oracle:
from django.db.models.fields import DateTimeField
fields = [DateTimeField()]
else:
needs_string_cast = self.connection.features.needs_datetime_string_cast
offset = len(self.query.extra_select)
for rows in self.execute_sql(MULTI):
for row in rows:
date = row[offset]
if self.connection.ops.oracle:
date = self.resolve_columns(row, fields)[offset]
elif needs_string_cast:
date = typecast_date(str(date))
if isinstance(date, datetime.datetime):
date = date.date()
yield date
class SQLDateTimeCompiler(compiler.SQLDateTimeCompiler, GeoSQLCompiler):
"""
This is overridden for GeoDjango to properly cast date columns, since
`GeoQuery.resolve_columns` is used for spatial values.
See #14648, #16757.
"""
def results_iter(self):
if self.connection.ops.oracle:
from django.db.models.fields import DateTimeField
fields = [DateTimeField()]
else:
needs_string_cast = self.connection.features.needs_datetime_string_cast
offset = len(self.query.extra_select)
for rows in self.execute_sql(MULTI):
for row in rows:
datetime = row[offset]
if self.connection.ops.oracle:
datetime = self.resolve_columns(row, fields)[offset]
elif needs_string_cast:
datetime = typecast_timestamp(str(datetime))
                # Datetimes are artificially returned in UTC on databases that
# don't support time zone. Restore the zone used in the query.
if settings.USE_TZ:
datetime = datetime.replace(tzinfo=None)
datetime = timezone.make_aware(datetime, self.query.tzinfo)
yield datetime
| apache-2.0 | -700,933,559,621,958,700 | 43.773163 | 96 | 0.588412 | false |
erkrishna9/odoo | addons/l10n_es/__openerp__.py | 314 | 2772 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2008-2010 Zikzakmedia S.L. (http://zikzakmedia.com) All Rights Reserved.
# Jordi Esteve <[email protected]>
# Copyright (c) 2012-2013, Grupo OPENTIA (<http://opentia.com>) Registered EU Trademark.
# Dpto. Consultoría <[email protected]>
# Copyright (c) 2013 Serv. Tecnol. Avanzados (http://www.serviciosbaeza.com)
# Pedro Manuel Baeza <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name" : "Spanish Charts of Accounts (PGCE 2008)",
"version" : "4.0",
"author" : "Spanish Localization Team",
'website' : 'https://launchpad.net/openerp-spain',
"category" : "Localization/Account Charts",
"description": """
Spanish charts of accounts (PGCE 2008).
========================================
* Defines the following chart of account templates:
* Spanish general chart of accounts 2008
* Spanish general chart of accounts 2008 for small and medium companies
* Spanish general chart of accounts 2008 for associations
* Defines templates for sale and purchase VAT
* Defines tax code templates
* Defines fiscal positions for spanish fiscal legislation
""",
"license" : "AGPL-3",
"depends" : ["account", "base_vat", "base_iban"],
"data" : [
"account_type.xml",
"account_chart_template.xml",
"account_account_common.xml",
"account_account_full.xml",
"account_account_pymes.xml",
"account_account_assoc.xml",
"tax_codes_common.xml",
"taxes_common.xml",
"fiscal_templates_common.xml",
"account_chart_template_post.xml",
"l10n_es_wizard.xml",
],
"demo" : [],
'auto_install': False,
"installable": True,
'images': ['images/config_chart_l10n_es.png', 'images/l10n_es_chart.png'],
}
| agpl-3.0 | -2,250,878,701,145,068,500 | 42.296875 | 91 | 0.604836 | false |
CoderSong2015/utFordataType | googletest/googletest/test/gtest_test_utils.py | 20 | 10824 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for Google C++ Testing Framework."""
__author__ = '[email protected] (Zhanyong Wan)'
import atexit
import os
import shutil
import sys
import tempfile
import unittest
_test_module = unittest
# Suppresses the 'Import not at the top of the file' lint complaint.
# pylint: disable-msg=C6204
try:
import subprocess
_SUBPROCESS_MODULE_AVAILABLE = True
except:
import popen2
_SUBPROCESS_MODULE_AVAILABLE = False
# pylint: enable-msg=C6204
GTEST_OUTPUT_VAR_NAME = 'GTEST_OUTPUT'
IS_WINDOWS = os.name == 'nt'
IS_CYGWIN = os.name == 'posix' and 'CYGWIN' in os.uname()[0]
# The environment variable for specifying the path to the premature-exit file.
PREMATURE_EXIT_FILE_ENV_VAR = 'TEST_PREMATURE_EXIT_FILE'
environ = os.environ.copy()
def SetEnvVar(env_var, value):
"""Sets/unsets an environment variable to a given value."""
if value is not None:
environ[env_var] = value
elif env_var in environ:
del environ[env_var]
# Here we expose a class from a particular module, depending on the
# environment. The comment suppresses the 'Invalid variable name' lint
# complaint.
TestCase = _test_module.TestCase # pylint: disable-msg=C6409
# Initially maps a flag to its default value. After
# _ParseAndStripGTestFlags() is called, maps a flag to its actual value.
_flag_map = {'source_dir': os.path.dirname(sys.argv[0]),
'build_dir': os.path.dirname(sys.argv[0])}
_gtest_flags_are_parsed = False
def _ParseAndStripGTestFlags(argv):
"""Parses and strips Google Test flags from argv. This is idempotent."""
# Suppresses the lint complaint about a global variable since we need it
# here to maintain module-wide state.
global _gtest_flags_are_parsed # pylint: disable-msg=W0603
if _gtest_flags_are_parsed:
return
_gtest_flags_are_parsed = True
for flag in _flag_map:
# The environment variable overrides the default value.
if flag.upper() in os.environ:
_flag_map[flag] = os.environ[flag.upper()]
# The command line flag overrides the environment variable.
i = 1 # Skips the program name.
while i < len(argv):
prefix = '--' + flag + '='
if argv[i].startswith(prefix):
_flag_map[flag] = argv[i][len(prefix):]
del argv[i]
break
else:
# We don't increment i in case we just found a --gtest_* flag
# and removed it from argv.
i += 1
def GetFlag(flag):
"""Returns the value of the given flag."""
# In case GetFlag() is called before Main(), we always call
# _ParseAndStripGTestFlags() here to make sure the --gtest_* flags
# are parsed.
_ParseAndStripGTestFlags(sys.argv)
return _flag_map[flag]
def GetSourceDir():
"""Returns the absolute path of the directory where the .py files are."""
return os.path.abspath(GetFlag('source_dir'))
def GetBuildDir():
"""Returns the absolute path of the directory where the test binaries are."""
return os.path.abspath(GetFlag('build_dir'))
_temp_dir = None
def _RemoveTempDir():
if _temp_dir:
shutil.rmtree(_temp_dir, ignore_errors=True)
atexit.register(_RemoveTempDir)
def GetTempDir():
"""Returns a directory for temporary files."""
global _temp_dir
if not _temp_dir:
_temp_dir = tempfile.mkdtemp()
return _temp_dir
def GetTestExecutablePath(executable_name, build_dir=None):
"""Returns the absolute path of the test binary given its name.
The function will print a message and abort the program if the resulting file
doesn't exist.
Args:
executable_name: name of the test binary that the test script runs.
build_dir: directory where to look for executables, by default
the result of GetBuildDir().
Returns:
The absolute path of the test binary.
"""
path = os.path.abspath(os.path.join(build_dir or GetBuildDir(),
executable_name))
if (IS_WINDOWS or IS_CYGWIN) and not path.endswith('.exe'):
path += '.exe'
if not os.path.exists(path):
message = (
'Unable to find the test binary "%s". Please make sure to provide\n'
'a path to the binary via the --build_dir flag or the BUILD_DIR\n'
'environment variable.' % path)
sys.stdout.write(message)
sys.exit(1)
return path
def GetExitStatus(exit_code):
"""Returns the argument to exit(), or -1 if exit() wasn't called.
Args:
exit_code: the result value of os.system(command).
"""
if os.name == 'nt':
# On Windows, os.WEXITSTATUS() doesn't work and os.system() returns
# the argument to exit() directly.
return exit_code
else:
# On Unix, os.WEXITSTATUS() must be used to extract the exit status
# from the result of os.system().
if os.WIFEXITED(exit_code):
return os.WEXITSTATUS(exit_code)
else:
return -1
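# Hedged usage sketch, not part of the original utilities: GetExitStatus()
# is meant to be applied to the raw value returned by os.system().  The
# command below is purely illustrative.
#
#   raw = os.system('./some_test_binary')
#   status = GetExitStatus(raw)  # exit code, or -1 if exit() was never called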
class Subprocess:
def __init__(self, command, working_dir=None, capture_stderr=True, env=None):
"""Changes into a specified directory, if provided, and executes a command.
Restores the old directory afterwards.
Args:
command: The command to run, in the form of sys.argv.
working_dir: The directory to change into.
capture_stderr: Determines whether to capture stderr in the output member
or to discard it.
env: Dictionary with environment to pass to the subprocess.
Returns:
      An object that represents the outcome of the executed process. It has the
following attributes:
terminated_by_signal True iff the child process has been terminated
by a signal.
          signal Signal that terminated the child process.
exited True iff the child process exited normally.
exit_code The code with which the child process exited.
output Child process's stdout and stderr output
combined in a string.
"""
    # The subprocess module is the preferable way of running programs
# since it is available and behaves consistently on all platforms,
# including Windows. But it is only available starting in python 2.4.
# In earlier python versions, we revert to the popen2 module, which is
# available in python 2.0 and later but doesn't provide required
# functionality (Popen4) under Windows. This allows us to support Mac
# OS X 10.4 Tiger, which has python 2.3 installed.
if _SUBPROCESS_MODULE_AVAILABLE:
if capture_stderr:
stderr = subprocess.STDOUT
else:
stderr = subprocess.PIPE
p = subprocess.Popen(command,
stdout=subprocess.PIPE, stderr=stderr,
cwd=working_dir, universal_newlines=True, env=env)
# communicate returns a tuple with the file object for the child's
# output.
self.output = p.communicate()[0]
self._return_code = p.returncode
else:
old_dir = os.getcwd()
def _ReplaceEnvDict(dest, src):
# Changes made by os.environ.clear are not inheritable by child
# processes until Python 2.6. To produce inheritable changes we have
# to delete environment items with the del statement.
for key in dest.keys():
del dest[key]
dest.update(src)
# When 'env' is not None, backup the environment variables and replace
# them with the passed 'env'. When 'env' is None, we simply use the
# current 'os.environ' for compatibility with the subprocess.Popen
# semantics used above.
if env is not None:
old_environ = os.environ.copy()
_ReplaceEnvDict(os.environ, env)
try:
if working_dir is not None:
os.chdir(working_dir)
if capture_stderr:
p = popen2.Popen4(command)
else:
p = popen2.Popen3(command)
p.tochild.close()
self.output = p.fromchild.read()
ret_code = p.wait()
finally:
os.chdir(old_dir)
# Restore the old environment variables
# if they were replaced.
if env is not None:
_ReplaceEnvDict(os.environ, old_environ)
# Converts ret_code to match the semantics of
# subprocess.Popen.returncode.
if os.WIFSIGNALED(ret_code):
self._return_code = -os.WTERMSIG(ret_code)
else: # os.WIFEXITED(ret_code) should return True here.
self._return_code = os.WEXITSTATUS(ret_code)
if self._return_code < 0:
self.terminated_by_signal = True
self.exited = False
self.signal = -self._return_code
else:
self.terminated_by_signal = False
self.exited = True
self.exit_code = self._return_code
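# Hedged usage sketch, not part of the original utilities: a test script
# would typically locate its binary with GetTestExecutablePath() and run it
# through Subprocess.  The binary name 'gtest_example_test_' is illustrative.
def _RunExampleBinary():
  """Runs a hypothetical gtest binary and returns its combined output."""
  exe_path = GetTestExecutablePath('gtest_example_test_')
  p = Subprocess([exe_path, '--gtest_list_tests'], capture_stderr=True)
  if not p.exited or p.exit_code != 0:
    raise RuntimeError('test binary failed:\n%s' % p.output)
  return p.output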
def Main():
"""Runs the unit test."""
# We must call _ParseAndStripGTestFlags() before calling
# unittest.main(). Otherwise the latter will be confused by the
# --gtest_* flags.
_ParseAndStripGTestFlags(sys.argv)
# The tested binaries should not be writing XML output files unless the
# script explicitly instructs them to.
# TODO([email protected]): Move this into Subprocess when we implement
# passing environment into it as a parameter.
if GTEST_OUTPUT_VAR_NAME in os.environ:
del os.environ[GTEST_OUTPUT_VAR_NAME]
_test_module.main()
| apache-2.0 | 6,009,014,981,150,480,000 | 32.825 | 79 | 0.67415 | false |
lewischeng-ms/pox | pox/lib/socketcapture.py | 24 | 5511 | # Copyright 2012 James McCauley
#
# This file is part of POX.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
from pox.lib.addresses import *
import pox.lib.packet as pkt
from struct import pack
import time
from struct import pack
import time
class SocketWedge (object):
def __init__ (self, socket):
self._socket = socket
def send (self, string, *args, **kw):
r = self._socket.send(string, *args, **kw)
self._send_out(string, r)
return r
def recv (self, bufsize, *args, **kw):
r = self._socket.recv(bufsize, *args, **kw)
self._recv_out(r)
return r
def __getattr__ (self, n):
return getattr(self._socket, n)
class PCapWriter (object):
def __init__ (self, outstream, socket = None, flush = False,
local_addrs = (None,None,None),
remote_addrs = (None,None,None)):
"""
outstream is the stream to write the PCAP trace to.
Ethernet addresses have to be faked, and it can be convenient to
fake IP and TCP addresses as well. Thus, you can specify local_addrs
or remote_addrs. These are tuples of (EthAddr, IPAddr, TCPPort).
Any item that is None gets a default value.
"""
self._out = outstream
self._flush = flush
if socket is not None:
remote = socket.getpeername()
local = socket.getsockname()
else:
remote = ("1.1.1.1",1)
local = ("0.0.0.0",0)
def create_packet (e1,e2,i1,i2,t1,t2):
e = pkt.ethernet(
src = e1,
dst = e2,
type = pkt.ethernet.IP_TYPE)
i = pkt.ipv4(
srcip = i1,
dstip = i2,
protocol = pkt.ipv4.TCP_PROTOCOL)
t = pkt.tcp(
srcport = t1,
dstport = t2,
off = 5,
win = 1)
t.ACK = True
i.payload = t
e.payload = i
return e
self._c_to_s = create_packet(
local_addrs[0] or EthAddr("\x02" + "\x00" * 5),
remote_addrs[0] or EthAddr("\x02" + "\x11" * 5),
local_addrs[1] or IPAddr(local[0]),
remote_addrs[1] or IPAddr(remote[0]),
local_addrs[2] or local[1],
remote_addrs[2] or remote[1],
)
self._s_to_c = create_packet(
remote_addrs[0] or EthAddr("\x02" + "\x11" * 5),
local_addrs[0] or EthAddr("\x02" + "\x00" * 5),
remote_addrs[1] or IPAddr(remote[0]),
local_addrs[1] or IPAddr(local[0]),
remote_addrs[2] or remote[1],
local_addrs[2] or local[1],
)
outstream.write(pack("IHHiIII",
0xa1b2c3d4, # Magic
2,4, # Version
time.timezone, # TZ offset
0, # Accuracy of timestamps (apparently 0 is OK)
0x7fffFFff, # Snaplen
1 # Ethernet
))
def write (self, outgoing, buf):
if len(buf) == 0: return
e = self._c_to_s if outgoing else self._s_to_c
e2 = self._c_to_s if not outgoing else self._s_to_c
l = len(buf)
e.payload.payload.payload = buf
buf = e.pack()
t = time.time()
ut = t - int(t)
t = int(t)
ut = int(ut * 1000000)
self._out.write(pack("IIII",
t,ut, # Timestamp
len(buf), # Saved size
len(buf), # Original size
))
self._out.write(buf)
if self._flush: self._out.flush()
e.next.next.seq += l
e2.next.next.ack += l
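# Hedged usage sketch, not part of the original module: PCapWriter can also
# be driven without a live socket by passing fake endpoint tuples; the file
# name, addresses and payloads below are illustrative only.
def _pcap_writer_example ():
  out = open("example.pcap", "wb")
  w = PCapWriter(out, flush = True,
                 local_addrs = (None, IPAddr("10.0.0.1"), 1234),
                 remote_addrs = (None, IPAddr("10.0.0.2"), 80))
  w.write(True, "GET / HTTP/1.0\r\n\r\n")    # client -> server bytes
  w.write(False, "HTTP/1.0 200 OK\r\n\r\n")  # server -> client bytes
  out.close()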
class CaptureSocket (SocketWedge):
"""
Wraps a TCP socket and writes a faked PCAP format trace
"""
def __init__ (self, socket, outstream, close = True,
local_addrs = (None,None,None),
remote_addrs = (None,None,None)):
"""
socket is the socket to be wrapped.
outstream is the stream to write the PCAP trace to.
Ethernet addresses have to be faked, and it can be convenient to
fake IP and TCP addresses as well. Thus, you can specify local_addrs
or remote_addrs. These are tuples of (EthAddr, IPAddr, TCPPort).
Any item that is None gets a default value.
"""
super(CaptureSocket, self).__init__(socket)
self._close = close
self._writer = PCapWriter(outstream, socket=socket,
local_addrs=local_addrs,
remote_addrs=remote_addrs)
def _recv_out (self, buf):
try:
self._writer.write(False, buf)
except Exception:
pass
def _send_out (self, buf, r):
try:
self._writer.write(True, buf[:r])
except Exception:
pass
def close (self, *args, **kw):
if self._close:
try:
self._writer._out.close()
except Exception:
pass
return self._socket.close(*args, **kw)
if __name__ == "__main__":
"""
Test with:
nc -v -v -l 9933
"""
import socket
sock = socket.create_connection(("127.0.0.1",9933))
s = CaptureSocket(sock, file("test.pcap", "w"))
while True:
d = s.recv(1024)
d = d.upper()
import sys
import time
import random
time.sleep(random.random() * 1.5)
sys.stdout.write(d)
s.send(d)
| gpl-3.0 | -8,264,924,839,194,660,000 | 26.974619 | 73 | 0.587008 | false |
emilio/servo | tests/wpt/web-platform-tests/tools/third_party/pytest/src/_pytest/pytester.py | 32 | 42622 | """(disabled by default) support for testing pytest and pytest plugins."""
from __future__ import absolute_import, division, print_function
import codecs
import gc
import os
import platform
import re
import subprocess
import six
import sys
import time
import traceback
from fnmatch import fnmatch
from weakref import WeakKeyDictionary
from _pytest.capture import MultiCapture, SysCapture
from _pytest._code import Source
import py
import pytest
from _pytest.main import Session, EXIT_OK
from _pytest.assertion.rewrite import AssertionRewritingHook
PYTEST_FULLPATH = os.path.abspath(pytest.__file__.rstrip("oc")).replace(
"$py.class", ".py"
)
IGNORE_PAM = [ # filenames added when obtaining details about the current user
u"/var/lib/sss/mc/passwd"
]
def pytest_addoption(parser):
parser.addoption(
"--lsof",
action="store_true",
dest="lsof",
default=False,
help=("run FD checks if lsof is available"),
)
parser.addoption(
"--runpytest",
default="inprocess",
dest="runpytest",
choices=("inprocess", "subprocess"),
help=(
"run pytest sub runs in tests using an 'inprocess' "
"or 'subprocess' (python -m main) method"
),
)
def pytest_configure(config):
if config.getvalue("lsof"):
checker = LsofFdLeakChecker()
if checker.matching_platform():
config.pluginmanager.register(checker)
class LsofFdLeakChecker(object):
def get_open_files(self):
out = self._exec_lsof()
open_files = self._parse_lsof_output(out)
return open_files
def _exec_lsof(self):
pid = os.getpid()
return py.process.cmdexec("lsof -Ffn0 -p %d" % pid)
def _parse_lsof_output(self, out):
def isopen(line):
return line.startswith("f") and (
"deleted" not in line
and "mem" not in line
and "txt" not in line
and "cwd" not in line
)
open_files = []
for line in out.split("\n"):
if isopen(line):
fields = line.split("\0")
fd = fields[0][1:]
filename = fields[1][1:]
if filename in IGNORE_PAM:
continue
if filename.startswith("/"):
open_files.append((fd, filename))
return open_files
def matching_platform(self):
try:
py.process.cmdexec("lsof -v")
except (py.process.cmdexec.Error, UnicodeDecodeError):
# cmdexec may raise UnicodeDecodeError on Windows systems with
# locale other than English:
# https://bitbucket.org/pytest-dev/py/issues/66
return False
else:
return True
@pytest.hookimpl(hookwrapper=True, tryfirst=True)
def pytest_runtest_protocol(self, item):
lines1 = self.get_open_files()
yield
if hasattr(sys, "pypy_version_info"):
gc.collect()
lines2 = self.get_open_files()
new_fds = {t[0] for t in lines2} - {t[0] for t in lines1}
leaked_files = [t for t in lines2 if t[0] in new_fds]
if leaked_files:
error = []
error.append("***** %s FD leakage detected" % len(leaked_files))
error.extend([str(f) for f in leaked_files])
error.append("*** Before:")
error.extend([str(f) for f in lines1])
error.append("*** After:")
error.extend([str(f) for f in lines2])
error.append(error[0])
error.append("*** function %s:%s: %s " % item.location)
error.append("See issue #2366")
item.warn("", "\n".join(error))
# XXX copied from execnet's conftest.py - needs to be merged
winpymap = {
"python2.7": r"C:\Python27\python.exe",
"python3.4": r"C:\Python34\python.exe",
"python3.5": r"C:\Python35\python.exe",
"python3.6": r"C:\Python36\python.exe",
}
def getexecutable(name, cache={}):
try:
return cache[name]
except KeyError:
executable = py.path.local.sysfind(name)
if executable:
import subprocess
popen = subprocess.Popen(
[str(executable), "--version"],
universal_newlines=True,
stderr=subprocess.PIPE,
)
out, err = popen.communicate()
if name == "jython":
if not err or "2.5" not in err:
executable = None
if "2.5.2" in err:
executable = None # http://bugs.jython.org/issue1790
elif popen.returncode != 0:
# handle pyenv's 127
executable = None
cache[name] = executable
return executable
@pytest.fixture(params=["python2.7", "python3.4", "pypy", "pypy3"])
def anypython(request):
name = request.param
executable = getexecutable(name)
if executable is None:
if sys.platform == "win32":
executable = winpymap.get(name, None)
if executable:
executable = py.path.local(executable)
if executable.check():
return executable
pytest.skip("no suitable %s found" % (name,))
return executable
# used at least by pytest-xdist plugin
@pytest.fixture
def _pytest(request):
"""Return a helper which offers a gethookrecorder(hook) method which
returns a HookRecorder instance which helps to make assertions about called
hooks.
"""
return PytestArg(request)
class PytestArg(object):
def __init__(self, request):
self.request = request
def gethookrecorder(self, hook):
hookrecorder = HookRecorder(hook._pm)
self.request.addfinalizer(hookrecorder.finish_recording)
return hookrecorder
def get_public_names(values):
"""Only return names from iterator values without a leading underscore."""
return [x for x in values if x[0] != "_"]
class ParsedCall(object):
def __init__(self, name, kwargs):
self.__dict__.update(kwargs)
self._name = name
def __repr__(self):
d = self.__dict__.copy()
del d["_name"]
return "<ParsedCall %r(**%r)>" % (self._name, d)
class HookRecorder(object):
"""Record all hooks called in a plugin manager.
This wraps all the hook calls in the plugin manager, recording each call
before propagating the normal calls.
"""
def __init__(self, pluginmanager):
self._pluginmanager = pluginmanager
self.calls = []
def before(hook_name, hook_impls, kwargs):
self.calls.append(ParsedCall(hook_name, kwargs))
def after(outcome, hook_name, hook_impls, kwargs):
pass
self._undo_wrapping = pluginmanager.add_hookcall_monitoring(before, after)
def finish_recording(self):
self._undo_wrapping()
def getcalls(self, names):
if isinstance(names, str):
names = names.split()
return [call for call in self.calls if call._name in names]
def assert_contains(self, entries):
__tracebackhide__ = True
i = 0
entries = list(entries)
backlocals = sys._getframe(1).f_locals
while entries:
name, check = entries.pop(0)
for ind, call in enumerate(self.calls[i:]):
if call._name == name:
print("NAMEMATCH", name, call)
if eval(check, backlocals, call.__dict__):
print("CHECKERMATCH", repr(check), "->", call)
else:
print("NOCHECKERMATCH", repr(check), "-", call)
continue
i += ind + 1
break
print("NONAMEMATCH", name, "with", call)
else:
pytest.fail("could not find %r check %r" % (name, check))
def popcall(self, name):
__tracebackhide__ = True
for i, call in enumerate(self.calls):
if call._name == name:
del self.calls[i]
return call
lines = ["could not find call %r, in:" % (name,)]
lines.extend([" %s" % str(x) for x in self.calls])
pytest.fail("\n".join(lines))
def getcall(self, name):
values = self.getcalls(name)
assert len(values) == 1, (name, values)
return values[0]
# functionality for test reports
def getreports(self, names="pytest_runtest_logreport pytest_collectreport"):
return [x.report for x in self.getcalls(names)]
def matchreport(
self,
inamepart="",
names="pytest_runtest_logreport pytest_collectreport",
when=None,
):
"""return a testreport whose dotted import path matches"""
values = []
for rep in self.getreports(names=names):
try:
if not when and rep.when != "call" and rep.passed:
# setup/teardown passing reports - let's ignore those
continue
except AttributeError:
pass
if when and getattr(rep, "when", None) != when:
continue
if not inamepart or inamepart in rep.nodeid.split("::"):
values.append(rep)
if not values:
raise ValueError(
"could not find test report matching %r: "
"no test reports at all!" % (inamepart,)
)
if len(values) > 1:
raise ValueError(
"found 2 or more testreports matching %r: %s" % (inamepart, values)
)
return values[0]
def getfailures(self, names="pytest_runtest_logreport pytest_collectreport"):
return [rep for rep in self.getreports(names) if rep.failed]
def getfailedcollections(self):
return self.getfailures("pytest_collectreport")
def listoutcomes(self):
passed = []
skipped = []
failed = []
for rep in self.getreports("pytest_collectreport pytest_runtest_logreport"):
if rep.passed:
if getattr(rep, "when", None) == "call":
passed.append(rep)
elif rep.skipped:
skipped.append(rep)
elif rep.failed:
failed.append(rep)
return passed, skipped, failed
def countoutcomes(self):
return [len(x) for x in self.listoutcomes()]
def assertoutcome(self, passed=0, skipped=0, failed=0):
realpassed, realskipped, realfailed = self.listoutcomes()
assert passed == len(realpassed)
assert skipped == len(realskipped)
assert failed == len(realfailed)
def clear(self):
self.calls[:] = []
@pytest.fixture
def linecomp(request):
return LineComp()
@pytest.fixture(name="LineMatcher")
def LineMatcher_fixture(request):
return LineMatcher
@pytest.fixture
def testdir(request, tmpdir_factory):
return Testdir(request, tmpdir_factory)
rex_outcome = re.compile(r"(\d+) ([\w-]+)")
class RunResult(object):
"""The result of running a command.
Attributes:
:ret: the return value
:outlines: list of lines captured from stdout
    :errlines: list of lines captured from stderr
:stdout: :py:class:`LineMatcher` of stdout, use ``stdout.str()`` to
reconstruct stdout or the commonly used ``stdout.fnmatch_lines()``
method
:stderr: :py:class:`LineMatcher` of stderr
:duration: duration in seconds
"""
def __init__(self, ret, outlines, errlines, duration):
self.ret = ret
self.outlines = outlines
self.errlines = errlines
self.stdout = LineMatcher(outlines)
self.stderr = LineMatcher(errlines)
self.duration = duration
def parseoutcomes(self):
"""Return a dictionary of outcomestring->num from parsing the terminal
output that the test process produced.
"""
for line in reversed(self.outlines):
if "seconds" in line:
outcomes = rex_outcome.findall(line)
if outcomes:
d = {}
for num, cat in outcomes:
d[cat] = int(num)
return d
raise ValueError("Pytest terminal report not found")
def assert_outcomes(self, passed=0, skipped=0, failed=0, error=0):
"""Assert that the specified outcomes appear with the respective
numbers (0 means it didn't occur) in the text output from a test run.
"""
d = self.parseoutcomes()
obtained = {
"passed": d.get("passed", 0),
"skipped": d.get("skipped", 0),
"failed": d.get("failed", 0),
"error": d.get("error", 0),
}
assert (
obtained == dict(passed=passed, skipped=skipped, failed=failed, error=error)
)
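# Hedged usage sketch, not part of the original module: in a plugin test that
# uses the ``testdir`` fixture, ``runpytest()`` returns a RunResult.
#
#     def test_example(testdir):  # hypothetical test function
#         testdir.makepyfile("def test_ok(): assert True")
#         result = testdir.runpytest()
#         result.assert_outcomes(passed=1)
#         assert result.ret == 0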
class CwdSnapshot(object):
def __init__(self):
self.__saved = os.getcwd()
def restore(self):
os.chdir(self.__saved)
class SysModulesSnapshot(object):
def __init__(self, preserve=None):
self.__preserve = preserve
self.__saved = dict(sys.modules)
def restore(self):
if self.__preserve:
self.__saved.update(
(k, m) for k, m in sys.modules.items() if self.__preserve(k)
)
sys.modules.clear()
sys.modules.update(self.__saved)
class SysPathsSnapshot(object):
def __init__(self):
self.__saved = list(sys.path), list(sys.meta_path)
def restore(self):
sys.path[:], sys.meta_path[:] = self.__saved
class Testdir(object):
"""Temporary test directory with tools to test/run pytest itself.
This is based on the ``tmpdir`` fixture but provides a number of methods
which aid with testing pytest itself. Unless :py:meth:`chdir` is used all
methods will use :py:attr:`tmpdir` as their current working directory.
Attributes:
:tmpdir: The :py:class:`py.path.local` instance of the temporary directory.
:plugins: A list of plugins to use with :py:meth:`parseconfig` and
:py:meth:`runpytest`. Initially this is an empty list but plugins can
be added to the list. The type of items to add to the list depends on
the method using them so refer to them for details.
"""
def __init__(self, request, tmpdir_factory):
self.request = request
self._mod_collections = WeakKeyDictionary()
name = request.function.__name__
self.tmpdir = tmpdir_factory.mktemp(name, numbered=True)
self.plugins = []
self._cwd_snapshot = CwdSnapshot()
self._sys_path_snapshot = SysPathsSnapshot()
self._sys_modules_snapshot = self.__take_sys_modules_snapshot()
self.chdir()
self.request.addfinalizer(self.finalize)
method = self.request.config.getoption("--runpytest")
if method == "inprocess":
self._runpytest_method = self.runpytest_inprocess
elif method == "subprocess":
self._runpytest_method = self.runpytest_subprocess
def __repr__(self):
return "<Testdir %r>" % (self.tmpdir,)
def finalize(self):
"""Clean up global state artifacts.
Some methods modify the global interpreter state and this tries to
clean this up. It does not remove the temporary directory however so
it can be looked at after the test run has finished.
"""
self._sys_modules_snapshot.restore()
self._sys_path_snapshot.restore()
self._cwd_snapshot.restore()
def __take_sys_modules_snapshot(self):
# some zope modules used by twisted-related tests keep internal state
# and can't be deleted; we had some trouble in the past with
# `zope.interface` for example
def preserve_module(name):
return name.startswith("zope")
return SysModulesSnapshot(preserve=preserve_module)
def make_hook_recorder(self, pluginmanager):
"""Create a new :py:class:`HookRecorder` for a PluginManager."""
assert not hasattr(pluginmanager, "reprec")
pluginmanager.reprec = reprec = HookRecorder(pluginmanager)
self.request.addfinalizer(reprec.finish_recording)
return reprec
def chdir(self):
"""Cd into the temporary directory.
This is done automatically upon instantiation.
"""
self.tmpdir.chdir()
def _makefile(self, ext, args, kwargs, encoding="utf-8"):
items = list(kwargs.items())
def to_text(s):
return s.decode(encoding) if isinstance(s, bytes) else six.text_type(s)
if args:
source = u"\n".join(to_text(x) for x in args)
basename = self.request.function.__name__
items.insert(0, (basename, source))
ret = None
for basename, value in items:
p = self.tmpdir.join(basename).new(ext=ext)
p.dirpath().ensure_dir()
source = Source(value)
source = u"\n".join(to_text(line) for line in source.lines)
p.write(source.strip().encode(encoding), "wb")
if ret is None:
ret = p
return ret
def makefile(self, ext, *args, **kwargs):
"""Create a new file in the testdir.
ext: The extension the file should use, including the dot, e.g. `.py`.
args: All args will be treated as strings and joined using newlines.
The result will be written as contents to the file. The name of the
file will be based on the test function requesting this fixture.
E.g. "testdir.makefile('.txt', 'line1', 'line2')"
kwargs: Each keyword is the name of a file, while the value of it will
be written as contents of the file.
E.g. "testdir.makefile('.ini', pytest='[pytest]\naddopts=-rs\n')"
"""
return self._makefile(ext, args, kwargs)
def makeconftest(self, source):
"""Write a contest.py file with 'source' as contents."""
return self.makepyfile(conftest=source)
def makeini(self, source):
"""Write a tox.ini file with 'source' as contents."""
return self.makefile(".ini", tox=source)
def getinicfg(self, source):
"""Return the pytest section from the tox.ini config file."""
p = self.makeini(source)
return py.iniconfig.IniConfig(p)["pytest"]
def makepyfile(self, *args, **kwargs):
"""Shortcut for .makefile() with a .py extension."""
return self._makefile(".py", args, kwargs)
def maketxtfile(self, *args, **kwargs):
"""Shortcut for .makefile() with a .txt extension."""
return self._makefile(".txt", args, kwargs)
def syspathinsert(self, path=None):
"""Prepend a directory to sys.path, defaults to :py:attr:`tmpdir`.
This is undone automatically when this object dies at the end of each
test.
"""
if path is None:
path = self.tmpdir
sys.path.insert(0, str(path))
# a call to syspathinsert() usually means that the caller wants to
# import some dynamically created files, thus with python3 we
# invalidate its import caches
self._possibly_invalidate_import_caches()
def _possibly_invalidate_import_caches(self):
# invalidate caches if we can (py33 and above)
try:
import importlib
except ImportError:
pass
else:
if hasattr(importlib, "invalidate_caches"):
importlib.invalidate_caches()
def mkdir(self, name):
"""Create a new (sub)directory."""
return self.tmpdir.mkdir(name)
def mkpydir(self, name):
"""Create a new python package.
This creates a (sub)directory with an empty ``__init__.py`` file so it
gets recognised as a python package.
"""
p = self.mkdir(name)
p.ensure("__init__.py")
return p
Session = Session
def getnode(self, config, arg):
"""Return the collection node of a file.
:param config: :py:class:`_pytest.config.Config` instance, see
:py:meth:`parseconfig` and :py:meth:`parseconfigure` to create the
configuration
:param arg: a :py:class:`py.path.local` instance of the file
"""
session = Session(config)
assert "::" not in str(arg)
p = py.path.local(arg)
config.hook.pytest_sessionstart(session=session)
res = session.perform_collect([str(p)], genitems=False)[0]
config.hook.pytest_sessionfinish(session=session, exitstatus=EXIT_OK)
return res
def getpathnode(self, path):
"""Return the collection node of a file.
This is like :py:meth:`getnode` but uses :py:meth:`parseconfigure` to
create the (configured) pytest Config instance.
:param path: a :py:class:`py.path.local` instance of the file
"""
config = self.parseconfigure(path)
session = Session(config)
x = session.fspath.bestrelpath(path)
config.hook.pytest_sessionstart(session=session)
res = session.perform_collect([x], genitems=False)[0]
config.hook.pytest_sessionfinish(session=session, exitstatus=EXIT_OK)
return res
def genitems(self, colitems):
"""Generate all test items from a collection node.
This recurses into the collection node and returns a list of all the
test items contained within.
"""
session = colitems[0].session
result = []
for colitem in colitems:
result.extend(session.genitems(colitem))
return result
def runitem(self, source):
"""Run the "test_func" Item.
The calling test instance (class containing the test method) must
provide a ``.getrunner()`` method which should return a runner which
can run the test protocol for a single item, e.g.
:py:func:`_pytest.runner.runtestprotocol`.
"""
# used from runner functional tests
item = self.getitem(source)
# the test class where we are called from wants to provide the runner
testclassinstance = self.request.instance
runner = testclassinstance.getrunner()
return runner(item)
def inline_runsource(self, source, *cmdlineargs):
"""Run a test module in process using ``pytest.main()``.
This run writes "source" into a temporary file and runs
``pytest.main()`` on it, returning a :py:class:`HookRecorder` instance
for the result.
:param source: the source code of the test module
:param cmdlineargs: any extra command line arguments to use
:return: :py:class:`HookRecorder` instance of the result
"""
p = self.makepyfile(source)
values = list(cmdlineargs) + [p]
return self.inline_run(*values)
def inline_genitems(self, *args):
"""Run ``pytest.main(['--collectonly'])`` in-process.
Runs the :py:func:`pytest.main` function to run all of pytest inside
the test process itself like :py:meth:`inline_run`, but returns a
tuple of the collected items and a :py:class:`HookRecorder` instance.
"""
rec = self.inline_run("--collect-only", *args)
items = [x.item for x in rec.getcalls("pytest_itemcollected")]
return items, rec
def inline_run(self, *args, **kwargs):
"""Run ``pytest.main()`` in-process, returning a HookRecorder.
Runs the :py:func:`pytest.main` function to run all of pytest inside
the test process itself. This means it can return a
:py:class:`HookRecorder` instance which gives more detailed results
from that run than can be done by matching stdout/stderr from
:py:meth:`runpytest`.
:param args: command line arguments to pass to :py:func:`pytest.main`
:param plugin: (keyword-only) extra plugin instances the
``pytest.main()`` instance should use
:return: a :py:class:`HookRecorder` instance
"""
finalizers = []
try:
# When running pytest inline any plugins active in the main test
# process are already imported. So this disables the warning which
# will trigger to say they can no longer be rewritten, which is
# fine as they have already been rewritten.
orig_warn = AssertionRewritingHook._warn_already_imported
def revert_warn_already_imported():
AssertionRewritingHook._warn_already_imported = orig_warn
finalizers.append(revert_warn_already_imported)
AssertionRewritingHook._warn_already_imported = lambda *a: None
# Any sys.module or sys.path changes done while running pytest
# inline should be reverted after the test run completes to avoid
# clashing with later inline tests run within the same pytest test,
# e.g. just because they use matching test module names.
finalizers.append(self.__take_sys_modules_snapshot().restore)
finalizers.append(SysPathsSnapshot().restore)
# Important note:
# - our tests should not leave any other references/registrations
# laying around other than possibly loaded test modules
# referenced from sys.modules, as nothing will clean those up
# automatically
rec = []
class Collect(object):
def pytest_configure(x, config):
rec.append(self.make_hook_recorder(config.pluginmanager))
plugins = kwargs.get("plugins") or []
plugins.append(Collect())
ret = pytest.main(list(args), plugins=plugins)
if len(rec) == 1:
reprec = rec.pop()
else:
class reprec(object):
pass
reprec.ret = ret
# typically we reraise keyboard interrupts from the child run
# because it's our user requesting interruption of the testing
if ret == 2 and not kwargs.get("no_reraise_ctrlc"):
calls = reprec.getcalls("pytest_keyboard_interrupt")
if calls and calls[-1].excinfo.type == KeyboardInterrupt:
raise KeyboardInterrupt()
return reprec
finally:
for finalizer in finalizers:
finalizer()
def runpytest_inprocess(self, *args, **kwargs):
"""Return result of running pytest in-process, providing a similar
interface to what self.runpytest() provides.
"""
if kwargs.get("syspathinsert"):
self.syspathinsert()
now = time.time()
capture = MultiCapture(Capture=SysCapture)
capture.start_capturing()
try:
try:
reprec = self.inline_run(*args, **kwargs)
except SystemExit as e:
class reprec(object):
ret = e.args[0]
except Exception:
traceback.print_exc()
class reprec(object):
ret = 3
finally:
out, err = capture.readouterr()
capture.stop_capturing()
sys.stdout.write(out)
sys.stderr.write(err)
res = RunResult(reprec.ret, out.split("\n"), err.split("\n"), time.time() - now)
res.reprec = reprec
return res
def runpytest(self, *args, **kwargs):
"""Run pytest inline or in a subprocess, depending on the command line
option "--runpytest" and return a :py:class:`RunResult`.
"""
args = self._ensure_basetemp(args)
return self._runpytest_method(*args, **kwargs)
def _ensure_basetemp(self, args):
args = [str(x) for x in args]
for x in args:
if str(x).startswith("--basetemp"):
# print("basedtemp exists: %s" %(args,))
break
else:
args.append("--basetemp=%s" % self.tmpdir.dirpath("basetemp"))
# print("added basetemp: %s" %(args,))
return args
def parseconfig(self, *args):
"""Return a new pytest Config instance from given commandline args.
This invokes the pytest bootstrapping code in _pytest.config to create
a new :py:class:`_pytest.core.PluginManager` and call the
pytest_cmdline_parse hook to create a new
:py:class:`_pytest.config.Config` instance.
If :py:attr:`plugins` has been populated they should be plugin modules
to be registered with the PluginManager.
"""
args = self._ensure_basetemp(args)
import _pytest.config
config = _pytest.config._prepareconfig(args, self.plugins)
# we don't know what the test will do with this half-setup config
# object and thus we make sure it gets unconfigured properly in any
# case (otherwise capturing could still be active, for example)
self.request.addfinalizer(config._ensure_unconfigure)
return config
def parseconfigure(self, *args):
"""Return a new pytest configured Config instance.
This returns a new :py:class:`_pytest.config.Config` instance like
:py:meth:`parseconfig`, but also calls the pytest_configure hook.
"""
config = self.parseconfig(*args)
config._do_configure()
self.request.addfinalizer(config._ensure_unconfigure)
return config
def getitem(self, source, funcname="test_func"):
"""Return the test item for a test function.
This writes the source to a python file and runs pytest's collection on
the resulting module, returning the test item for the requested
function name.
:param source: the module source
:param funcname: the name of the test function for which to return a
test item
"""
items = self.getitems(source)
for item in items:
if item.name == funcname:
return item
assert 0, (
"%r item not found in module:\n%s\nitems: %s" % (funcname, source, items)
)
def getitems(self, source):
"""Return all test items collected from the module.
This writes the source to a python file and runs pytest's collection on
the resulting module, returning all test items contained within.
"""
modcol = self.getmodulecol(source)
return self.genitems([modcol])
def getmodulecol(self, source, configargs=(), withinit=False):
"""Return the module collection node for ``source``.
This writes ``source`` to a file using :py:meth:`makepyfile` and then
runs the pytest collection on it, returning the collection node for the
test module.
:param source: the source code of the module to collect
:param configargs: any extra arguments to pass to
:py:meth:`parseconfigure`
:param withinit: whether to also write an ``__init__.py`` file to the
same directory to ensure it is a package
"""
kw = {self.request.function.__name__: Source(source).strip()}
path = self.makepyfile(**kw)
if withinit:
self.makepyfile(__init__="#")
self.config = config = self.parseconfigure(path, *configargs)
node = self.getnode(config, path)
return node
def collect_by_name(self, modcol, name):
"""Return the collection node for name from the module collection.
This will search a module collection node for a collection node
matching the given name.
:param modcol: a module collection node; see :py:meth:`getmodulecol`
:param name: the name of the node to return
"""
if modcol not in self._mod_collections:
self._mod_collections[modcol] = list(modcol.collect())
for colitem in self._mod_collections[modcol]:
if colitem.name == name:
return colitem
def popen(self, cmdargs, stdout, stderr, **kw):
"""Invoke subprocess.Popen.
This calls subprocess.Popen making sure the current working directory
is in the PYTHONPATH.
You probably want to use :py:meth:`run` instead.
"""
env = os.environ.copy()
env["PYTHONPATH"] = os.pathsep.join(
filter(None, [str(os.getcwd()), env.get("PYTHONPATH", "")])
)
kw["env"] = env
popen = subprocess.Popen(
cmdargs, stdin=subprocess.PIPE, stdout=stdout, stderr=stderr, **kw
)
popen.stdin.close()
return popen
def run(self, *cmdargs):
"""Run a command with arguments.
Run a process using subprocess.Popen saving the stdout and stderr.
Returns a :py:class:`RunResult`.
"""
return self._run(*cmdargs)
def _run(self, *cmdargs):
cmdargs = [str(x) for x in cmdargs]
p1 = self.tmpdir.join("stdout")
p2 = self.tmpdir.join("stderr")
print("running:", " ".join(cmdargs))
print(" in:", str(py.path.local()))
f1 = codecs.open(str(p1), "w", encoding="utf8")
f2 = codecs.open(str(p2), "w", encoding="utf8")
try:
now = time.time()
popen = self.popen(
cmdargs, stdout=f1, stderr=f2, close_fds=(sys.platform != "win32")
)
ret = popen.wait()
finally:
f1.close()
f2.close()
f1 = codecs.open(str(p1), "r", encoding="utf8")
f2 = codecs.open(str(p2), "r", encoding="utf8")
try:
out = f1.read().splitlines()
err = f2.read().splitlines()
finally:
f1.close()
f2.close()
self._dump_lines(out, sys.stdout)
self._dump_lines(err, sys.stderr)
return RunResult(ret, out, err, time.time() - now)
def _dump_lines(self, lines, fp):
try:
for line in lines:
print(line, file=fp)
except UnicodeEncodeError:
print("couldn't print to %s because of encoding" % (fp,))
def _getpytestargs(self):
# we cannot use `(sys.executable, script)` because on Windows the
# script is e.g. `pytest.exe`
return (sys.executable, PYTEST_FULLPATH) # noqa
def runpython(self, script):
"""Run a python script using sys.executable as interpreter.
Returns a :py:class:`RunResult`.
"""
return self.run(sys.executable, script)
def runpython_c(self, command):
"""Run python -c "command", return a :py:class:`RunResult`."""
return self.run(sys.executable, "-c", command)
def runpytest_subprocess(self, *args, **kwargs):
"""Run pytest as a subprocess with given arguments.
Any plugins added to the :py:attr:`plugins` list will added using the
        ``-p`` command line option. Additionally ``--basetemp`` is used to put
any temporary files and directories in a numbered directory prefixed
with "runpytest-" so they do not conflict with the normal numbered
pytest location for temporary files and directories.
Returns a :py:class:`RunResult`.
"""
p = py.path.local.make_numbered_dir(
prefix="runpytest-", keep=None, rootdir=self.tmpdir
)
args = ("--basetemp=%s" % p,) + args
plugins = [x for x in self.plugins if isinstance(x, str)]
if plugins:
args = ("-p", plugins[0]) + args
args = self._getpytestargs() + args
return self.run(*args)
def spawn_pytest(self, string, expect_timeout=10.0):
"""Run pytest using pexpect.
This makes sure to use the right pytest and sets up the temporary
directory locations.
The pexpect child is returned.
"""
basetemp = self.tmpdir.mkdir("temp-pexpect")
invoke = " ".join(map(str, self._getpytestargs()))
cmd = "%s --basetemp=%s %s" % (invoke, basetemp, string)
return self.spawn(cmd, expect_timeout=expect_timeout)
def spawn(self, cmd, expect_timeout=10.0):
"""Run a command using pexpect.
The pexpect child is returned.
"""
pexpect = pytest.importorskip("pexpect", "3.0")
if hasattr(sys, "pypy_version_info") and "64" in platform.machine():
pytest.skip("pypy-64 bit not supported")
if sys.platform.startswith("freebsd"):
pytest.xfail("pexpect does not work reliably on freebsd")
logfile = self.tmpdir.join("spawn.out").open("wb")
child = pexpect.spawn(cmd, logfile=logfile)
self.request.addfinalizer(logfile.close)
child.timeout = expect_timeout
return child
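    # Hedged usage sketch, not part of the original class: spawn_pytest()
    # drives an interactive pytest run through pexpect, e.g. for --pdb tests.
    #
    #     child = testdir.spawn_pytest("--pdb %s" % p)  # p from makepyfile(...)
    #     child.expect("(Pdb)")
    #     child.sendline("c")
    #     child.wait()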
def getdecoded(out):
try:
return out.decode("utf-8")
except UnicodeDecodeError:
return "INTERNAL not-utf8-decodeable, truncated string:\n%s" % (
py.io.saferepr(out),
)
class LineComp(object):
def __init__(self):
self.stringio = py.io.TextIO()
def assert_contains_lines(self, lines2):
"""Assert that lines2 are contained (linearly) in lines1.
Return a list of extralines found.
"""
__tracebackhide__ = True
val = self.stringio.getvalue()
self.stringio.truncate(0)
self.stringio.seek(0)
lines1 = val.split("\n")
return LineMatcher(lines1).fnmatch_lines(lines2)
class LineMatcher(object):
"""Flexible matching of text.
This is a convenience class to test large texts like the output of
commands.
The constructor takes a list of lines without their trailing newlines, i.e.
``text.splitlines()``.
"""
def __init__(self, lines):
self.lines = lines
self._log_output = []
def str(self):
"""Return the entire original text."""
return "\n".join(self.lines)
def _getlines(self, lines2):
if isinstance(lines2, str):
lines2 = Source(lines2)
if isinstance(lines2, Source):
lines2 = lines2.strip().lines
return lines2
def fnmatch_lines_random(self, lines2):
"""Check lines exist in the output using in any order.
Lines are checked using ``fnmatch.fnmatch``. The argument is a list of
lines which have to occur in the output, in any order.
"""
self._match_lines_random(lines2, fnmatch)
def re_match_lines_random(self, lines2):
"""Check lines exist in the output using ``re.match``, in any order.
The argument is a list of lines which have to occur in the output, in
any order.
"""
self._match_lines_random(lines2, lambda name, pat: re.match(pat, name))
def _match_lines_random(self, lines2, match_func):
"""Check lines exist in the output.
The argument is a list of lines which have to occur in the output, in
        any order. Each line can contain glob wildcards.
"""
lines2 = self._getlines(lines2)
for line in lines2:
for x in self.lines:
if line == x or match_func(x, line):
self._log("matched: ", repr(line))
break
else:
self._log("line %r not found in output" % line)
raise ValueError(self._log_text)
def get_lines_after(self, fnline):
"""Return all lines following the given line in the text.
The given line can contain glob wildcards.
"""
for i, line in enumerate(self.lines):
if fnline == line or fnmatch(line, fnline):
return self.lines[i + 1:]
raise ValueError("line %r not found in output" % fnline)
def _log(self, *args):
self._log_output.append(" ".join((str(x) for x in args)))
@property
def _log_text(self):
return "\n".join(self._log_output)
def fnmatch_lines(self, lines2):
"""Search captured text for matching lines using ``fnmatch.fnmatch``.
The argument is a list of lines which have to match and can use glob
wildcards. If they do not match a pytest.fail() is called. The
matches and non-matches are also printed on stdout.
"""
self._match_lines(lines2, fnmatch, "fnmatch")
def re_match_lines(self, lines2):
"""Search captured text for matching lines using ``re.match``.
The argument is a list of lines which have to match using ``re.match``.
If they do not match a pytest.fail() is called.
The matches and non-matches are also printed on stdout.
"""
self._match_lines(lines2, lambda name, pat: re.match(pat, name), "re.match")
def _match_lines(self, lines2, match_func, match_nickname):
"""Underlying implementation of ``fnmatch_lines`` and ``re_match_lines``.
:param list[str] lines2: list of string patterns to match. The actual
format depends on ``match_func``
:param match_func: a callable ``match_func(line, pattern)`` where line
is the captured line from stdout/stderr and pattern is the matching
pattern
:param str match_nickname: the nickname for the match function that
will be logged to stdout when a match occurs
"""
lines2 = self._getlines(lines2)
lines1 = self.lines[:]
nextline = None
extralines = []
__tracebackhide__ = True
for line in lines2:
nomatchprinted = False
while lines1:
nextline = lines1.pop(0)
if line == nextline:
self._log("exact match:", repr(line))
break
elif match_func(nextline, line):
self._log("%s:" % match_nickname, repr(line))
self._log(" with:", repr(nextline))
break
else:
if not nomatchprinted:
self._log("nomatch:", repr(line))
nomatchprinted = True
self._log(" and:", repr(nextline))
extralines.append(nextline)
else:
self._log("remains unmatched: %r" % (line,))
pytest.fail(self._log_text)
| mpl-2.0 | 1,438,267,591,620,287,200 | 32.507862 | 88 | 0.587795 | false |
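As a usage note on the helpers above (not part of the pytest source itself): LineMatcher is what ``testdir.runpytest()`` returns as ``result.stdout`` and ``result.stderr``, so pattern checks are normally written against those. A minimal sketch, assuming a suite with the pytester plugin enabled (for example ``pytest_plugins = "pytester"`` in conftest.py); the inner file and test names are arbitrary:
def test_report_lines(testdir):
    testdir.makepyfile(
        """
        def test_ok():
            assert 1 + 1 == 2
        """
    )
    result = testdir.runpytest("-v")
    # Glob-style patterns; they must appear in the captured output in this relative order.
    result.stdout.fnmatch_lines([
        "*test_ok PASSED*",
        "*1 passed*",
    ])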
eyohansa/django | django/contrib/sessions/backends/base.py | 298 | 12046 | from __future__ import unicode_literals
import base64
import logging
import string
from datetime import datetime, timedelta
from django.conf import settings
from django.contrib.sessions.exceptions import SuspiciousSession
from django.core.exceptions import SuspiciousOperation
from django.utils import timezone
from django.utils.crypto import (
constant_time_compare, get_random_string, salted_hmac,
)
from django.utils.encoding import force_bytes, force_text
from django.utils.module_loading import import_string
# session_key should not be case sensitive because some backends can store it
# on case insensitive file systems.
VALID_KEY_CHARS = string.ascii_lowercase + string.digits
class CreateError(Exception):
"""
Used internally as a consistent exception type to catch from save (see the
docstring for SessionBase.save() for details).
"""
pass
class SessionBase(object):
"""
Base class for all Session classes.
"""
TEST_COOKIE_NAME = 'testcookie'
TEST_COOKIE_VALUE = 'worked'
def __init__(self, session_key=None):
self._session_key = session_key
self.accessed = False
self.modified = False
self.serializer = import_string(settings.SESSION_SERIALIZER)
def __contains__(self, key):
return key in self._session
def __getitem__(self, key):
return self._session[key]
def __setitem__(self, key, value):
self._session[key] = value
self.modified = True
def __delitem__(self, key):
del self._session[key]
self.modified = True
def get(self, key, default=None):
return self._session.get(key, default)
def pop(self, key, default=None):
self.modified = self.modified or key in self._session
return self._session.pop(key, default)
def setdefault(self, key, value):
if key in self._session:
return self._session[key]
else:
self.modified = True
self._session[key] = value
return value
def set_test_cookie(self):
self[self.TEST_COOKIE_NAME] = self.TEST_COOKIE_VALUE
def test_cookie_worked(self):
return self.get(self.TEST_COOKIE_NAME) == self.TEST_COOKIE_VALUE
def delete_test_cookie(self):
del self[self.TEST_COOKIE_NAME]
def _hash(self, value):
key_salt = "django.contrib.sessions" + self.__class__.__name__
return salted_hmac(key_salt, value).hexdigest()
def encode(self, session_dict):
"Returns the given session dictionary serialized and encoded as a string."
serialized = self.serializer().dumps(session_dict)
hash = self._hash(serialized)
return base64.b64encode(hash.encode() + b":" + serialized).decode('ascii')
def decode(self, session_data):
encoded_data = base64.b64decode(force_bytes(session_data))
try:
# could produce ValueError if there is no ':'
hash, serialized = encoded_data.split(b':', 1)
expected_hash = self._hash(serialized)
if not constant_time_compare(hash.decode(), expected_hash):
raise SuspiciousSession("Session data corrupted")
else:
return self.serializer().loads(serialized)
except Exception as e:
# ValueError, SuspiciousOperation, unpickling exceptions. If any of
# these happen, just return an empty dictionary (an empty session).
if isinstance(e, SuspiciousOperation):
logger = logging.getLogger('django.security.%s' %
e.__class__.__name__)
logger.warning(force_text(e))
return {}
def update(self, dict_):
self._session.update(dict_)
self.modified = True
def has_key(self, key):
return key in self._session
def keys(self):
return self._session.keys()
def values(self):
return self._session.values()
def items(self):
return self._session.items()
def iterkeys(self):
return self._session.iterkeys()
def itervalues(self):
return self._session.itervalues()
def iteritems(self):
return self._session.iteritems()
def clear(self):
# To avoid unnecessary persistent storage accesses, we set up the
# internals directly (loading data wastes time, since we are going to
# set it to an empty dict anyway).
self._session_cache = {}
self.accessed = True
self.modified = True
def is_empty(self):
"Returns True when there is no session_key and the session is empty"
try:
return not bool(self._session_key) and not self._session_cache
except AttributeError:
return True
def _get_new_session_key(self):
"Returns session key that isn't being used."
while True:
session_key = get_random_string(32, VALID_KEY_CHARS)
if not self.exists(session_key):
break
return session_key
def _get_or_create_session_key(self):
if self._session_key is None:
self._session_key = self._get_new_session_key()
return self._session_key
def _validate_session_key(self, key):
"""
Key must be truthy and at least 8 characters long. 8 characters is an
arbitrary lower bound for some minimal key security.
"""
return key and len(key) >= 8
def _get_session_key(self):
return self.__session_key
def _set_session_key(self, value):
"""
        Validate session key on assignment. Invalid values will be set to None.
"""
if self._validate_session_key(value):
self.__session_key = value
else:
self.__session_key = None
session_key = property(_get_session_key)
_session_key = property(_get_session_key, _set_session_key)
def _get_session(self, no_load=False):
"""
Lazily loads session from storage (unless "no_load" is True, when only
an empty dict is stored) and stores it in the current instance.
"""
self.accessed = True
try:
return self._session_cache
except AttributeError:
if self.session_key is None or no_load:
self._session_cache = {}
else:
self._session_cache = self.load()
return self._session_cache
_session = property(_get_session)
def get_expiry_age(self, **kwargs):
"""Get the number of seconds until the session expires.
Optionally, this function accepts `modification` and `expiry` keyword
arguments specifying the modification and expiry of the session.
"""
try:
modification = kwargs['modification']
except KeyError:
modification = timezone.now()
# Make the difference between "expiry=None passed in kwargs" and
# "expiry not passed in kwargs", in order to guarantee not to trigger
# self.load() when expiry is provided.
try:
expiry = kwargs['expiry']
except KeyError:
expiry = self.get('_session_expiry')
if not expiry: # Checks both None and 0 cases
return settings.SESSION_COOKIE_AGE
if not isinstance(expiry, datetime):
return expiry
delta = expiry - modification
return delta.days * 86400 + delta.seconds
def get_expiry_date(self, **kwargs):
"""Get session the expiry date (as a datetime object).
Optionally, this function accepts `modification` and `expiry` keyword
arguments specifying the modification and expiry of the session.
"""
try:
modification = kwargs['modification']
except KeyError:
modification = timezone.now()
# Same comment as in get_expiry_age
try:
expiry = kwargs['expiry']
except KeyError:
expiry = self.get('_session_expiry')
if isinstance(expiry, datetime):
return expiry
if not expiry: # Checks both None and 0 cases
expiry = settings.SESSION_COOKIE_AGE
return modification + timedelta(seconds=expiry)
def set_expiry(self, value):
"""
Sets a custom expiration for the session. ``value`` can be an integer,
a Python ``datetime`` or ``timedelta`` object or ``None``.
If ``value`` is an integer, the session will expire after that many
seconds of inactivity. If set to ``0`` then the session will expire on
browser close.
If ``value`` is a ``datetime`` or ``timedelta`` object, the session
will expire at that specific future time.
If ``value`` is ``None``, the session uses the global session expiry
policy.
"""
if value is None:
# Remove any custom expiration for this session.
try:
del self['_session_expiry']
except KeyError:
pass
return
if isinstance(value, timedelta):
value = timezone.now() + value
self['_session_expiry'] = value
def get_expire_at_browser_close(self):
"""
Returns ``True`` if the session is set to expire when the browser
closes, and ``False`` if there's an expiry date. Use
``get_expiry_date()`` or ``get_expiry_age()`` to find the actual expiry
date/age, if there is one.
"""
if self.get('_session_expiry') is None:
return settings.SESSION_EXPIRE_AT_BROWSER_CLOSE
return self.get('_session_expiry') == 0
def flush(self):
"""
Removes the current session data from the database and regenerates the
key.
"""
self.clear()
self.delete()
self._session_key = None
def cycle_key(self):
"""
Creates a new session key, whilst retaining the current session data.
"""
data = self._session_cache
key = self.session_key
self.create()
self._session_cache = data
self.delete(key)
# Methods that child classes must implement.
def exists(self, session_key):
"""
Returns True if the given session_key already exists.
"""
raise NotImplementedError('subclasses of SessionBase must provide an exists() method')
def create(self):
"""
Creates a new session instance. Guaranteed to create a new object with
a unique key and will have saved the result once (with empty data)
before the method returns.
"""
raise NotImplementedError('subclasses of SessionBase must provide a create() method')
def save(self, must_create=False):
"""
Saves the session data. If 'must_create' is True, a new session object
is created (otherwise a CreateError exception is raised). Otherwise,
save() can update an existing object with the same key.
"""
raise NotImplementedError('subclasses of SessionBase must provide a save() method')
def delete(self, session_key=None):
"""
Deletes the session data under this key. If the key is None, the
current session key value is used.
"""
raise NotImplementedError('subclasses of SessionBase must provide a delete() method')
def load(self):
"""
Loads the session data and returns a dictionary.
"""
raise NotImplementedError('subclasses of SessionBase must provide a load() method')
@classmethod
def clear_expired(cls):
"""
Remove expired sessions from the session store.
If this operation isn't possible on a given backend, it should raise
NotImplementedError. If it isn't necessary, because the backend has
a built-in expiration mechanism, it should be a no-op.
"""
raise NotImplementedError('This backend does not support clear_expired().')
| bsd-3-clause | -4,010,524,202,764,970,500 | 33.221591 | 94 | 0.61398 | false |
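As a reading aid for the class above (not part of the Django file): exists/create/save/delete/load are the whole surface a concrete backend must provide; everything else, including expiry handling via set_expiry()/get_expiry_age(), is inherited. A toy in-memory backend, sketched for exposition only and not meant for real use:
from django.contrib.sessions.backends.base import CreateError, SessionBase
_store = {}  # module-level dict standing in for a real storage engine
class InMemorySession(SessionBase):
    def exists(self, session_key):
        return session_key in _store
    def create(self):
        while True:
            self._session_key = self._get_new_session_key()
            try:
                self.save(must_create=True)
            except CreateError:
                continue  # key collision; try another key
            self.modified = True
            return
    def save(self, must_create=False):
        key = self._get_or_create_session_key()
        if must_create and key in _store:
            raise CreateError
        _store[key] = self.encode(self._get_session(no_load=must_create))
    def delete(self, session_key=None):
        _store.pop(session_key or self.session_key, None)
    def load(self):
        try:
            return self.decode(_store[self.session_key])
        except KeyError:
            self._session_key = None
            return {}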
paveu/api_mocker | apimocker/mocker/migrations/0002_auto_20170718_1858.py | 1 | 1459 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-18 18:58
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('mocker', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='ResponseContent',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('create_date', models.DateTimeField(auto_now_add=True, verbose_name='Create date')),
('update_date', models.DateTimeField(auto_now=True, verbose_name='Update date')),
('destination_address', models.URLField(null=True, verbose_name='Called API')),
('content', models.TextField(null=True, verbose_name='API Response')),
],
options={
'verbose_name': 'API Log',
'verbose_name_plural': 'API Logs',
},
),
migrations.RemoveField(
model_name='mocker',
name='api_log',
),
migrations.DeleteModel(
name='APILog',
),
migrations.AddField(
model_name='responsecontent',
name='mocker',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='mocker.Mocker'),
),
]
| mit | 3,768,099,984,195,887,000 | 33.738095 | 114 | 0.564085 | false |
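Reconstructed from the CreateModel/AddField operations above, the resulting model is roughly the following; this is a sketch inferred from the migration, not the project's actual models.py:
from django.db import models
class ResponseContent(models.Model):
    create_date = models.DateTimeField('Create date', auto_now_add=True)
    update_date = models.DateTimeField('Update date', auto_now=True)
    destination_address = models.URLField('Called API', null=True)
    content = models.TextField('API Response', null=True)
    mocker = models.ForeignKey('mocker.Mocker', null=True, on_delete=models.CASCADE)
    class Meta:
        verbose_name = 'API Log'
        verbose_name_plural = 'API Logs'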
pyrrho314/recipesystem | trunk/astrodata/adutils/irafutil.py | 1 | 6759 |
'''This file contains the following utilities:
joinlines (input, delim=" ", missing="Missing", maxchars=161,
shortest=True)
joinlists (list1, list2, delim=" ", missing="Missing", shortest=True)
atList (input, filenames)
expandlist (input)
'''
import copy
# NOTE: atList()/expandlist() below call irafutil.expandFileName(); the module
# providing that helper is not imported in this excerpt and is assumed to be
# available where this file is actually used.
#---------------------------------------------------------------------------
def joinlines (input, delim=" ", missing="Missing",
maxchars=161, shortest=True):
"""Join lines from the input list of files.
This is an implementation of the iraf.proto.joinlines task, with
the following differences: The result is as a list of strings,
returned as the function value, rather than writing to standard
output. There is no verbose mode. No warnings will be printed.
@param input: names of files, separated by commas (and optional
whitespace)
@type input: string
@param delim: delimiter to separate joined lines
@type delim: string
@param missing: string to use for files with fewer lines,
if shortest is False
@type missing: string
@param maxchars: the output strings will be truncated after this length
@type maxchars: int
@param shortest: if True, the number of elements in the function
value will be the smallest number of lines in any input file;
if False, the number of elements will be the largest number of
lines in any input file
@type shortest: Boolean
@return: the contents of the input files
@rtype: list of strings
"""
filenames = input.split (",")
if not filenames[0]: # an empty string?
return filenames
for i in range (len (filenames)):
filenames[i] = filenames[i].strip()
# There will be one element of all_lines for each file in input;
# all_lines[i] will be a list of the lines (with leading and
# trailing whitespace removed) of file i from input.
all_lines = []
first = True
for name in filenames:
fd = open (name)
lines = fd.readlines()
fd.close()
for i in range (len (lines)):
lines[i] = lines[i].strip()
all_lines.append (copy.deepcopy (lines))
numlines = len (lines)
if first:
min_numlines = numlines
max_numlines = numlines
first = False
else:
min_numlines = min (numlines, min_numlines)
max_numlines = max (numlines, max_numlines)
if min_numlines < max_numlines:
if shortest:
numlines = min_numlines
else:
numlines = max_numlines
if len (all_lines[0]) > numlines:
result = all_lines[0][0:numlines]
else:
result = all_lines[0]
for k in range (len (result), numlines):
result.append (missing)
for i in range (1, len (all_lines)):
lines = all_lines[i]
for j in range (len (lines)):
if j >= numlines:
break
result[j] = result[j] + delim + lines[j]
for j in range (len (lines), numlines):
result[j] = result[j] + delim + missing
for j in range (len (result)):
result[j] = result[j][0:maxchars]
return result
#---------------------------------------------------------------------------
def joinlists (list1, list2, delim=" ", missing="Missing", shortest=True):
"""Join corresponding elements from two input lists.
This is similar to the iraf.proto.joinlines task, except that the
input is a pair of lists rather than files (just two input lists),
    and the result is a list of strings, returned as the function
    value, rather than being written to standard output. There is no verbose
mode. No warnings will be printed.
@param list1: a list of values
@type list1: list
@param list2: another list of values
@type list2: list
@param delim: delimiter to separate joined elements
@type delim: string
@param missing: string to use for lists with fewer lines,
if shortest is False
@type missing: string
@param shortest: if True, the number of elements in the function
value will be the smaller of the number of lines in either of
the input lists;
if False, the number of elements will be the larger of the
number lines in either input list
@type shortest: Boolean
@return: the contents of the input lists
@rtype: list of strings
"""
len1 = len (list1)
len2 = len (list2)
min_numlines = min (len1, len2)
max_numlines = max (len1, len2)
if min_numlines < max_numlines:
if shortest:
numlines = min_numlines
else:
numlines = max_numlines
else:
numlines = len1
result = []
for i in range (numlines):
if i > len1-1:
result.append (missing + delim + str (list2[i]))
elif i > len2-1:
result.append (str (list1[i]) + delim + missing)
else:
result.append (str (list1[i]) + delim + str (list2[i]))
return result
#---------------------------------------------------------------------------
def atList (input, filenames):
"""Either append the current name, or read contents if it's a file.
@param input: one or more names (or @name) separated by commas
@type input: string
@param filenames: (modified in-place) a list of the names extracted
from input, or from the contents of input if it's an '@file'
@type filenames: list
"""
input = input.strip()
if not input:
return
if input[0] == '@' and input.find (',') < 0:
# just a single word, and it begins with '@'
line = irafutil.expandFileName (input[1:]) # expand environment var.
fd = open (line)
lines = fd.readlines()
fd.close()
else:
# one or more words, and the first does not begin with '@'
lines = input.split (',')
for line in lines:
line = line.strip()
if line[0] == '@':
atList (line, filenames)
else:
line = irafutil.expandFileName (line)
filenames.append (line)
#---------------------------------------------------------------------------
def expandlist (input):
"""Convert a string of comma-separated names to a list of names.
@param input: one or more names separated by commas;
a name of the form '@filename' implies that 'filename' is
the name of a file containing names
@type input: string
@return: list of the names in 'input'
@rtype: list of strings
"""
filenames = []
atList (input, filenames)
return filenames
#---------------------------------------------------------------------------
| mpl-2.0 | 6,242,744,817,355,163,000 | 30.732394 | 77 | 0.576417 | false |
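A small self-contained illustration of joinlists() from the file above (joinlines() behaves the same way but reads its columns from files); the import path is an assumption based on the file location shown in the row:
from astrodata.adutils.irafutil import joinlists  # path assumed from the row above
names = ["alpha", "beta", "gamma"]
values = [1, 2]
print(joinlists(names, values, delim=",", shortest=False))
# -> ['alpha,1', 'beta,2', 'gamma,Missing']
print(joinlists(names, values, delim=",", shortest=True))
# -> ['alpha,1', 'beta,2']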