# tensorflow/tensorflow: tensorflow/python/ops/image_ops_impl.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of image ops."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import functools

import numpy as np

from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_image_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import sort_ops
from tensorflow.python.ops import stateless_random_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variables
from tensorflow.python.util import deprecation
from tensorflow.python.util import dispatch
from tensorflow.python.util.tf_export import tf_export

ops.NotDifferentiable('RandomCrop')
# TODO(b/31222613): This op may be differentiable, and there may be
# latent bugs here.
ops.NotDifferentiable('HSVToRGB')
ops.NotDifferentiable('DrawBoundingBoxes')
ops.NotDifferentiable('SampleDistortedBoundingBox')
ops.NotDifferentiable('SampleDistortedBoundingBoxV2')
# TODO(bsteiner): Implement the gradient function for extract_glimpse
# TODO(b/31222613): This op may be differentiable, and there may be
# latent bugs here.
ops.NotDifferentiable('ExtractGlimpse')
ops.NotDifferentiable('NonMaxSuppression')
ops.NotDifferentiable('NonMaxSuppressionV2')
ops.NotDifferentiable('NonMaxSuppressionWithOverlaps')
ops.NotDifferentiable('GenerateBoundingBoxProposals')


# pylint: disable=invalid-name
def _assert(cond, ex_type, msg):
  """A polymorphic assert, works with tensors and boolean expressions.

  If `cond` is not a tensor, behave like an ordinary assert statement, except
  that an empty list is returned. If `cond` is a tensor, return a list
  containing a single TensorFlow assert op.

  Args:
    cond: Something that evaluates to a boolean value. May be a tensor.
    ex_type: The exception class to use.
    msg: The error message.

  Returns:
    A list, containing at most one assert op.
  """
  if _is_tensor(cond):
    return [control_flow_ops.Assert(cond, [msg])]
  else:
    if not cond:
      raise ex_type(msg)
    else:
      return []


def _is_tensor(x):
  """Returns `True` if `x` is a symbolic tensor-like object.

  Args:
    x: A python object to check.

  Returns:
    `True` if `x` is a `tf.Tensor` or `tf.Variable`, otherwise `False`.
  """
  return isinstance(x, (ops.Tensor, variables.Variable))


def _ImageDimensions(image, rank):
  """Returns the dimensions of an image tensor.

  Args:
    image: A rank-D Tensor. For 3-D of shape: `[height, width, channels]`.
    rank: The expected rank of the image.

  Returns:
    A list corresponding to the dimensions of the input image. Dimensions
    that are statically known are python integers, otherwise, they are
    integer scalar tensors.
  """
  if image.get_shape().is_fully_defined():
    return image.get_shape().as_list()
  else:
    static_shape = image.get_shape().with_rank(rank).as_list()
    dynamic_shape = array_ops.unstack(array_ops.shape(image), rank)
    return [
        s if s is not None else d for s, d in zip(static_shape, dynamic_shape)
    ]


def _Check3DImage(image, require_static=True):
  """Assert that we are working with a properly shaped image.

  Args:
    image: 3-D Tensor of shape [height, width, channels]
    require_static: If `True`, requires that all dimensions of `image` are
      known and non-zero.

  Raises:
    ValueError: if `image.shape` is not a 3-vector.

  Returns:
    An empty list, if `image` has fully defined dimensions. Otherwise, a list
    containing an assert op is returned.
  """
  try:
    image_shape = image.get_shape().with_rank(3)
  except ValueError:
    raise ValueError("'image' (shape %s) must be three-dimensional." %
                     image.shape)
  if require_static and not image_shape.is_fully_defined():
    raise ValueError("'image' (shape %s) must be fully defined." % image_shape)
  if any(x == 0 for x in image_shape):
    raise ValueError("all dims of 'image.shape' must be > 0: %s" % image_shape)
  if not image_shape.is_fully_defined():
    return [
        check_ops.assert_positive(
            array_ops.shape(image),
            ["all dims of 'image.shape' "
             'must be > 0.'])
    ]
  else:
    return []


def _Assert3DImage(image):
  """Assert that we are working with a properly shaped image.

  Performs the check statically if possible (i.e. if the shape is statically
  known). Otherwise adds a control dependency to an assert op that checks the
  dynamic shape.

  Args:
    image: 3-D Tensor of shape [height, width, channels]

  Raises:
    ValueError: if `image.shape` is not a 3-vector.

  Returns:
    If the shape of `image` could be verified statically, `image` is
    returned unchanged, otherwise there will be a control dependency
    added that asserts the correct dynamic shape.
  """
  return control_flow_ops.with_dependencies(
      _Check3DImage(image, require_static=False), image)


def _AssertAtLeast3DImage(image):
  """Assert that we are working with a properly shaped image.

  Performs the check statically if possible (i.e. if the shape is statically
  known). Otherwise adds a control dependency to an assert op that checks the
  dynamic shape.

  Args:
    image: >= 3-D Tensor of size [*, height, width, depth]

  Raises:
    ValueError: if image.shape is not a [>= 3] vector.

  Returns:
    If the shape of `image` could be verified statically, `image` is
    returned unchanged, otherwise there will be a control dependency
    added that asserts the correct dynamic shape.
  """
  return control_flow_ops.with_dependencies(
      _CheckAtLeast3DImage(image, require_static=False), image)


def _CheckAtLeast3DImage(image, require_static=True):
  """Assert that we are working with a properly shaped image.

  Args:
    image: >= 3-D Tensor of size [*, height, width, depth]
    require_static: If `True`, requires that all dimensions of `image` are
      known and non-zero.

  Raises:
    ValueError: if image.shape is not a [>= 3] vector.

  Returns:
    An empty list, if `image` has fully defined dimensions. Otherwise, a list
    containing an assert op is returned.
""" try: if image.get_shape().ndims is None: image_shape = image.get_shape().with_rank(3) else: image_shape = image.get_shape().with_rank_at_least(3) except ValueError: raise ValueError("'image' (shape %s) must be at least three-dimensional." % image.shape) if require_static and not image_shape.is_fully_defined(): raise ValueError('\'image\' must be fully defined.') if any(x == 0 for x in image_shape[-3:]): raise ValueError('inner 3 dims of \'image.shape\' must be > 0: %s' % image_shape) if not image_shape[-3:].is_fully_defined(): return [ check_ops.assert_positive( array_ops.shape(image)[-3:], ["inner 3 dims of 'image.shape' " 'must be > 0.']), check_ops.assert_greater_equal( array_ops.rank(image), 3, message="'image' must be at least three-dimensional.") ] else: return [] def _AssertGrayscaleImage(image): """Assert that we are working with a properly shaped grayscale image. Performs the check statically if possible (i.e. if the shape is statically known). Otherwise adds a control dependency to an assert op that checks the dynamic shape. Args: image: >= 2-D Tensor of size [*, 1] Raises: ValueError: if image.shape is not a [>= 2] vector or if last dimension is not size 1. Returns: If the shape of `image` could be verified statically, `image` is returned unchanged, otherwise there will be a control dependency added that asserts the correct dynamic shape. """ return control_flow_ops.with_dependencies( _CheckGrayscaleImage(image, require_static=False), image) def _CheckGrayscaleImage(image, require_static=True): """Assert that we are working with properly shaped grayscale image. Args: image: >= 2-D Tensor of size [*, 1] require_static: Boolean, whether static shape is required. Raises: ValueError: if image.shape is not a [>= 2] vector or if last dimension is not size 1. Returns: An empty list, if `image` has fully defined dimensions. Otherwise, a list containing an assert op is returned. """ try: if image.get_shape().ndims is None: image_shape = image.get_shape().with_rank(2) else: image_shape = image.get_shape().with_rank_at_least(2) except ValueError: raise ValueError('A grayscale image (shape %s) must be at least ' 'two-dimensional.' % image.shape) if require_static and not image_shape.is_fully_defined(): raise ValueError('\'image\' must be fully defined.') if image_shape.is_fully_defined(): if image_shape[-1] != 1: raise ValueError('Last dimension of a grayscale image should be size 1.') if not image_shape.is_fully_defined(): return [ check_ops.assert_equal( array_ops.shape(image)[-1], 1, message='Last dimension of a grayscale image should be size 1.'), check_ops.assert_greater_equal( array_ops.rank(image), 3, message='A grayscale image must be at least two-dimensional.') ] else: return [] def fix_image_flip_shape(image, result): """Set the shape to 3 dimensional if we don't know anything else. Args: image: original image size result: flipped or transformed image Returns: An image whose shape is at least (None, None, None). """ image_shape = image.get_shape() if image_shape == tensor_shape.unknown_shape(): result.set_shape([None, None, None]) else: result.set_shape(image_shape) return result @tf_export('image.random_flip_up_down') @dispatch.add_dispatch_support def random_flip_up_down(image, seed=None): """Randomly flips an image vertically (upside down). With a 1 in 2 chance, outputs the contents of `image` flipped along the first dimension, which is `height`. Otherwise, output the image as-is. 
When passing a batch of images, each image will be randomly flipped independent of other images. Example usage: >>> image = np.array([[[1], [2]], [[3], [4]]]) >>> tf.image.random_flip_up_down(image, 3).numpy().tolist() [[[3], [4]], [[1], [2]]] Randomly flip multiple images. >>> images = np.array( ... [ ... [[[1], [2]], [[3], [4]]], ... [[[5], [6]], [[7], [8]]] ... ]) >>> tf.image.random_flip_up_down(images, 4).numpy().tolist() [[[[3], [4]], [[1], [2]]], [[[5], [6]], [[7], [8]]]] For producing deterministic results given a `seed` value, use `tf.image.stateless_random_flip_up_down`. Unlike using the `seed` param with `tf.image.random_*` ops, `tf.image.stateless_random_*` ops guarantee the same results given the same seed independent of how many times the function is called, and independent of global seed settings (e.g. tf.random.set_seed). Args: image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. seed: A Python integer. Used to create a random seed. See `tf.compat.v1.set_random_seed` for behavior. Returns: A tensor of the same type and shape as `image`. Raises: ValueError: if the shape of `image` not supported. """ random_func = functools.partial(random_ops.random_uniform, seed=seed) return _random_flip(image, 0, random_func, 'random_flip_up_down') @tf_export('image.random_flip_left_right') @dispatch.add_dispatch_support def random_flip_left_right(image, seed=None): """Randomly flip an image horizontally (left to right). With a 1 in 2 chance, outputs the contents of `image` flipped along the second dimension, which is `width`. Otherwise output the image as-is. When passing a batch of images, each image will be randomly flipped independent of other images. Example usage: >>> image = np.array([[[1], [2]], [[3], [4]]]) >>> tf.image.random_flip_left_right(image, 5).numpy().tolist() [[[2], [1]], [[4], [3]]] Randomly flip multiple images. >>> images = np.array( ... [ ... [[[1], [2]], [[3], [4]]], ... [[[5], [6]], [[7], [8]]] ... ]) >>> tf.image.random_flip_left_right(images, 6).numpy().tolist() [[[[2], [1]], [[4], [3]]], [[[5], [6]], [[7], [8]]]] For producing deterministic results given a `seed` value, use `tf.image.stateless_random_flip_left_right`. Unlike using the `seed` param with `tf.image.random_*` ops, `tf.image.stateless_random_*` ops guarantee the same results given the same seed independent of how many times the function is called, and independent of global seed settings (e.g. tf.random.set_seed). Args: image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. seed: A Python integer. Used to create a random seed. See `tf.compat.v1.set_random_seed` for behavior. Returns: A tensor of the same type and shape as `image`. Raises: ValueError: if the shape of `image` not supported. """ random_func = functools.partial(random_ops.random_uniform, seed=seed) return _random_flip(image, 1, random_func, 'random_flip_left_right') @tf_export('image.stateless_random_flip_left_right', v1=[]) @dispatch.add_dispatch_support def stateless_random_flip_left_right(image, seed): """Randomly flip an image horizontally (left to right) deterministically. Guarantees the same results given the same `seed` independent of how many times the function is called, and independent of global seed settings (e.g. `tf.random.set_seed`). 
Example usage: >>> image = np.array([[[1], [2]], [[3], [4]]]) >>> seed = (2, 3) >>> tf.image.stateless_random_flip_left_right(image, seed).numpy().tolist() [[[2], [1]], [[4], [3]]] Args: image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. seed: A shape [2] Tensor, the seed to the random number generator. Must have dtype `int32` or `int64`. (When using XLA, only `int32` is allowed.) Returns: A tensor of the same type and shape as `image`. """ random_func = functools.partial( stateless_random_ops.stateless_random_uniform, seed=seed) return _random_flip( image, 1, random_func, 'stateless_random_flip_left_right') @tf_export('image.stateless_random_flip_up_down', v1=[]) @dispatch.add_dispatch_support def stateless_random_flip_up_down(image, seed): """Randomly flip an image vertically (upside down) deterministically. Guarantees the same results given the same `seed` independent of how many times the function is called, and independent of global seed settings (e.g. `tf.random.set_seed`). Example usage: >>> image = np.array([[[1], [2]], [[3], [4]]]) >>> seed = (2, 3) >>> tf.image.stateless_random_flip_up_down(image, seed).numpy().tolist() [[[3], [4]], [[1], [2]]] Args: image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. seed: A shape [2] Tensor, the seed to the random number generator. Must have dtype `int32` or `int64`. (When using XLA, only `int32` is allowed.) Returns: A tensor of the same type and shape as `image`. """ random_func = functools.partial( stateless_random_ops.stateless_random_uniform, seed=seed) return _random_flip( image, 0, random_func, 'stateless_random_flip_up_down') def _random_flip(image, flip_index, random_func, scope_name): """Randomly (50% chance) flip an image along axis `flip_index`. Args: image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. flip_index: Dimension along which to flip the image. Vertical is 0, Horizontal is 1. random_func: partial function for calling either stateful or stateless random ops with `seed` parameter specified. scope_name: Name of the scope in which the ops are added. Returns: A tensor of the same type and shape as `image`. Raises: ValueError: if the shape of `image` not supported. """ with ops.name_scope(None, scope_name, [image]) as scope: image = ops.convert_to_tensor(image, name='image') image = _AssertAtLeast3DImage(image) shape = image.get_shape() def f_rank3(): uniform_random = random_func(shape=[], minval=0, maxval=1.0) mirror_cond = math_ops.less(uniform_random, .5) result = control_flow_ops.cond( mirror_cond, lambda: array_ops.reverse(image, [flip_index]), lambda: image, name=scope) return fix_image_flip_shape(image, result) def f_rank4(): batch_size = array_ops.shape(image)[0] uniform_random = random_func(shape=[batch_size], minval=0, maxval=1.0) flips = math_ops.round( array_ops.reshape(uniform_random, [batch_size, 1, 1, 1])) flips = math_ops.cast(flips, image.dtype) flipped_input = array_ops.reverse(image, [flip_index + 1]) return flips * flipped_input + (1 - flips) * image if shape.ndims is None: rank = array_ops.rank(image) return control_flow_ops.cond(math_ops.equal(rank, 3), f_rank3, f_rank4) if shape.ndims == 3: return f_rank3() elif shape.ndims == 4: return f_rank4() else: raise ValueError( '\'image\' (shape %s) must have either 3 or 4 dimensions.' 
% shape)


@tf_export('image.flip_left_right')
@dispatch.add_dispatch_support
def flip_left_right(image):
  """Flip an image horizontally (left to right).

  Outputs the contents of `image` flipped along the width dimension.

  See also `tf.reverse`.

  Usage Example:

  >>> x = [[[1.0, 2.0, 3.0],
  ...       [4.0, 5.0, 6.0]],
  ...      [[7.0, 8.0, 9.0],
  ...       [10.0, 11.0, 12.0]]]
  >>> tf.image.flip_left_right(x)
  <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=
  array([[[ 4.,  5.,  6.],
          [ 1.,  2.,  3.]],
         [[10., 11., 12.],
          [ 7.,  8.,  9.]]], dtype=float32)>

  Args:
    image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D
      Tensor of shape `[height, width, channels]`.

  Returns:
    A tensor of the same type and shape as `image`.

  Raises:
    ValueError: if the shape of `image` not supported.
  """
  return _flip(image, 1, 'flip_left_right')


@tf_export('image.flip_up_down')
@dispatch.add_dispatch_support
def flip_up_down(image):
  """Flip an image vertically (upside down).

  Outputs the contents of `image` flipped along the height dimension.

  See also `reverse()`.

  Usage Example:

  >>> x = [[[1.0, 2.0, 3.0],
  ...       [4.0, 5.0, 6.0]],
  ...      [[7.0, 8.0, 9.0],
  ...       [10.0, 11.0, 12.0]]]
  >>> tf.image.flip_up_down(x)
  <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=
  array([[[ 7.,  8.,  9.],
          [10., 11., 12.]],
         [[ 1.,  2.,  3.],
          [ 4.,  5.,  6.]]], dtype=float32)>

  Args:
    image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D
      Tensor of shape `[height, width, channels]`.

  Returns:
    A `Tensor` of the same type and shape as `image`.

  Raises:
    ValueError: if the shape of `image` not supported.
  """
  return _flip(image, 0, 'flip_up_down')


def _flip(image, flip_index, scope_name):
  """Flip an image either horizontally or vertically.

  Outputs the contents of `image` flipped along the dimension `flip_index`.

  See also `reverse()`.

  Args:
    image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D
      Tensor of shape `[height, width, channels]`.
    flip_index: 0 for vertical, 1 for horizontal.
    scope_name: string, scope name.

  Returns:
    A `Tensor` of the same type and shape as `image`.

  Raises:
    ValueError: if the shape of `image` not supported.
  """
  with ops.name_scope(None, scope_name, [image]):
    image = ops.convert_to_tensor(image, name='image')
    image = _AssertAtLeast3DImage(image)
    shape = image.get_shape()

    def f_rank3():
      return fix_image_flip_shape(image,
                                  array_ops.reverse(image, [flip_index]))

    def f_rank4():
      return array_ops.reverse(image, [flip_index + 1])

    if shape.ndims is None:
      rank = array_ops.rank(image)
      return control_flow_ops.cond(math_ops.equal(rank, 3), f_rank3, f_rank4)
    elif shape.ndims == 3:
      return f_rank3()
    elif shape.ndims == 4:
      return f_rank4()
    else:
      raise ValueError(
          '\'image\' (shape %s) must have either 3 or 4 dimensions.' % shape)


@tf_export('image.rot90')
@dispatch.add_dispatch_support
def rot90(image, k=1, name=None):
  """Rotate image(s) counter-clockwise by 90 degrees.

  For example:

  >>> a=tf.constant([[[1],[2]],
  ...                [[3],[4]]])
  >>> # rotating `a` counter clockwise by 90 degrees
  >>> a_rot=tf.image.rot90(a)
  >>> print(a_rot[...,0].numpy())
  [[2 4]
   [1 3]]
  >>> # rotating `a` counter clockwise by 270 degrees
  >>> a_rot=tf.image.rot90(a, k=3)
  >>> print(a_rot[...,0].numpy())
  [[3 1]
   [4 2]]

  Args:
    image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D
      Tensor of shape `[height, width, channels]`.
    k: A scalar integer. The number of times the image is rotated by 90
      degrees.
    name: A name for this operation (optional).

  Returns:
    A rotated tensor of the same type and shape as `image`.

  Raises:
    ValueError: if the shape of `image` not supported.
""" with ops.name_scope(name, 'rot90', [image, k]) as scope: image = ops.convert_to_tensor(image, name='image') image = _AssertAtLeast3DImage(image) k = ops.convert_to_tensor(k, dtype=dtypes.int32, name='k') k.get_shape().assert_has_rank(0) k = math_ops.mod(k, 4) shape = image.get_shape() if shape.ndims is None: rank = array_ops.rank(image) def f_rank3(): return _rot90_3D(image, k, scope) def f_rank4(): return _rot90_4D(image, k, scope) return control_flow_ops.cond(math_ops.equal(rank, 3), f_rank3, f_rank4) elif shape.ndims == 3: return _rot90_3D(image, k, scope) elif shape.ndims == 4: return _rot90_4D(image, k, scope) else: raise ValueError( '\'image\' (shape %s) must have either 3 or 4 dimensions.' % shape) def _rot90_3D(image, k, name_scope): """Rotate image counter-clockwise by 90 degrees `k` times. Args: image: 3-D Tensor of shape `[height, width, channels]`. k: A scalar integer. The number of times the image is rotated by 90 degrees. name_scope: A valid TensorFlow name scope. Returns: A 3-D tensor of the same type and shape as `image`. """ def _rot90(): return array_ops.transpose(array_ops.reverse_v2(image, [1]), [1, 0, 2]) def _rot180(): return array_ops.reverse_v2(image, [0, 1]) def _rot270(): return array_ops.reverse_v2(array_ops.transpose(image, [1, 0, 2]), [1]) cases = [(math_ops.equal(k, 1), _rot90), (math_ops.equal(k, 2), _rot180), (math_ops.equal(k, 3), _rot270)] result = control_flow_ops.case( cases, default=lambda: image, exclusive=True, name=name_scope) result.set_shape([None, None, image.get_shape()[2]]) return result def _rot90_4D(images, k, name_scope): """Rotate batch of images counter-clockwise by 90 degrees `k` times. Args: images: 4-D Tensor of shape `[height, width, channels]`. k: A scalar integer. The number of times the images are rotated by 90 degrees. name_scope: A valid TensorFlow name scope. Returns: A 4-D `Tensor` of the same type and shape as `images`. """ def _rot90(): return array_ops.transpose(array_ops.reverse_v2(images, [2]), [0, 2, 1, 3]) def _rot180(): return array_ops.reverse_v2(images, [1, 2]) def _rot270(): return array_ops.reverse_v2(array_ops.transpose(images, [0, 2, 1, 3]), [2]) cases = [(math_ops.equal(k, 1), _rot90), (math_ops.equal(k, 2), _rot180), (math_ops.equal(k, 3), _rot270)] result = control_flow_ops.case( cases, default=lambda: images, exclusive=True, name=name_scope) shape = result.get_shape() result.set_shape([shape[0], None, None, shape[3]]) return result @tf_export('image.transpose', v1=['image.transpose', 'image.transpose_image']) @dispatch.add_dispatch_support def transpose(image, name=None): """Transpose image(s) by swapping the height and width dimension. Usage Example: >>> x = [[[1.0, 2.0, 3.0], ... [4.0, 5.0, 6.0]], ... [[7.0, 8.0, 9.0], ... [10.0, 11.0, 12.0]]] >>> tf.image.transpose(x) <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy= array([[[ 1., 2., 3.], [ 7., 8., 9.]], [[ 4., 5., 6.], [10., 11., 12.]]], dtype=float32)> Args: image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. name: A name for this operation (optional). Returns: If `image` was 4-D, a 4-D float Tensor of shape `[batch, width, height, channels]` If `image` was 3-D, a 3-D float Tensor of shape `[width, height, channels]` Raises: ValueError: if the shape of `image` not supported. Usage Example: >>> image = [[[1, 2], [3, 4]], ... [[5, 6], [7, 8]], ... 
[[9, 10], [11, 12]]]
  >>> image = tf.constant(image)
  >>> tf.image.transpose(image)
  <tf.Tensor: shape=(2, 3, 2), dtype=int32, numpy=
  array([[[ 1,  2],
          [ 5,  6],
          [ 9, 10]],
         [[ 3,  4],
          [ 7,  8],
          [11, 12]]], dtype=int32)>
  """
  with ops.name_scope(name, 'transpose', [image]):
    image = ops.convert_to_tensor(image, name='image')
    image = _AssertAtLeast3DImage(image)
    shape = image.get_shape()
    if shape.ndims is None:
      rank = array_ops.rank(image)

      def f_rank3():
        return array_ops.transpose(image, [1, 0, 2], name=name)

      def f_rank4():
        return array_ops.transpose(image, [0, 2, 1, 3], name=name)

      return control_flow_ops.cond(math_ops.equal(rank, 3), f_rank3, f_rank4)
    elif shape.ndims == 3:
      return array_ops.transpose(image, [1, 0, 2], name=name)
    elif shape.ndims == 4:
      return array_ops.transpose(image, [0, 2, 1, 3], name=name)
    else:
      raise ValueError(
          '\'image\' (shape %s) must have either 3 or 4 dimensions.' % shape)


@tf_export('image.central_crop')
@dispatch.add_dispatch_support
def central_crop(image, central_fraction):
  """Crop the central region of the image(s).

  Remove the outer parts of an image but retain the central region of the
  image along each dimension. If we specify central_fraction = 0.5, this
  function returns the region marked with "X" in the below diagram.

       --------
      |        |
      |  XXXX  |
      |  XXXX  |   where "X" is the central 50% of the image.
      |        |
       --------

  This function works on either a single image (`image` is a 3-D Tensor), or a
  batch of images (`image` is a 4-D Tensor).

  Usage Example:

  >>> x = [[[1.0, 2.0, 3.0],
  ...       [4.0, 5.0, 6.0],
  ...       [7.0, 8.0, 9.0],
  ...       [10.0, 11.0, 12.0]],
  ...      [[13.0, 14.0, 15.0],
  ...       [16.0, 17.0, 18.0],
  ...       [19.0, 20.0, 21.0],
  ...       [22.0, 23.0, 24.0]],
  ...      [[25.0, 26.0, 27.0],
  ...       [28.0, 29.0, 30.0],
  ...       [31.0, 32.0, 33.0],
  ...       [34.0, 35.0, 36.0]],
  ...      [[37.0, 38.0, 39.0],
  ...       [40.0, 41.0, 42.0],
  ...       [43.0, 44.0, 45.0],
  ...       [46.0, 47.0, 48.0]]]
  >>> tf.image.central_crop(x, 0.5)
  <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=
  array([[[16., 17., 18.],
          [19., 20., 21.]],
         [[28., 29., 30.],
          [31., 32., 33.]]], dtype=float32)>

  Args:
    image: Either a 3-D float Tensor of shape [height, width, depth], or a
      4-D Tensor of shape [batch_size, height, width, depth].
    central_fraction: float (0, 1], fraction of size to crop

  Raises:
    ValueError: if central_crop_fraction is not within (0, 1].

  Returns:
    3-D / 4-D float Tensor, as per the input.
  """
  with ops.name_scope(None, 'central_crop', [image]):
    image = ops.convert_to_tensor(image, name='image')
    central_fraction_static = tensor_util.constant_value(central_fraction)
    if central_fraction_static is not None:
      if central_fraction_static <= 0.0 or central_fraction_static > 1.0:
        raise ValueError('central_fraction must be within (0, 1]')
      if central_fraction_static == 1.0:
        return image
    else:
      assert_ops = _assert(
          math_ops.logical_and(central_fraction > 0.0,
                               central_fraction <= 1.0), ValueError,
          'central_fraction must be within (0, 1]')
      image = control_flow_ops.with_dependencies(assert_ops, image)

    _AssertAtLeast3DImage(image)
    rank = image.get_shape().ndims
    if rank != 3 and rank != 4:
      raise ValueError('`image` should either be a Tensor with rank = 3 or '
                       'rank = 4. Had rank = {}.'.format(rank))

    # Helper method to return the `idx`-th dimension of `tensor`, along with
    # a boolean signifying if the dimension is dynamic.
    def _get_dim(tensor, idx):
      static_shape = tensor.get_shape().dims[idx].value
      if static_shape is not None:
        return static_shape, False
      return array_ops.shape(tensor)[idx], True

    # Get the height, width, depth (and batch size, if the image is a 4-D
    # tensor).
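    # Illustrative note (not part of the original implementation): for the
    # 4x4x3 doctest input above with central_fraction=0.5, every dimension is
    # static, so _get_dim returns (4, False) for both height and width and the
    # crop offsets computed below are plain Python ints
    # (bbox_h_start = int((4 - 4 * 0.5) / 2) = 1, bbox_h_size = 4 - 2 * 1 = 2);
    # with an unknown shape they would instead be int32 scalar tensors.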
    if rank == 3:
      img_h, dynamic_h = _get_dim(image, 0)
      img_w, dynamic_w = _get_dim(image, 1)
      img_d = image.get_shape()[2]
    else:
      img_bs = image.get_shape()[0]
      img_h, dynamic_h = _get_dim(image, 1)
      img_w, dynamic_w = _get_dim(image, 2)
      img_d = image.get_shape()[3]

    dynamic_h = dynamic_h or (central_fraction_static is None)
    dynamic_w = dynamic_w or (central_fraction_static is None)

    # Compute the bounding boxes for the crop. The type and value of the
    # bounding boxes depend on the `image` tensor's rank and whether or not
    # the dimensions are statically defined.
    if dynamic_h:
      img_hd = math_ops.cast(img_h, dtypes.float64)
      bbox_h_start = math_ops.cast(
          (img_hd - img_hd * math_ops.cast(central_fraction, dtypes.float64)) /
          2, dtypes.int32)
    else:
      img_hd = float(img_h)
      bbox_h_start = int((img_hd - img_hd * central_fraction_static) / 2)

    if dynamic_w:
      img_wd = math_ops.cast(img_w, dtypes.float64)
      bbox_w_start = math_ops.cast(
          (img_wd - img_wd * math_ops.cast(central_fraction, dtypes.float64)) /
          2, dtypes.int32)
    else:
      img_wd = float(img_w)
      bbox_w_start = int((img_wd - img_wd * central_fraction_static) / 2)

    bbox_h_size = img_h - bbox_h_start * 2
    bbox_w_size = img_w - bbox_w_start * 2

    if rank == 3:
      bbox_begin = array_ops.stack([bbox_h_start, bbox_w_start, 0])
      bbox_size = array_ops.stack([bbox_h_size, bbox_w_size, -1])
    else:
      bbox_begin = array_ops.stack([0, bbox_h_start, bbox_w_start, 0])
      bbox_size = array_ops.stack([-1, bbox_h_size, bbox_w_size, -1])

    image = array_ops.slice(image, bbox_begin, bbox_size)

    # Reshape the `image` tensor to the desired size.
    if rank == 3:
      image.set_shape([
          None if dynamic_h else bbox_h_size,
          None if dynamic_w else bbox_w_size, img_d
      ])
    else:
      image.set_shape([
          img_bs, None if dynamic_h else bbox_h_size,
          None if dynamic_w else bbox_w_size, img_d
      ])
    return image


@tf_export('image.pad_to_bounding_box')
@dispatch.add_dispatch_support
def pad_to_bounding_box(image, offset_height, offset_width, target_height,
                        target_width):
  """Pad `image` with zeros to the specified `height` and `width`.

  Adds `offset_height` rows of zeros on top, `offset_width` columns of zeros
  on the left, and then pads the image on the bottom and right with zeros
  until it has dimensions `target_height`, `target_width`.

  This op does nothing if `offset_*` is zero and the image already has size
  `target_height` by `target_width`.

  Usage Example:

  >>> x = [[[1., 2., 3.],
  ...       [4., 5., 6.]],
  ...      [[7., 8., 9.],
  ...       [10., 11., 12.]]]
  >>> padded_image = tf.image.pad_to_bounding_box(x, 1, 1, 4, 4)
  >>> padded_image
  <tf.Tensor: shape=(4, 4, 3), dtype=float32, numpy=
  array([[[ 0.,  0.,  0.],
          [ 0.,  0.,  0.],
          [ 0.,  0.,  0.],
          [ 0.,  0.,  0.]],
         [[ 0.,  0.,  0.],
          [ 1.,  2.,  3.],
          [ 4.,  5.,  6.],
          [ 0.,  0.,  0.]],
         [[ 0.,  0.,  0.],
          [ 7.,  8.,  9.],
          [10., 11., 12.],
          [ 0.,  0.,  0.]],
         [[ 0.,  0.,  0.],
          [ 0.,  0.,  0.],
          [ 0.,  0.,  0.],
          [ 0.,  0.,  0.]]], dtype=float32)>

  Args:
    image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D
      Tensor of shape `[height, width, channels]`.
    offset_height: Number of rows of zeros to add on top.
    offset_width: Number of columns of zeros to add on the left.
    target_height: Height of output image.
    target_width: Width of output image.

  Returns:
    If `image` was 4-D, a 4-D float Tensor of shape
    `[batch, target_height, target_width, channels]`
    If `image` was 3-D, a 3-D float Tensor of shape
    `[target_height, target_width, channels]`

  Raises:
    ValueError: If the shape of `image` is incompatible with the `offset_*` or
      `target_*` arguments, or either `offset_height` or `offset_width` is
      negative.
  """
  with ops.name_scope(None, 'pad_to_bounding_box', [image]):
    image = ops.convert_to_tensor(image, name='image')

    is_batch = True
    image_shape = image.get_shape()
    if image_shape.ndims == 3:
      is_batch = False
      image = array_ops.expand_dims(image, 0)
    elif image_shape.ndims is None:
      is_batch = False
      image = array_ops.expand_dims(image, 0)
      image.set_shape([None] * 4)
    elif image_shape.ndims != 4:
      raise ValueError(
          '\'image\' (shape %s) must have either 3 or 4 dimensions.' %
          image_shape)

    assert_ops = _CheckAtLeast3DImage(image, require_static=False)
    batch, height, width, depth = _ImageDimensions(image, rank=4)

    after_padding_width = target_width - offset_width - width

    after_padding_height = target_height - offset_height - height

    assert_ops += _assert(offset_height >= 0, ValueError,
                          'offset_height must be >= 0')
    assert_ops += _assert(offset_width >= 0, ValueError,
                          'offset_width must be >= 0')
    assert_ops += _assert(after_padding_width >= 0, ValueError,
                          'width must be <= target - offset')
    assert_ops += _assert(after_padding_height >= 0, ValueError,
                          'height must be <= target - offset')
    image = control_flow_ops.with_dependencies(assert_ops, image)

    # Do not pad on the depth dimensions.
    paddings = array_ops.reshape(
        array_ops.stack([
            0, 0, offset_height, after_padding_height, offset_width,
            after_padding_width, 0, 0
        ]), [4, 2])
    padded = array_ops.pad(image, paddings)

    padded_shape = [
        None if _is_tensor(i) else i
        for i in [batch, target_height, target_width, depth]
    ]
    padded.set_shape(padded_shape)

    if not is_batch:
      padded = array_ops.squeeze(padded, axis=[0])

    return padded


@tf_export('image.crop_to_bounding_box')
@dispatch.add_dispatch_support
def crop_to_bounding_box(image, offset_height, offset_width, target_height,
                         target_width):
  """Crops an `image` to a specified bounding box.

  This op cuts a rectangular bounding box out of `image`. The top-left corner
  of the bounding box is at `offset_height, offset_width` in `image`, and the
  lower-right corner is at
  `offset_height + target_height, offset_width + target_width`.

  Example Usage:

  >>> image = tf.constant(np.arange(1, 28, dtype=np.float32), shape=[3, 3, 3])
  >>> image[:,:,0] # print the first channel of the 3-D tensor
  <tf.Tensor: shape=(3, 3), dtype=float32, numpy=
  array([[ 1.,  4.,  7.],
         [10., 13., 16.],
         [19., 22., 25.]], dtype=float32)>
  >>> cropped_image = tf.image.crop_to_bounding_box(image, 0, 0, 2, 2)
  >>> cropped_image[:,:,0] # print the first channel of the cropped 3-D tensor
  <tf.Tensor: shape=(2, 2), dtype=float32, numpy=
  array([[ 1.,  4.],
         [10., 13.]], dtype=float32)>

  Args:
    image: 4-D `Tensor` of shape `[batch, height, width, channels]` or 3-D
      `Tensor` of shape `[height, width, channels]`.
    offset_height: Vertical coordinate of the top-left corner of the bounding
      box in `image`.
    offset_width: Horizontal coordinate of the top-left corner of the bounding
      box in `image`.
    target_height: Height of the bounding box.
    target_width: Width of the bounding box.

  Returns:
    If `image` was 4-D, a 4-D `Tensor` of shape
    `[batch, target_height, target_width, channels]`.
    If `image` was 3-D, a 3-D `Tensor` of shape
    `[target_height, target_width, channels]`.
    It has the same dtype as `image`.

  Raises:
    ValueError: `image` is not a 3-D or 4-D `Tensor`.
    ValueError: `offset_width < 0` or `offset_height < 0`.
    ValueError: `target_height <= 0` or `target_width <= 0`.
    ValueError: `width < offset_width + target_width` or
      `height < offset_height + target_height`.
""" with ops.name_scope(None, 'crop_to_bounding_box', [image]): image = ops.convert_to_tensor(image, name='image') is_batch = True image_shape = image.get_shape() if image_shape.ndims == 3: is_batch = False image = array_ops.expand_dims(image, 0) elif image_shape.ndims is None: is_batch = False image = array_ops.expand_dims(image, 0) image.set_shape([None] * 4) elif image_shape.ndims != 4: raise ValueError( '\'image\' (shape %s) must have either 3 or 4 dimensions.' % image_shape) assert_ops = _CheckAtLeast3DImage(image, require_static=False) batch, height, width, depth = _ImageDimensions(image, rank=4) assert_ops += _assert(offset_width >= 0, ValueError, 'offset_width must be >= 0.') assert_ops += _assert(offset_height >= 0, ValueError, 'offset_height must be >= 0.') assert_ops += _assert(target_width > 0, ValueError, 'target_width must be > 0.') assert_ops += _assert(target_height > 0, ValueError, 'target_height must be > 0.') assert_ops += _assert(width >= (target_width + offset_width), ValueError, 'width must be >= target + offset.') assert_ops += _assert(height >= (target_height + offset_height), ValueError, 'height must be >= target + offset.') image = control_flow_ops.with_dependencies(assert_ops, image) cropped = array_ops.slice( image, array_ops.stack([0, offset_height, offset_width, 0]), array_ops.stack([array_ops.shape(image)[0], target_height, target_width, array_ops.shape(image)[3]])) cropped_shape = [ None if _is_tensor(i) else i for i in [batch, target_height, target_width, depth] ] cropped.set_shape(cropped_shape) if not is_batch: cropped = array_ops.squeeze(cropped, axis=[0]) return cropped @tf_export( 'image.resize_with_crop_or_pad', v1=['image.resize_with_crop_or_pad', 'image.resize_image_with_crop_or_pad']) @dispatch.add_dispatch_support def resize_image_with_crop_or_pad(image, target_height, target_width): """Crops and/or pads an image to a target width and height. Resizes an image to a target width and height by either centrally cropping the image or padding it evenly with zeros. If `width` or `height` is greater than the specified `target_width` or `target_height` respectively, this op centrally crops along that dimension. For example: >>> image = np.arange(75).reshape(5, 5, 3) # create 3-D image input >>> image[:,:,0] # print first channel just for demo purposes array([[ 0, 3, 6, 9, 12], [15, 18, 21, 24, 27], [30, 33, 36, 39, 42], [45, 48, 51, 54, 57], [60, 63, 66, 69, 72]]) >>> image = tf.image.resize_with_crop_or_pad(image, 3, 3) # crop >>> # print first channel for demo purposes; centrally cropped output >>> image[:,:,0] <tf.Tensor: shape=(3, 3), dtype=int64, numpy= array([[18, 21, 24], [33, 36, 39], [48, 51, 54]])> If `width` or `height` is smaller than the specified `target_width` or `target_height` respectively, this op centrally pads with 0 along that dimension. For example: >>> image = np.arange(1, 28).reshape(3, 3, 3) # create 3-D image input >>> image[:,:,0] # print first channel just for demo purposes array([[ 1, 4, 7], [10, 13, 16], [19, 22, 25]]) >>> image = tf.image.resize_with_crop_or_pad(image, 5, 5) # pad >>> # print first channel for demo purposes; we should see 0 paddings >>> image[:,:,0] <tf.Tensor: shape=(5, 5), dtype=int64, numpy= array([[ 0, 0, 0, 0, 0], [ 0, 1, 4, 7, 0], [ 0, 10, 13, 16, 0], [ 0, 19, 22, 25, 0], [ 0, 0, 0, 0, 0]])> Args: image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. target_height: Target height. target_width: Target width. 
Raises: ValueError: if `target_height` or `target_width` are zero or negative. Returns: Cropped and/or padded image. If `images` was 4-D, a 4-D float Tensor of shape `[batch, new_height, new_width, channels]`. If `images` was 3-D, a 3-D float Tensor of shape `[new_height, new_width, channels]`. """ with ops.name_scope(None, 'resize_image_with_crop_or_pad', [image]): image = ops.convert_to_tensor(image, name='image') image_shape = image.get_shape() is_batch = True if image_shape.ndims == 3: is_batch = False image = array_ops.expand_dims(image, 0) elif image_shape.ndims is None: is_batch = False image = array_ops.expand_dims(image, 0) image.set_shape([None] * 4) elif image_shape.ndims != 4: raise ValueError( '\'image\' (shape %s) must have either 3 or 4 dimensions.' % image_shape) assert_ops = _CheckAtLeast3DImage(image, require_static=False) assert_ops += _assert(target_width > 0, ValueError, 'target_width must be > 0.') assert_ops += _assert(target_height > 0, ValueError, 'target_height must be > 0.') image = control_flow_ops.with_dependencies(assert_ops, image) # `crop_to_bounding_box` and `pad_to_bounding_box` have their own checks. # Make sure our checks come first, so that error messages are clearer. if _is_tensor(target_height): target_height = control_flow_ops.with_dependencies( assert_ops, target_height) if _is_tensor(target_width): target_width = control_flow_ops.with_dependencies(assert_ops, target_width) def max_(x, y): if _is_tensor(x) or _is_tensor(y): return math_ops.maximum(x, y) else: return max(x, y) def min_(x, y): if _is_tensor(x) or _is_tensor(y): return math_ops.minimum(x, y) else: return min(x, y) def equal_(x, y): if _is_tensor(x) or _is_tensor(y): return math_ops.equal(x, y) else: return x == y _, height, width, _ = _ImageDimensions(image, rank=4) width_diff = target_width - width offset_crop_width = max_(-width_diff // 2, 0) offset_pad_width = max_(width_diff // 2, 0) height_diff = target_height - height offset_crop_height = max_(-height_diff // 2, 0) offset_pad_height = max_(height_diff // 2, 0) # Maybe crop if needed. cropped = crop_to_bounding_box(image, offset_crop_height, offset_crop_width, min_(target_height, height), min_(target_width, width)) # Maybe pad if needed. resized = pad_to_bounding_box(cropped, offset_pad_height, offset_pad_width, target_height, target_width) # In theory all the checks below are redundant. 
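    # Illustrative note (not part of the original implementation): for the 5x5
    # doctest input above resized to 3x3, width_diff = -2, so
    # offset_crop_width = 1 and offset_pad_width = 0; crop_to_bounding_box
    # keeps rows/columns 1..3 and pad_to_bounding_box is then a no-op, which
    # is why the shape checks below are expected never to fire.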
if resized.get_shape().ndims is None: raise ValueError('resized contains no shape.') _, resized_height, resized_width, _ = _ImageDimensions(resized, rank=4) assert_ops = [] assert_ops += _assert( equal_(resized_height, target_height), ValueError, 'resized height is not correct.') assert_ops += _assert( equal_(resized_width, target_width), ValueError, 'resized width is not correct.') resized = control_flow_ops.with_dependencies(assert_ops, resized) if not is_batch: resized = array_ops.squeeze(resized, axis=[0]) return resized @tf_export(v1=['image.ResizeMethod']) class ResizeMethodV1(object): """See `v1.image.resize` for details.""" BILINEAR = 0 NEAREST_NEIGHBOR = 1 BICUBIC = 2 AREA = 3 @tf_export('image.ResizeMethod', v1=[]) class ResizeMethod(object): """See `tf.image.resize` for details.""" BILINEAR = 'bilinear' NEAREST_NEIGHBOR = 'nearest' BICUBIC = 'bicubic' AREA = 'area' LANCZOS3 = 'lanczos3' LANCZOS5 = 'lanczos5' GAUSSIAN = 'gaussian' MITCHELLCUBIC = 'mitchellcubic' def _resize_images_common(images, resizer_fn, size, preserve_aspect_ratio, name, skip_resize_if_same): """Core functionality for v1 and v2 resize functions.""" with ops.name_scope(name, 'resize', [images, size]): images = ops.convert_to_tensor(images, name='images') if images.get_shape().ndims is None: raise ValueError('\'images\' contains no shape.') # TODO(shlens): Migrate this functionality to the underlying Op's. is_batch = True if images.get_shape().ndims == 3: is_batch = False images = array_ops.expand_dims(images, 0) elif images.get_shape().ndims != 4: raise ValueError('\'images\' must have either 3 or 4 dimensions.') _, height, width, _ = images.get_shape().as_list() try: size = ops.convert_to_tensor(size, dtypes.int32, name='size') except (TypeError, ValueError): raise ValueError('\'size\' must be a 1-D int32 Tensor') if not size.get_shape().is_compatible_with([2]): raise ValueError('\'size\' must be a 1-D Tensor of 2 elements: ' 'new_height, new_width') if preserve_aspect_ratio: # Get the current shapes of the image, even if dynamic. _, current_height, current_width, _ = _ImageDimensions(images, rank=4) # do the computation to find the right scale and height/width. scale_factor_height = ( math_ops.cast(size[0], dtypes.float32) / math_ops.cast(current_height, dtypes.float32)) scale_factor_width = ( math_ops.cast(size[1], dtypes.float32) / math_ops.cast(current_width, dtypes.float32)) scale_factor = math_ops.minimum(scale_factor_height, scale_factor_width) scaled_height_const = math_ops.cast( math_ops.round(scale_factor * math_ops.cast(current_height, dtypes.float32)), dtypes.int32) scaled_width_const = math_ops.cast( math_ops.round(scale_factor * math_ops.cast(current_width, dtypes.float32)), dtypes.int32) # NOTE: Reset the size and other constants used later. size = ops.convert_to_tensor([scaled_height_const, scaled_width_const], dtypes.int32, name='size') size_const_as_shape = tensor_util.constant_value_as_shape(size) new_height_const = tensor_shape.dimension_at_index(size_const_as_shape, 0).value new_width_const = tensor_shape.dimension_at_index(size_const_as_shape, 1).value # If we can determine that the height and width will be unmodified by this # transformation, we avoid performing the resize. 
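    # Illustrative note (not part of the original implementation): this
    # shortcut only applies on the v1 path, where resize_images passes
    # skip_resize_if_same=True. With a statically known 224x224 input and
    # size=[224, 224], every constant checked below is defined and equal, so
    # the input tensor is returned untouched and no resize kernel is added to
    # the graph; resize_images_v2 passes skip_resize_if_same=False and always
    # resizes.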
if skip_resize_if_same and all( x is not None for x in [new_width_const, width, new_height_const, height]) and ( width == new_width_const and height == new_height_const): if not is_batch: images = array_ops.squeeze(images, axis=[0]) return images images = resizer_fn(images, size) # NOTE(mrry): The shape functions for the resize ops cannot unpack # the packed values in `new_size`, so set the shape here. images.set_shape([None, new_height_const, new_width_const, None]) if not is_batch: images = array_ops.squeeze(images, axis=[0]) return images @tf_export(v1=['image.resize_images', 'image.resize']) @dispatch.add_dispatch_support def resize_images(images, size, method=ResizeMethodV1.BILINEAR, align_corners=False, preserve_aspect_ratio=False, name=None): """Resize `images` to `size` using the specified `method`. Resized images will be distorted if their original aspect ratio is not the same as `size`. To avoid distortions see `tf.image.resize_with_pad` or `tf.image.resize_with_crop_or_pad`. The `method` can be one of: * <b>`tf.image.ResizeMethod.BILINEAR`</b>: [Bilinear interpolation.]( https://en.wikipedia.org/wiki/Bilinear_interpolation) * <b>`tf.image.ResizeMethod.NEAREST_NEIGHBOR`</b>: [ Nearest neighbor interpolation.]( https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation) * <b>`tf.image.ResizeMethod.BICUBIC`</b>: [Bicubic interpolation.]( https://en.wikipedia.org/wiki/Bicubic_interpolation) * <b>`tf.image.ResizeMethod.AREA`</b>: Area interpolation. The return value has the same type as `images` if `method` is `tf.image.ResizeMethod.NEAREST_NEIGHBOR`. It will also have the same type as `images` if the size of `images` can be statically determined to be the same as `size`, because `images` is returned in this case. Otherwise, the return value has type `float32`. Args: images: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The new size for the images. method: ResizeMethod. Defaults to `tf.image.ResizeMethod.BILINEAR`. align_corners: bool. If True, the centers of the 4 corner pixels of the input and output tensors are aligned, preserving the values at the corner pixels. Defaults to `False`. preserve_aspect_ratio: Whether to preserve the aspect ratio. If this is set, then `images` will be resized to a size that fits in `size` while preserving the aspect ratio of the original image. Scales up the image if `size` is bigger than the current size of the `image`. Defaults to False. name: A name for this operation (optional). Raises: ValueError: if the shape of `images` is incompatible with the shape arguments to this function ValueError: if `size` has invalid shape or type. ValueError: if an unsupported resize method is specified. Returns: If `images` was 4-D, a 4-D float Tensor of shape `[batch, new_height, new_width, channels]`. If `images` was 3-D, a 3-D float Tensor of shape `[new_height, new_width, channels]`. 
""" def resize_fn(images_t, new_size): """Legacy resize core function, passed to _resize_images_common.""" if method == ResizeMethodV1.BILINEAR or method == ResizeMethod.BILINEAR: return gen_image_ops.resize_bilinear( images_t, new_size, align_corners=align_corners) elif (method == ResizeMethodV1.NEAREST_NEIGHBOR or method == ResizeMethod.NEAREST_NEIGHBOR): return gen_image_ops.resize_nearest_neighbor( images_t, new_size, align_corners=align_corners) elif method == ResizeMethodV1.BICUBIC or method == ResizeMethod.BICUBIC: return gen_image_ops.resize_bicubic( images_t, new_size, align_corners=align_corners) elif method == ResizeMethodV1.AREA or method == ResizeMethod.AREA: return gen_image_ops.resize_area( images_t, new_size, align_corners=align_corners) else: raise ValueError('Resize method is not implemented: {}'.format(method)) return _resize_images_common( images, resize_fn, size, preserve_aspect_ratio=preserve_aspect_ratio, name=name, skip_resize_if_same=True) @tf_export('image.resize', v1=[]) @dispatch.add_dispatch_support def resize_images_v2(images, size, method=ResizeMethod.BILINEAR, preserve_aspect_ratio=False, antialias=False, name=None): """Resize `images` to `size` using the specified `method`. Resized images will be distorted if their original aspect ratio is not the same as `size`. To avoid distortions see `tf.image.resize_with_pad`. >>> image = tf.constant([ ... [1,0,0,0,0], ... [0,1,0,0,0], ... [0,0,1,0,0], ... [0,0,0,1,0], ... [0,0,0,0,1], ... ]) >>> # Add "batch" and "channels" dimensions >>> image = image[tf.newaxis, ..., tf.newaxis] >>> image.shape.as_list() # [batch, height, width, channels] [1, 5, 5, 1] >>> tf.image.resize(image, [3,5])[0,...,0].numpy() array([[0.6666667, 0.3333333, 0. , 0. , 0. ], [0. , 0. , 1. , 0. , 0. ], [0. , 0. , 0. , 0.3333335, 0.6666665]], dtype=float32) It works equally well with a single image instead of a batch of images: >>> tf.image.resize(image[0], [3,5]).shape.as_list() [3, 5, 1] When `antialias` is true, the sampling filter will anti-alias the input image as well as interpolate. When downsampling an image with [anti-aliasing]( https://en.wikipedia.org/wiki/Spatial_anti-aliasing) the sampling filter kernel is scaled in order to properly anti-alias the input image signal. `antialias` has no effect when upsampling an image: >>> a = tf.image.resize(image, [5,10]) >>> b = tf.image.resize(image, [5,10], antialias=True) >>> tf.reduce_max(abs(a - b)).numpy() 0.0 The `method` argument expects an item from the `image.ResizeMethod` enum, or the string equivalent. The options are: * <b>`bilinear`</b>: [Bilinear interpolation.]( https://en.wikipedia.org/wiki/Bilinear_interpolation) If `antialias` is true, becomes a hat/tent filter function with radius 1 when downsampling. * <b>`lanczos3`</b>: [Lanczos kernel]( https://en.wikipedia.org/wiki/Lanczos_resampling) with radius 3. High-quality practical filter but may have some ringing, especially on synthetic images. * <b>`lanczos5`</b>: [Lanczos kernel] ( https://en.wikipedia.org/wiki/Lanczos_resampling) with radius 5. Very-high-quality filter but may have stronger ringing. * <b>`bicubic`</b>: [Cubic interpolant]( https://en.wikipedia.org/wiki/Bicubic_interpolation) of Keys. Equivalent to Catmull-Rom kernel. Reasonably good quality and faster than Lanczos3Kernel, particularly when upsampling. * <b>`gaussian`</b>: [Gaussian kernel]( https://en.wikipedia.org/wiki/Gaussian_filter) with radius 3, sigma = 1.5 / 3.0. 
* <b>`nearest`</b>: [Nearest neighbor interpolation.]( https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation) `antialias` has no effect when used with nearest neighbor interpolation. * <b>`area`</b>: Anti-aliased resampling with area interpolation. `antialias` has no effect when used with area interpolation; it always anti-aliases. * <b>`mitchellcubic`</b>: Mitchell-Netravali Cubic non-interpolating filter. For synthetic images (especially those lacking proper prefiltering), less ringing than Keys cubic kernel but less sharp. Note: Near image edges the filtering kernel may be partially outside the image boundaries. For these pixels, only input pixels inside the image will be included in the filter sum, and the output value will be appropriately normalized. The return value has type `float32`, unless the `method` is `ResizeMethod.NEAREST_NEIGHBOR`, then the return dtype is the dtype of `images`: >>> nn = tf.image.resize(image, [5,7], method='nearest') >>> nn[0,...,0].numpy() array([[1, 0, 0, 0, 0, 0, 0], [0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0], [0, 0, 0, 0, 0, 0, 1]], dtype=int32) With `preserve_aspect_ratio=True`, the aspect ratio is preserved, so `size` is the maximum for each dimension: >>> max_10_20 = tf.image.resize(image, [10,20], preserve_aspect_ratio=True) >>> max_10_20.shape.as_list() [1, 10, 10, 1] Args: images: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The new size for the images. method: An `image.ResizeMethod`, or string equivalent. Defaults to `bilinear`. preserve_aspect_ratio: Whether to preserve the aspect ratio. If this is set, then `images` will be resized to a size that fits in `size` while preserving the aspect ratio of the original image. Scales up the image if `size` is bigger than the current size of the `image`. Defaults to False. antialias: Whether to use an anti-aliasing filter when downsampling an image. name: A name for this operation (optional). Raises: ValueError: if the shape of `images` is incompatible with the shape arguments to this function ValueError: if `size` has an invalid shape or type. ValueError: if an unsupported resize method is specified. Returns: If `images` was 4-D, a 4-D float Tensor of shape `[batch, new_height, new_width, channels]`. If `images` was 3-D, a 3-D float Tensor of shape `[new_height, new_width, channels]`. 
""" def resize_fn(images_t, new_size): """Resize core function, passed to _resize_images_common.""" scale_and_translate_methods = [ ResizeMethod.LANCZOS3, ResizeMethod.LANCZOS5, ResizeMethod.GAUSSIAN, ResizeMethod.MITCHELLCUBIC ] def resize_with_scale_and_translate(method): scale = ( math_ops.cast(new_size, dtype=dtypes.float32) / math_ops.cast(array_ops.shape(images_t)[1:3], dtype=dtypes.float32)) return gen_image_ops.scale_and_translate( images_t, new_size, scale, array_ops.zeros([2]), kernel_type=method, antialias=antialias) if method == ResizeMethod.BILINEAR: if antialias: return resize_with_scale_and_translate('triangle') else: return gen_image_ops.resize_bilinear( images_t, new_size, half_pixel_centers=True) elif method == ResizeMethod.NEAREST_NEIGHBOR: return gen_image_ops.resize_nearest_neighbor( images_t, new_size, half_pixel_centers=True) elif method == ResizeMethod.BICUBIC: if antialias: return resize_with_scale_and_translate('keyscubic') else: return gen_image_ops.resize_bicubic( images_t, new_size, half_pixel_centers=True) elif method == ResizeMethod.AREA: return gen_image_ops.resize_area(images_t, new_size) elif method in scale_and_translate_methods: return resize_with_scale_and_translate(method) else: raise ValueError('Resize method is not implemented: {}'.format(method)) return _resize_images_common( images, resize_fn, size, preserve_aspect_ratio=preserve_aspect_ratio, name=name, skip_resize_if_same=False) def _resize_image_with_pad_common(image, target_height, target_width, resize_fn): """Core functionality for v1 and v2 resize_image_with_pad functions.""" with ops.name_scope(None, 'resize_image_with_pad', [image]): image = ops.convert_to_tensor(image, name='image') image_shape = image.get_shape() is_batch = True if image_shape.ndims == 3: is_batch = False image = array_ops.expand_dims(image, 0) elif image_shape.ndims is None: is_batch = False image = array_ops.expand_dims(image, 0) image.set_shape([None] * 4) elif image_shape.ndims != 4: raise ValueError( '\'image\' (shape %s) must have either 3 or 4 dimensions.' 
% image_shape) assert_ops = _CheckAtLeast3DImage(image, require_static=False) assert_ops += _assert(target_width > 0, ValueError, 'target_width must be > 0.') assert_ops += _assert(target_height > 0, ValueError, 'target_height must be > 0.') image = control_flow_ops.with_dependencies(assert_ops, image) def max_(x, y): if _is_tensor(x) or _is_tensor(y): return math_ops.maximum(x, y) else: return max(x, y) _, height, width, _ = _ImageDimensions(image, rank=4) # convert values to float, to ease divisions f_height = math_ops.cast(height, dtype=dtypes.float32) f_width = math_ops.cast(width, dtype=dtypes.float32) f_target_height = math_ops.cast(target_height, dtype=dtypes.float32) f_target_width = math_ops.cast(target_width, dtype=dtypes.float32) # Find the ratio by which the image must be adjusted # to fit within the target ratio = max_(f_width / f_target_width, f_height / f_target_height) resized_height_float = f_height / ratio resized_width_float = f_width / ratio resized_height = math_ops.cast( math_ops.floor(resized_height_float), dtype=dtypes.int32) resized_width = math_ops.cast( math_ops.floor(resized_width_float), dtype=dtypes.int32) padding_height = (f_target_height - resized_height_float) / 2 padding_width = (f_target_width - resized_width_float) / 2 f_padding_height = math_ops.floor(padding_height) f_padding_width = math_ops.floor(padding_width) p_height = max_(0, math_ops.cast(f_padding_height, dtype=dtypes.int32)) p_width = max_(0, math_ops.cast(f_padding_width, dtype=dtypes.int32)) # Resize first, then pad to meet requested dimensions resized = resize_fn(image, [resized_height, resized_width]) padded = pad_to_bounding_box(resized, p_height, p_width, target_height, target_width) if padded.get_shape().ndims is None: raise ValueError('padded contains no shape.') _ImageDimensions(padded, rank=4) if not is_batch: padded = array_ops.squeeze(padded, axis=[0]) return padded @tf_export(v1=['image.resize_image_with_pad']) @dispatch.add_dispatch_support def resize_image_with_pad_v1(image, target_height, target_width, method=ResizeMethodV1.BILINEAR, align_corners=False): """Resizes and pads an image to a target width and height. Resizes an image to a target width and height by keeping the aspect ratio the same without distortion. If the target dimensions don't match the image dimensions, the image is resized and then padded with zeroes to match requested dimensions. Args: image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. target_height: Target height. target_width: Target width. method: Method to use for resizing image. See `resize_images()` align_corners: bool. If True, the centers of the 4 corner pixels of the input and output tensors are aligned, preserving the values at the corner pixels. Defaults to `False`. Raises: ValueError: if `target_height` or `target_width` are zero or negative. Returns: Resized and padded image. If `images` was 4-D, a 4-D float Tensor of shape `[batch, new_height, new_width, channels]`. If `images` was 3-D, a 3-D float Tensor of shape `[new_height, new_width, channels]`. """ def _resize_fn(im, new_size): return resize_images(im, new_size, method, align_corners=align_corners) return _resize_image_with_pad_common(image, target_height, target_width, _resize_fn) @tf_export('image.resize_with_pad', v1=[]) @dispatch.add_dispatch_support def resize_image_with_pad_v2(image, target_height, target_width, method=ResizeMethod.BILINEAR, antialias=False): """Resizes and pads an image to a target width and height. 
Resizes an image to a target width and height by keeping the aspect ratio the same without distortion. If the target dimensions don't match the image dimensions, the image is resized and then padded with zeroes to match requested dimensions. Args: image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. target_height: Target height. target_width: Target width. method: Method to use for resizing image. See `image.resize()` antialias: Whether to use anti-aliasing when resizing. See 'image.resize()'. Raises: ValueError: if `target_height` or `target_width` are zero or negative. Returns: Resized and padded image. If `images` was 4-D, a 4-D float Tensor of shape `[batch, new_height, new_width, channels]`. If `images` was 3-D, a 3-D float Tensor of shape `[new_height, new_width, channels]`. """ def _resize_fn(im, new_size): return resize_images_v2(im, new_size, method, antialias=antialias) return _resize_image_with_pad_common(image, target_height, target_width, _resize_fn) @tf_export('image.per_image_standardization') @dispatch.add_dispatch_support def per_image_standardization(image): """Linearly scales each image in `image` to have mean 0 and variance 1. For each 3-D image `x` in `image`, computes `(x - mean) / adjusted_stddev`, where - `mean` is the average of all values in `x` - `adjusted_stddev = max(stddev, 1.0/sqrt(N))` is capped away from 0 to protect against division by 0 when handling uniform images - `N` is the number of elements in `x` - `stddev` is the standard deviation of all values in `x` Example Usage: >>> image = tf.constant(np.arange(1, 13, dtype=np.int32), shape=[2, 2, 3]) >>> image # 3-D tensor <tf.Tensor: shape=(2, 2, 3), dtype=int32, numpy= array([[[ 1, 2, 3], [ 4, 5, 6]], [[ 7, 8, 9], [10, 11, 12]]], dtype=int32)> >>> new_image = tf.image.per_image_standardization(image) >>> new_image # 3-D tensor with mean ~= 0 and variance ~= 1 <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy= array([[[-1.593255 , -1.3035723 , -1.0138896 ], [-0.7242068 , -0.4345241 , -0.14484136]], [[ 0.14484136, 0.4345241 , 0.7242068 ], [ 1.0138896 , 1.3035723 , 1.593255 ]]], dtype=float32)> Args: image: An n-D `Tensor` with at least 3 dimensions, the last 3 of which are the dimensions of each image. Returns: A `Tensor` with the same shape as `image` and its dtype is `float32`. Raises: ValueError: The shape of `image` has fewer than 3 dimensions. """ with ops.name_scope(None, 'per_image_standardization', [image]) as scope: image = ops.convert_to_tensor(image, name='image') image = _AssertAtLeast3DImage(image) image = math_ops.cast(image, dtype=dtypes.float32) num_pixels = math_ops.reduce_prod(array_ops.shape(image)[-3:]) image_mean = math_ops.reduce_mean(image, axis=[-1, -2, -3], keepdims=True) # Apply a minimum normalization that protects us against uniform images. stddev = math_ops.reduce_std(image, axis=[-1, -2, -3], keepdims=True) min_stddev = math_ops.rsqrt(math_ops.cast(num_pixels, dtypes.float32)) adjusted_stddev = math_ops.maximum(stddev, min_stddev) image -= image_mean image = math_ops.divide(image, adjusted_stddev, name=scope) return image @tf_export('image.random_brightness') @dispatch.add_dispatch_support def random_brightness(image, max_delta, seed=None): """Adjust the brightness of images by a random factor. Equivalent to `adjust_brightness()` using a `delta` randomly picked in the interval `[-max_delta, max_delta)`. For producing deterministic results given a `seed` value, use `tf.image.stateless_random_brightness`. 
Unlike using the `seed` param with `tf.image.random_*` ops, `tf.image.stateless_random_*` ops guarantee the same results given the same seed independent of how many times the function is called, and independent of global seed settings (e.g. tf.random.set_seed). Args: image: An image or images to adjust. max_delta: float, must be non-negative. seed: A Python integer. Used to create a random seed. See `tf.compat.v1.set_random_seed` for behavior. Usage Example: >>> x = [[[1.0, 2.0, 3.0], ... [4.0, 5.0, 6.0]], ... [[7.0, 8.0, 9.0], ... [10.0, 11.0, 12.0]]] >>> tf.image.random_brightness(x, 0.2) <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=...> Returns: The brightness-adjusted image(s). Raises: ValueError: if `max_delta` is negative. """ if max_delta < 0: raise ValueError('max_delta must be non-negative.') delta = random_ops.random_uniform([], -max_delta, max_delta, seed=seed) return adjust_brightness(image, delta) @tf_export('image.stateless_random_brightness', v1=[]) @dispatch.add_dispatch_support def stateless_random_brightness(image, max_delta, seed): """Adjust the brightness of images by a random factor deterministically. Equivalent to `adjust_brightness()` using a `delta` randomly picked in the interval `[-max_delta, max_delta)`. Guarantees the same results given the same `seed` independent of how many times the function is called, and independent of global seed settings (e.g. `tf.random.set_seed`). Usage Example: >>> x = [[[1.0, 2.0, 3.0], ... [4.0, 5.0, 6.0]], ... [[7.0, 8.0, 9.0], ... [10.0, 11.0, 12.0]]] >>> seed = (1, 2) >>> tf.image.stateless_random_brightness(x, 0.2, seed) <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy= array([[[ 1.1376241, 2.1376243, 3.1376243], [ 4.1376243, 5.1376243, 6.1376243]], [[ 7.1376243, 8.137624 , 9.137624 ], [10.137624 , 11.137624 , 12.137624 ]]], dtype=float32)> Args: image: An image or images to adjust. max_delta: float, must be non-negative. seed: A shape [2] Tensor, the seed to the random number generator. Must have dtype `int32` or `int64`. (When using XLA, only `int32` is allowed.) Returns: The brightness-adjusted image(s). Raises: ValueError: if `max_delta` is negative. """ if max_delta < 0: raise ValueError('max_delta must be non-negative.') delta = stateless_random_ops.stateless_random_uniform( shape=[], minval=-max_delta, maxval=max_delta, seed=seed) return adjust_brightness(image, delta) @tf_export('image.random_contrast') @dispatch.add_dispatch_support def random_contrast(image, lower, upper, seed=None): """Adjust the contrast of an image or images by a random factor. Equivalent to `adjust_contrast()` but uses a `contrast_factor` randomly picked in the interval `[lower, upper)`. For producing deterministic results given a `seed` value, use `tf.image.stateless_random_contrast`. Unlike using the `seed` param with `tf.image.random_*` ops, `tf.image.stateless_random_*` ops guarantee the same results given the same seed independent of how many times the function is called, and independent of global seed settings (e.g. tf.random.set_seed). Args: image: An image tensor with 3 or more dimensions. lower: float. Lower bound for the random contrast factor. upper: float. Upper bound for the random contrast factor. seed: A Python integer. Used to create a random seed. See `tf.compat.v1.set_random_seed` for behavior. Usage Example: >>> x = [[[1.0, 2.0, 3.0], ... [4.0, 5.0, 6.0]], ... [[7.0, 8.0, 9.0], ... 
[10.0, 11.0, 12.0]]] >>> tf.image.random_contrast(x, 0.2, 0.5) <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=...> Returns: The contrast-adjusted image(s). Raises: ValueError: if `upper <= lower` or if `lower < 0`. """ if upper <= lower: raise ValueError('upper must be > lower.') if lower < 0: raise ValueError('lower must be non-negative.') contrast_factor = random_ops.random_uniform([], lower, upper, seed=seed) return adjust_contrast(image, contrast_factor) @tf_export('image.stateless_random_contrast', v1=[]) @dispatch.add_dispatch_support def stateless_random_contrast(image, lower, upper, seed): """Adjust the contrast of images by a random factor deterministically. Guarantees the same results given the same `seed` independent of how many times the function is called, and independent of global seed settings (e.g. `tf.random.set_seed`). Args: image: An image tensor with 3 or more dimensions. lower: float. Lower bound for the random contrast factor. upper: float. Upper bound for the random contrast factor. seed: A shape [2] Tensor, the seed to the random number generator. Must have dtype `int32` or `int64`. (When using XLA, only `int32` is allowed.) Usage Example: >>> x = [[[1.0, 2.0, 3.0], ... [4.0, 5.0, 6.0]], ... [[7.0, 8.0, 9.0], ... [10.0, 11.0, 12.0]]] >>> seed = (1, 2) >>> tf.image.stateless_random_contrast(x, 0.2, 0.5, seed) <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy= array([[[3.4605184, 4.4605184, 5.4605184], [4.820173 , 5.820173 , 6.820173 ]], [[6.179827 , 7.179827 , 8.179828 ], [7.5394816, 8.539482 , 9.539482 ]]], dtype=float32)> Returns: The contrast-adjusted image(s). Raises: ValueError: if `upper <= lower` or if `lower < 0`. """ if upper <= lower: raise ValueError('upper must be > lower.') if lower < 0: raise ValueError('lower must be non-negative.') contrast_factor = stateless_random_ops.stateless_random_uniform( shape=[], minval=lower, maxval=upper, seed=seed) return adjust_contrast(image, contrast_factor) @tf_export('image.adjust_brightness') @dispatch.add_dispatch_support def adjust_brightness(image, delta): """Adjust the brightness of RGB or Grayscale images. This is a convenience method that converts RGB images to float representation, adjusts their brightness, and then converts them back to the original data type. If several adjustments are chained, it is advisable to minimize the number of redundant conversions. The value `delta` is added to all components of the tensor `image`. `image` is converted to `float` and scaled appropriately if it is in fixed-point representation, and `delta` is converted to the same data type. For regular images, `delta` should be in the range `(-1,1)`, as it is added to the image in floating point representation, where pixel values are in the `[0,1)` range. Usage Example: >>> x = [[[1.0, 2.0, 3.0], ... [4.0, 5.0, 6.0]], ... [[7.0, 8.0, 9.0], ... [10.0, 11.0, 12.0]]] >>> tf.image.adjust_brightness(x, delta=0.1) <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy= array([[[ 1.1, 2.1, 3.1], [ 4.1, 5.1, 6.1]], [[ 7.1, 8.1, 9.1], [10.1, 11.1, 12.1]]], dtype=float32)> Args: image: RGB image or images to adjust. delta: A scalar. Amount to add to the pixel values. Returns: A brightness-adjusted tensor of the same shape and type as `image`. 
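
  When `image` has an integer dtype, the adjustment still happens in floating
  point, so `delta` is always expressed on the `[0, 1)` scale rather than in
  raw pixel levels. A small illustrative sketch (the `img_uint8` tensor below
  is a made-up input, not part of this API):

  ```python
  import tensorflow as tf

  # A flat gray uint8 image; delta=0.1 shifts each pixel by roughly
  # 0.1 * 255, i.e. about 25 levels, because the op converts to float in
  # [0, 1), adds `delta`, and then saturates back to uint8.
  img_uint8 = tf.constant(100, dtype=tf.uint8, shape=[4, 4, 3])
  brightened = tf.image.adjust_brightness(img_uint8, delta=0.1)
  ```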
""" with ops.name_scope(None, 'adjust_brightness', [image, delta]) as name: image = ops.convert_to_tensor(image, name='image') # Remember original dtype to so we can convert back if needed orig_dtype = image.dtype if orig_dtype in [dtypes.float16, dtypes.float32]: flt_image = image else: flt_image = convert_image_dtype(image, dtypes.float32) adjusted = math_ops.add( flt_image, math_ops.cast(delta, flt_image.dtype), name=name) return convert_image_dtype(adjusted, orig_dtype, saturate=True) @tf_export('image.adjust_contrast') @dispatch.add_dispatch_support def adjust_contrast(images, contrast_factor): """Adjust contrast of RGB or grayscale images. This is a convenience method that converts RGB images to float representation, adjusts their contrast, and then converts them back to the original data type. If several adjustments are chained, it is advisable to minimize the number of redundant conversions. `images` is a tensor of at least 3 dimensions. The last 3 dimensions are interpreted as `[height, width, channels]`. The other dimensions only represent a collection of images, such as `[batch, height, width, channels].` Contrast is adjusted independently for each channel of each image. For each channel, this Op computes the mean of the image pixels in the channel and then adjusts each component `x` of each pixel to `(x - mean) * contrast_factor + mean`. Usage Example: >>> x = [[[1.0, 2.0, 3.0], ... [4.0, 5.0, 6.0]], ... [[7.0, 8.0, 9.0], ... [10.0, 11.0, 12.0]]] >>> tf.image.adjust_contrast(x, 2) <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy= array([[[-3.5, -2.5, -1.5], [ 2.5, 3.5, 4.5]], [[ 8.5, 9.5, 10.5], [14.5, 15.5, 16.5]]], dtype=float32)> Args: images: Images to adjust. At least 3-D. contrast_factor: A float multiplier for adjusting contrast. Returns: The contrast-adjusted image or images. """ with ops.name_scope(None, 'adjust_contrast', [images, contrast_factor]) as name: images = ops.convert_to_tensor(images, name='images') # Remember original dtype to so we can convert back if needed orig_dtype = images.dtype if orig_dtype in (dtypes.float16, dtypes.float32): flt_images = images else: flt_images = convert_image_dtype(images, dtypes.float32) adjusted = gen_image_ops.adjust_contrastv2( flt_images, contrast_factor=contrast_factor, name=name) return convert_image_dtype(adjusted, orig_dtype, saturate=True) @tf_export('image.adjust_gamma') @dispatch.add_dispatch_support def adjust_gamma(image, gamma=1, gain=1): """Performs [Gamma Correction](http://en.wikipedia.org/wiki/Gamma_correction). on the input image. Also known as Power Law Transform. This function converts the input images at first to float representation, then transforms them pixelwise according to the equation `Out = gain * In**gamma`, and then converts the back to the original data type. Usage Example: >>> x = [[[1.0, 2.0, 3.0], ... [4.0, 5.0, 6.0]], ... [[7.0, 8.0, 9.0], ... [10.0, 11.0, 12.0]]] >>> tf.image.adjust_gamma(x, 0.2) <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy= array([[[1. , 1.1486983, 1.2457309], [1.319508 , 1.3797297, 1.4309691]], [[1.4757731, 1.5157166, 1.5518456], [1.5848932, 1.6153942, 1.6437519]]], dtype=float32)> Args: image : RGB image or images to adjust. gamma : A scalar or tensor. Non-negative real number. gain : A scalar or tensor. The constant multiplier. Returns: A Tensor. A Gamma-adjusted tensor of the same shape and type as `image`. Raises: ValueError: If gamma is negative. 
Notes: For gamma greater than 1, the histogram will shift towards left and the output image will be darker than the input image. For gamma less than 1, the histogram will shift towards right and the output image will be brighter than the input image. References: [Wikipedia](http://en.wikipedia.org/wiki/Gamma_correction) """ with ops.name_scope(None, 'adjust_gamma', [image, gamma, gain]) as name: image = ops.convert_to_tensor(image, name='image') # Remember original dtype to so we can convert back if needed orig_dtype = image.dtype if orig_dtype in [dtypes.float16, dtypes.float32]: flt_image = image else: flt_image = convert_image_dtype(image, dtypes.float32) assert_op = _assert(gamma >= 0, ValueError, 'Gamma should be a non-negative real number.') if assert_op: gamma = control_flow_ops.with_dependencies(assert_op, gamma) # According to the definition of gamma correction. adjusted_img = gain * flt_image**gamma return convert_image_dtype(adjusted_img, orig_dtype, saturate=True) @tf_export('image.convert_image_dtype') @dispatch.add_dispatch_support def convert_image_dtype(image, dtype, saturate=False, name=None): """Convert `image` to `dtype`, scaling its values if needed. The operation supports data types (for `image` and `dtype`) of `uint8`, `uint16`, `uint32`, `uint64`, `int8`, `int16`, `int32`, `int64`, `float16`, `float32`, `float64`, `bfloat16`. Images that are represented using floating point values are expected to have values in the range [0,1). Image data stored in integer data types are expected to have values in the range `[0,MAX]`, where `MAX` is the largest positive representable number for the data type. This op converts between data types, scaling the values appropriately before casting. Usage Example: >>> x = [[[1, 2, 3], [4, 5, 6]], ... [[7, 8, 9], [10, 11, 12]]] >>> x_int8 = tf.convert_to_tensor(x, dtype=tf.int8) >>> tf.image.convert_image_dtype(x_int8, dtype=tf.float16, saturate=False) <tf.Tensor: shape=(2, 2, 3), dtype=float16, numpy= array([[[0.00787, 0.01575, 0.02362], [0.0315 , 0.03937, 0.04724]], [[0.0551 , 0.063 , 0.07086], [0.07874, 0.0866 , 0.0945 ]]], dtype=float16)> Converting integer types to floating point types returns normalized floating point values in the range [0, 1); the values are normalized by the `MAX` value of the input dtype. Consider the following two examples: >>> a = [[[1], [2]], [[3], [4]]] >>> a_int8 = tf.convert_to_tensor(a, dtype=tf.int8) >>> tf.image.convert_image_dtype(a_int8, dtype=tf.float32) <tf.Tensor: shape=(2, 2, 1), dtype=float32, numpy= array([[[0.00787402], [0.01574803]], [[0.02362205], [0.03149606]]], dtype=float32)> >>> a_int32 = tf.convert_to_tensor(a, dtype=tf.int32) >>> tf.image.convert_image_dtype(a_int32, dtype=tf.float32) <tf.Tensor: shape=(2, 2, 1), dtype=float32, numpy= array([[[4.6566129e-10], [9.3132257e-10]], [[1.3969839e-09], [1.8626451e-09]]], dtype=float32)> Despite having identical values of `a` and output dtype of `float32`, the outputs differ due to the different input dtypes (`int8` vs. `int32`). This is, again, because the values are normalized by the `MAX` value of the input dtype. Note that converting floating point values to integer type may lose precision. In the example below, an image tensor `b` of dtype `float32` is converted to `int8` and back to `float32`. The final output, however, is different from the original input `b` due to precision loss. 
>>> b = [[[0.12], [0.34]], [[0.56], [0.78]]] >>> b_float32 = tf.convert_to_tensor(b, dtype=tf.float32) >>> b_int8 = tf.image.convert_image_dtype(b_float32, dtype=tf.int8) >>> tf.image.convert_image_dtype(b_int8, dtype=tf.float32) <tf.Tensor: shape=(2, 2, 1), dtype=float32, numpy= array([[[0.11811024], [0.33858266]], [[0.5590551 ], [0.77952754]]], dtype=float32)> Scaling up from an integer type (input dtype) to another integer type (output dtype) will not map input dtype's `MAX` to output dtype's `MAX` but converting back and forth should result in no change. For example, as shown below, the `MAX` value of int8 (=127) is not mapped to the `MAX` value of int16 (=32,767) but, when scaled back, we get the same, original values of `c`. >>> c = [[[1], [2]], [[127], [127]]] >>> c_int8 = tf.convert_to_tensor(c, dtype=tf.int8) >>> c_int16 = tf.image.convert_image_dtype(c_int8, dtype=tf.int16) >>> print(c_int16) tf.Tensor( [[[ 256] [ 512]] [[32512] [32512]]], shape=(2, 2, 1), dtype=int16) >>> c_int8_back = tf.image.convert_image_dtype(c_int16, dtype=tf.int8) >>> print(c_int8_back) tf.Tensor( [[[ 1] [ 2]] [[127] [127]]], shape=(2, 2, 1), dtype=int8) Scaling down from an integer type to another integer type can be a lossy conversion. Notice in the example below that converting `int16` to `uint8` and back to `int16` has lost precision. >>> d = [[[1000], [2000]], [[3000], [4000]]] >>> d_int16 = tf.convert_to_tensor(d, dtype=tf.int16) >>> d_uint8 = tf.image.convert_image_dtype(d_int16, dtype=tf.uint8) >>> d_int16_back = tf.image.convert_image_dtype(d_uint8, dtype=tf.int16) >>> print(d_int16_back) tf.Tensor( [[[ 896] [1920]] [[2944] [3968]]], shape=(2, 2, 1), dtype=int16) Note that converting from floating point inputs to integer types may lead to over/underflow problems. Set saturate to `True` to avoid such problem in problematic conversions. If enabled, saturation will clip the output into the allowed range before performing a potentially dangerous cast (and only before performing such a cast, i.e., when casting from a floating point to an integer type, and when casting from a signed to an unsigned type; `saturate` has no effect on casts between floats, or on casts that increase the type's range). Args: image: An image. dtype: A `DType` to convert `image` to. saturate: If `True`, clip the input before casting (if necessary). name: A name for this operation (optional). Returns: `image`, converted to `dtype`. Raises: AttributeError: Raises an attribute error when dtype is neither float nor integer """ image = ops.convert_to_tensor(image, name='image') dtype = dtypes.as_dtype(dtype) if not dtype.is_floating and not dtype.is_integer: raise AttributeError('dtype must be either floating point or integer') if dtype == image.dtype: return array_ops.identity(image, name=name) with ops.name_scope(name, 'convert_image', [image]) as name: # Both integer: use integer multiplication in the larger range if image.dtype.is_integer and dtype.is_integer: scale_in = image.dtype.max scale_out = dtype.max if scale_in > scale_out: # Scaling down, scale first, then cast. The scaling factor will # cause in.max to be mapped to above out.max but below out.max+1, # so that the output is safely in the supported range. scale = (scale_in + 1) // (scale_out + 1) scaled = math_ops.floordiv(image, scale) if saturate: return math_ops.saturate_cast(scaled, dtype, name=name) else: return math_ops.cast(scaled, dtype, name=name) else: # Scaling up, cast first, then scale. 
The scale will not map in.max to
        # out.max, but converting back and forth should result in no change.
        if saturate:
          cast = math_ops.saturate_cast(image, dtype)
        else:
          cast = math_ops.cast(image, dtype)
        scale = (scale_out + 1) // (scale_in + 1)
        return math_ops.multiply(cast, scale, name=name)
    elif image.dtype.is_floating and dtype.is_floating:
      # Both float: Just cast, no possible overflows in the allowed ranges.
      # Note: We're ignoring float overflows. If your image dynamic range
      # exceeds float range, you're on your own.
      return math_ops.cast(image, dtype, name=name)
    else:
      if image.dtype.is_integer:
        # Converting to float: first cast, then scale. No saturation possible.
        cast = math_ops.cast(image, dtype)
        scale = 1. / image.dtype.max
        return math_ops.multiply(cast, scale, name=name)
      else:
        # Converting from float: first scale, then cast.
        scale = dtype.max + 0.5  # avoid rounding problems in the cast
        scaled = math_ops.multiply(image, scale)
        if saturate:
          return math_ops.saturate_cast(scaled, dtype, name=name)
        else:
          return math_ops.cast(scaled, dtype, name=name)


@tf_export('image.rgb_to_grayscale')
@dispatch.add_dispatch_support
def rgb_to_grayscale(images, name=None):
  """Converts one or more images from RGB to Grayscale.

  Outputs a tensor of the same `DType` and rank as `images`. The size of the
  last dimension of the output is 1, containing the Grayscale value of the
  pixels.

  >>> original = tf.constant([[[1.0, 2.0, 3.0]]])
  >>> converted = tf.image.rgb_to_grayscale(original)
  >>> print(converted.numpy())
  [[[1.81...]]]

  Args:
    images: The RGB tensor to convert. The last dimension must have size 3 and
      should contain RGB values.
    name: A name for the operation (optional).

  Returns:
    The converted grayscale image(s).
  """
  with ops.name_scope(name, 'rgb_to_grayscale', [images]) as name:
    images = ops.convert_to_tensor(images, name='images')
    # Remember the original dtype so we can convert back if needed.
    orig_dtype = images.dtype
    flt_image = convert_image_dtype(images, dtypes.float32)

    # Reference for converting between RGB and grayscale.
    # https://en.wikipedia.org/wiki/Luma_%28video%29
    rgb_weights = [0.2989, 0.5870, 0.1140]
    gray_float = math_ops.tensordot(flt_image, rgb_weights, [-1, -1])
    gray_float = array_ops.expand_dims(gray_float, -1)
    return convert_image_dtype(gray_float, orig_dtype, name=name)


@tf_export('image.grayscale_to_rgb')
@dispatch.add_dispatch_support
def grayscale_to_rgb(images, name=None):
  """Converts one or more images from Grayscale to RGB.

  Outputs a tensor of the same `DType` and rank as `images`. The size of the
  last dimension of the output is 3, containing the RGB value of the pixels.
  The input images' last dimension must be size 1.

  >>> original = tf.constant([[[1.0], [2.0], [3.0]]])
  >>> converted = tf.image.grayscale_to_rgb(original)
  >>> print(converted.numpy())
  [[[1. 1. 1.]
   [2. 2. 2.]
   [3. 3. 3.]]]

  Args:
    images: The Grayscale tensor to convert. The last dimension must be size 1.
    name: A name for the operation (optional).

  Returns:
    The converted RGB image(s).
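
  The conversion simply repeats the single grayscale channel three times. For
  a rank-3 input this is equivalent to a plain tile (a rough sketch, shown
  only for illustration):

  ```python
  import tensorflow as tf

  gray = tf.random.uniform([8, 8, 1])
  # Repeating the channel dimension three times gives the same values as
  # tf.image.grayscale_to_rgb(gray) for this rank-3 input.
  rgb = tf.tile(gray, multiples=[1, 1, 3])
  ```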
""" with ops.name_scope(name, 'grayscale_to_rgb', [images]) as name: images = _AssertGrayscaleImage(images) images = ops.convert_to_tensor(images, name='images') rank_1 = array_ops.expand_dims(array_ops.rank(images) - 1, 0) shape_list = ([array_ops.ones(rank_1, dtype=dtypes.int32)] + [array_ops.expand_dims(3, 0)]) multiples = array_ops.concat(shape_list, 0) rgb = array_ops.tile(images, multiples, name=name) rgb.set_shape(images.get_shape()[:-1].concatenate([3])) return rgb # pylint: disable=invalid-name @tf_export('image.random_hue') @dispatch.add_dispatch_support def random_hue(image, max_delta, seed=None): """Adjust the hue of RGB images by a random factor. Equivalent to `adjust_hue()` but uses a `delta` randomly picked in the interval `[-max_delta, max_delta)`. `max_delta` must be in the interval `[0, 0.5]`. Usage Example: >>> x = [[[1.0, 2.0, 3.0], ... [4.0, 5.0, 6.0]], ... [[7.0, 8.0, 9.0], ... [10.0, 11.0, 12.0]]] >>> tf.image.random_hue(x, 0.2) <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=...> For producing deterministic results given a `seed` value, use `tf.image.stateless_random_hue`. Unlike using the `seed` param with `tf.image.random_*` ops, `tf.image.stateless_random_*` ops guarantee the same results given the same seed independent of how many times the function is called, and independent of global seed settings (e.g. tf.random.set_seed). Args: image: RGB image or images. The size of the last dimension must be 3. max_delta: float. The maximum value for the random delta. seed: An operation-specific seed. It will be used in conjunction with the graph-level seed to determine the real seeds that will be used in this operation. Please see the documentation of set_random_seed for its interaction with the graph-level random seed. Returns: Adjusted image(s), same shape and DType as `image`. Raises: ValueError: if `max_delta` is invalid. """ if max_delta > 0.5: raise ValueError('max_delta must be <= 0.5.') if max_delta < 0: raise ValueError('max_delta must be non-negative.') delta = random_ops.random_uniform([], -max_delta, max_delta, seed=seed) return adjust_hue(image, delta) @tf_export('image.stateless_random_hue', v1=[]) @dispatch.add_dispatch_support def stateless_random_hue(image, max_delta, seed): """Adjust the hue of RGB images by a random factor deterministically. Equivalent to `adjust_hue()` but uses a `delta` randomly picked in the interval `[-max_delta, max_delta)`. Guarantees the same results given the same `seed` independent of how many times the function is called, and independent of global seed settings (e.g. `tf.random.set_seed`). `max_delta` must be in the interval `[0, 0.5]`. Usage Example: >>> x = [[[1.0, 2.0, 3.0], ... [4.0, 5.0, 6.0]], ... [[7.0, 8.0, 9.0], ... [10.0, 11.0, 12.0]]] >>> seed = (1, 2) >>> tf.image.stateless_random_hue(x, 0.2, seed) <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy= array([[[ 1.6514902, 1. , 3. ], [ 4.65149 , 4. , 6. ]], [[ 7.65149 , 7. , 9. ], [10.65149 , 10. , 12. ]]], dtype=float32)> Args: image: RGB image or images. The size of the last dimension must be 3. max_delta: float. The maximum value for the random delta. seed: A shape [2] Tensor, the seed to the random number generator. Must have dtype `int32` or `int64`. (When using XLA, only `int32` is allowed.) Returns: Adjusted image(s), same shape and DType as `image`. Raises: ValueError: if `max_delta` is invalid. 
""" if max_delta > 0.5: raise ValueError('max_delta must be <= 0.5.') if max_delta < 0: raise ValueError('max_delta must be non-negative.') delta = stateless_random_ops.stateless_random_uniform( shape=[], minval=-max_delta, maxval=max_delta, seed=seed) return adjust_hue(image, delta) @tf_export('image.adjust_hue') @dispatch.add_dispatch_support def adjust_hue(image, delta, name=None): """Adjust hue of RGB images. This is a convenience method that converts an RGB image to float representation, converts it to HSV, adds an offset to the hue channel, converts back to RGB and then back to the original data type. If several adjustments are chained it is advisable to minimize the number of redundant conversions. `image` is an RGB image. The image hue is adjusted by converting the image(s) to HSV and rotating the hue channel (H) by `delta`. The image is then converted back to RGB. `delta` must be in the interval `[-1, 1]`. Usage Example: >>> x = [[[1.0, 2.0, 3.0], ... [4.0, 5.0, 6.0]], ... [[7.0, 8.0, 9.0], ... [10.0, 11.0, 12.0]]] >>> tf.image.adjust_hue(x, 0.2) <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy= array([[[ 2.3999996, 1. , 3. ], [ 5.3999996, 4. , 6. ]], [[ 8.4 , 7. , 9. ], [11.4 , 10. , 12. ]]], dtype=float32)> Args: image: RGB image or images. The size of the last dimension must be 3. delta: float. How much to add to the hue channel. name: A name for this operation (optional). Returns: Adjusted image(s), same shape and DType as `image`. Usage Example: >>> image = [[[1, 2, 3], [4, 5, 6]], ... [[7, 8, 9], [10, 11, 12]], ... [[13, 14, 15], [16, 17, 18]]] >>> image = tf.constant(image) >>> tf.image.adjust_hue(image, 0.2) <tf.Tensor: shape=(3, 2, 3), dtype=int32, numpy= array([[[ 2, 1, 3], [ 5, 4, 6]], [[ 8, 7, 9], [11, 10, 12]], [[14, 13, 15], [17, 16, 18]]], dtype=int32)> """ with ops.name_scope(name, 'adjust_hue', [image]) as name: image = ops.convert_to_tensor(image, name='image') # Remember original dtype to so we can convert back if needed orig_dtype = image.dtype if orig_dtype in (dtypes.float16, dtypes.float32): flt_image = image else: flt_image = convert_image_dtype(image, dtypes.float32) rgb_altered = gen_image_ops.adjust_hue(flt_image, delta) return convert_image_dtype(rgb_altered, orig_dtype) # pylint: disable=invalid-name @tf_export('image.random_jpeg_quality') @dispatch.add_dispatch_support def random_jpeg_quality(image, min_jpeg_quality, max_jpeg_quality, seed=None): """Randomly changes jpeg encoding quality for inducing jpeg noise. `min_jpeg_quality` must be in the interval `[0, 100]` and less than `max_jpeg_quality`. `max_jpeg_quality` must be in the interval `[0, 100]`. Usage Example: >>> x = [[[1.0, 2.0, 3.0], ... [4.0, 5.0, 6.0]], ... [[7.0, 8.0, 9.0], ... [10.0, 11.0, 12.0]]] >>> tf.image.random_jpeg_quality(x, 75, 95) <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=...> For producing deterministic results given a `seed` value, use `tf.image.stateless_random_jpeg_quality`. Unlike using the `seed` param with `tf.image.random_*` ops, `tf.image.stateless_random_*` ops guarantee the same results given the same seed independent of how many times the function is called, and independent of global seed settings (e.g. tf.random.set_seed). Args: image: 3D image. Size of the last dimension must be 1 or 3. min_jpeg_quality: Minimum jpeg encoding quality to use. max_jpeg_quality: Maximum jpeg encoding quality to use. seed: An operation-specific seed. It will be used in conjunction with the graph-level seed to determine the real seeds that will be used in this operation. 
Please see the documentation of set_random_seed for its interaction with the graph-level random seed. Returns: Adjusted image(s), same shape and DType as `image`. Raises: ValueError: if `min_jpeg_quality` or `max_jpeg_quality` is invalid. """ if (min_jpeg_quality < 0 or max_jpeg_quality < 0 or min_jpeg_quality > 100 or max_jpeg_quality > 100): raise ValueError('jpeg encoding range must be between 0 and 100.') if min_jpeg_quality >= max_jpeg_quality: raise ValueError('`min_jpeg_quality` must be less than `max_jpeg_quality`.') jpeg_quality = random_ops.random_uniform([], min_jpeg_quality, max_jpeg_quality, seed=seed, dtype=dtypes.int32) return adjust_jpeg_quality(image, jpeg_quality) @tf_export('image.stateless_random_jpeg_quality', v1=[]) @dispatch.add_dispatch_support def stateless_random_jpeg_quality(image, min_jpeg_quality, max_jpeg_quality, seed): """Deterministically radomize jpeg encoding quality for inducing jpeg noise. Guarantees the same results given the same `seed` independent of how many times the function is called, and independent of global seed settings (e.g. `tf.random.set_seed`). `min_jpeg_quality` must be in the interval `[0, 100]` and less than `max_jpeg_quality`. `max_jpeg_quality` must be in the interval `[0, 100]`. Usage Example: >>> x = [[[1, 2, 3], ... [4, 5, 6]], ... [[7, 8, 9], ... [10, 11, 12]]] >>> x_uint8 = tf.cast(x, tf.uint8) >>> seed = (1, 2) >>> tf.image.stateless_random_jpeg_quality(x_uint8, 75, 95, seed) <tf.Tensor: shape=(2, 2, 3), dtype=uint8, numpy= array([[[ 0, 4, 5], [ 1, 5, 6]], [[ 5, 9, 10], [ 5, 9, 10]]], dtype=uint8)> Args: image: 3D image. Size of the last dimension must be 1 or 3. min_jpeg_quality: Minimum jpeg encoding quality to use. max_jpeg_quality: Maximum jpeg encoding quality to use. seed: A shape [2] Tensor, the seed to the random number generator. Must have dtype `int32` or `int64`. (When using XLA, only `int32` is allowed.) Returns: Adjusted image(s), same shape and DType as `image`. Raises: ValueError: if `min_jpeg_quality` or `max_jpeg_quality` is invalid. """ if (min_jpeg_quality < 0 or max_jpeg_quality < 0 or min_jpeg_quality > 100 or max_jpeg_quality > 100): raise ValueError('jpeg encoding range must be between 0 and 100.') if min_jpeg_quality >= max_jpeg_quality: raise ValueError('`min_jpeg_quality` must be less than `max_jpeg_quality`.') jpeg_quality = stateless_random_ops.stateless_random_uniform( shape=[], minval=min_jpeg_quality, maxval=max_jpeg_quality, seed=seed, dtype=dtypes.int32) return adjust_jpeg_quality(image, jpeg_quality) @tf_export('image.adjust_jpeg_quality') @dispatch.add_dispatch_support def adjust_jpeg_quality(image, jpeg_quality, name=None): """Adjust jpeg encoding quality of an image. This is a convenience method that converts an image to uint8 representation, encodes it to jpeg with `jpeg_quality`, decodes it, and then converts back to the original data type. `jpeg_quality` must be in the interval `[0, 100]`. Usage Example: >>> x = [[[1.0, 2.0, 3.0], ... [4.0, 5.0, 6.0]], ... [[7.0, 8.0, 9.0], ... [10.0, 11.0, 12.0]]] >>> tf.image.adjust_jpeg_quality(x, 75) <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy= array([[[1., 1., 1.], [1., 1., 1.]], [[1., 1., 1.], [1., 1., 1.]]], dtype=float32)> Args: image: 3D image. The size of the last dimension must be None, 1 or 3. jpeg_quality: Python int or Tensor of type int32. jpeg encoding quality. name: A name for this operation (optional). Returns: Adjusted image, same shape and DType as `image`. 
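
  Because `jpeg_quality` may be an `int32` tensor, the quality can be chosen
  at run time, which is how `tf.image.random_jpeg_quality` uses this op. A
  minimal sketch (the all-zero input image is a placeholder for illustration):

  ```python
  import tensorflow as tf

  img = tf.zeros([32, 32, 3], dtype=tf.uint8)
  quality = tf.constant(80, dtype=tf.int32)  # decided at run time
  degraded = tf.image.adjust_jpeg_quality(img, quality)
  ```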
Raises: InvalidArgumentError: quality must be in [0,100] InvalidArgumentError: image must have 1 or 3 channels """ with ops.name_scope(name, 'adjust_jpeg_quality', [image]): image = ops.convert_to_tensor(image, name='image') channels = image.shape.as_list()[-1] # Remember original dtype to so we can convert back if needed orig_dtype = image.dtype image = convert_image_dtype(image, dtypes.uint8, saturate=True) if not _is_tensor(jpeg_quality): # If jpeg_quality is a int (not tensor). jpeg_quality = ops.convert_to_tensor(jpeg_quality, dtype=dtypes.int32) image = gen_image_ops.encode_jpeg_variable_quality(image, jpeg_quality) image = gen_image_ops.decode_jpeg(image, channels=channels) return convert_image_dtype(image, orig_dtype, saturate=True) @tf_export('image.random_saturation') @dispatch.add_dispatch_support def random_saturation(image, lower, upper, seed=None): """Adjust the saturation of RGB images by a random factor. Equivalent to `adjust_saturation()` but uses a `saturation_factor` randomly picked in the interval `[lower, upper)`. Usage Example: >>> x = [[[1.0, 2.0, 3.0], ... [4.0, 5.0, 6.0]], ... [[7.0, 8.0, 9.0], ... [10.0, 11.0, 12.0]]] >>> tf.image.random_saturation(x, 5, 10) <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy= array([[[ 0. , 1.5, 3. ], [ 0. , 3. , 6. ]], [[ 0. , 4.5, 9. ], [ 0. , 6. , 12. ]]], dtype=float32)> For producing deterministic results given a `seed` value, use `tf.image.stateless_random_saturation`. Unlike using the `seed` param with `tf.image.random_*` ops, `tf.image.stateless_random_*` ops guarantee the same results given the same seed independent of how many times the function is called, and independent of global seed settings (e.g. tf.random.set_seed). Args: image: RGB image or images. The size of the last dimension must be 3. lower: float. Lower bound for the random saturation factor. upper: float. Upper bound for the random saturation factor. seed: An operation-specific seed. It will be used in conjunction with the graph-level seed to determine the real seeds that will be used in this operation. Please see the documentation of set_random_seed for its interaction with the graph-level random seed. Returns: Adjusted image(s), same shape and DType as `image`. Raises: ValueError: if `upper <= lower` or if `lower < 0`. """ if upper <= lower: raise ValueError('upper must be > lower.') if lower < 0: raise ValueError('lower must be non-negative.') saturation_factor = random_ops.random_uniform([], lower, upper, seed=seed) return adjust_saturation(image, saturation_factor) @tf_export('image.stateless_random_saturation', v1=[]) @dispatch.add_dispatch_support def stateless_random_saturation(image, lower, upper, seed=None): """Adjust the saturation of RGB images by a random factor deterministically. Equivalent to `adjust_saturation()` but uses a `saturation_factor` randomly picked in the interval `[lower, upper)`. Guarantees the same results given the same `seed` independent of how many times the function is called, and independent of global seed settings (e.g. `tf.random.set_seed`). Usage Example: >>> x = [[[1.0, 2.0, 3.0], ... [4.0, 5.0, 6.0]], ... [[7.0, 8.0, 9.0], ... [10.0, 11.0, 12.0]]] >>> seed = (1, 2) >>> tf.image.stateless_random_saturation(x, 0.5, 1.0, seed) <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy= array([[[ 1.1559395, 2.0779698, 3. ], [ 4.1559396, 5.07797 , 6. ]], [[ 7.1559396, 8.07797 , 9. ], [10.155939 , 11.07797 , 12. ]]], dtype=float32)> Args: image: RGB image or images. The size of the last dimension must be 3. lower: float. 
Lower bound for the random saturation factor. upper: float. Upper bound for the random saturation factor. seed: A shape [2] Tensor, the seed to the random number generator. Must have dtype `int32` or `int64`. (When using XLA, only `int32` is allowed.) Returns: Adjusted image(s), same shape and DType as `image`. Raises: ValueError: if `upper <= lower` or if `lower < 0`. """ if upper <= lower: raise ValueError('upper must be > lower.') if lower < 0: raise ValueError('lower must be non-negative.') saturation_factor = stateless_random_ops.stateless_random_uniform( shape=[], minval=lower, maxval=upper, seed=seed) return adjust_saturation(image, saturation_factor) @tf_export('image.adjust_saturation') @dispatch.add_dispatch_support def adjust_saturation(image, saturation_factor, name=None): """Adjust saturation of RGB images. This is a convenience method that converts RGB images to float representation, converts them to HSV, adds an offset to the saturation channel, converts back to RGB and then back to the original data type. If several adjustments are chained it is advisable to minimize the number of redundant conversions. `image` is an RGB image or images. The image saturation is adjusted by converting the images to HSV and multiplying the saturation (S) channel by `saturation_factor` and clipping. The images are then converted back to RGB. Usage Example: >>> x = [[[1.0, 2.0, 3.0], ... [4.0, 5.0, 6.0]], ... [[7.0, 8.0, 9.0], ... [10.0, 11.0, 12.0]]] >>> tf.image.adjust_saturation(x, 0.5) <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy= array([[[ 2. , 2.5, 3. ], [ 5. , 5.5, 6. ]], [[ 8. , 8.5, 9. ], [11. , 11.5, 12. ]]], dtype=float32)> Args: image: RGB image or images. The size of the last dimension must be 3. saturation_factor: float. Factor to multiply the saturation by. name: A name for this operation (optional). Returns: Adjusted image(s), same shape and DType as `image`. Raises: InvalidArgumentError: input must have 3 channels """ with ops.name_scope(name, 'adjust_saturation', [image]) as name: image = ops.convert_to_tensor(image, name='image') # Remember original dtype to so we can convert back if needed orig_dtype = image.dtype if orig_dtype in (dtypes.float16, dtypes.float32): flt_image = image else: flt_image = convert_image_dtype(image, dtypes.float32) adjusted = gen_image_ops.adjust_saturation(flt_image, saturation_factor) return convert_image_dtype(adjusted, orig_dtype) @tf_export('io.is_jpeg', 'image.is_jpeg', v1=['io.is_jpeg', 'image.is_jpeg']) def is_jpeg(contents, name=None): r"""Convenience function to check if the 'contents' encodes a JPEG image. Args: contents: 0-D `string`. The encoded image bytes. name: A name for the operation (optional) Returns: A scalar boolean tensor indicating if 'contents' may be a JPEG image. is_jpeg is susceptible to false positives. """ # Normal JPEGs start with \xff\xd8\xff\xe0 # JPEG with EXIF starts with \xff\xd8\xff\xe1 # Use \xff\xd8\xff to cover both. with ops.name_scope(name, 'is_jpeg'): substr = string_ops.substr(contents, 0, 3) return math_ops.equal(substr, b'\xff\xd8\xff', name=name) def _is_png(contents, name=None): r"""Convenience function to check if the 'contents' encodes a PNG image. Args: contents: 0-D `string`. The encoded image bytes. name: A name for the operation (optional) Returns: A scalar boolean tensor indicating if 'contents' may be a PNG image. is_png is susceptible to false positives. 
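
  The check only looks at the first three bytes of the PNG signature
  (`\211`, `P`, `N`). A quick way to inspect those header bytes (a purely
  illustrative snippet, not used by this module):

  ```python
  import tensorflow as tf

  png_bytes = tf.io.encode_png(tf.zeros([4, 4, 3], dtype=tf.uint8))
  # Prints b'\x89PN' -- the prefix this check compares against.
  print(tf.strings.substr(png_bytes, 0, 3).numpy())
  ```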
""" with ops.name_scope(name, 'is_png'): substr = string_ops.substr(contents, 0, 3) return math_ops.equal(substr, b'\211PN', name=name) tf_export( 'io.decode_and_crop_jpeg', 'image.decode_and_crop_jpeg', v1=['io.decode_and_crop_jpeg', 'image.decode_and_crop_jpeg'])( dispatch.add_dispatch_support(gen_image_ops.decode_and_crop_jpeg)) tf_export( 'io.decode_bmp', 'image.decode_bmp', v1=['io.decode_bmp', 'image.decode_bmp'])( dispatch.add_dispatch_support(gen_image_ops.decode_bmp)) tf_export( 'io.decode_gif', 'image.decode_gif', v1=['io.decode_gif', 'image.decode_gif'])( dispatch.add_dispatch_support(gen_image_ops.decode_gif)) tf_export( 'io.decode_jpeg', 'image.decode_jpeg', v1=['io.decode_jpeg', 'image.decode_jpeg'])( dispatch.add_dispatch_support(gen_image_ops.decode_jpeg)) tf_export( 'io.decode_png', 'image.decode_png', v1=['io.decode_png', 'image.decode_png'])( dispatch.add_dispatch_support(gen_image_ops.decode_png)) tf_export( 'io.encode_jpeg', 'image.encode_jpeg', v1=['io.encode_jpeg', 'image.encode_jpeg'])( dispatch.add_dispatch_support(gen_image_ops.encode_jpeg)) tf_export( 'io.extract_jpeg_shape', 'image.extract_jpeg_shape', v1=['io.extract_jpeg_shape', 'image.extract_jpeg_shape'])( dispatch.add_dispatch_support(gen_image_ops.extract_jpeg_shape)) @tf_export('io.encode_png', 'image.encode_png') @dispatch.add_dispatch_support def encode_png(image, compression=-1, name=None): r"""PNG-encode an image. `image` is a 3-D uint8 or uint16 Tensor of shape `[height, width, channels]` where `channels` is: * 1: for grayscale. * 2: for grayscale + alpha. * 3: for RGB. * 4: for RGBA. The ZLIB compression level, `compression`, can be -1 for the PNG-encoder default or a value from 0 to 9. 9 is the highest compression level, generating the smallest output, but is slower. Args: image: A `Tensor`. Must be one of the following types: `uint8`, `uint16`. 3-D with shape `[height, width, channels]`. compression: An optional `int`. Defaults to `-1`. Compression level. name: A name for the operation (optional). Returns: A `Tensor` of type `string`. """ return gen_image_ops.encode_png( ops.convert_to_tensor(image), compression, name) @tf_export( 'io.decode_image', 'image.decode_image', v1=['io.decode_image', 'image.decode_image']) @dispatch.add_dispatch_support def decode_image(contents, channels=None, dtype=dtypes.uint8, name=None, expand_animations=True): """Function for `decode_bmp`, `decode_gif`, `decode_jpeg`, and `decode_png`. Detects whether an image is a BMP, GIF, JPEG, or PNG, and performs the appropriate operation to convert the input bytes `string` into a `Tensor` of type `dtype`. Note: `decode_gif` returns a 4-D array `[num_frames, height, width, 3]`, as opposed to `decode_bmp`, `decode_jpeg` and `decode_png`, which return 3-D arrays `[height, width, num_channels]`. Make sure to take this into account when constructing your graph if you are intermixing GIF files with BMP, JPEG, and/or PNG files. Alternately, set the `expand_animations` argument of this function to `False`, in which case the op will return 3-dimensional tensors and will truncate animated GIF files to the first frame. NOTE: If the first frame of an animated GIF does not occupy the entire canvas (maximum frame width x maximum frame height), then it fills the unoccupied areas (in the first frame) with zeros (black). For frames after the first frame that does not occupy the entire canvas, it uses the previous frame to fill the unoccupied areas. Args: contents: A `Tensor` of type `string`. 0-D. The encoded image bytes. 
channels: An optional `int`. Defaults to `0`. Number of color channels for the decoded image. dtype: The desired DType of the returned `Tensor`. name: A name for the operation (optional) expand_animations: An optional `bool`. Defaults to `True`. Controls the shape of the returned op's output. If `True`, the returned op will produce a 3-D tensor for PNG, JPEG, and BMP files; and a 4-D tensor for all GIFs, whether animated or not. If, `False`, the returned op will produce a 3-D tensor for all file types and will truncate animated GIFs to the first frame. Returns: `Tensor` with type `dtype` and a 3- or 4-dimensional shape, depending on the file type and the value of the `expand_animations` parameter. Raises: ValueError: On incorrect number of channels. """ with ops.name_scope(name, 'decode_image'): channels = 0 if channels is None else channels if dtype not in [dtypes.float32, dtypes.uint8, dtypes.uint16]: dest_dtype = dtype dtype = dtypes.uint16 return convert_image_dtype( gen_image_ops.decode_image( contents=contents, channels=channels, expand_animations=expand_animations, dtype=dtype), dest_dtype) else: return gen_image_ops.decode_image( contents=contents, channels=channels, expand_animations=expand_animations, dtype=dtype) @tf_export('image.total_variation') @dispatch.add_dispatch_support def total_variation(images, name=None): """Calculate and return the total variation for one or more images. The total variation is the sum of the absolute differences for neighboring pixel-values in the input images. This measures how much noise is in the images. This can be used as a loss-function during optimization so as to suppress noise in images. If you have a batch of images, then you should calculate the scalar loss-value as the sum: `loss = tf.reduce_sum(tf.image.total_variation(images))` This implements the anisotropic 2-D version of the formula described here: https://en.wikipedia.org/wiki/Total_variation_denoising Args: images: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. name: A name for the operation (optional). Raises: ValueError: if images.shape is not a 3-D or 4-D vector. Returns: The total variation of `images`. If `images` was 4-D, return a 1-D float Tensor of shape `[batch]` with the total variation for each image in the batch. If `images` was 3-D, return a scalar float with the total variation for that image. """ with ops.name_scope(name, 'total_variation'): ndims = images.get_shape().ndims if ndims == 3: # The input is a single image with shape [height, width, channels]. # Calculate the difference of neighboring pixel-values. # The images are shifted one pixel along the height and width by slicing. pixel_dif1 = images[1:, :, :] - images[:-1, :, :] pixel_dif2 = images[:, 1:, :] - images[:, :-1, :] # Sum for all axis. (None is an alias for all axis.) sum_axis = None elif ndims == 4: # The input is a batch of images with shape: # [batch, height, width, channels]. # Calculate the difference of neighboring pixel-values. # The images are shifted one pixel along the height and width by slicing. pixel_dif1 = images[:, 1:, :, :] - images[:, :-1, :, :] pixel_dif2 = images[:, :, 1:, :] - images[:, :, :-1, :] # Only sum for the last 3 axis. # This results in a 1-D tensor with the total variation for each image. sum_axis = [1, 2, 3] else: raise ValueError('\'images\' must be either 3 or 4-dimensional.') # Calculate the total variation by taking the absolute value of the # pixel-differences and summing over the appropriate axis. 
tot_var = ( math_ops.reduce_sum(math_ops.abs(pixel_dif1), axis=sum_axis) + math_ops.reduce_sum(math_ops.abs(pixel_dif2), axis=sum_axis)) return tot_var @tf_export('image.sample_distorted_bounding_box', v1=[]) @dispatch.add_dispatch_support def sample_distorted_bounding_box_v2(image_size, bounding_boxes, seed=0, min_object_covered=0.1, aspect_ratio_range=None, area_range=None, max_attempts=None, use_image_if_no_bounding_boxes=None, name=None): """Generate a single randomly distorted bounding box for an image. Bounding box annotations are often supplied in addition to ground-truth labels in image recognition or object localization tasks. A common technique for training such a system is to randomly distort an image while preserving its content, i.e. *data augmentation*. This Op outputs a randomly distorted localization of an object, i.e. bounding box, given an `image_size`, `bounding_boxes` and a series of constraints. The output of this Op is a single bounding box that may be used to crop the original image. The output is returned as 3 tensors: `begin`, `size` and `bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the image. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize what the bounding box looks like. Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and the height of the underlying image. For example, ```python # Generate a single distorted bounding box. begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box( tf.shape(image), bounding_boxes=bounding_boxes, min_object_covered=0.1) # Draw the bounding box in an image summary. image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0), bbox_for_draw) tf.compat.v1.summary.image('images_with_box', image_with_box) # Employ the bounding box to distort the image. distorted_image = tf.slice(image, begin, size) ``` Note that if no bounding box information is available, setting `use_image_if_no_bounding_boxes = true` will assume there is a single implicit bounding box covering the whole image. If `use_image_if_no_bounding_boxes` is false and no bounding boxes are supplied, an error is raised. For producing deterministic results given a `seed` value, use `tf.image.stateless_sample_distorted_bounding_box`. Unlike using the `seed` param with `tf.image.random_*` ops, `tf.image.stateless_random_*` ops guarantee the same results given the same seed independent of how many times the function is called, and independent of global seed settings (e.g. tf.random.set_seed). Args: image_size: A `Tensor`. Must be one of the following types: `uint8`, `int8`, `int16`, `int32`, `int64`. 1-D, containing `[height, width, channels]`. bounding_boxes: A `Tensor` of type `float32`. 3-D with shape `[batch, N, 4]` describing the N bounding boxes associated with the image. seed: An optional `int`. Defaults to `0`. If `seed` is set to non-zero, the random number generator is seeded by the given `seed`. Otherwise, it is seeded by a random seed. min_object_covered: A Tensor of type `float32`. Defaults to `0.1`. The cropped area of the image must contain at least this fraction of any bounding box supplied. The value of this parameter should be non-negative. In the case of 0, the cropped area does not need to overlap any of the bounding boxes supplied. aspect_ratio_range: An optional list of `floats`. Defaults to `[0.75, 1.33]`. 
The cropped area of the image must have an aspect `ratio = width / height` within this range. area_range: An optional list of `floats`. Defaults to `[0.05, 1]`. The cropped area of the image must contain a fraction of the supplied image within this range. max_attempts: An optional `int`. Defaults to `100`. Number of attempts at generating a cropped region of the image of the specified constraints. After `max_attempts` failures, return the entire image. use_image_if_no_bounding_boxes: An optional `bool`. Defaults to `False`. Controls behavior if no bounding boxes supplied. If true, assume an implicit bounding box covering the whole input. If false, raise an error. name: A name for the operation (optional). Returns: A tuple of `Tensor` objects (begin, size, bboxes). begin: A `Tensor`. Has the same type as `image_size`. 1-D, containing `[offset_height, offset_width, 0]`. Provide as input to `tf.slice`. size: A `Tensor`. Has the same type as `image_size`. 1-D, containing `[target_height, target_width, -1]`. Provide as input to `tf.slice`. bboxes: A `Tensor` of type `float32`. 3-D with shape `[1, 1, 4]` containing the distorted bounding box. Provide as input to `tf.image.draw_bounding_boxes`. """ seed1, seed2 = random_seed.get_seed(seed) if seed else (0, 0) with ops.name_scope(name, 'sample_distorted_bounding_box'): return gen_image_ops.sample_distorted_bounding_box_v2( image_size, bounding_boxes, seed=seed1, seed2=seed2, min_object_covered=min_object_covered, aspect_ratio_range=aspect_ratio_range, area_range=area_range, max_attempts=max_attempts, use_image_if_no_bounding_boxes=use_image_if_no_bounding_boxes, name=name) @tf_export('image.stateless_sample_distorted_bounding_box', v1=[]) @dispatch.add_dispatch_support def stateless_sample_distorted_bounding_box(image_size, bounding_boxes, seed, min_object_covered=0.1, aspect_ratio_range=None, area_range=None, max_attempts=None, use_image_if_no_bounding_boxes=None, name=None): """Generate a randomly distorted bounding box for an image deterministically. Bounding box annotations are often supplied in addition to ground-truth labels in image recognition or object localization tasks. A common technique for training such a system is to randomly distort an image while preserving its content, i.e. *data augmentation*. This Op, given the same `seed`, deterministically outputs a randomly distorted localization of an object, i.e. bounding box, given an `image_size`, `bounding_boxes` and a series of constraints. The output of this Op is a single bounding box that may be used to crop the original image. The output is returned as 3 tensors: `begin`, `size` and `bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the image. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize what the bounding box looks like. Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and the height of the underlying image. The output of this Op is guaranteed to be the same given the same `seed` and is independent of how many times the function is called, and independent of global seed settings (e.g. `tf.random.set_seed`). Example usage: >>> image = np.array([[[1], [2], [3]], [[4], [5], [6]], [[7], [8], [9]]]) >>> bbox = tf.constant( ... [0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4]) >>> seed = (1, 2) >>> # Generate a single distorted bounding box. >>> bbox_begin, bbox_size, bbox_draw = ( ... 
tf.image.stateless_sample_distorted_bounding_box( ... tf.shape(image), bounding_boxes=bbox, seed=seed)) >>> # Employ the bounding box to distort the image. >>> tf.slice(image, bbox_begin, bbox_size) <tf.Tensor: shape=(2, 2, 1), dtype=int64, numpy= array([[[1], [2]], [[4], [5]]])> >>> # Draw the bounding box in an image summary. >>> colors = np.array([[1.0, 0.0, 0.0], [0.0, 0.0, 1.0]]) >>> tf.image.draw_bounding_boxes( ... tf.expand_dims(tf.cast(image, tf.float32),0), bbox_draw, colors) <tf.Tensor: shape=(1, 3, 3, 1), dtype=float32, numpy= array([[[[1.], [1.], [3.]], [[1.], [1.], [6.]], [[7.], [8.], [9.]]]], dtype=float32)> Note that if no bounding box information is available, setting `use_image_if_no_bounding_boxes = true` will assume there is a single implicit bounding box covering the whole image. If `use_image_if_no_bounding_boxes` is false and no bounding boxes are supplied, an error is raised. Args: image_size: A `Tensor`. Must be one of the following types: `uint8`, `int8`, `int16`, `int32`, `int64`. 1-D, containing `[height, width, channels]`. bounding_boxes: A `Tensor` of type `float32`. 3-D with shape `[batch, N, 4]` describing the N bounding boxes associated with the image. seed: A shape [2] Tensor, the seed to the random number generator. Must have dtype `int32` or `int64`. (When using XLA, only `int32` is allowed.) min_object_covered: A Tensor of type `float32`. Defaults to `0.1`. The cropped area of the image must contain at least this fraction of any bounding box supplied. The value of this parameter should be non-negative. In the case of 0, the cropped area does not need to overlap any of the bounding boxes supplied. aspect_ratio_range: An optional list of `floats`. Defaults to `[0.75, 1.33]`. The cropped area of the image must have an aspect `ratio = width / height` within this range. area_range: An optional list of `floats`. Defaults to `[0.05, 1]`. The cropped area of the image must contain a fraction of the supplied image within this range. max_attempts: An optional `int`. Defaults to `100`. Number of attempts at generating a cropped region of the image of the specified constraints. After `max_attempts` failures, return the entire image. use_image_if_no_bounding_boxes: An optional `bool`. Defaults to `False`. Controls behavior if no bounding boxes supplied. If true, assume an implicit bounding box covering the whole input. If false, raise an error. name: A name for the operation (optional). Returns: A tuple of `Tensor` objects (begin, size, bboxes). begin: A `Tensor`. Has the same type as `image_size`. 1-D, containing `[offset_height, offset_width, 0]`. Provide as input to `tf.slice`. size: A `Tensor`. Has the same type as `image_size`. 1-D, containing `[target_height, target_width, -1]`. Provide as input to `tf.slice`. bboxes: A `Tensor` of type `float32`. 3-D with shape `[1, 1, 4]` containing the distorted bounding box. Provide as input to `tf.image.draw_bounding_boxes`. """ with ops.name_scope(name, 'stateless_sample_distorted_bounding_box'): return gen_image_ops.stateless_sample_distorted_bounding_box( image_size=image_size, bounding_boxes=bounding_boxes, seed=seed, min_object_covered=min_object_covered, aspect_ratio_range=aspect_ratio_range, area_range=area_range, max_attempts=max_attempts, use_image_if_no_bounding_boxes=use_image_if_no_bounding_boxes, name=name) @tf_export(v1=['image.sample_distorted_bounding_box']) @dispatch.add_dispatch_support @deprecation.deprecated( date=None, instructions='`seed2` arg is deprecated.' 
'Use sample_distorted_bounding_box_v2 instead.') def sample_distorted_bounding_box(image_size, bounding_boxes, seed=None, seed2=None, min_object_covered=0.1, aspect_ratio_range=None, area_range=None, max_attempts=None, use_image_if_no_bounding_boxes=None, name=None): """Generate a single randomly distorted bounding box for an image. Bounding box annotations are often supplied in addition to ground-truth labels in image recognition or object localization tasks. A common technique for training such a system is to randomly distort an image while preserving its content, i.e. *data augmentation*. This Op outputs a randomly distorted localization of an object, i.e. bounding box, given an `image_size`, `bounding_boxes` and a series of constraints. The output of this Op is a single bounding box that may be used to crop the original image. The output is returned as 3 tensors: `begin`, `size` and `bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the image. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize what the bounding box looks like. Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and height of the underlying image. For example, ```python # Generate a single distorted bounding box. begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box( tf.shape(image), bounding_boxes=bounding_boxes, min_object_covered=0.1) # Draw the bounding box in an image summary. image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0), bbox_for_draw) tf.compat.v1.summary.image('images_with_box', image_with_box) # Employ the bounding box to distort the image. distorted_image = tf.slice(image, begin, size) ``` Note that if no bounding box information is available, setting `use_image_if_no_bounding_boxes = True` will assume there is a single implicit bounding box covering the whole image. If `use_image_if_no_bounding_boxes` is false and no bounding boxes are supplied, an error is raised. Args: image_size: A `Tensor`. Must be one of the following types: `uint8`, `int8`, `int16`, `int32`, `int64`. 1-D, containing `[height, width, channels]`. bounding_boxes: A `Tensor` of type `float32`. 3-D with shape `[batch, N, 4]` describing the N bounding boxes associated with the image. seed: An optional `int`. Defaults to `0`. If either `seed` or `seed2` are set to non-zero, the random number generator is seeded by the given `seed`. Otherwise, it is seeded by a random seed. seed2: An optional `int`. Defaults to `0`. A second seed to avoid seed collision. min_object_covered: A Tensor of type `float32`. Defaults to `0.1`. The cropped area of the image must contain at least this fraction of any bounding box supplied. The value of this parameter should be non-negative. In the case of 0, the cropped area does not need to overlap any of the bounding boxes supplied. aspect_ratio_range: An optional list of `floats`. Defaults to `[0.75, 1.33]`. The cropped area of the image must have an aspect ratio = width / height within this range. area_range: An optional list of `floats`. Defaults to `[0.05, 1]`. The cropped area of the image must contain a fraction of the supplied image within this range. max_attempts: An optional `int`. Defaults to `100`. Number of attempts at generating a cropped region of the image of the specified constraints. After `max_attempts` failures, return the entire image. use_image_if_no_bounding_boxes: An optional `bool`. Defaults to `False`. 
Controls behavior if no bounding boxes supplied. If true, assume an implicit bounding box covering the whole input. If false, raise an error. name: A name for the operation (optional). Returns: A tuple of `Tensor` objects (begin, size, bboxes). begin: A `Tensor`. Has the same type as `image_size`. 1-D, containing `[offset_height, offset_width, 0]`. Provide as input to `tf.slice`. size: A `Tensor`. Has the same type as `image_size`. 1-D, containing `[target_height, target_width, -1]`. Provide as input to `tf.slice`. bboxes: A `Tensor` of type `float32`. 3-D with shape `[1, 1, 4]` containing the distorted bounding box. Provide as input to `tf.image.draw_bounding_boxes`. """ with ops.name_scope(name, 'sample_distorted_bounding_box'): return gen_image_ops.sample_distorted_bounding_box_v2( image_size, bounding_boxes, seed=seed, seed2=seed2, min_object_covered=min_object_covered, aspect_ratio_range=aspect_ratio_range, area_range=area_range, max_attempts=max_attempts, use_image_if_no_bounding_boxes=use_image_if_no_bounding_boxes, name=name) @tf_export('image.non_max_suppression') @dispatch.add_dispatch_support def non_max_suppression(boxes, scores, max_output_size, iou_threshold=0.5, score_threshold=float('-inf'), name=None): """Greedily selects a subset of bounding boxes in descending order of score. Prunes away boxes that have high intersection-over-union (IOU) overlap with previously selected boxes. Bounding boxes are supplied as `[y1, x1, y2, x2]`, where `(y1, x1)` and `(y2, x2)` are the coordinates of any diagonal pair of box corners and the coordinates can be provided as normalized (i.e., lying in the interval `[0, 1]`) or absolute. Note that this algorithm is agnostic to where the origin is in the coordinate system. Note that this algorithm is invariant to orthogonal transformations and translations of the coordinate system; thus translating or reflections of the coordinate system result in the same boxes being selected by the algorithm. The output of this operation is a set of integers indexing into the input collection of bounding boxes representing the selected boxes. The bounding box coordinates corresponding to the selected indices can then be obtained using the `tf.gather` operation. For example: ```python selected_indices = tf.image.non_max_suppression( boxes, scores, max_output_size, iou_threshold) selected_boxes = tf.gather(boxes, selected_indices) ``` Args: boxes: A 2-D float `Tensor` of shape `[num_boxes, 4]`. scores: A 1-D float `Tensor` of shape `[num_boxes]` representing a single score corresponding to each box (each row of boxes). max_output_size: A scalar integer `Tensor` representing the maximum number of boxes to be selected by non-max suppression. iou_threshold: A 0-D float tensor representing the threshold for deciding whether boxes overlap too much with respect to IOU. score_threshold: A 0-D float tensor representing the threshold for deciding when to remove boxes based on score. name: A name for the operation (optional). Returns: selected_indices: A 1-D integer `Tensor` of shape `[M]` representing the selected indices from the boxes tensor, where `M <= max_output_size`. 
""" with ops.name_scope(name, 'non_max_suppression'): iou_threshold = ops.convert_to_tensor(iou_threshold, name='iou_threshold') score_threshold = ops.convert_to_tensor( score_threshold, name='score_threshold') return gen_image_ops.non_max_suppression_v3(boxes, scores, max_output_size, iou_threshold, score_threshold) @tf_export('image.non_max_suppression_with_scores') @dispatch.add_dispatch_support def non_max_suppression_with_scores(boxes, scores, max_output_size, iou_threshold=0.5, score_threshold=float('-inf'), soft_nms_sigma=0.0, name=None): """Greedily selects a subset of bounding boxes in descending order of score. Prunes away boxes that have high intersection-over-union (IOU) overlap with previously selected boxes. Bounding boxes are supplied as `[y1, x1, y2, x2]`, where `(y1, x1)` and `(y2, x2)` are the coordinates of any diagonal pair of box corners and the coordinates can be provided as normalized (i.e., lying in the interval `[0, 1]`) or absolute. Note that this algorithm is agnostic to where the origin is in the coordinate system. Note that this algorithm is invariant to orthogonal transformations and translations of the coordinate system; thus translating or reflections of the coordinate system result in the same boxes being selected by the algorithm. The output of this operation is a set of integers indexing into the input collection of bounding boxes representing the selected boxes. The bounding box coordinates corresponding to the selected indices can then be obtained using the `tf.gather` operation. For example: ```python selected_indices, selected_scores = tf.image.non_max_suppression_padded( boxes, scores, max_output_size, iou_threshold=1.0, score_threshold=0.1, soft_nms_sigma=0.5) selected_boxes = tf.gather(boxes, selected_indices) ``` This function generalizes the `tf.image.non_max_suppression` op by also supporting a Soft-NMS (with Gaussian weighting) mode (c.f. Bodla et al, https://arxiv.org/abs/1704.04503) where boxes reduce the score of other overlapping boxes instead of directly causing them to be pruned. Consequently, in contrast to `tf.image.non_max_suppression`, `tf.image.non_max_suppression_padded` returns the new scores of each input box in the second output, `selected_scores`. To enable this Soft-NMS mode, set the `soft_nms_sigma` parameter to be larger than 0. When `soft_nms_sigma` equals 0, the behavior of `tf.image.non_max_suppression_padded` is identical to that of `tf.image.non_max_suppression` (except for the extra output) both in function and in running time. Args: boxes: A 2-D float `Tensor` of shape `[num_boxes, 4]`. scores: A 1-D float `Tensor` of shape `[num_boxes]` representing a single score corresponding to each box (each row of boxes). max_output_size: A scalar integer `Tensor` representing the maximum number of boxes to be selected by non-max suppression. iou_threshold: A 0-D float tensor representing the threshold for deciding whether boxes overlap too much with respect to IOU. score_threshold: A 0-D float tensor representing the threshold for deciding when to remove boxes based on score. soft_nms_sigma: A 0-D float tensor representing the sigma parameter for Soft NMS; see Bodla et al (c.f. https://arxiv.org/abs/1704.04503). When `soft_nms_sigma=0.0` (which is default), we fall back to standard (hard) NMS. name: A name for the operation (optional). Returns: selected_indices: A 1-D integer `Tensor` of shape `[M]` representing the selected indices from the boxes tensor, where `M <= max_output_size`. 
selected_scores: A 1-D float tensor of shape `[M]` representing the corresponding scores for each selected box, where `M <= max_output_size`. Scores only differ from corresponding input scores when using Soft NMS (i.e. when `soft_nms_sigma>0`) """ with ops.name_scope(name, 'non_max_suppression_with_scores'): iou_threshold = ops.convert_to_tensor(iou_threshold, name='iou_threshold') score_threshold = ops.convert_to_tensor( score_threshold, name='score_threshold') soft_nms_sigma = ops.convert_to_tensor( soft_nms_sigma, name='soft_nms_sigma') (selected_indices, selected_scores, _) = gen_image_ops.non_max_suppression_v5( boxes, scores, max_output_size, iou_threshold, score_threshold, soft_nms_sigma, pad_to_max_output_size=False) return selected_indices, selected_scores @tf_export('image.non_max_suppression_overlaps') @dispatch.add_dispatch_support def non_max_suppression_with_overlaps(overlaps, scores, max_output_size, overlap_threshold=0.5, score_threshold=float('-inf'), name=None): """Greedily selects a subset of bounding boxes in descending order of score. Prunes away boxes that have high overlap with previously selected boxes. N-by-n overlap values are supplied as square matrix. The output of this operation is a set of integers indexing into the input collection of bounding boxes representing the selected boxes. The bounding box coordinates corresponding to the selected indices can then be obtained using the `tf.gather` operation. For example: ```python selected_indices = tf.image.non_max_suppression_overlaps( overlaps, scores, max_output_size, iou_threshold) selected_boxes = tf.gather(boxes, selected_indices) ``` Args: overlaps: A 2-D float `Tensor` of shape `[num_boxes, num_boxes]` representing the n-by-n box overlap values. scores: A 1-D float `Tensor` of shape `[num_boxes]` representing a single score corresponding to each box (each row of boxes). max_output_size: A scalar integer `Tensor` representing the maximum number of boxes to be selected by non-max suppression. overlap_threshold: A 0-D float tensor representing the threshold for deciding whether boxes overlap too much with respect to the provided overlap values. score_threshold: A 0-D float tensor representing the threshold for deciding when to remove boxes based on score. name: A name for the operation (optional). Returns: selected_indices: A 1-D integer `Tensor` of shape `[M]` representing the selected indices from the overlaps tensor, where `M <= max_output_size`. """ with ops.name_scope(name, 'non_max_suppression_overlaps'): overlap_threshold = ops.convert_to_tensor( overlap_threshold, name='overlap_threshold') # pylint: disable=protected-access return gen_image_ops.non_max_suppression_with_overlaps( overlaps, scores, max_output_size, overlap_threshold, score_threshold) # pylint: enable=protected-access _rgb_to_yiq_kernel = [[0.299, 0.59590059, 0.2115], [0.587, -0.27455667, -0.52273617], [0.114, -0.32134392, 0.31119955]] @tf_export('image.rgb_to_yiq') @dispatch.add_dispatch_support def rgb_to_yiq(images): """Converts one or more images from RGB to YIQ. Outputs a tensor of the same shape as the `images` tensor, containing the YIQ value of the pixels. The output is only well defined if the value in images are in [0,1]. Usage Example: >>> x = tf.constant([[[1.0, 2.0, 3.0]]]) >>> tf.image.rgb_to_yiq(x) <tf.Tensor: shape=(1, 1, 3), dtype=float32, numpy=array([[[ 1.815 , -0.91724455, 0.09962624]]], dtype=float32)> Args: images: 2-D or higher rank. Image data to convert. Last dimension must be size 3. 
Returns: images: tensor with the same shape as `images`. """ images = ops.convert_to_tensor(images, name='images') kernel = ops.convert_to_tensor( _rgb_to_yiq_kernel, dtype=images.dtype, name='kernel') ndims = images.get_shape().ndims return math_ops.tensordot(images, kernel, axes=[[ndims - 1], [0]]) _yiq_to_rgb_kernel = [[1, 1, 1], [0.95598634, -0.27201283, -1.10674021], [0.6208248, -0.64720424, 1.70423049]] @tf_export('image.yiq_to_rgb') @dispatch.add_dispatch_support def yiq_to_rgb(images): """Converts one or more images from YIQ to RGB. Outputs a tensor of the same shape as the `images` tensor, containing the RGB value of the pixels. The output is only well defined if the Y value in images are in [0,1], I value are in [-0.5957,0.5957] and Q value are in [-0.5226,0.5226]. Args: images: 2-D or higher rank. Image data to convert. Last dimension must be size 3. Returns: images: tensor with the same shape as `images`. """ images = ops.convert_to_tensor(images, name='images') kernel = ops.convert_to_tensor( _yiq_to_rgb_kernel, dtype=images.dtype, name='kernel') ndims = images.get_shape().ndims return math_ops.tensordot(images, kernel, axes=[[ndims - 1], [0]]) _rgb_to_yuv_kernel = [[0.299, -0.14714119, 0.61497538], [0.587, -0.28886916, -0.51496512], [0.114, 0.43601035, -0.10001026]] @tf_export('image.rgb_to_yuv') @dispatch.add_dispatch_support def rgb_to_yuv(images): """Converts one or more images from RGB to YUV. Outputs a tensor of the same shape as the `images` tensor, containing the YUV value of the pixels. The output is only well defined if the value in images are in [0, 1]. There are two ways of representing an image: [0, 255] pixel values range or [0, 1] (as float) pixel values range. Users need to convert the input image into a float [0, 1] range. Args: images: 2-D or higher rank. Image data to convert. Last dimension must be size 3. Returns: images: tensor with the same shape as `images`. """ images = ops.convert_to_tensor(images, name='images') kernel = ops.convert_to_tensor( _rgb_to_yuv_kernel, dtype=images.dtype, name='kernel') ndims = images.get_shape().ndims return math_ops.tensordot(images, kernel, axes=[[ndims - 1], [0]]) _yuv_to_rgb_kernel = [[1, 1, 1], [0, -0.394642334, 2.03206185], [1.13988303, -0.58062185, 0]] @tf_export('image.yuv_to_rgb') @dispatch.add_dispatch_support def yuv_to_rgb(images): """Converts one or more images from YUV to RGB. Outputs a tensor of the same shape as the `images` tensor, containing the RGB value of the pixels. The output is only well defined if the Y value in images are in [0,1], U and V value are in [-0.5,0.5]. As per the above description, you need to scale your YUV images if their pixel values are not in the required range. Below given example illustrates preprocessing of each channel of images before feeding them to `yuv_to_rgb`. ```python yuv_images = tf.random.uniform(shape=[100, 64, 64, 3], maxval=255) last_dimension_axis = len(yuv_images.shape) - 1 yuv_tensor_images = tf.truediv( tf.subtract( yuv_images, tf.reduce_min(yuv_images) ), tf.subtract( tf.reduce_max(yuv_images), tf.reduce_min(yuv_images) ) ) y, u, v = tf.split(yuv_tensor_images, 3, axis=last_dimension_axis) target_uv_min, target_uv_max = -0.5, 0.5 u = u * (target_uv_max - target_uv_min) + target_uv_min v = v * (target_uv_max - target_uv_min) + target_uv_min preprocessed_yuv_images = tf.concat([y, u, v], axis=last_dimension_axis) rgb_tensor_images = tf.image.yuv_to_rgb(preprocessed_yuv_images) ``` Args: images: 2-D or higher rank. Image data to convert. 
Last dimension must be size 3. Returns: images: tensor with the same shape as `images`. """ images = ops.convert_to_tensor(images, name='images') kernel = ops.convert_to_tensor( _yuv_to_rgb_kernel, dtype=images.dtype, name='kernel') ndims = images.get_shape().ndims return math_ops.tensordot(images, kernel, axes=[[ndims - 1], [0]]) def _verify_compatible_image_shapes(img1, img2): """Checks if two image tensors are compatible for applying SSIM or PSNR. This function checks if two sets of images have ranks at least 3, and if the last three dimensions match. Args: img1: Tensor containing the first image batch. img2: Tensor containing the second image batch. Returns: A tuple containing: the first tensor shape, the second tensor shape, and a list of control_flow_ops.Assert() ops implementing the checks. Raises: ValueError: When static shape check fails. """ shape1 = img1.get_shape().with_rank_at_least(3) shape2 = img2.get_shape().with_rank_at_least(3) shape1[-3:].assert_is_compatible_with(shape2[-3:]) if shape1.ndims is not None and shape2.ndims is not None: for dim1, dim2 in zip( reversed(shape1.dims[:-3]), reversed(shape2.dims[:-3])): if not (dim1 == 1 or dim2 == 1 or dim1.is_compatible_with(dim2)): raise ValueError('Two images are not compatible: %s and %s' % (shape1, shape2)) # Now assign shape tensors. shape1, shape2 = array_ops.shape_n([img1, img2]) # TODO(sjhwang): Check if shape1[:-3] and shape2[:-3] are broadcastable. checks = [] checks.append( control_flow_ops.Assert( math_ops.greater_equal(array_ops.size(shape1), 3), [shape1, shape2], summarize=10)) checks.append( control_flow_ops.Assert( math_ops.reduce_all(math_ops.equal(shape1[-3:], shape2[-3:])), [shape1, shape2], summarize=10)) return shape1, shape2, checks @tf_export('image.psnr') @dispatch.add_dispatch_support def psnr(a, b, max_val, name=None): """Returns the Peak Signal-to-Noise Ratio between a and b. This is intended to be used on signals (or images). Produces a PSNR value for each image in batch. The last three dimensions of input are expected to be [height, width, depth]. Example: ```python # Read images from file. im1 = tf.decode_png('path/to/im1.png') im2 = tf.decode_png('path/to/im2.png') # Compute PSNR over tf.uint8 Tensors. psnr1 = tf.image.psnr(im1, im2, max_val=255) # Compute PSNR over tf.float32 Tensors. im1 = tf.image.convert_image_dtype(im1, tf.float32) im2 = tf.image.convert_image_dtype(im2, tf.float32) psnr2 = tf.image.psnr(im1, im2, max_val=1.0) # psnr1 and psnr2 both have type tf.float32 and are almost equal. ``` Args: a: First set of images. b: Second set of images. max_val: The dynamic range of the images (i.e., the difference between the maximum the and minimum allowed values). name: Namespace to embed the computation in. Returns: The scalar PSNR between a and b. The returned tensor has type `tf.float32` and shape [batch_size, 1]. """ with ops.name_scope(name, 'PSNR', [a, b]): # Need to convert the images to float32. Scale max_val accordingly so that # PSNR is computed correctly. 
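    # The lines below implement PSNR = 20 * log10(max_val) - 10 * log10(MSE),
    # where MSE is the squared difference averaged over the last three
    # dimensions (height, width, depth) of each image pair.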
max_val = math_ops.cast(max_val, a.dtype) max_val = convert_image_dtype(max_val, dtypes.float32) a = convert_image_dtype(a, dtypes.float32) b = convert_image_dtype(b, dtypes.float32) mse = math_ops.reduce_mean(math_ops.squared_difference(a, b), [-3, -2, -1]) psnr_val = math_ops.subtract( 20 * math_ops.log(max_val) / math_ops.log(10.0), np.float32(10 / np.log(10)) * math_ops.log(mse), name='psnr') _, _, checks = _verify_compatible_image_shapes(a, b) with ops.control_dependencies(checks): return array_ops.identity(psnr_val) def _ssim_helper(x, y, reducer, max_val, compensation=1.0, k1=0.01, k2=0.03): r"""Helper function for computing SSIM. SSIM estimates covariances with weighted sums. The default parameters use a biased estimate of the covariance: Suppose `reducer` is a weighted sum, then the mean estimators are \mu_x = \sum_i w_i x_i, \mu_y = \sum_i w_i y_i, where w_i's are the weighted-sum weights, and covariance estimator is cov_{xy} = \sum_i w_i (x_i - \mu_x) (y_i - \mu_y) with assumption \sum_i w_i = 1. This covariance estimator is biased, since E[cov_{xy}] = (1 - \sum_i w_i ^ 2) Cov(X, Y). For SSIM measure with unbiased covariance estimators, pass as `compensation` argument (1 - \sum_i w_i ^ 2). Args: x: First set of images. y: Second set of images. reducer: Function that computes 'local' averages from the set of images. For non-convolutional version, this is usually tf.reduce_mean(x, [1, 2]), and for convolutional version, this is usually tf.nn.avg_pool2d or tf.nn.conv2d with weighted-sum kernel. max_val: The dynamic range (i.e., the difference between the maximum possible allowed value and the minimum allowed value). compensation: Compensation factor. See above. k1: Default value 0.01 k2: Default value 0.03 (SSIM is less sensitivity to K2 for lower values, so it would be better if we took the values in the range of 0 < K2 < 0.4). Returns: A pair containing the luminance measure, and the contrast-structure measure. """ c1 = (k1 * max_val)**2 c2 = (k2 * max_val)**2 # SSIM luminance measure is # (2 * mu_x * mu_y + c1) / (mu_x ** 2 + mu_y ** 2 + c1). mean0 = reducer(x) mean1 = reducer(y) num0 = mean0 * mean1 * 2.0 den0 = math_ops.square(mean0) + math_ops.square(mean1) luminance = (num0 + c1) / (den0 + c1) # SSIM contrast-structure measure is # (2 * cov_{xy} + c2) / (cov_{xx} + cov_{yy} + c2). # Note that `reducer` is a weighted sum with weight w_k, \sum_i w_i = 1, then # cov_{xy} = \sum_i w_i (x_i - \mu_x) (y_i - \mu_y) # = \sum_i w_i x_i y_i - (\sum_i w_i x_i) (\sum_j w_j y_j). num1 = reducer(x * y) * 2.0 den1 = reducer(math_ops.square(x) + math_ops.square(y)) c2 *= compensation cs = (num1 - num0 + c2) / (den1 - den0 + c2) # SSIM score is the product of the luminance and contrast-structure measures. return luminance, cs def _fspecial_gauss(size, sigma): """Function to mimic the 'fspecial' gaussian MATLAB function.""" size = ops.convert_to_tensor(size, dtypes.int32) sigma = ops.convert_to_tensor(sigma) coords = math_ops.cast(math_ops.range(size), sigma.dtype) coords -= math_ops.cast(size - 1, sigma.dtype) / 2.0 g = math_ops.square(coords) g *= -0.5 / math_ops.square(sigma) g = array_ops.reshape(g, shape=[1, -1]) + array_ops.reshape(g, shape=[-1, 1]) g = array_ops.reshape(g, shape=[1, -1]) # For tf.nn.softmax(). g = nn_ops.softmax(g) return array_ops.reshape(g, shape=[size, size, 1, 1]) def _ssim_per_channel(img1, img2, max_val=1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03): """Computes SSIM index between img1 and img2 per color channel. 
This function matches the standard SSIM implementation from: Wang, Z., Bovik, A. C., Sheikh, H. R., & Simoncelli, E. P. (2004). Image quality assessment: from error visibility to structural similarity. IEEE transactions on image processing. Details: - 11x11 Gaussian filter of width 1.5 is used. - k1 = 0.01, k2 = 0.03 as in the original paper. Args: img1: First image batch. img2: Second image batch. max_val: The dynamic range of the images (i.e., the difference between the maximum the and minimum allowed values). filter_size: Default value 11 (size of gaussian filter). filter_sigma: Default value 1.5 (width of gaussian filter). k1: Default value 0.01 k2: Default value 0.03 (SSIM is less sensitivity to K2 for lower values, so it would be better if we took the values in the range of 0 < K2 < 0.4). Returns: A pair of tensors containing and channel-wise SSIM and contrast-structure values. The shape is [..., channels]. """ filter_size = constant_op.constant(filter_size, dtype=dtypes.int32) filter_sigma = constant_op.constant(filter_sigma, dtype=img1.dtype) shape1, shape2 = array_ops.shape_n([img1, img2]) checks = [ control_flow_ops.Assert( math_ops.reduce_all( math_ops.greater_equal(shape1[-3:-1], filter_size)), [shape1, filter_size], summarize=8), control_flow_ops.Assert( math_ops.reduce_all( math_ops.greater_equal(shape2[-3:-1], filter_size)), [shape2, filter_size], summarize=8) ] # Enforce the check to run before computation. with ops.control_dependencies(checks): img1 = array_ops.identity(img1) # TODO(sjhwang): Try to cache kernels and compensation factor. kernel = _fspecial_gauss(filter_size, filter_sigma) kernel = array_ops.tile(kernel, multiples=[1, 1, shape1[-1], 1]) # The correct compensation factor is `1.0 - tf.reduce_sum(tf.square(kernel))`, # but to match MATLAB implementation of MS-SSIM, we use 1.0 instead. compensation = 1.0 # TODO(sjhwang): Try FFT. # TODO(sjhwang): Gaussian kernel is separable in space. Consider applying # 1-by-n and n-by-1 Gaussian filters instead of an n-by-n filter. def reducer(x): shape = array_ops.shape(x) x = array_ops.reshape(x, shape=array_ops.concat([[-1], shape[-3:]], 0)) y = nn.depthwise_conv2d(x, kernel, strides=[1, 1, 1, 1], padding='VALID') return array_ops.reshape( y, array_ops.concat([shape[:-3], array_ops.shape(y)[1:]], 0)) luminance, cs = _ssim_helper(img1, img2, reducer, max_val, compensation, k1, k2) # Average over the second and the third from the last: height, width. axes = constant_op.constant([-3, -2], dtype=dtypes.int32) ssim_val = math_ops.reduce_mean(luminance * cs, axes) cs = math_ops.reduce_mean(cs, axes) return ssim_val, cs @tf_export('image.ssim') @dispatch.add_dispatch_support def ssim(img1, img2, max_val, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03): """Computes SSIM index between img1 and img2. This function is based on the standard SSIM implementation from: Wang, Z., Bovik, A. C., Sheikh, H. R., & Simoncelli, E. P. (2004). Image quality assessment: from error visibility to structural similarity. IEEE transactions on image processing. Note: The true SSIM is only defined on grayscale. This function does not perform any colorspace transform. (If the input is already YUV, then it will compute YUV SSIM average.) Details: - 11x11 Gaussian filter of width 1.5 is used. - k1 = 0.01, k2 = 0.03 as in the original paper. The image sizes must be at least 11x11 because of the filter size. Example: ```python # Read images (of size 255 x 255) from file. 
im1 = tf.image.decode_image(tf.io.read_file('path/to/im1.png')) im2 = tf.image.decode_image(tf.io.read_file('path/to/im2.png')) tf.shape(im1) # `img1.png` has 3 channels; shape is `(255, 255, 3)` tf.shape(im2) # `img2.png` has 3 channels; shape is `(255, 255, 3)` # Add an outer batch for each image. im1 = tf.expand_dims(im1, axis=0) im2 = tf.expand_dims(im2, axis=0) # Compute SSIM over tf.uint8 Tensors. ssim1 = tf.image.ssim(im1, im2, max_val=255, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03) # Compute SSIM over tf.float32 Tensors. im1 = tf.image.convert_image_dtype(im1, tf.float32) im2 = tf.image.convert_image_dtype(im2, tf.float32) ssim2 = tf.image.ssim(im1, im2, max_val=1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03) # ssim1 and ssim2 both have type tf.float32 and are almost equal. ``` Args: img1: First image batch. 4-D Tensor of shape `[batch, height, width, channels]` with only Positive Pixel Values. img2: Second image batch. 4-D Tensor of shape `[batch, height, width, channels]` with only Positive Pixel Values. max_val: The dynamic range of the images (i.e., the difference between the maximum the and minimum allowed values). filter_size: Default value 11 (size of gaussian filter). filter_sigma: Default value 1.5 (width of gaussian filter). k1: Default value 0.01 k2: Default value 0.03 (SSIM is less sensitivity to K2 for lower values, so it would be better if we took the values in the range of 0 < K2 < 0.4). Returns: A tensor containing an SSIM value for each image in batch. Returned SSIM values are in range (-1, 1], when pixel values are non-negative. Returns a tensor with shape: broadcast(img1.shape[:-3], img2.shape[:-3]). """ with ops.name_scope(None, 'SSIM', [img1, img2]): # Convert to tensor if needed. img1 = ops.convert_to_tensor(img1, name='img1') img2 = ops.convert_to_tensor(img2, name='img2') # Shape checking. _, _, checks = _verify_compatible_image_shapes(img1, img2) with ops.control_dependencies(checks): img1 = array_ops.identity(img1) # Need to convert the images to float32. Scale max_val accordingly so that # SSIM is computed correctly. max_val = math_ops.cast(max_val, img1.dtype) max_val = convert_image_dtype(max_val, dtypes.float32) img1 = convert_image_dtype(img1, dtypes.float32) img2 = convert_image_dtype(img2, dtypes.float32) ssim_per_channel, _ = _ssim_per_channel(img1, img2, max_val, filter_size, filter_sigma, k1, k2) # Compute average over color channels. return math_ops.reduce_mean(ssim_per_channel, [-1]) # Default values obtained by Wang et al. _MSSSIM_WEIGHTS = (0.0448, 0.2856, 0.3001, 0.2363, 0.1333) @tf_export('image.ssim_multiscale') @dispatch.add_dispatch_support def ssim_multiscale(img1, img2, max_val, power_factors=_MSSSIM_WEIGHTS, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03): """Computes the MS-SSIM between img1 and img2. This function assumes that `img1` and `img2` are image batches, i.e. the last three dimensions are [height, width, channels]. Note: The true SSIM is only defined on grayscale. This function does not perform any colorspace transform. (If the input is already YUV, then it will compute YUV SSIM average.) Original paper: Wang, Zhou, Eero P. Simoncelli, and Alan C. Bovik. "Multiscale structural similarity for image quality assessment." Signals, Systems and Computers, 2004. Args: img1: First image batch with only Positive Pixel Values. img2: Second image batch with only Positive Pixel Values. Must have the same rank as img1. 
max_val: The dynamic range of the images (i.e., the difference between the maximum the and minimum allowed values). power_factors: Iterable of weights for each of the scales. The number of scales used is the length of the list. Index 0 is the unscaled resolution's weight and each increasing scale corresponds to the image being downsampled by 2. Defaults to (0.0448, 0.2856, 0.3001, 0.2363, 0.1333), which are the values obtained in the original paper. filter_size: Default value 11 (size of gaussian filter). filter_sigma: Default value 1.5 (width of gaussian filter). k1: Default value 0.01 k2: Default value 0.03 (SSIM is less sensitivity to K2 for lower values, so it would be better if we took the values in the range of 0 < K2 < 0.4). Returns: A tensor containing an MS-SSIM value for each image in batch. The values are in range [0, 1]. Returns a tensor with shape: broadcast(img1.shape[:-3], img2.shape[:-3]). """ with ops.name_scope(None, 'MS-SSIM', [img1, img2]): # Convert to tensor if needed. img1 = ops.convert_to_tensor(img1, name='img1') img2 = ops.convert_to_tensor(img2, name='img2') # Shape checking. shape1, shape2, checks = _verify_compatible_image_shapes(img1, img2) with ops.control_dependencies(checks): img1 = array_ops.identity(img1) # Need to convert the images to float32. Scale max_val accordingly so that # SSIM is computed correctly. max_val = math_ops.cast(max_val, img1.dtype) max_val = convert_image_dtype(max_val, dtypes.float32) img1 = convert_image_dtype(img1, dtypes.float32) img2 = convert_image_dtype(img2, dtypes.float32) imgs = [img1, img2] shapes = [shape1, shape2] # img1 and img2 are assumed to be a (multi-dimensional) batch of # 3-dimensional images (height, width, channels). `heads` contain the batch # dimensions, and `tails` contain the image dimensions. heads = [s[:-3] for s in shapes] tails = [s[-3:] for s in shapes] divisor = [1, 2, 2, 1] divisor_tensor = constant_op.constant(divisor[1:], dtype=dtypes.int32) def do_pad(images, remainder): padding = array_ops.expand_dims(remainder, -1) padding = array_ops.pad(padding, [[1, 0], [1, 0]]) return [array_ops.pad(x, padding, mode='SYMMETRIC') for x in images] mcs = [] for k in range(len(power_factors)): with ops.name_scope(None, 'Scale%d' % k, imgs): if k > 0: # Avg pool takes rank 4 tensors. Flatten leading dimensions. flat_imgs = [ array_ops.reshape(x, array_ops.concat([[-1], t], 0)) for x, t in zip(imgs, tails) ] remainder = tails[0] % divisor_tensor need_padding = math_ops.reduce_any(math_ops.not_equal(remainder, 0)) # pylint: disable=cell-var-from-loop padded = control_flow_ops.cond(need_padding, lambda: do_pad(flat_imgs, remainder), lambda: flat_imgs) # pylint: enable=cell-var-from-loop downscaled = [ nn_ops.avg_pool( x, ksize=divisor, strides=divisor, padding='VALID') for x in padded ] tails = [x[1:] for x in array_ops.shape_n(downscaled)] imgs = [ array_ops.reshape(x, array_ops.concat([h, t], 0)) for x, h, t in zip(downscaled, heads, tails) ] # Overwrite previous ssim value since we only need the last one. ssim_per_channel, cs = _ssim_per_channel( *imgs, max_val=max_val, filter_size=filter_size, filter_sigma=filter_sigma, k1=k1, k2=k2) mcs.append(nn_ops.relu(cs)) # Remove the cs score for the last scale. In the MS-SSIM calculation, # we use the l(p) at the highest scale. l(p) * cs(p) is ssim(p). mcs.pop() # Remove the cs score for the last scale. mcs_and_ssim = array_ops.stack( mcs + [nn_ops.relu(ssim_per_channel)], axis=-1) # Take weighted geometric mean across the scale axis. 
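    # That is, MS-SSIM = prod_k m_k ** w_k, where m_k is the (ReLU'd)
    # contrast-structure score at scale k (the full per-channel SSIM at the
    # final, coarsest scale) and w_k are the `power_factors`.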
ms_ssim = math_ops.reduce_prod( math_ops.pow(mcs_and_ssim, power_factors), [-1]) return math_ops.reduce_mean(ms_ssim, [-1]) # Avg over color channels. @tf_export('image.image_gradients') @dispatch.add_dispatch_support def image_gradients(image): """Returns image gradients (dy, dx) for each color channel. Both output tensors have the same shape as the input: [batch_size, h, w, d]. The gradient values are organized so that [I(x+1, y) - I(x, y)] is in location (x, y). That means that dy will always have zeros in the last row, and dx will always have zeros in the last column. Usage Example: ```python BATCH_SIZE = 1 IMAGE_HEIGHT = 5 IMAGE_WIDTH = 5 CHANNELS = 1 image = tf.reshape(tf.range(IMAGE_HEIGHT * IMAGE_WIDTH * CHANNELS, delta=1, dtype=tf.float32), shape=(BATCH_SIZE, IMAGE_HEIGHT, IMAGE_WIDTH, CHANNELS)) dy, dx = tf.image.image_gradients(image) print(image[0, :,:,0]) tf.Tensor( [[ 0. 1. 2. 3. 4.] [ 5. 6. 7. 8. 9.] [10. 11. 12. 13. 14.] [15. 16. 17. 18. 19.] [20. 21. 22. 23. 24.]], shape=(5, 5), dtype=float32) print(dy[0, :,:,0]) tf.Tensor( [[5. 5. 5. 5. 5.] [5. 5. 5. 5. 5.] [5. 5. 5. 5. 5.] [5. 5. 5. 5. 5.] [0. 0. 0. 0. 0.]], shape=(5, 5), dtype=float32) print(dx[0, :,:,0]) tf.Tensor( [[1. 1. 1. 1. 0.] [1. 1. 1. 1. 0.] [1. 1. 1. 1. 0.] [1. 1. 1. 1. 0.] [1. 1. 1. 1. 0.]], shape=(5, 5), dtype=float32) ``` Args: image: Tensor with shape [batch_size, h, w, d]. Returns: Pair of tensors (dy, dx) holding the vertical and horizontal image gradients (1-step finite difference). Raises: ValueError: If `image` is not a 4D tensor. """ if image.get_shape().ndims != 4: raise ValueError('image_gradients expects a 4D tensor ' '[batch_size, h, w, d], not {}.'.format(image.get_shape())) image_shape = array_ops.shape(image) batch_size, height, width, depth = array_ops.unstack(image_shape) dy = image[:, 1:, :, :] - image[:, :-1, :, :] dx = image[:, :, 1:, :] - image[:, :, :-1, :] # Return tensors with same size as original image by concatenating # zeros. Place the gradient [I(x+1,y) - I(x,y)] on the base pixel (x, y). shape = array_ops.stack([batch_size, 1, width, depth]) dy = array_ops.concat([dy, array_ops.zeros(shape, image.dtype)], 1) dy = array_ops.reshape(dy, image_shape) shape = array_ops.stack([batch_size, height, 1, depth]) dx = array_ops.concat([dx, array_ops.zeros(shape, image.dtype)], 2) dx = array_ops.reshape(dx, image_shape) return dy, dx @tf_export('image.sobel_edges') @dispatch.add_dispatch_support def sobel_edges(image): """Returns a tensor holding Sobel edge maps. Example usage: For general usage, `image` would be loaded from a file as below: ```python image_bytes = tf.io.read_file(path_to_image_file) image = tf.image.decode_image(image_bytes) image = tf.cast(image, tf.float32) image = tf.expand_dims(image, 0) ``` But for demo purposes, we are using randomly generated values for `image`: >>> image = tf.random.uniform( ... maxval=255, shape=[1, 28, 28, 3], dtype=tf.float32) >>> sobel = tf.image.sobel_edges(image) >>> sobel_y = np.asarray(sobel[0, :, :, :, 0]) # sobel in y-direction >>> sobel_x = np.asarray(sobel[0, :, :, :, 1]) # sobel in x-direction For displaying the sobel results, PIL's [Image Module]( https://pillow.readthedocs.io/en/stable/reference/Image.html) can be used: ```python # Display edge maps for the first channel (at index 0) Image.fromarray(sobel_y[..., 0] / 4 + 0.5).show() Image.fromarray(sobel_x[..., 0] / 4 + 0.5).show() ``` Args: image: Image tensor with shape [batch_size, h, w, d] and type float32 or float64. The image(s) must be 2x2 or larger. 
Returns: Tensor holding edge maps for each channel. Returns a tensor with shape [batch_size, h, w, d, 2] where the last two dimensions hold [[dy[0], dx[0]], [dy[1], dx[1]], ..., [dy[d-1], dx[d-1]]] calculated using the Sobel filter. """ # Define vertical and horizontal Sobel filters. static_image_shape = image.get_shape() image_shape = array_ops.shape(image) kernels = [[[-1, -2, -1], [0, 0, 0], [1, 2, 1]], [[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]]] num_kernels = len(kernels) kernels = np.transpose(np.asarray(kernels), (1, 2, 0)) kernels = np.expand_dims(kernels, -2) kernels_tf = constant_op.constant(kernels, dtype=image.dtype) kernels_tf = array_ops.tile( kernels_tf, [1, 1, image_shape[-1], 1], name='sobel_filters') # Use depth-wise convolution to calculate edge maps per channel. pad_sizes = [[0, 0], [1, 1], [1, 1], [0, 0]] padded = array_ops.pad(image, pad_sizes, mode='REFLECT') # Output tensor has shape [batch_size, h, w, d * num_kernels]. strides = [1, 1, 1, 1] output = nn.depthwise_conv2d(padded, kernels_tf, strides, 'VALID') # Reshape to [batch_size, h, w, d, num_kernels]. shape = array_ops.concat([image_shape, [num_kernels]], 0) output = array_ops.reshape(output, shape=shape) output.set_shape(static_image_shape.concatenate([num_kernels])) return output def resize_bicubic(images, size, align_corners=False, name=None, half_pixel_centers=False): return gen_image_ops.resize_bicubic( images=images, size=size, align_corners=align_corners, half_pixel_centers=half_pixel_centers, name=name) def resize_bilinear(images, size, align_corners=False, name=None, half_pixel_centers=False): return gen_image_ops.resize_bilinear( images=images, size=size, align_corners=align_corners, half_pixel_centers=half_pixel_centers, name=name) def resize_nearest_neighbor(images, size, align_corners=False, name=None, half_pixel_centers=False): return gen_image_ops.resize_nearest_neighbor( images=images, size=size, align_corners=align_corners, half_pixel_centers=half_pixel_centers, name=name) resize_area_deprecation = deprecation.deprecated( date=None, instructions=( 'Use `tf.image.resize(...method=ResizeMethod.AREA...)` instead.')) tf_export(v1=['image.resize_area'])( resize_area_deprecation( dispatch.add_dispatch_support(gen_image_ops.resize_area))) resize_bicubic_deprecation = deprecation.deprecated( date=None, instructions=( 'Use `tf.image.resize(...method=ResizeMethod.BICUBIC...)` instead.')) tf_export(v1=['image.resize_bicubic'])( dispatch.add_dispatch_support(resize_bicubic_deprecation(resize_bicubic))) resize_bilinear_deprecation = deprecation.deprecated( date=None, instructions=( 'Use `tf.image.resize(...method=ResizeMethod.BILINEAR...)` instead.')) tf_export(v1=['image.resize_bilinear'])( dispatch.add_dispatch_support(resize_bilinear_deprecation(resize_bilinear))) resize_nearest_neighbor_deprecation = deprecation.deprecated( date=None, instructions=( 'Use `tf.image.resize(...method=ResizeMethod.NEAREST_NEIGHBOR...)` ' 'instead.')) tf_export(v1=['image.resize_nearest_neighbor'])( dispatch.add_dispatch_support( resize_nearest_neighbor_deprecation(resize_nearest_neighbor))) @tf_export('image.crop_and_resize', v1=[]) @dispatch.add_dispatch_support def crop_and_resize_v2(image, boxes, box_indices, crop_size, method='bilinear', extrapolation_value=0, name=None): """Extracts crops from the input image tensor and resizes them. 
Extracts crops from the input image tensor and resizes them using bilinear sampling or nearest neighbor sampling (possibly with aspect ratio change) to a common output size specified by `crop_size`. This is more general than the `crop_to_bounding_box` op which extracts a fixed size slice from the input image and does not allow resizing or aspect ratio change. Returns a tensor with `crops` from the input `image` at positions defined at the bounding box locations in `boxes`. The cropped boxes are all resized (with bilinear or nearest neighbor interpolation) to a fixed `size = [crop_height, crop_width]`. The result is a 4-D tensor `[num_boxes, crop_height, crop_width, depth]`. The resizing is corner aligned. In particular, if `boxes = [[0, 0, 1, 1]]`, the method will give identical results to using `tf.compat.v1.image.resize_bilinear()` or `tf.compat.v1.image.resize_nearest_neighbor()`(depends on the `method` argument) with `align_corners=True`. Args: image: A 4-D tensor of shape `[batch, image_height, image_width, depth]`. Both `image_height` and `image_width` need to be positive. boxes: A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor specifies the coordinates of a box in the `box_ind[i]` image and is specified in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of `y` is mapped to the image coordinate at `y * (image_height - 1)`, so as the `[0, 1]` interval of normalized image height is mapped to `[0, image_height - 1]` in image height coordinates. We do allow `y1` > `y2`, in which case the sampled crop is an up-down flipped version of the original image. The width dimension is treated similarly. Normalized coordinates outside the `[0, 1]` range are allowed, in which case we use `extrapolation_value` to extrapolate the input image values. box_indices: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`. The value of `box_ind[i]` specifies the image that the `i`-th box refers to. crop_size: A 1-D tensor of 2 elements, `size = [crop_height, crop_width]`. All cropped image patches are resized to this size. The aspect ratio of the image content is not preserved. Both `crop_height` and `crop_width` need to be positive. method: An optional string specifying the sampling method for resizing. It can be either `"bilinear"` or `"nearest"` and default to `"bilinear"`. Currently two sampling methods are supported: Bilinear and Nearest Neighbor. extrapolation_value: An optional `float`. Defaults to `0`. Value used for extrapolation, when applicable. name: A name for the operation (optional). Returns: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`. 
Example: ```python import tensorflow as tf BATCH_SIZE = 1 NUM_BOXES = 5 IMAGE_HEIGHT = 256 IMAGE_WIDTH = 256 CHANNELS = 3 CROP_SIZE = (24, 24) image = tf.random.normal(shape=(BATCH_SIZE, IMAGE_HEIGHT, IMAGE_WIDTH, CHANNELS) ) boxes = tf.random.uniform(shape=(NUM_BOXES, 4)) box_indices = tf.random.uniform(shape=(NUM_BOXES,), minval=0, maxval=BATCH_SIZE, dtype=tf.int32) output = tf.image.crop_and_resize(image, boxes, box_indices, CROP_SIZE) output.shape #=> (5, 24, 24, 3) ``` """ return gen_image_ops.crop_and_resize(image, boxes, box_indices, crop_size, method, extrapolation_value, name) @tf_export(v1=['image.crop_and_resize']) @dispatch.add_dispatch_support @deprecation.deprecated_args(None, 'box_ind is deprecated, use box_indices instead', 'box_ind') def crop_and_resize_v1( # pylint: disable=missing-docstring image, boxes, box_ind=None, crop_size=None, method='bilinear', extrapolation_value=0, name=None, box_indices=None): box_ind = deprecation.deprecated_argument_lookup('box_indices', box_indices, 'box_ind', box_ind) return gen_image_ops.crop_and_resize(image, boxes, box_ind, crop_size, method, extrapolation_value, name) crop_and_resize_v1.__doc__ = gen_image_ops.crop_and_resize.__doc__ @tf_export(v1=['image.extract_glimpse']) @dispatch.add_dispatch_support def extract_glimpse( input, # pylint: disable=redefined-builtin size, offsets, centered=True, normalized=True, uniform_noise=True, name=None): """Extracts a glimpse from the input tensor. Returns a set of windows called glimpses extracted at location `offsets` from the input tensor. If the windows only partially overlaps the inputs, the non-overlapping areas will be filled with random noise. The result is a 4-D tensor of shape `[batch_size, glimpse_height, glimpse_width, channels]`. The channels and batch dimensions are the same as that of the input tensor. The height and width of the output windows are specified in the `size` parameter. The argument `normalized` and `centered` controls how the windows are built: * If the coordinates are normalized but not centered, 0.0 and 1.0 correspond to the minimum and maximum of each height and width dimension. * If the coordinates are both normalized and centered, they range from -1.0 to 1.0. The coordinates (-1.0, -1.0) correspond to the upper left corner, the lower right corner is located at (1.0, 1.0) and the center is at (0, 0). * If the coordinates are not normalized they are interpreted as numbers of pixels. Usage Example: >>> x = [[[[0.0], ... [1.0], ... [2.0]], ... [[3.0], ... [4.0], ... [5.0]], ... [[6.0], ... [7.0], ... [8.0]]]] >>> tf.compat.v1.image.extract_glimpse(x, size=(2, 2), offsets=[[1, 1]], ... centered=False, normalized=False) <tf.Tensor: shape=(1, 2, 2, 1), dtype=float32, numpy= array([[[[0.], [1.]], [[3.], [4.]]]], dtype=float32)> Args: input: A `Tensor` of type `float32`. A 4-D float tensor of shape `[batch_size, height, width, channels]`. size: A `Tensor` of type `int32`. A 1-D tensor of 2 elements containing the size of the glimpses to extract. The glimpse height must be specified first, following by the glimpse width. offsets: A `Tensor` of type `float32`. A 2-D integer tensor of shape `[batch_size, 2]` containing the y, x locations of the center of each window. centered: An optional `bool`. Defaults to `True`. indicates if the offset coordinates are centered relative to the image, in which case the (0, 0) offset is relative to the center of the input images. If false, the (0,0) offset corresponds to the upper left corner of the input images. 
normalized: An optional `bool`. Defaults to `True`. indicates if the offset coordinates are normalized. uniform_noise: An optional `bool`. Defaults to `True`. indicates if the noise should be generated using a uniform distribution or a Gaussian distribution. name: A name for the operation (optional). Returns: A `Tensor` of type `float32`. """ return gen_image_ops.extract_glimpse( input=input, size=size, offsets=offsets, centered=centered, normalized=normalized, uniform_noise=uniform_noise, name=name) @tf_export('image.extract_glimpse', v1=[]) @dispatch.add_dispatch_support def extract_glimpse_v2( input, # pylint: disable=redefined-builtin size, offsets, centered=True, normalized=True, noise='uniform', name=None): """Extracts a glimpse from the input tensor. Returns a set of windows called glimpses extracted at location `offsets` from the input tensor. If the windows only partially overlaps the inputs, the non-overlapping areas will be filled with random noise. The result is a 4-D tensor of shape `[batch_size, glimpse_height, glimpse_width, channels]`. The channels and batch dimensions are the same as that of the input tensor. The height and width of the output windows are specified in the `size` parameter. The argument `normalized` and `centered` controls how the windows are built: * If the coordinates are normalized but not centered, 0.0 and 1.0 correspond to the minimum and maximum of each height and width dimension. * If the coordinates are both normalized and centered, they range from -1.0 to 1.0. The coordinates (-1.0, -1.0) correspond to the upper left corner, the lower right corner is located at (1.0, 1.0) and the center is at (0, 0). * If the coordinates are not normalized they are interpreted as numbers of pixels. Usage Example: >>> x = [[[[0.0], ... [1.0], ... [2.0]], ... [[3.0], ... [4.0], ... [5.0]], ... [[6.0], ... [7.0], ... [8.0]]]] >>> tf.image.extract_glimpse(x, size=(2, 2), offsets=[[1, 1]], ... centered=False, normalized=False) <tf.Tensor: shape=(1, 2, 2, 1), dtype=float32, numpy= array([[[[4.], [5.]], [[7.], [8.]]]], dtype=float32)> Args: input: A `Tensor` of type `float32`. A 4-D float tensor of shape `[batch_size, height, width, channels]`. size: A `Tensor` of type `int32`. A 1-D tensor of 2 elements containing the size of the glimpses to extract. The glimpse height must be specified first, following by the glimpse width. offsets: A `Tensor` of type `float32`. A 2-D integer tensor of shape `[batch_size, 2]` containing the y, x locations of the center of each window. centered: An optional `bool`. Defaults to `True`. indicates if the offset coordinates are centered relative to the image, in which case the (0, 0) offset is relative to the center of the input images. If false, the (0,0) offset corresponds to the upper left corner of the input images. normalized: An optional `bool`. Defaults to `True`. indicates if the offset coordinates are normalized. noise: An optional `string`. Defaults to `uniform`. indicates if the noise should be `uniform` (uniform distribution), `gaussian` (gaussian distribution), or `zero` (zero padding). name: A name for the operation (optional). Returns: A `Tensor` of type `float32`. 
""" return gen_image_ops.extract_glimpse_v2( input=input, size=size, offsets=offsets, centered=centered, normalized=normalized, noise=noise, uniform_noise=False, name=name) @tf_export('image.combined_non_max_suppression') @dispatch.add_dispatch_support def combined_non_max_suppression(boxes, scores, max_output_size_per_class, max_total_size, iou_threshold=0.5, score_threshold=float('-inf'), pad_per_class=False, clip_boxes=True, name=None): """Greedily selects a subset of bounding boxes in descending order of score. This operation performs non_max_suppression on the inputs per batch, across all classes. Prunes away boxes that have high intersection-over-union (IOU) overlap with previously selected boxes. Bounding boxes are supplied as [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any diagonal pair of box corners and the coordinates can be provided as normalized (i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm is agnostic to where the origin is in the coordinate system. Also note that this algorithm is invariant to orthogonal transformations and translations of the coordinate system; thus translating or reflections of the coordinate system result in the same boxes being selected by the algorithm. The output of this operation is the final boxes, scores and classes tensor returned after performing non_max_suppression. Args: boxes: A 4-D float `Tensor` of shape `[batch_size, num_boxes, q, 4]`. If `q` is 1 then same boxes are used for all classes otherwise, if `q` is equal to number of classes, class-specific boxes are used. scores: A 3-D float `Tensor` of shape `[batch_size, num_boxes, num_classes]` representing a single score corresponding to each box (each row of boxes). max_output_size_per_class: A scalar integer `Tensor` representing the maximum number of boxes to be selected by non-max suppression per class max_total_size: A int32 scalar representing maximum number of boxes retained over all classes. Note that setting this value to a large number may result in OOM error depending on the system workload. iou_threshold: A float representing the threshold for deciding whether boxes overlap too much with respect to IOU. score_threshold: A float representing the threshold for deciding when to remove boxes based on score. pad_per_class: If false, the output nmsed boxes, scores and classes are padded/clipped to `max_total_size`. If true, the output nmsed boxes, scores and classes are padded to be of length `max_size_per_class`*`num_classes`, unless it exceeds `max_total_size` in which case it is clipped to `max_total_size`. Defaults to false. clip_boxes: If true, the coordinates of output nmsed boxes will be clipped to [0, 1]. If false, output the box coordinates as it is. Defaults to true. name: A name for the operation (optional). Returns: 'nmsed_boxes': A [batch_size, max_detections, 4] float32 tensor containing the non-max suppressed boxes. 'nmsed_scores': A [batch_size, max_detections] float32 tensor containing the scores for the boxes. 'nmsed_classes': A [batch_size, max_detections] float32 tensor containing the class for boxes. 'valid_detections': A [batch_size] int32 tensor indicating the number of valid detections per batch item. Only the top valid_detections[i] entries in nms_boxes[i], nms_scores[i] and nms_class[i] are valid. The rest of the entries are zero paddings. 
""" with ops.name_scope(name, 'combined_non_max_suppression'): iou_threshold = ops.convert_to_tensor( iou_threshold, dtype=dtypes.float32, name='iou_threshold') score_threshold = ops.convert_to_tensor( score_threshold, dtype=dtypes.float32, name='score_threshold') # Convert `max_total_size` to tensor *without* setting the `dtype` param. # This allows us to catch `int32` overflow case with `max_total_size` # whose expected dtype is `int32` by the op registration. Any number within # `int32` will get converted to `int32` tensor. Anything larger will get # converted to `int64`. Passing in `int64` for `max_total_size` to the op # will throw dtype mismatch exception. # TODO(b/173251596): Once there is a more general solution to warn against # int overflow conversions, revisit this check. max_total_size = ops.convert_to_tensor(max_total_size) return gen_image_ops.combined_non_max_suppression( boxes, scores, max_output_size_per_class, max_total_size, iou_threshold, score_threshold, pad_per_class, clip_boxes) def _bbox_overlap(boxes_a, boxes_b): """Calculates the overlap (iou - intersection over union) between boxes_a and boxes_b. Args: boxes_a: a tensor with a shape of [batch_size, N, 4]. N is the number of boxes per image. The last dimension is the pixel coordinates in [ymin, xmin, ymax, xmax] form. boxes_b: a tensor with a shape of [batch_size, M, 4]. M is the number of boxes. The last dimension is the pixel coordinates in [ymin, xmin, ymax, xmax] form. Returns: intersection_over_union: a tensor with as a shape of [batch_size, N, M], representing the ratio of intersection area over union area (IoU) between two boxes """ with ops.name_scope('bbox_overlap'): a_y_min, a_x_min, a_y_max, a_x_max = array_ops.split( value=boxes_a, num_or_size_splits=4, axis=2) b_y_min, b_x_min, b_y_max, b_x_max = array_ops.split( value=boxes_b, num_or_size_splits=4, axis=2) # Calculates the intersection area. i_xmin = math_ops.maximum( a_x_min, array_ops.transpose(b_x_min, [0, 2, 1])) i_xmax = math_ops.minimum( a_x_max, array_ops.transpose(b_x_max, [0, 2, 1])) i_ymin = math_ops.maximum( a_y_min, array_ops.transpose(b_y_min, [0, 2, 1])) i_ymax = math_ops.minimum( a_y_max, array_ops.transpose(b_y_max, [0, 2, 1])) i_area = math_ops.maximum( (i_xmax - i_xmin), 0) * math_ops.maximum((i_ymax - i_ymin), 0) # Calculates the union area. a_area = (a_y_max - a_y_min) * (a_x_max - a_x_min) b_area = (b_y_max - b_y_min) * (b_x_max - b_x_min) EPSILON = 1e-8 # Adds a small epsilon to avoid divide-by-zero. u_area = a_area + array_ops.transpose(b_area, [0, 2, 1]) - i_area + EPSILON # Calculates IoU. intersection_over_union = i_area / u_area return intersection_over_union def _self_suppression(iou, _, iou_sum, iou_threshold): """Suppress boxes in the same tile. Compute boxes that cannot be suppressed by others (i.e., can_suppress_others), and then use them to suppress boxes in the same tile. Args: iou: a tensor of shape [batch_size, num_boxes_with_padding] representing intersection over union. iou_sum: a scalar tensor. iou_threshold: a scalar tensor. Returns: iou_suppressed: a tensor of shape [batch_size, num_boxes_with_padding]. iou_diff: a scalar tensor representing whether any box is supressed in this step. iou_sum_new: a scalar tensor of shape [batch_size] that represents the iou sum after suppression. iou_threshold: a scalar tensor. 
""" batch_size = array_ops.shape(iou)[0] can_suppress_others = math_ops.cast( array_ops.reshape( math_ops.reduce_max(iou, 1) < iou_threshold, [batch_size, -1, 1]), iou.dtype) iou_after_suppression = array_ops.reshape( math_ops.cast( math_ops.reduce_max(can_suppress_others * iou, 1) < iou_threshold, iou.dtype), [batch_size, -1, 1]) * iou iou_sum_new = math_ops.reduce_sum(iou_after_suppression, [1, 2]) return [ iou_after_suppression, math_ops.reduce_any(iou_sum - iou_sum_new > iou_threshold), iou_sum_new, iou_threshold ] def _cross_suppression(boxes, box_slice, iou_threshold, inner_idx, tile_size): """Suppress boxes between different tiles. Args: boxes: a tensor of shape [batch_size, num_boxes_with_padding, 4] box_slice: a tensor of shape [batch_size, tile_size, 4] iou_threshold: a scalar tensor inner_idx: a scalar tensor representing the tile index of the tile that is used to supress box_slice tile_size: an integer representing the number of boxes in a tile Returns: boxes: unchanged boxes as input box_slice_after_suppression: box_slice after suppression iou_threshold: unchanged """ batch_size = array_ops.shape(boxes)[0] new_slice = array_ops.slice( boxes, [0, inner_idx * tile_size, 0], [batch_size, tile_size, 4]) iou = _bbox_overlap(new_slice, box_slice) box_slice_after_suppression = array_ops.expand_dims( math_ops.cast(math_ops.reduce_all(iou < iou_threshold, [1]), box_slice.dtype), 2) * box_slice return boxes, box_slice_after_suppression, iou_threshold, inner_idx + 1 def _suppression_loop_body(boxes, iou_threshold, output_size, idx, tile_size): """Process boxes in the range [idx*tile_size, (idx+1)*tile_size). Args: boxes: a tensor with a shape of [batch_size, anchors, 4]. iou_threshold: a float representing the threshold for deciding whether boxes overlap too much with respect to IOU. output_size: an int32 tensor of size [batch_size]. Representing the number of selected boxes for each batch. idx: an integer scalar representing induction variable. tile_size: an integer representing the number of boxes in a tile Returns: boxes: updated boxes. iou_threshold: pass down iou_threshold to the next iteration. output_size: the updated output_size. idx: the updated induction variable. """ with ops.name_scope('suppression_loop_body'): num_tiles = array_ops.shape(boxes)[1] // tile_size batch_size = array_ops.shape(boxes)[0] def cross_suppression_func(boxes, box_slice, iou_threshold, inner_idx): return _cross_suppression(boxes, box_slice, iou_threshold, inner_idx, tile_size) # Iterates over tiles that can possibly suppress the current tile. box_slice = array_ops.slice(boxes, [0, idx * tile_size, 0], [batch_size, tile_size, 4]) _, box_slice, _, _ = control_flow_ops.while_loop( lambda _boxes, _box_slice, _threshold, inner_idx: inner_idx < idx, cross_suppression_func, [boxes, box_slice, iou_threshold, constant_op.constant(0)]) # Iterates over the current tile to compute self-suppression. 
iou = _bbox_overlap(box_slice, box_slice) mask = array_ops.expand_dims( array_ops.reshape( math_ops.range(tile_size), [1, -1]) > array_ops.reshape( math_ops.range(tile_size), [-1, 1]), 0) iou *= math_ops.cast( math_ops.logical_and(mask, iou >= iou_threshold), iou.dtype) suppressed_iou, _, _, _ = control_flow_ops.while_loop( lambda _iou, loop_condition, _iou_sum, _: loop_condition, _self_suppression, [iou, constant_op.constant(True), math_ops.reduce_sum(iou, [1, 2]), iou_threshold]) suppressed_box = math_ops.reduce_sum(suppressed_iou, 1) > 0 box_slice *= array_ops.expand_dims( 1.0 - math_ops.cast(suppressed_box, box_slice.dtype), 2) # Uses box_slice to update the input boxes. mask = array_ops.reshape( math_ops.cast( math_ops.equal(math_ops.range(num_tiles), idx), boxes.dtype), [1, -1, 1, 1]) boxes = array_ops.tile(array_ops.expand_dims( box_slice, [1]), [1, num_tiles, 1, 1]) * mask + array_ops.reshape( boxes, [batch_size, num_tiles, tile_size, 4]) * (1 - mask) boxes = array_ops.reshape(boxes, [batch_size, -1, 4]) # Updates output_size. output_size += math_ops.reduce_sum( math_ops.cast( math_ops.reduce_any(box_slice > 0, [2]), dtypes.int32), [1]) return boxes, iou_threshold, output_size, idx + 1 @tf_export('image.non_max_suppression_padded') @dispatch.add_dispatch_support def non_max_suppression_padded(boxes, scores, max_output_size, iou_threshold=0.5, score_threshold=float('-inf'), pad_to_max_output_size=False, name=None, sorted_input=False, canonicalized_coordinates=False, tile_size=512): """Greedily selects a subset of bounding boxes in descending order of score. Performs algorithmically equivalent operation to tf.image.non_max_suppression, with the addition of an optional parameter which zero-pads the output to be of size `max_output_size`. The output of this operation is a tuple containing the set of integers indexing into the input collection of bounding boxes representing the selected boxes and the number of valid indices in the index set. The bounding box coordinates corresponding to the selected indices can then be obtained using the `tf.slice` and `tf.gather` operations. For example: ```python selected_indices_padded, num_valid = tf.image.non_max_suppression_padded( boxes, scores, max_output_size, iou_threshold, score_threshold, pad_to_max_output_size=True) selected_indices = tf.slice( selected_indices_padded, tf.constant([0]), num_valid) selected_boxes = tf.gather(boxes, selected_indices) ``` Args: boxes: a tensor of rank 2 or higher with a shape of [..., num_boxes, 4]. Dimensions except the last two are batch dimensions. scores: a tensor of rank 1 or higher with a shape of [..., num_boxes]. max_output_size: a scalar integer `Tensor` representing the maximum number of boxes to be selected by non max suppression. Note that setting this value to a large number may result in OOM error depending on the system workload. iou_threshold: a float representing the threshold for deciding whether boxes overlap too much with respect to IoU (intersection over union). score_threshold: a float representing the threshold for box scores. Boxes with a score that is not larger than this threshold will be suppressed. pad_to_max_output_size: whether to pad the output idx to max_output_size. Must be set to True when the input is a batch of images. name: name of operation. sorted_input: a boolean indicating whether the input boxes and scores are sorted in descending order by the score. 
    canonicalized_coordinates: if box coordinates are given as
      `[y_min, x_min, y_max, x_max]`, setting to True eliminates redundant
      computation to canonicalize box coordinates.
    tile_size: an integer representing the number of boxes in a tile, i.e.,
      the maximum number of boxes per image that can be used to suppress other
      boxes in parallel; larger tile_size means larger parallelism and
      potentially more redundant work.
  Returns:
    idx: a tensor with a shape of [..., num_boxes] representing the indices
      selected by non-max suppression. The leading dimensions are the batch
      dimensions of the input boxes. All numbers are within [0, num_boxes). For
      each image (i.e., idx[i]), only the first num_valid[i] indices (i.e.,
      idx[i][:num_valid[i]]) are valid.
    num_valid: a tensor of rank 0 or higher with a shape of [...]
      representing the number of valid indices in idx. Its dimensions are the
      batch dimensions of the input boxes.
  Raises:
    ValueError: When `pad_to_max_output_size` is set to False for batched
      input.
  """
  with ops.name_scope(name, 'non_max_suppression_padded'):
    if not pad_to_max_output_size:
      # pad_to_max_output_size may be set to False only when the shape of
      # boxes is [num_boxes, 4], i.e., a single image. We make best effort to
      # detect violations at compile time. If `boxes` does not have a static
      # rank, the check allows computation to proceed.
      if boxes.get_shape().rank is not None and boxes.get_shape().rank > 2:
        raise ValueError("'pad_to_max_output_size' (value {}) must be True for "
                         'batched input'.format(pad_to_max_output_size))
    if name is None:
      name = ''
    idx, num_valid = non_max_suppression_padded_v2(
        boxes, scores, max_output_size, iou_threshold, score_threshold,
        sorted_input, canonicalized_coordinates, tile_size)
    # def_function.function seems to lose shape information, so set it here.
    if not pad_to_max_output_size:
      idx = idx[0, :num_valid]
    else:
      batch_dims = array_ops.concat([
          array_ops.shape(boxes)[:-2],
          array_ops.expand_dims(max_output_size, 0)
      ], 0)
      idx = array_ops.reshape(idx, batch_dims)
    return idx, num_valid


# TODO(b/158709815): Improve performance regression due to
# def_function.function.
@def_function.function(
    experimental_implements='non_max_suppression_padded_v2')
def non_max_suppression_padded_v2(boxes,
                                  scores,
                                  max_output_size,
                                  iou_threshold=0.5,
                                  score_threshold=float('-inf'),
                                  sorted_input=False,
                                  canonicalized_coordinates=False,
                                  tile_size=512):
  """Non-maximum suppression.

  Prunes away boxes that have high intersection-over-union (IOU) overlap
  with previously selected boxes.

  Bounding boxes are supplied as `[y1, x1, y2, x2]`, where `(y1, x1)` and
  `(y2, x2)` are the coordinates of any diagonal pair of box corners and the
  coordinates can be provided as normalized (i.e., lying in the interval
  `[0, 1]`) or absolute. The bounding box coordinates are canonicalized to
  `[y_min, x_min, y_max, x_max]`, where `(y_min, x_min)` and `(y_max, x_max)`
  are the coordinates of the lower left and upper right corner. Users may
  indicate that the input box coordinates are already canonicalized, and thus
  eliminate redundant work, by setting canonicalized_coordinates to `True`.
  Note that this algorithm is agnostic to where the origin is in the
  coordinate system. Note that this algorithm is invariant to orthogonal
  transformations and translations of the coordinate system; thus translations
  or reflections of the coordinate system result in the same boxes being
  selected by the algorithm.
  Similar to tf.image.non_max_suppression, non_max_suppression_padded
  implements hard NMS but can operate on a batch of images and improves
  performance by tiling the bounding boxes. Non_max_suppression_padded should
  be preferred over tf.image.non_max_suppression when running on devices with
  abundant parallelism for higher computation speed. For soft NMS, refer to
  tf.image.non_max_suppression_with_scores.

  While a serial NMS algorithm iteratively uses the highest-scored unprocessed
  box to suppress boxes, this algorithm uses many boxes to suppress other boxes
  in parallel. The key idea is to partition boxes into tiles based on their
  score and suppress boxes tile by tile, thus achieving parallelism within a
  tile. The tile size determines the degree of parallelism.

  In cross suppression (using boxes of tile A to suppress boxes of tile B),
  all boxes in A can independently suppress boxes in B.

  Self suppression (suppressing boxes of the same tile) needs to be iteratively
  applied until there's no more suppression. In each iteration, boxes that
  cannot be suppressed are used to suppress boxes in the same tile.

  boxes = boxes.pad_to_multiple_of(tile_size)
  num_tiles = len(boxes) // tile_size
  output_boxes = []
  for i in range(num_tiles):
    box_tile = boxes[i*tile_size : (i+1)*tile_size]
    for j in range(i):
      # in parallel suppress boxes in box_tile using boxes from suppressing_tile
      suppressing_tile = boxes[j*tile_size : (j+1)*tile_size]
      iou = _bbox_overlap(box_tile, suppressing_tile)
      # if the box is suppressed in iou, clear it to a dot
      box_tile *= _update_boxes(iou)
    # Iteratively handle the diagonal tile.
    iou = _bbox_overlap(box_tile, box_tile)
    iou_changed = True
    while iou_changed:
      # boxes that are not suppressed by anything else
      suppressing_boxes = _get_suppressing_boxes(iou)
      # boxes that are suppressed by suppressing_boxes
      suppressed_boxes = _get_suppressed_boxes(iou, suppressing_boxes)
      # clear iou to 0 for boxes that are suppressed, as they cannot be used
      # to suppress other boxes any more
      new_iou = _clear_iou(iou, suppressed_boxes)
      iou_changed = (new_iou != iou)
      iou = new_iou
    # remaining boxes that can still suppress others are selected boxes.
    output_boxes.append(_get_suppressing_boxes(iou))
    if len(output_boxes) >= max_output_size:
      break

  Args:
    boxes: a tensor of rank 2 or higher with a shape of [..., num_boxes, 4].
      Dimensions except the last two are batch dimensions. The last dimension
      represents box coordinates, given as [y_1, x_1, y_2, x_2]. The
      coordinates on each dimension can be given in any order (see also
      `canonicalized_coordinates`) but must describe a box with a positive
      area.
    scores: a tensor of rank 1 or higher with a shape of [..., num_boxes].
    max_output_size: a scalar integer `Tensor` representing the maximum number
      of boxes to be selected by non-max suppression.
    iou_threshold: a float representing the threshold for deciding whether
      boxes overlap too much with respect to IoU (intersection over union).
    score_threshold: a float representing the threshold for box scores. Boxes
      with a score that is not larger than this threshold will be suppressed.
    sorted_input: a boolean indicating whether the input boxes and scores
      are sorted in descending order by the score.
    canonicalized_coordinates: if box coordinates are given as
      `[y_min, x_min, y_max, x_max]`, setting to True eliminates redundant
      computation to canonicalize box coordinates.
tile_size: an integer representing the number of boxes in a tile, i.e., the maximum number of boxes per image that can be used to suppress other boxes in parallel; larger tile_size means larger parallelism and potentially more redundant work. Returns: idx: a tensor with a shape of [..., num_boxes] representing the indices selected by non-max suppression. The leading dimensions are the batch dimensions of the input boxes. All numbers are within [0, num_boxes). For each image (i.e., idx[i]), only the first num_valid[i] indices (i.e., idx[i][:num_valid[i]]) are valid. num_valid: a tensor of rank 0 or higher with a shape of [...] representing the number of valid indices in idx. Its dimensions are the batch dimensions of the input boxes. Raises: ValueError: When set pad_to_max_output_size to False for batched input. """ def _sort_scores_and_boxes(scores, boxes): """Sort boxes based their score from highest to lowest. Args: scores: a tensor with a shape of [batch_size, num_boxes] representing the scores of boxes. boxes: a tensor with a shape of [batch_size, num_boxes, 4] representing the boxes. Returns: sorted_scores: a tensor with a shape of [batch_size, num_boxes] representing the sorted scores. sorted_boxes: a tensor representing the sorted boxes. sorted_scores_indices: a tensor with a shape of [batch_size, num_boxes] representing the index of the scores in a sorted descending order. """ with ops.name_scope('sort_scores_and_boxes'): batch_size = array_ops.shape(boxes)[0] num_boxes = array_ops.shape(boxes)[1] sorted_scores_indices = sort_ops.argsort( scores, axis=1, direction='DESCENDING') index_offsets = math_ops.range(batch_size) * num_boxes indices = array_ops.reshape( sorted_scores_indices + array_ops.expand_dims(index_offsets, 1), [-1]) sorted_scores = array_ops.reshape( array_ops.gather(array_ops.reshape(scores, [-1]), indices), [batch_size, -1]) sorted_boxes = array_ops.reshape( array_ops.gather(array_ops.reshape(boxes, [-1, 4]), indices), [batch_size, -1, 4]) return sorted_scores, sorted_boxes, sorted_scores_indices batch_dims = array_ops.shape(boxes)[:-2] num_boxes = array_ops.shape(boxes)[-2] boxes = array_ops.reshape(boxes, [-1, num_boxes, 4]) scores = array_ops.reshape(scores, [-1, num_boxes]) batch_size = array_ops.shape(boxes)[0] if score_threshold != float('-inf'): with ops.name_scope('filter_by_score'): score_mask = math_ops.cast(scores > score_threshold, scores.dtype) scores *= score_mask box_mask = array_ops.expand_dims( math_ops.cast(score_mask, boxes.dtype), 2) boxes *= box_mask if not canonicalized_coordinates: with ops.name_scope('canonicalize_coordinates'): y_1, x_1, y_2, x_2 = array_ops.split( value=boxes, num_or_size_splits=4, axis=2) y_1_is_min = math_ops.reduce_all( math_ops.less_equal(y_1[0, 0, 0], y_2[0, 0, 0])) y_min, y_max = control_flow_ops.cond( y_1_is_min, lambda: (y_1, y_2), lambda: (y_2, y_1)) x_1_is_min = math_ops.reduce_all( math_ops.less_equal(x_1[0, 0, 0], x_2[0, 0, 0])) x_min, x_max = control_flow_ops.cond( x_1_is_min, lambda: (x_1, x_2), lambda: (x_2, x_1)) boxes = array_ops.concat([y_min, x_min, y_max, x_max], axis=2) if not sorted_input: scores, boxes, sorted_indices = _sort_scores_and_boxes(scores, boxes) else: # Default value required for Autograph. 
sorted_indices = array_ops.zeros_like(scores, dtype=dtypes.int32) pad = math_ops.cast( math_ops.ceil( math_ops.cast( math_ops.maximum(num_boxes, max_output_size), dtypes.float32) / math_ops.cast(tile_size, dtypes.float32)), dtypes.int32) * tile_size - num_boxes boxes = array_ops.pad( math_ops.cast(boxes, dtypes.float32), [[0, 0], [0, pad], [0, 0]]) scores = array_ops.pad( math_ops.cast(scores, dtypes.float32), [[0, 0], [0, pad]]) num_boxes_after_padding = num_boxes + pad num_iterations = num_boxes_after_padding // tile_size def _loop_cond(unused_boxes, unused_threshold, output_size, idx): return math_ops.logical_and( math_ops.reduce_min(output_size) < max_output_size, idx < num_iterations) def suppression_loop_body(boxes, iou_threshold, output_size, idx): return _suppression_loop_body( boxes, iou_threshold, output_size, idx, tile_size) selected_boxes, _, output_size, _ = control_flow_ops.while_loop( _loop_cond, suppression_loop_body, [ boxes, iou_threshold, array_ops.zeros([batch_size], dtypes.int32), constant_op.constant(0) ], shape_invariants=[ tensor_shape.TensorShape([None, None, 4]), tensor_shape.TensorShape([]), tensor_shape.TensorShape([None]), tensor_shape.TensorShape([]), ], ) num_valid = math_ops.minimum(output_size, max_output_size) idx = num_boxes_after_padding - math_ops.cast( nn_ops.top_k( math_ops.cast(math_ops.reduce_any( selected_boxes > 0, [2]), dtypes.int32) * array_ops.expand_dims( math_ops.range(num_boxes_after_padding, 0, -1), 0), max_output_size)[0], dtypes.int32) idx = math_ops.minimum(idx, num_boxes - 1) if not sorted_input: index_offsets = math_ops.range(batch_size) * num_boxes gather_idx = array_ops.reshape( idx + array_ops.expand_dims(index_offsets, 1), [-1]) idx = array_ops.reshape( array_ops.gather(array_ops.reshape(sorted_indices, [-1]), gather_idx), [batch_size, -1]) invalid_index = array_ops.zeros([batch_size, max_output_size], dtype=dtypes.int32) idx_index = array_ops.expand_dims(math_ops.range(max_output_size), 0) num_valid_expanded = array_ops.expand_dims(num_valid, 1) idx = array_ops.where(idx_index < num_valid_expanded, idx, invalid_index) num_valid = array_ops.reshape(num_valid, batch_dims) return idx, num_valid def non_max_suppression_padded_v1(boxes, scores, max_output_size, iou_threshold=0.5, score_threshold=float('-inf'), pad_to_max_output_size=False, name=None): """Greedily selects a subset of bounding boxes in descending order of score. Performs algorithmically equivalent operation to tf.image.non_max_suppression, with the addition of an optional parameter which zero-pads the output to be of size `max_output_size`. The output of this operation is a tuple containing the set of integers indexing into the input collection of bounding boxes representing the selected boxes and the number of valid indices in the index set. The bounding box coordinates corresponding to the selected indices can then be obtained using the `tf.slice` and `tf.gather` operations. For example: ```python selected_indices_padded, num_valid = tf.image.non_max_suppression_padded( boxes, scores, max_output_size, iou_threshold, score_threshold, pad_to_max_output_size=True) selected_indices = tf.slice( selected_indices_padded, tf.constant([0]), num_valid) selected_boxes = tf.gather(boxes, selected_indices) ``` Args: boxes: A 2-D float `Tensor` of shape `[num_boxes, 4]`. scores: A 1-D float `Tensor` of shape `[num_boxes]` representing a single score corresponding to each box (each row of boxes). 
max_output_size: A scalar integer `Tensor` representing the maximum number of boxes to be selected by non-max suppression. iou_threshold: A float representing the threshold for deciding whether boxes overlap too much with respect to IOU. score_threshold: A float representing the threshold for deciding when to remove boxes based on score. pad_to_max_output_size: bool. If True, size of `selected_indices` output is padded to `max_output_size`. name: A name for the operation (optional). Returns: selected_indices: A 1-D integer `Tensor` of shape `[M]` representing the selected indices from the boxes tensor, where `M <= max_output_size`. valid_outputs: A scalar integer `Tensor` denoting how many elements in `selected_indices` are valid. Valid elements occur first, then padding. """ with ops.name_scope(name, 'non_max_suppression_padded'): iou_threshold = ops.convert_to_tensor(iou_threshold, name='iou_threshold') score_threshold = ops.convert_to_tensor( score_threshold, name='score_threshold') return gen_image_ops.non_max_suppression_v4(boxes, scores, max_output_size, iou_threshold, score_threshold, pad_to_max_output_size) @tf_export('image.draw_bounding_boxes', v1=[]) @dispatch.add_dispatch_support def draw_bounding_boxes_v2(images, boxes, colors, name=None): """Draw bounding boxes on a batch of images. Outputs a copy of `images` but draws on top of the pixels zero or more bounding boxes specified by the locations in `boxes`. The coordinates of the each bounding box in `boxes` are encoded as `[y_min, x_min, y_max, x_max]`. The bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and the height of the underlying image. For example, if an image is 100 x 200 pixels (height x width) and the bounding box is `[0.1, 0.2, 0.5, 0.9]`, the upper-left and bottom-right coordinates of the bounding box will be `(40, 10)` to `(180, 50)` (in (x,y) coordinates). Parts of the bounding box may fall outside the image. Args: images: A `Tensor`. Must be one of the following types: `float32`, `half`. 4-D with shape `[batch, height, width, depth]`. A batch of images. boxes: A `Tensor` of type `float32`. 3-D with shape `[batch, num_bounding_boxes, 4]` containing bounding boxes. colors: A `Tensor` of type `float32`. 2-D. A list of RGBA colors to cycle through for the boxes. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `images`. Usage Example: >>> # create an empty image >>> img = tf.zeros([1, 3, 3, 3]) >>> # draw a box around the image >>> box = np.array([0, 0, 1, 1]) >>> boxes = box.reshape([1, 1, 4]) >>> # alternate between red and blue >>> colors = np.array([[1.0, 0.0, 0.0], [0.0, 0.0, 1.0]]) >>> tf.image.draw_bounding_boxes(img, boxes, colors) <tf.Tensor: shape=(1, 3, 3, 3), dtype=float32, numpy= array([[[[1., 0., 0.], [1., 0., 0.], [1., 0., 0.]], [[1., 0., 0.], [0., 0., 0.], [1., 0., 0.]], [[1., 0., 0.], [1., 0., 0.], [1., 0., 0.]]]], dtype=float32)> """ if colors is None: return gen_image_ops.draw_bounding_boxes(images, boxes, name) return gen_image_ops.draw_bounding_boxes_v2(images, boxes, colors, name) @tf_export(v1=['image.draw_bounding_boxes']) @dispatch.add_dispatch_support def draw_bounding_boxes(images, boxes, name=None, colors=None): """Draw bounding boxes on a batch of images. Outputs a copy of `images` but draws on top of the pixels zero or more bounding boxes specified by the locations in `boxes`. The coordinates of the each bounding box in `boxes` are encoded as `[y_min, x_min, y_max, x_max]`. 
The bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and the height of the underlying image. For example, if an image is 100 x 200 pixels (height x width) and the bounding box is `[0.1, 0.2, 0.5, 0.9]`, the upper-left and bottom-right coordinates of the bounding box will be `(40, 10)` to `(180, 50)` (in (x,y) coordinates). Parts of the bounding box may fall outside the image. Args: images: A `Tensor`. Must be one of the following types: `float32`, `half`. 4-D with shape `[batch, height, width, depth]`. A batch of images. boxes: A `Tensor` of type `float32`. 3-D with shape `[batch, num_bounding_boxes, 4]` containing bounding boxes. name: A name for the operation (optional). colors: A `Tensor` of type `float32`. 2-D. A list of RGBA colors to cycle through for the boxes. Returns: A `Tensor`. Has the same type as `images`. Usage Example: >>> # create an empty image >>> img = tf.zeros([1, 3, 3, 3]) >>> # draw a box around the image >>> box = np.array([0, 0, 1, 1]) >>> boxes = box.reshape([1, 1, 4]) >>> # alternate between red and blue >>> colors = np.array([[1.0, 0.0, 0.0], [0.0, 0.0, 1.0]]) >>> tf.image.draw_bounding_boxes(img, boxes, colors) <tf.Tensor: shape=(1, 3, 3, 3), dtype=float32, numpy= array([[[[1., 0., 0.], [1., 0., 0.], [1., 0., 0.]], [[1., 0., 0.], [0., 0., 0.], [1., 0., 0.]], [[1., 0., 0.], [1., 0., 0.], [1., 0., 0.]]]], dtype=float32)> """ return draw_bounding_boxes_v2(images, boxes, colors, name) @tf_export('image.generate_bounding_box_proposals') @dispatch.add_dispatch_support def generate_bounding_box_proposals(scores, bbox_deltas, image_info, anchors, nms_threshold=0.7, pre_nms_topn=6000, min_size=16, post_nms_topn=300, name=None): """Generate bounding box proposals from encoded bounding boxes. Args: scores: A 4-D float `Tensor` of shape `[num_images, height, width, num_achors]` containing scores of the boxes for given anchors, can be unsorted. bbox_deltas: A 4-D float `Tensor` of shape `[num_images, height, width, 4 x num_anchors]` encoding boxes with respect to each anchor. Coordinates are given in the form `[dy, dx, dh, dw]`. image_info: A 2-D float `Tensor` of shape `[num_images, 5]` containing image information Height, Width, Scale. anchors: A 2-D float `Tensor` of shape `[num_anchors, 4]` describing the anchor boxes. Boxes are formatted in the form `[y1, x1, y2, x2]`. nms_threshold: A scalar float `Tensor` for non-maximal-suppression threshold. Defaults to 0.7. pre_nms_topn: A scalar int `Tensor` for the number of top scoring boxes to be used as input. Defaults to 6000. min_size: A scalar float `Tensor`. Any box that has a smaller size than min_size will be discarded. Defaults to 16. post_nms_topn: An integer. Maximum number of rois in the output. name: A name for this operation (optional). Returns: rois: Region of interest boxes sorted by their scores. roi_probabilities: scores of the ROI boxes in the ROIs' `Tensor`. """ return gen_image_ops.generate_bounding_box_proposals( scores=scores, bbox_deltas=bbox_deltas, image_info=image_info, anchors=anchors, nms_threshold=nms_threshold, pre_nms_topn=pre_nms_topn, min_size=min_size, post_nms_topn=post_nms_topn, name=name)
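A quick orientation note, not part of the TensorFlow file above: the NMS entry points it defines are easiest to understand from a call site. The eager-mode sketch below exercises `tf.image.combined_non_max_suppression`; the batch size, box count, and thresholds are made-up values chosen only for illustration.

```python
# Minimal sketch of calling the combined NMS op documented above.
# All shapes and threshold values here are illustrative assumptions.
import tensorflow as tf

batch_size, num_boxes, num_classes = 2, 8, 3
# q == 1: a single shared box set for all classes (see the `boxes` docstring).
boxes = tf.random.uniform([batch_size, num_boxes, 1, 4])
scores = tf.random.uniform([batch_size, num_boxes, num_classes])

nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections = (
    tf.image.combined_non_max_suppression(
        boxes,
        scores,
        max_output_size_per_class=4,
        max_total_size=10,
        iou_threshold=0.5,
        score_threshold=0.1))

for i in range(batch_size):
    n = int(valid_detections[i])
    # Entries past `n` are zero padding, as described in the Returns section.
    print(nmsed_boxes[i, :n].numpy(), nmsed_classes[i, :n].numpy())
```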
apache-2.0
-2,365,389,480,885,356,000
38.411254
88
0.642189
false
motion2015/edx-platform
cms/lib/xblock/authoring_mixin.py
163
1500
""" Mixin class that provides authoring capabilities for XBlocks. """ import logging from django.conf import settings from xblock.core import XBlock from xblock.fields import XBlockMixin from xblock.fragment import Fragment log = logging.getLogger(__name__) VISIBILITY_VIEW = 'visibility_view' @XBlock.needs("i18n") class AuthoringMixin(XBlockMixin): """ Mixin class that provides authoring capabilities for XBlocks. """ _services_requested = { 'i18n': 'need', } def _get_studio_resource_url(self, relative_url): """ Returns the Studio URL to a static resource. """ return settings.STATIC_URL + relative_url def visibility_view(self, _context=None): """ Render the view to manage an xblock's visibility settings in Studio. Args: _context: Not actively used for this view. Returns: (Fragment): An HTML fragment for editing the visibility of this XBlock. """ fragment = Fragment() from contentstore.utils import reverse_course_url fragment.add_content(self.system.render_template('visibility_editor.html', { 'xblock': self, 'manage_groups_url': reverse_course_url('group_configurations_list_handler', self.location.course_key), })) fragment.add_javascript_url(self._get_studio_resource_url('/js/xblock/authoring.js')) fragment.initialize_js('VisibilityEditorInit') return fragment
agpl-3.0
2,505,907,580,996,359,000
29.612245
115
0.66
false
stonebig/bokeh
bokeh/models/axes.py
2
11338
#----------------------------------------------------------------------------- # Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors. # All rights reserved. # # The full license is in the file LICENSE.txt, distributed with this software. #----------------------------------------------------------------------------- ''' Guide renderers for various kinds of axes that can be added to Bokeh plots ''' #----------------------------------------------------------------------------- # Boilerplate #----------------------------------------------------------------------------- from __future__ import absolute_import, division, print_function, unicode_literals import logging log = logging.getLogger(__name__) #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- # Standard library imports # External imports # Bokeh imports from ..core.enums import TickLabelOrientation from ..core.has_props import abstract from ..core.properties import Auto, Datetime, Dict, Either, Enum, Float, Include, Instance, Int, Override, Seq, String, Tuple from ..core.property_mixins import LineProps, TextProps from .formatters import BasicTickFormatter, CategoricalTickFormatter, DatetimeTickFormatter, LogTickFormatter, TickFormatter, MercatorTickFormatter from .renderers import GuideRenderer from .tickers import Ticker, BasicTicker, LogTicker, CategoricalTicker, DatetimeTicker, FixedTicker, MercatorTicker #----------------------------------------------------------------------------- # Globals and constants #----------------------------------------------------------------------------- __all__ = ( 'Axis', 'CategoricalAxis', 'ContinuousAxis', 'DatetimeAxis', 'LinearAxis', 'LogAxis', 'MercatorAxis', ) #----------------------------------------------------------------------------- # General API #----------------------------------------------------------------------------- @abstract class Axis(GuideRenderer): ''' A base class that defines common properties for all axis types. ''' bounds = Either(Auto, Tuple(Float, Float), Tuple(Datetime, Datetime), help=""" Bounds for the rendered axis. If unset, the axis will span the entire plot in the given dimension. """) x_range_name = String('default', help=""" A particular (named) x-range to use for computing screen locations when rendering an axis on the plot. If unset, use the default x-range. """) y_range_name = String('default', help=""" A particular (named) y-range to use for computing screen locations when rendering an axis on the plot. If unset, use the default y-range. """) ticker = Instance(Ticker, help=""" A Ticker to use for computing locations of axis components. The property may also be passed a sequence of floating point numbers as a shorthand for creating and configuring a ``FixedTicker``, e.g. the following code .. code-block:: python from bokeh.plotting import figure p = figure() p.xaxis.ticker = [10, 20, 37.4] is equivalent to: .. code-block:: python from bokeh.plotting import figure from bokeh.models.tickers import FixedTicker p = figure() p.xaxis.ticker = FixedTicker(ticks=[10, 20, 37.4]) """).accepts(Seq(Float), lambda ticks: FixedTicker(ticks=ticks)) formatter = Instance(TickFormatter, help=""" A ``TickFormatter`` to use for formatting the visual appearance of ticks. """) axis_label = String(default='', help=""" A text label for the axis, displayed parallel to the axis rule. .. 
note:: LaTeX notation is not currently supported; please see :bokeh-issue:`647` to track progress or contribute. """) axis_label_standoff = Int(default=5, help=""" The distance in pixels that the axis labels should be offset from the tick labels. """) axis_label_props = Include(TextProps, help=""" The %s of the axis label. """) axis_label_text_font_size = Override(default={'value': "10pt"}) axis_label_text_font_style = Override(default="italic") major_label_standoff = Int(default=5, help=""" The distance in pixels that the major tick labels should be offset from the associated ticks. """) major_label_orientation = Either(Enum("horizontal", "vertical"), Float, help=""" What direction the major label text should be oriented. If a number is supplied, the angle of the text is measured from horizontal. """) major_label_overrides = Dict(Either(Float, String), String, default={}, help=""" Provide explicit tick label values for specific tick locations that override normal formatting. """) major_label_props = Include(TextProps, help=""" The %s of the major tick labels. """) major_label_text_align = Override(default="center") major_label_text_baseline = Override(default="alphabetic") major_label_text_font_size = Override(default={'value': "8pt"}) axis_props = Include(LineProps, help=""" The %s of the axis line. """) major_tick_props = Include(LineProps, help=""" The %s of the major ticks. """) major_tick_in = Int(default=2, help=""" The distance in pixels that major ticks should extend into the main plot area. """) major_tick_out = Int(default=6, help=""" The distance in pixels that major ticks should extend out of the main plot area. """) minor_tick_props = Include(LineProps, help=""" The %s of the minor ticks. """) minor_tick_in = Int(default=0, help=""" The distance in pixels that minor ticks should extend into the main plot area. """) minor_tick_out = Int(default=4, help=""" The distance in pixels that major ticks should extend out of the main plot area. """) fixed_location = Either(Float, String, Tuple(String, String), Tuple(String, String, String), default=None, help=""" Set to specify a fixed coordinate location to draw the axis. The direction of ticks and major labels is determined by the side panel that the axis belongs to. .. note:: Axes labels are suppressed when axes are positioned at fixed locations inside the central plot area. """) @abstract class ContinuousAxis(Axis): ''' A base class for all numeric, non-categorical axes types. ''' pass class LinearAxis(ContinuousAxis): ''' An axis that picks nice numbers for tick locations on a linear scale. Configured with a ``BasicTickFormatter`` by default. ''' ticker = Override(default=lambda: BasicTicker()) formatter = Override(default=lambda: BasicTickFormatter()) class LogAxis(ContinuousAxis): ''' An axis that picks nice numbers for tick locations on a log scale. Configured with a ``LogTickFormatter`` by default. ''' ticker = Override(default=lambda: LogTicker()) formatter = Override(default=lambda: LogTickFormatter()) class CategoricalAxis(Axis): ''' An axis that displays ticks and labels for categorical ranges. The ``CategoricalAxis`` can handle factor ranges with up to two levels of nesting, including drawing a separator line between top-level groups of factors. ''' ticker = Override(default=lambda: CategoricalTicker()) formatter = Override(default=lambda: CategoricalTickFormatter()) separator_props = Include(LineProps, help=""" The %s of the separator line between top-level categorical groups. 
    This property always applies to factors in the outermost level of
    nesting.

    """)

    separator_line_color = Override(default="lightgrey")

    separator_line_width = Override(default=2)

    group_props = Include(TextProps, help="""
    The %s of the group categorical labels.

    This property always applies to factors in the outermost level of
    nesting. If the list of categorical factors is flat (i.e. no nesting) then
    this property has no effect.

    """)

    group_label_orientation = Either(Enum(TickLabelOrientation), Float, default="parallel", help="""
    What direction the group label text should be oriented.

    If a number is supplied, the angle of the text is measured from horizontal.

    This property always applies to factors in the outermost level of
    nesting. If the list of categorical factors is flat (i.e. no nesting) then
    this property has no effect.
    """)

    group_text_font_size = Override(default={'value': "8pt"})

    group_text_font_style = Override(default="bold")

    group_text_color = Override(default="grey")

    subgroup_props = Include(TextProps, help="""
    The %s of the subgroup categorical labels.

    This property always applies to factors in the middle level of nesting.
    If the list of categorical factors has only zero or one levels of
    nesting, then this property has no effect.
    """)

    subgroup_label_orientation = Either(Enum(TickLabelOrientation), Float, default="parallel", help="""
    What direction the subgroup label text should be oriented.

    If a number is supplied, the angle of the text is measured from horizontal.

    This property always applies to factors in the middle level of nesting.
    If the list of categorical factors has only zero or one levels of
    nesting, then this property has no effect.
    """)

    subgroup_text_font_size = Override(default={'value': "8pt"})

    subgroup_text_font_style = Override(default="bold")

class DatetimeAxis(LinearAxis):
    ''' A ``LinearAxis`` that picks nice numbers for tick locations on
    a datetime scale. Configured with a ``DatetimeTickFormatter`` by
    default.

    '''

    ticker = Override(default=lambda: DatetimeTicker())

    formatter = Override(default=lambda: DatetimeTickFormatter())

class MercatorAxis(LinearAxis):
    ''' An axis that picks nice numbers for tick locations on a
    Mercator scale. Configured with a ``MercatorTickFormatter`` by
    default.

    Args:
        dimension ('lat' or 'lon', optional) :
            Whether this axis will display latitude or longitude values.
            (default: 'lat')

    '''
    def __init__(self, dimension='lat', **kw):
        super(MercatorAxis, self).__init__(**kw)

        # Just being careful. It would defeat the purpose for anyone to actually
        # configure this axis with different kinds of tickers or formatters.
        if isinstance(self.ticker, MercatorTicker):
            self.ticker.dimension = dimension
        if isinstance(self.formatter, MercatorTickFormatter):
            self.formatter.dimension = dimension

    ticker = Override(default=lambda: MercatorTicker())

    formatter = Override(default=lambda: MercatorTickFormatter())

#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------

#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------

#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
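As a usage note, not part of `bokeh/models/axes.py`: the axis models above are normally configured through the plotting API. The sketch below shows the `Seq(Float)` ticker shorthand documented on `Axis.ticker` plus a second `LinearAxis` attached to a named extra range; the data and range values are invented.

```python
# Illustrative sketch: configuring the axis models defined above.
from bokeh.models import LinearAxis, Range1d
from bokeh.plotting import figure, show

p = figure()
p.line([1, 2, 3, 4], [10, 40, 20, 30])

# Seq(Float) shorthand on Axis.ticker -> FixedTicker(ticks=[...]).
p.xaxis.ticker = [1, 2.5, 4]

# A second y-axis bound to a named extra range, rendered on the right side.
p.extra_y_ranges = {"pct": Range1d(start=0, end=100)}
p.add_layout(LinearAxis(y_range_name="pct", axis_label="percent"), "right")

show(p)
```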
bsd-3-clause
6,628,841,109,837,154,000
33.048048
147
0.611836
false
sopier/django
django/contrib/contenttypes/management.py
476
2521
from django.apps import apps from django.db import DEFAULT_DB_ALIAS, router from django.utils import six from django.utils.six.moves import input def update_contenttypes(app_config, verbosity=2, interactive=True, using=DEFAULT_DB_ALIAS, **kwargs): """ Creates content types for models in the given app, removing any model entries that no longer have a matching model class. """ if not app_config.models_module: return try: ContentType = apps.get_model('contenttypes', 'ContentType') except LookupError: return if not router.allow_migrate_model(using, ContentType): return ContentType.objects.clear_cache() app_label = app_config.label app_models = { model._meta.model_name: model for model in app_config.get_models()} if not app_models: return # Get all the content types content_types = { ct.model: ct for ct in ContentType.objects.using(using).filter(app_label=app_label) } to_remove = [ ct for (model_name, ct) in six.iteritems(content_types) if model_name not in app_models ] cts = [ ContentType( app_label=app_label, model=model_name, ) for (model_name, model) in six.iteritems(app_models) if model_name not in content_types ] ContentType.objects.using(using).bulk_create(cts) if verbosity >= 2: for ct in cts: print("Adding content type '%s | %s'" % (ct.app_label, ct.model)) # Confirm that the content type is stale before deletion. if to_remove: if interactive: content_type_display = '\n'.join( ' %s | %s' % (ct.app_label, ct.model) for ct in to_remove ) ok_to_delete = input("""The following content types are stale and need to be deleted: %s Any objects related to these content types by a foreign key will also be deleted. Are you sure you want to delete these content types? If you're unsure, answer 'no'. Type 'yes' to continue, or 'no' to cancel: """ % content_type_display) else: ok_to_delete = False if ok_to_delete == 'yes': for ct in to_remove: if verbosity >= 2: print("Deleting stale content type '%s | %s'" % (ct.app_label, ct.model)) ct.delete() else: if verbosity >= 2: print("Stale content types remain.")
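For orientation only (a hypothetical call site, not part of the Django source above; the app label is a placeholder): `update_contenttypes` takes an `AppConfig` plus keyword options, so a non-interactive sync for a single app could look like this.

```python
# Hypothetical, non-interactive invocation of update_contenttypes above.
from django.apps import apps
from django.contrib.contenttypes.management import update_contenttypes

app_config = apps.get_app_config("myapp")  # "myapp" is a placeholder app label
update_contenttypes(app_config, verbosity=1, interactive=False)
```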
bsd-3-clause
6,550,575,133,137,145,000
29.011905
101
0.595795
false
datalogics-robb/scons
test/Scanner/parallel-rescan.py
2
2125
#!/usr/bin/env python # # __COPYRIGHT__ # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # __revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__" """ Verify that when a source file is generated and the -j option is used, the source file correctly gets re-scanned for implicit dependencies after it's built. """ import TestSCons test = TestSCons.TestSCons() test.write('SConstruct', """\ env = Environment() env['BUILDERS']['COPY'] = Builder(action = Copy("$TARGET", "$SOURCE")) env.COPY('a.c', 'a.in') env.COPY('b.c', 'b.in') env.StaticLibrary('lib', ['a.c', 'b.c']) """) test.write("a.in", """\ #include "a.h" """) test.write("b.in", """\ #include "b.h" """) test.write("a.h", """\ char *A_FILE = "b.in"; """) test.write("b.h", """\ char *B_FILE = "b.in"; """) test.run(arguments = '-j4 .', stderr=TestSCons.noisy_ar, match=TestSCons.match_re_dotall) # If the dependencies weren't re-scanned properly, the .h files won't # show up in the previous run's dependency lists, and the .o files and # library will get rebuilt here. test.up_to_date(arguments = '.') test.pass_test()
mit
-1,010,770,543,265,059,500
28.513889
73
0.700706
false
Bristol-Braille/canute-ui
ui/i18n.py
1
2127
import gettext import logging from collections import namedtuple, OrderedDict log = logging.getLogger(__name__) def install(locale_code): try: translations = gettext.translation( 'canute', localedir='ui/locale', languages=[locale_code], fallback=False ) except OSError as e: log.warning(e) translations = gettext.NullTranslations() translations.install() # Before having installed _() we need extractors to see language titles. # It's convenient to have it act as the identity function, too. def _(x): return x Builtin = namedtuple('BuiltinLang', ['code', 'title']) # Would prefer "British English, UEB grade N" for the following but # (1) it's too long to be included in the languages menu title, (2) it # might be irrelevant if there are no British-isms in this small # collection of text, (3) US users might object on principle. # TRANSLATORS: This is a language name menu item, so should always appear # in the language it denotes so that it remains readable to those who # speak only that language, just as "Deutsch" should always be left as # "Deutsch" in a language menu. Addition of a Braille grade marker seems # appropriate, if possible. ueb1 = Builtin(code='en_GB.UTF-8@ueb1', title=_('English, UEB grade 1')) # TRANSLATORS: This is a language name menu item, so should always appear # in the language it denotes so that it remains readable to those who # speak only that language, just as "Deutsch" should always be left as # "Deutsch" in a language menu. Addition of a Braille grade marker seems # appropriate, if possible. ueb2 = Builtin(code='en_GB.UTF-8@ueb2', title=_('English, UEB grade 2')) del _ DEFAULT_LOCALE = ueb2 install(DEFAULT_LOCALE.code) # Rely on dedup. BUILTIN_LANGUAGES = OrderedDict([ (DEFAULT_LOCALE.code, _(DEFAULT_LOCALE.title)), (ueb1.code, _(ueb1.title)), (ueb2.code, _(ueb2.title)), ]) # For detecting the default language of older installations, which # didn't really have switchable language but did add a default # sort-of-locale to the global state file. OLD_DEFAULT_LOCALE = 'en_GB:en'
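A small usage sketch, not part of `ui/i18n.py` itself; the `ui.i18n` import path is assumed from the file location. It lists the built-in language table and switches the installed translation.

```python
# Illustrative only: list the built-in languages and switch the active one.
from ui import i18n  # import path assumed from ui/i18n.py

for code, title in i18n.BUILTIN_LANGUAGES.items():
    print(code, title)

# install() puts gettext's _() into builtins; if no catalogue is found on
# disk it silently falls back to NullTranslations, as in the module above.
i18n.install(i18n.ueb1.code)
```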
gpl-3.0
-3,407,019,175,297,618,400
33.306452
73
0.716502
false
super3/PyDev
Old Workspace/EndlessScroll.py
1
1826
# EndlessScroll.py # Objective: Make an endless scrollable world. # Author: Super3boy (super3.org) # Imports import pygame # Start PyGame pygame.init() # Define Colors black = [0, 0 ,0] white = [255, 255, 255] blue = [ 0, 0 , 255] green = [ 0, 255, 0] red = [255, 0, 0] class Block(pygame.sprite.Sprite): def __init__(self, locX, locY, img): # Call the parent class (Sprite) constructor pygame.sprite.Sprite.__init__(self) # Create an image self.image = pygame.image.load(img).convert() self.image.set_colorkey(white) # Set bounds self.rect = self.image.get_rect() # Set draw location self.rect.x = locX self.rect.y = locY # Set and Display Screen sizeX = 800 sizeY = 400 scrollX = 0 scrollSpeed = 5 size = [sizeX, sizeY] screen = pygame.display.set_mode(size) # Set Background and Get Size background_image = pygame.image.load("scrollback4.png").convert() background_size = background_image.get_size() # Set Screen's Title pygame.display.set_caption("Enless Scroll Test") # This is a list of sprites. # The list is managed by a class called 'RenderPlain.' sprites = pygame.sprite.RenderPlain() # Sentinel for Game Loop done = False # Game Timer clock = pygame.time.Clock() # Main Game Loop while done == False: # Limit FPS of Game Loop clock.tick(30) # Check for Events for event in pygame.event.get(): if event.type == pygame.QUIT: done = True # Clear the Screen screen.fill(white) # Set Movement key=pygame.key.get_pressed() #checking pressed keys if key[pygame.K_LEFT]: scrollX += scrollSpeed elif key[pygame.K_RIGHT]: scrollX -= scrollSpeed # Show Background screen.blit( background_image , [scrollX ,0]) # Update and Draw all the sprites sprites.update() sprites.draw(screen) # Update Display pygame.display.flip() # Exit Program pygame.quit()
mit
7,367,442,955,149,169,000
19.761364
65
0.697152
false
Tithen-Firion/youtube-dl
youtube_dl/extractor/fivemin.py
79
1917
from __future__ import unicode_literals from .common import InfoExtractor class FiveMinIE(InfoExtractor): IE_NAME = '5min' _VALID_URL = r'(?:5min:|https?://(?:[^/]*?5min\.com/|delivery\.vidible\.tv/aol)(?:(?:Scripts/PlayerSeed\.js|playerseed/?)?\?.*?playList=)?)(?P<id>\d+)' _TESTS = [ { # From http://www.engadget.com/2013/11/15/ipad-mini-retina-display-review/ 'url': 'http://pshared.5min.com/Scripts/PlayerSeed.js?sid=281&width=560&height=345&playList=518013791', 'md5': '4f7b0b79bf1a470e5004f7112385941d', 'info_dict': { 'id': '518013791', 'ext': 'mp4', 'title': 'iPad Mini with Retina Display Review', 'description': 'iPad mini with Retina Display review', 'duration': 177, 'uploader': 'engadget', 'upload_date': '20131115', 'timestamp': 1384515288, }, 'params': { # m3u8 download 'skip_download': True, } }, { # From http://on.aol.com/video/how-to-make-a-next-level-fruit-salad-518086247 'url': '5min:518086247', 'md5': 'e539a9dd682c288ef5a498898009f69e', 'info_dict': { 'id': '518086247', 'ext': 'mp4', 'title': 'How to Make a Next-Level Fruit Salad', 'duration': 184, }, 'skip': 'no longer available', }, { 'url': 'http://embed.5min.com/518726732/', 'only_matching': True, }, { 'url': 'http://delivery.vidible.tv/aol?playList=518013791', 'only_matching': True, } ] def _real_extract(self, url): video_id = self._match_id(url) return self.url_result('aol-video:%s' % video_id)
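The extractor above is normally exercised through `YoutubeDL` rather than called directly. The sketch below is illustrative: the `5min:` ID comes from the `_TESTS` entries above (one of which is marked no longer available, so the network call may fail), and the option dict is an assumption.

```python
# Illustrative: resolve a 5min video ID through youtube-dl's extractor machinery.
import youtube_dl

ydl_opts = {"skip_download": True, "quiet": True}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
    # '5min:518086247' matches FiveMinIE._VALID_URL; _real_extract above
    # delegates it to an 'aol-video:<id>' result.
    info = ydl.extract_info("5min:518086247", download=False)
    print(info.get("id"), info.get("title"))
```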
unlicense
-753,836,174,347,529,500
34.5
155
0.491914
false
CloudI/cloudi_api_python
cloudi.py
2
34829
#!/usr/bin/env python #-*-Mode:python;coding:utf-8;tab-width:4;c-basic-offset:4;indent-tabs-mode:()-*- # ex: set ft=python fenc=utf-8 sts=4 ts=4 sw=4 et nomod: # # MIT License # # Copyright (c) 2011-2021 Michael Truog <mjtruog at protonmail dot com> # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. # """ Python CloudI API <https://cloudi.org/api.html#1_Intro>. Example usage is available in the integration tests <https://cloudi.org/tutorials.html#cloudi_examples>. """ import sys import os import struct import socket import select import collections import traceback import inspect from functools import partial from timeit import default_timer from erlang import (binary_to_term, term_to_binary, OtpErlangAtom, OtpErlangBinary) if sys.version_info[0] >= 3: TypeUnicode = str def _function_argc(function): args, _, _, _, _, _, _ = inspect.getfullargspec(function) return len(args) else: TypeUnicode = unicode def _function_argc(function): # pylint: disable=deprecated-method args, _, _, _ = inspect.getargspec(function) return len(args) __all__ = [ 'API', 'InvalidInputException', 'MessageDecodingException', 'TerminateException', ] _MESSAGE_INIT = 1 _MESSAGE_SEND_ASYNC = 2 _MESSAGE_SEND_SYNC = 3 _MESSAGE_RECV_ASYNC = 4 _MESSAGE_RETURN_ASYNC = 5 _MESSAGE_RETURN_SYNC = 6 _MESSAGE_RETURNS_ASYNC = 7 _MESSAGE_KEEPALIVE = 8 _MESSAGE_REINIT = 9 _MESSAGE_SUBSCRIBE_COUNT = 10 _MESSAGE_TERM = 11 # pylint: disable=too-many-instance-attributes # pylint: disable=too-many-public-methods # pylint: disable=useless-object-inheritance class API(object): """ CloudI API object for use in a single thread of execution """ ASYNC = 1 SYNC = -1 def __init__(self, thread_index): protocol_str = os.getenv('CLOUDI_API_INIT_PROTOCOL') if protocol_str is None: sys.stderr.write('CloudI service execution must occur in CloudI\n') raise InvalidInputException() buffer_size_str = os.getenv('CLOUDI_API_INIT_BUFFER_SIZE') if buffer_size_str is None: raise InvalidInputException() if protocol_str == 'tcp': self.__s = socket.fromfd( thread_index + 3, socket.AF_INET, socket.SOCK_STREAM ) self.__use_header = True elif protocol_str == 'udp': self.__s = socket.fromfd( thread_index + 3, socket.AF_INET, socket.SOCK_DGRAM ) self.__use_header = False elif protocol_str == 'local': self.__s = socket.fromfd( thread_index + 3, socket.AF_UNIX, socket.SOCK_STREAM ) self.__use_header = True else: raise InvalidInputException() self.__initialization_complete = False self.__terminate = False self.__size = int(buffer_size_str) self.__callbacks = {} self.__timeout_terminate = 
10 # TIMEOUT_TERMINATE_MIN self.__send(term_to_binary(OtpErlangAtom(b'init'))) (self.__process_index, self.__process_count, self.__process_count_max, self.__process_count_min, self.__prefix, self.__timeout_initialize, self.__timeout_async, self.__timeout_sync, self.__timeout_terminate, self.__priority_default) = self.__poll_request(None, False) @staticmethod def thread_count(): """ returns the thread count from the service configuration """ thread_count = os.getenv('CLOUDI_API_INIT_THREAD_COUNT') if thread_count is None: raise InvalidInputException() return int(thread_count) def subscribe(self, pattern, function): """ subscribes to a service name pattern with a callback """ if _function_argc(function) != 10: # self + arguments for a member function # api + arguments for a static function raise InvalidInputException() if not inspect.ismethod(function): function = partial(function, self) key = self.__prefix + pattern value = self.__callbacks.get(key, None) if value is None: self.__callbacks[key] = collections.deque([function]) else: value.append(function) self.__send(term_to_binary((OtpErlangAtom(b'subscribe'), pattern))) def subscribe_count(self, pattern): """ returns the number of subscriptions for a single service name pattern """ self.__send(term_to_binary((OtpErlangAtom(b'subscribe_count'), pattern))) return self.__poll_request(None, False) def unsubscribe(self, pattern): """ unsubscribes from a service name pattern once """ key = self.__prefix + pattern value = self.__callbacks.get(key, None) assert value is not None value.popleft() if value == collections.deque([]): del self.__callbacks[key] self.__send(term_to_binary((OtpErlangAtom(b'unsubscribe'), pattern))) def send_async(self, name, request, timeout=None, request_info=None, priority=None): """ sends an asynchronous service request """ # pylint: disable=too-many-arguments if timeout is None: timeout = self.__timeout_async if request_info is None: request_info = b'' if priority is None: priority = self.__priority_default self.__send(term_to_binary((OtpErlangAtom(b'send_async'), name, OtpErlangBinary(request_info), OtpErlangBinary(request), timeout, priority))) return self.__poll_request(None, False) def send_sync(self, name, request, timeout=None, request_info=None, priority=None): """ sends a synchronous service request """ # pylint: disable=too-many-arguments if timeout is None: timeout = self.__timeout_sync if request_info is None: request_info = b'' if priority is None: priority = self.__priority_default self.__send(term_to_binary((OtpErlangAtom(b'send_sync'), name, OtpErlangBinary(request_info), OtpErlangBinary(request), timeout, priority))) return self.__poll_request(None, False) def mcast_async(self, name, request, timeout=None, request_info=None, priority=None): """ sends asynchronous service requests to all subscribers of the matching service name pattern """ # pylint: disable=too-many-arguments if timeout is None: timeout = self.__timeout_async if request_info is None: request_info = b'' if priority is None: priority = self.__priority_default self.__send(term_to_binary((OtpErlangAtom(b'mcast_async'), name, OtpErlangBinary(request_info), OtpErlangBinary(request), timeout, priority))) return self.__poll_request(None, False) def forward_(self, request_type, name, request_info, request, timeout, priority, trans_id, pid): """ forwards a service request to a different service name """ # pylint: disable=too-many-arguments if request_type == API.ASYNC: self.forward_async(name, request_info, request, timeout, priority, trans_id, 
pid) elif request_type == API.SYNC: self.forward_sync(name, request_info, request, timeout, priority, trans_id, pid) else: raise InvalidInputException() def forward_async(self, name, request_info, request, timeout, priority, trans_id, pid): """ forwards an asynchronous service request to a different service name """ # pylint: disable=too-many-arguments self.__send(term_to_binary((OtpErlangAtom(b'forward_async'), name, OtpErlangBinary(request_info), OtpErlangBinary(request), timeout, priority, OtpErlangBinary(trans_id), pid))) raise ForwardAsyncException() def forward_sync(self, name, request_info, request, timeout, priority, trans_id, pid): """ forwards a synchronous service request to a different service name """ # pylint: disable=too-many-arguments self.__send(term_to_binary((OtpErlangAtom(b'forward_sync'), name, OtpErlangBinary(request_info), OtpErlangBinary(request), timeout, priority, OtpErlangBinary(trans_id), pid))) raise ForwardSyncException() def return_(self, request_type, name, pattern, response_info, response, timeout, trans_id, pid): """ provides a response to a service request """ # pylint: disable=too-many-arguments if request_type == API.ASYNC: self.return_async(name, pattern, response_info, response, timeout, trans_id, pid) elif request_type == API.SYNC: self.return_sync(name, pattern, response_info, response, timeout, trans_id, pid) else: raise InvalidInputException() def return_async(self, name, pattern, response_info, response, timeout, trans_id, pid): """ provides a response to an asynchronous service request """ # pylint: disable=too-many-arguments self.__send(term_to_binary((OtpErlangAtom(b'return_async'), name, pattern, OtpErlangBinary(response_info), OtpErlangBinary(response), timeout, OtpErlangBinary(trans_id), pid))) raise ReturnAsyncException() def return_sync(self, name, pattern, response_info, response, timeout, trans_id, pid): """ provides a response to a synchronous service request """ # pylint: disable=too-many-arguments self.__send(term_to_binary((OtpErlangAtom(b'return_sync'), name, pattern, OtpErlangBinary(response_info), OtpErlangBinary(response), timeout, OtpErlangBinary(trans_id), pid))) raise ReturnSyncException() def recv_async(self, timeout=None, trans_id=None, consume=True): """ blocks to receive an asynchronous service request response """ if timeout is None: timeout = self.__timeout_sync if trans_id is None: trans_id = b'\0' * 16 self.__send(term_to_binary((OtpErlangAtom(b'recv_async'), timeout, OtpErlangBinary(trans_id), consume))) return self.__poll_request(None, False) def process_index(self): """ returns the 0-based index of this process in the service instance """ return self.__process_index def process_count(self): """ returns the current process count based on the service configuration """ return self.__process_count def process_count_max(self): """ returns the count_process_dynamic maximum count """ return self.__process_count_max def process_count_min(self): """ returns the count_process_dynamic minimum count """ return self.__process_count_min def prefix(self): """ returns the service name pattern prefix from the service configuration """ return self.__prefix def timeout_initialize(self): """ returns the service initialization timeout """ return self.__timeout_initialize def timeout_async(self): """ returns the default asynchronous service request send timeout """ return self.__timeout_async def timeout_sync(self): """ returns the default synchronous service request send timeout """ return self.__timeout_sync def 
timeout_terminate(self): """ returns the service termination timeout """ return self.__timeout_terminate def priority_default(self): """ returns the default service request send priority """ return self.__priority_default def __null_response(self, request_type, name, pattern, request_info, request, timeout, priority, trans_id, pid): # pylint: disable=no-self-use # pylint: disable=too-many-arguments # pylint: disable=unused-argument return b'' def __callback(self, command, name, pattern, request_info, request, timeout, priority, trans_id, pid): # pylint: disable=too-many-arguments # pylint: disable=bare-except # pylint: disable=too-many-return-statements # pylint: disable=too-many-branches # pylint: disable=too-many-statements # pylint: disable=too-many-locals # pylint: disable=broad-except function_queue = self.__callbacks.get(pattern, None) if function_queue is None: function = self.__null_response else: function = function_queue.popleft() function_queue.append(function) return_null_response = False if command == _MESSAGE_SEND_ASYNC: try: response = function(API.ASYNC, name, pattern, request_info, request, timeout, priority, trans_id, pid) if isinstance(response, tuple): response_info, response = response if not isinstance(response_info, (bytes, TypeUnicode)): response_info = b'' else: response_info = b'' if not isinstance(response, (bytes, TypeUnicode)): response = b'' except MessageDecodingException: self.__terminate = True return_null_response = True except TerminateException: return_null_response = True except ReturnAsyncException: return except ReturnSyncException: self.__terminate = True traceback.print_exc(file=sys.stderr) return except ForwardAsyncException: return except ForwardSyncException: self.__terminate = True traceback.print_exc(file=sys.stderr) return except AssertionError: traceback.print_exc(file=sys.stderr) sys.exit(1) except SystemExit: traceback.print_exc(file=sys.stderr) raise except Exception: return_null_response = True traceback.print_exc(file=sys.stderr) except: traceback.print_exc(file=sys.stderr) sys.exit(1) if return_null_response: response_info = b'' response = b'' try: self.return_async(name, pattern, response_info, response, timeout, trans_id, pid) except ReturnAsyncException: pass return if command == _MESSAGE_SEND_SYNC: try: response = function(API.SYNC, name, pattern, request_info, request, timeout, priority, trans_id, pid) if isinstance(response, tuple): response_info, response = response if not isinstance(response_info, (bytes, TypeUnicode)): response_info = b'' else: response_info = b'' if not isinstance(response, (bytes, TypeUnicode)): response = b'' except MessageDecodingException: self.__terminate = True return_null_response = True except TerminateException: return_null_response = True except ReturnSyncException: return except ReturnAsyncException: self.__terminate = True traceback.print_exc(file=sys.stderr) return except ForwardSyncException: return except ForwardAsyncException: self.__terminate = True traceback.print_exc(file=sys.stderr) return except AssertionError: traceback.print_exc(file=sys.stderr) sys.exit(1) except SystemExit: traceback.print_exc(file=sys.stderr) raise except Exception: return_null_response = True traceback.print_exc(file=sys.stderr) except: traceback.print_exc(file=sys.stderr) sys.exit(1) if return_null_response: response_info = b'' response = b'' try: self.return_sync(name, pattern, response_info, response, timeout, trans_id, pid) except ReturnSyncException: pass return raise MessageDecodingException() def 
__handle_events(self, external, data, data_size, j, command=None): # pylint: disable=too-many-arguments if command is None: if j > data_size: raise MessageDecodingException() i, j = j, j + 4 command = struct.unpack(b'=I', data[i:j])[0] while True: if command == _MESSAGE_TERM: self.__terminate = True if external: return False raise TerminateException(self.__timeout_terminate) if command == _MESSAGE_REINIT: i, j = j, j + 4 + 4 + 4 + 1 (self.__process_count, self.__timeout_async, self.__timeout_sync, self.__priority_default) = struct.unpack( b'=IIIb', data[i:j] ) elif command == _MESSAGE_KEEPALIVE: self.__send(term_to_binary(OtpErlangAtom(b'keepalive'))) else: raise MessageDecodingException() if j > data_size: raise MessageDecodingException() if j == data_size: return True i, j = j, j + 4 command = struct.unpack(b'=I', data[i:j])[0] def __poll_request(self, timeout, external): # pylint: disable=too-many-locals # pylint: disable=too-many-return-statements # pylint: disable=too-many-branches # pylint: disable=too-many-statements if self.__terminate: if external: return False raise TerminateException(self.__timeout_terminate) if external and not self.__initialization_complete: self.__send(term_to_binary(OtpErlangAtom(b'polling'))) self.__initialization_complete = True poll_timer = None if timeout is None or timeout < 0: timeout_value = None elif timeout == 0: timeout_value = 0.0 elif timeout > 0: poll_timer = default_timer() timeout_value = timeout * 0.001 fd_in, _, fd_except = select.select([self.__s], [], [self.__s], timeout_value) if fd_except != []: return False if fd_in == []: return True data = b'' data = self.__recv(data) data_size = len(data) if data_size == 0: return False # socket was closed i, j = 0, 4 while True: command = struct.unpack(b'=I', data[i:j])[0] if command == _MESSAGE_INIT: i, j = j, j + 4 + 4 + 4 + 4 + 4 (process_index, process_count, process_count_max, process_count_min, prefix_size) = struct.unpack(b'=IIIII', data[i:j]) i, j = j, j + prefix_size + 4 + 4 + 4 + 4 + 1 (prefix, _, timeout_initialize, timeout_async, timeout_sync, timeout_terminate, priority_default) = struct.unpack( '=%dscIIIIb' % (prefix_size - 1), data[i:j] ) if j != data_size: assert external is False self.__handle_events(external, data, data_size, j) return (process_index, process_count, process_count_max, process_count_min, prefix.decode('utf-8'), timeout_initialize, timeout_sync, timeout_async, timeout_terminate, priority_default) if command in (_MESSAGE_SEND_ASYNC, _MESSAGE_SEND_SYNC): i, j = j, j + 4 name_size = struct.unpack(b'=I', data[i:j])[0] i, j = j, j + name_size + 4 (name, _, pattern_size) = struct.unpack('=%dscI' % (name_size - 1), data[i:j]) i, j = j, j + pattern_size + 4 (pattern, _, request_info_size) = struct.unpack( '=%dscI' % (pattern_size - 1), data[i:j] ) i, j = j, j + request_info_size + 1 + 4 (request_info, _, request_size) = struct.unpack( '=%dscI' % request_info_size, data[i:j] ) i, j = j, j + request_size + 1 + 4 + 1 + 16 + 4 (request, _, request_timeout, priority, trans_id, pid_size) = struct.unpack( '=%dscIb16sI' % request_size, data[i:j] ) i, j = j, j + pid_size pid = data[i:j] if j != data_size: assert external is True if not self.__handle_events(external, data, data_size, j): return False data = b'' self.__callback(command, name.decode('utf-8'), pattern.decode('utf-8'), request_info, request, request_timeout, priority, trans_id, binary_to_term(pid)) if self.__terminate: return False elif command in (_MESSAGE_RECV_ASYNC, _MESSAGE_RETURN_SYNC): i, j = j, j + 4 
response_info_size = struct.unpack(b'=I', data[i:j])[0] i, j = j, j + response_info_size + 1 + 4 (response_info, _, response_size) = struct.unpack( '=%dscI' % response_info_size, data[i:j] ) i, j = j, j + response_size + 1 + 16 (response, _, trans_id) = struct.unpack( '=%dsc16s' % response_size, data[i:j] ) if j != data_size: assert external is False self.__handle_events(external, data, data_size, j) return (response_info, response, trans_id) elif command == _MESSAGE_RETURN_ASYNC: i, j = j, j + 16 trans_id = data[i:j] if j != data_size: assert external is False self.__handle_events(external, data, data_size, j) return trans_id elif command == _MESSAGE_RETURNS_ASYNC: i, j = j, j + 4 trans_id_count = struct.unpack(b'=I', data[i:j])[0] i, j = j, j + 16 * trans_id_count trans_ids = struct.unpack( b'=' + b'16s' * trans_id_count, data[i:j] ) if j != data_size: assert external is False self.__handle_events(external, data, data_size, j) return trans_ids elif command == _MESSAGE_SUBSCRIBE_COUNT: i, j = j, j + 4 count = struct.unpack(b'=I', data[i:j])[0] if j != data_size: assert external is False self.__handle_events(external, data, data_size, j) return count elif command == _MESSAGE_TERM: if not self.__handle_events(external, data, data_size, j, command=command): return False assert False elif command == _MESSAGE_REINIT: i, j = j, j + 4 + 4 + 4 + 1 (self.__process_count, self.__timeout_async, self.__timeout_sync, self.__priority_default) = struct.unpack( b'=IIIb', data[i:j] ) if j == data_size: data = b'' elif j < data_size: i, j = j, j + 4 continue else: raise MessageDecodingException() elif command == _MESSAGE_KEEPALIVE: self.__send(term_to_binary(OtpErlangAtom(b'keepalive'))) if j == data_size: data = b'' elif j < data_size: i, j = j, j + 4 continue else: raise MessageDecodingException() else: raise MessageDecodingException() if poll_timer is not None: poll_timer_new = default_timer() elapsed = max(0, int((poll_timer_new - poll_timer) * 1000.0)) poll_timer = poll_timer_new if elapsed >= timeout: timeout = 0 else: timeout -= elapsed if timeout_value is not None: if timeout == 0: return True if timeout > 0: timeout_value = timeout * 0.001 fd_in, _, fd_except = select.select([self.__s], [], [self.__s], timeout_value) if fd_except != []: return False if fd_in == []: return True data = self.__recv(data) data_size = len(data) if data_size == 0: return False # socket was closed i, j = 0, 4 def poll(self, timeout=-1): """ blocks to process incoming CloudI service requests """ return self.__poll_request(timeout, True) def shutdown(self, reason=None): """ shutdown the service successfully """ if reason is None: reason = b'' self.__send(term_to_binary((OtpErlangAtom(b'shutdown'), reason))) @staticmethod def __text_pairs_parse(text): pairs = {} data = text.split(b'\0') for i in range(0, len(data) - 1, 2): key = data[i] current = pairs.get(key, None) if current is None: pairs[key] = data[i + 1] elif isinstance(current, list): current.append(data[i + 1]) else: pairs[key] = [current, data[i + 1]] return pairs @staticmethod def __text_pairs_new(pairs, response): text_segments = [] for key, values in pairs.items(): if isinstance(values, bytes): text_segments.append(key) text_segments.append(values) else: assert not isinstance(values, str) for value in values: text_segments.append(key) text_segments.append(value) if response and text_segments == []: return b'\0' text_segments.append(b'') return b'\0'.join(text_segments) @staticmethod def info_key_value_parse(info): """ decode service request info key/value data 
""" return API.__text_pairs_parse(info) @staticmethod def info_key_value_new(pairs, response=True): """ encode service response info key/value data """ return API.__text_pairs_new(pairs, response) def __send(self, data): if self.__use_header: data = struct.pack(b'>I', len(data)) + data self.__s.sendall(data) def __recv(self, data_old): data = b'' if self.__use_header: i = 0 while i < 4: fragment = self.__s.recv(4 - i) data += fragment i += len(fragment) total = struct.unpack(b'>I', data)[0] data = data_old i = 0 while i < total: fragment = self.__s.recv(min(total - i, self.__size)) data += fragment i += len(fragment) else: data = data_old ready = True while ready is True: fragment = self.__s.recv(self.__size) data += fragment ready = (len(fragment) == self.__size) if ready: fd_in, _, _ = select.select([self.__s], [], [], 0) ready = (fd_in != []) return data class InvalidInputException(Exception): """ Invalid Input """ def __init__(self): Exception.__init__(self, 'Invalid Input') class ReturnSyncException(Exception): """ Synchronous Call Return Invalid """ def __init__(self): Exception.__init__(self, 'Synchronous Call Return Invalid') class ReturnAsyncException(Exception): """ Asynchronous Call Return Invalid """ def __init__(self): Exception.__init__(self, 'Asynchronous Call Return Invalid') class ForwardSyncException(Exception): """ Synchronous Call Forward Invalid """ def __init__(self): Exception.__init__(self, 'Synchronous Call Forward Invalid') class ForwardAsyncException(Exception): """ Asynchronous Call Forward Invalid """ def __init__(self): Exception.__init__(self, 'Asynchronous Call Forward Invalid') class MessageDecodingException(Exception): """ Message Decoding Error """ def __init__(self): Exception.__init__(self, 'Message Decoding Error') class TerminateException(Exception): """ Terminate """ def __init__(self, timeout): Exception.__init__(self, 'Terminate') self.__timeout = timeout def timeout(self): """ return the termination timeout """ return self.__timeout class FatalError(BaseException): """ Fatal Error """ def __init__(self, message): BaseException.__init__(self, message) # force unbuffered stdout/stderr handling without external configuration if sys.stderr.__class__.__name__ != '_unbuffered': class _unbuffered(object): # pylint: disable=too-few-public-methods def __init__(self, stream): # pylint: disable=import-outside-toplevel if sys.version_info[0] >= 3: import io self.__stream = io.TextIOWrapper( stream.buffer, encoding='UTF-8', errors=stream.errors, newline=stream.newlines, line_buffering=stream.line_buffering, write_through=False, ) else: import codecs self.encoding = 'UTF-8' self.__stream = codecs.getwriter(self.encoding)(stream) def write(self, data): """ unbuffered write function """ self.__stream.write(data) self.__stream.flush() def __getattr__(self, attr): return getattr(self.__stream, attr) sys.stdout = _unbuffered(sys.stdout) sys.stderr = _unbuffered(sys.stderr)
mit
-7,751,938,131,055,474,000
36.250267
80
0.509834
false
bopo/tablib
tablib/packages/xlwt/antlr.py
57
84201
## This file is part of PyANTLR. See LICENSE.txt for license ## details..........Copyright (C) Wolfgang Haefelinger, 2004. ## This file was copied for use with xlwt from the 2.7.7 ANTLR distribution. Yes, it ## says 2.7.5 below. The 2.7.5 distribution version didn't have a ## version in it. ## Here is the contents of the ANTLR 2.7.7 LICENSE.txt referred to above. # SOFTWARE RIGHTS # # ANTLR 1989-2006 Developed by Terence Parr # Partially supported by University of San Francisco & jGuru.com # # We reserve no legal rights to the ANTLR--it is fully in the # public domain. An individual or company may do whatever # they wish with source code distributed with ANTLR or the # code generated by ANTLR, including the incorporation of # ANTLR, or its output, into commerical software. # # We encourage users to develop software with ANTLR. However, # we do ask that credit is given to us for developing # ANTLR. By "credit", we mean that if you use ANTLR or # incorporate any source code into one of your programs # (commercial product, research project, or otherwise) that # you acknowledge this fact somewhere in the documentation, # research report, etc... If you like ANTLR and have # developed a nice tool with the output, please mention that # you developed it using ANTLR. In addition, we ask that the # headers remain intact in our source code. As long as these # guidelines are kept, we expect to continue enhancing this # system and expect to make other tools available as they are # completed. # # The primary ANTLR guy: # # Terence Parr # [email protected] # [email protected] ## End of contents of the ANTLR 2.7.7 LICENSE.txt ######################## ## get sys module import sys version = sys.version.split()[0] if version < '2.2.1': False = 0 if version < '2.3': True = not False ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### global symbols ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### ANTLR Standard Tokens SKIP = -1 INVALID_TYPE = 0 EOF_TYPE = 1 EOF = 1 NULL_TREE_LOOKAHEAD = 3 MIN_USER_TYPE = 4 ### ANTLR's EOF Symbol EOF_CHAR = '' ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### general functions ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ## Version should be automatically derived from configure.in. For now, ## we need to bump it ourselfs. Don't remove the <version> tags. 
## <version> def version(): r = { 'major' : '2', 'minor' : '7', 'micro' : '5', 'patch' : '' , 'version': '2.7.5' } return r ## </version> def error(fmt,*args): if fmt: print "error: ", fmt % tuple(args) def ifelse(cond,_then,_else): if cond : r = _then else: r = _else return r def is_string_type(x): # return (isinstance(x,str) or isinstance(x,unicode)) # Simplify; xlwt doesn't support Python < 2.3 return isinstance(basestring) def assert_string_type(x): assert is_string_type(x) pass ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### ANTLR Exceptions ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### class ANTLRException(Exception): def __init__(self, *args): Exception.__init__(self, *args) class RecognitionException(ANTLRException): def __init__(self, *args): ANTLRException.__init__(self, *args) self.fileName = None self.line = -1 self.column = -1 if len(args) >= 2: self.fileName = args[1] if len(args) >= 3: self.line = args[2] if len(args) >= 4: self.column = args[3] def __str__(self): buf = [''] if self.fileName: buf.append(self.fileName + ":") if self.line != -1: if not self.fileName: buf.append("line ") buf.append(str(self.line)) if self.column != -1: buf.append(":" + str(self.column)) buf.append(":") buf.append(" ") return str('').join(buf) __repr__ = __str__ class NoViableAltException(RecognitionException): def __init__(self, *args): RecognitionException.__init__(self, *args) self.token = None self.node = None if isinstance(args[0],AST): self.node = args[0] elif isinstance(args[0],Token): self.token = args[0] else: raise TypeError("NoViableAltException requires Token or AST argument") def __str__(self): if self.token: line = self.token.getLine() col = self.token.getColumn() text = self.token.getText() return "unexpected symbol at line %s (column %s): \"%s\"" % (line,col,text) if self.node == ASTNULL: return "unexpected end of subtree" assert self.node ### hackish, we assume that an AST contains method getText return "unexpected node: %s" % (self.node.getText()) __repr__ = __str__ class NoViableAltForCharException(RecognitionException): def __init__(self, *args): self.foundChar = None if len(args) == 2: self.foundChar = args[0] scanner = args[1] RecognitionException.__init__(self, "NoViableAlt", scanner.getFilename(), scanner.getLine(), scanner.getColumn()) elif len(args) == 4: self.foundChar = args[0] fileName = args[1] line = args[2] column = args[3] RecognitionException.__init__(self, "NoViableAlt", fileName, line, column) else: RecognitionException.__init__(self, "NoViableAlt", '', -1, -1) def __str__(self): mesg = "unexpected char: " if self.foundChar >= ' ' and self.foundChar <= '~': mesg += "'" + self.foundChar + "'" elif self.foundChar: mesg += "0x" + hex(ord(self.foundChar)).upper()[2:] else: mesg += "<None>" return mesg __repr__ = __str__ class SemanticException(RecognitionException): def __init__(self, *args): RecognitionException.__init__(self, *args) class MismatchedCharException(RecognitionException): NONE = 0 CHAR = 1 NOT_CHAR = 2 RANGE = 3 NOT_RANGE = 4 SET = 5 NOT_SET = 6 def __init__(self, *args): self.args = args if len(args) == 5: # Expected range / not range if args[3]: self.mismatchType = MismatchedCharException.NOT_RANGE else: self.mismatchType = MismatchedCharException.RANGE self.foundChar = args[0] self.expecting = args[1] self.upper = args[2] self.scanner = args[4] RecognitionException.__init__(self, "Mismatched char range", self.scanner.getFilename(), self.scanner.getLine(), self.scanner.getColumn()) elif 
len(args) == 4 and is_string_type(args[1]): # Expected char / not char if args[2]: self.mismatchType = MismatchedCharException.NOT_CHAR else: self.mismatchType = MismatchedCharException.CHAR self.foundChar = args[0] self.expecting = args[1] self.scanner = args[3] RecognitionException.__init__(self, "Mismatched char", self.scanner.getFilename(), self.scanner.getLine(), self.scanner.getColumn()) elif len(args) == 4 and isinstance(args[1], BitSet): # Expected BitSet / not BitSet if args[2]: self.mismatchType = MismatchedCharException.NOT_SET else: self.mismatchType = MismatchedCharException.SET self.foundChar = args[0] self.set = args[1] self.scanner = args[3] RecognitionException.__init__(self, "Mismatched char set", self.scanner.getFilename(), self.scanner.getLine(), self.scanner.getColumn()) else: self.mismatchType = MismatchedCharException.NONE RecognitionException.__init__(self, "Mismatched char") ## Append a char to the msg buffer. If special, # then show escaped version # def appendCharName(self, sb, c): if not c or c == 65535: # 65535 = (char) -1 = EOF sb.append("'<EOF>'") elif c == '\n': sb.append("'\\n'") elif c == '\r': sb.append("'\\r'"); elif c == '\t': sb.append("'\\t'") else: sb.append('\'' + c + '\'') ## # Returns an error message with line number/column information # def __str__(self): sb = [''] sb.append(RecognitionException.__str__(self)) if self.mismatchType == MismatchedCharException.CHAR: sb.append("expecting ") self.appendCharName(sb, self.expecting) sb.append(", found ") self.appendCharName(sb, self.foundChar) elif self.mismatchType == MismatchedCharException.NOT_CHAR: sb.append("expecting anything but '") self.appendCharName(sb, self.expecting) sb.append("'; got it anyway") elif self.mismatchType in [MismatchedCharException.RANGE, MismatchedCharException.NOT_RANGE]: sb.append("expecting char ") if self.mismatchType == MismatchedCharException.NOT_RANGE: sb.append("NOT ") sb.append("in range: ") appendCharName(sb, self.expecting) sb.append("..") appendCharName(sb, self.upper) sb.append(", found ") appendCharName(sb, self.foundChar) elif self.mismatchType in [MismatchedCharException.SET, MismatchedCharException.NOT_SET]: sb.append("expecting ") if self.mismatchType == MismatchedCharException.NOT_SET: sb.append("NOT ") sb.append("one of (") for i in range(len(self.set)): self.appendCharName(sb, self.set[i]) sb.append("), found ") self.appendCharName(sb, self.foundChar) return str().join(sb).strip() __repr__ = __str__ class MismatchedTokenException(RecognitionException): NONE = 0 TOKEN = 1 NOT_TOKEN = 2 RANGE = 3 NOT_RANGE = 4 SET = 5 NOT_SET = 6 def __init__(self, *args): self.args = args self.tokenNames = [] self.token = None self.tokenText = '' self.node = None if len(args) == 6: # Expected range / not range if args[3]: self.mismatchType = MismatchedTokenException.NOT_RANGE else: self.mismatchType = MismatchedTokenException.RANGE self.tokenNames = args[0] self.expecting = args[2] self.upper = args[3] self.fileName = args[5] elif len(args) == 4 and isinstance(args[2], int): # Expected token / not token if args[3]: self.mismatchType = MismatchedTokenException.NOT_TOKEN else: self.mismatchType = MismatchedTokenException.TOKEN self.tokenNames = args[0] self.expecting = args[2] elif len(args) == 4 and isinstance(args[2], BitSet): # Expected BitSet / not BitSet if args[3]: self.mismatchType = MismatchedTokenException.NOT_SET else: self.mismatchType = MismatchedTokenException.SET self.tokenNames = args[0] self.set = args[2] else: self.mismatchType = 
MismatchedTokenException.NONE RecognitionException.__init__(self, "Mismatched Token: expecting any AST node", "<AST>", -1, -1) if len(args) >= 2: if isinstance(args[1],Token): self.token = args[1] self.tokenText = self.token.getText() RecognitionException.__init__(self, "Mismatched Token", self.fileName, self.token.getLine(), self.token.getColumn()) elif isinstance(args[1],AST): self.node = args[1] self.tokenText = str(self.node) RecognitionException.__init__(self, "Mismatched Token", "<AST>", self.node.getLine(), self.node.getColumn()) else: self.tokenText = "<empty tree>" RecognitionException.__init__(self, "Mismatched Token", "<AST>", -1, -1) def appendTokenName(self, sb, tokenType): if tokenType == INVALID_TYPE: sb.append("<Set of tokens>") elif tokenType < 0 or tokenType >= len(self.tokenNames): sb.append("<" + str(tokenType) + ">") else: sb.append(self.tokenNames[tokenType]) ## # Returns an error message with line number/column information # def __str__(self): sb = [''] sb.append(RecognitionException.__str__(self)) if self.mismatchType == MismatchedTokenException.TOKEN: sb.append("expecting ") self.appendTokenName(sb, self.expecting) sb.append(", found " + self.tokenText) elif self.mismatchType == MismatchedTokenException.NOT_TOKEN: sb.append("expecting anything but '") self.appendTokenName(sb, self.expecting) sb.append("'; got it anyway") elif self.mismatchType in [MismatchedTokenException.RANGE, MismatchedTokenException.NOT_RANGE]: sb.append("expecting token ") if self.mismatchType == MismatchedTokenException.NOT_RANGE: sb.append("NOT ") sb.append("in range: ") appendTokenName(sb, self.expecting) sb.append("..") appendTokenName(sb, self.upper) sb.append(", found " + self.tokenText) elif self.mismatchType in [MismatchedTokenException.SET, MismatchedTokenException.NOT_SET]: sb.append("expecting ") if self.mismatchType == MismatchedTokenException.NOT_SET: sb.append("NOT ") sb.append("one of (") for i in range(len(self.set)): self.appendTokenName(sb, self.set[i]) sb.append("), found " + self.tokenText) return str().join(sb).strip() __repr__ = __str__ class TokenStreamException(ANTLRException): def __init__(self, *args): ANTLRException.__init__(self, *args) # Wraps an Exception in a TokenStreamException class TokenStreamIOException(TokenStreamException): def __init__(self, *args): if args and isinstance(args[0], Exception): io = args[0] TokenStreamException.__init__(self, str(io)) self.io = io else: TokenStreamException.__init__(self, *args) self.io = self # Wraps a RecognitionException in a TokenStreamException class TokenStreamRecognitionException(TokenStreamException): def __init__(self, *args): if args and isinstance(args[0], RecognitionException): recog = args[0] TokenStreamException.__init__(self, str(recog)) self.recog = recog else: raise TypeError("TokenStreamRecognitionException requires RecognitionException argument") def __str__(self): return str(self.recog) __repr__ = __str__ class TokenStreamRetryException(TokenStreamException): def __init__(self, *args): TokenStreamException.__init__(self, *args) class CharStreamException(ANTLRException): def __init__(self, *args): ANTLRException.__init__(self, *args) # Wraps an Exception in a CharStreamException class CharStreamIOException(CharStreamException): def __init__(self, *args): if args and isinstance(args[0], Exception): io = args[0] CharStreamException.__init__(self, str(io)) self.io = io else: CharStreamException.__init__(self, *args) self.io = self class TryAgain(Exception): pass 
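###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
### illustrative sketch (not part of the original ANTLR runtime)  ###
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
### The exception classes above read their positional arguments as
### (message, fileName, line, column), and RecognitionException.__str__
### renders the conventional "file:line:column: " prefix that the error
### reporters further down rely on.  The helper below is purely a demo,
### added for clarity; it uses only names already defined at this point.
def _recognition_exception_demo():
    ex = RecognitionException("unexpected input", "grammar.g", 12, 3)
    assert str(ex) == "grammar.g:12:3: "
    # without a file name the line number is spelled out instead
    assert str(RecognitionException("oops", None, 7)) == "line 7: "
    return ex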
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### Token ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### class Token(object): SKIP = -1 INVALID_TYPE = 0 EOF_TYPE = 1 EOF = 1 NULL_TREE_LOOKAHEAD = 3 MIN_USER_TYPE = 4 def __init__(self,**argv): try: self.type = argv['type'] except: self.type = INVALID_TYPE try: self.text = argv['text'] except: self.text = "<no text>" def isEOF(self): return (self.type == EOF_TYPE) def getColumn(self): return 0 def getLine(self): return 0 def getFilename(self): return None def setFilename(self,name): return self def getText(self): return "<no text>" def setText(self,text): if is_string_type(text): pass else: raise TypeError("Token.setText requires string argument") return self def setColumn(self,column): return self def setLine(self,line): return self def getType(self): return self.type def setType(self,type): if isinstance(type,int): self.type = type else: raise TypeError("Token.setType requires integer argument") return self def toString(self): ## not optimal type_ = self.type if type_ == 3: tval = 'NULL_TREE_LOOKAHEAD' elif type_ == 1: tval = 'EOF_TYPE' elif type_ == 0: tval = 'INVALID_TYPE' elif type_ == -1: tval = 'SKIP' else: tval = type_ return '["%s",<%s>]' % (self.getText(),tval) __str__ = toString __repr__ = toString ### static attribute .. Token.badToken = Token( type=INVALID_TYPE, text="<no text>") if __name__ == "__main__": print "testing .." T = Token.badToken print T ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### CommonToken ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### class CommonToken(Token): def __init__(self,**argv): Token.__init__(self,**argv) self.line = 0 self.col = 0 try: self.line = argv['line'] except: pass try: self.col = argv['col'] except: pass def getLine(self): return self.line def getText(self): return self.text def getColumn(self): return self.col def setLine(self,line): self.line = line return self def setText(self,text): self.text = text return self def setColumn(self,col): self.col = col return self def toString(self): ## not optimal type_ = self.type if type_ == 3: tval = 'NULL_TREE_LOOKAHEAD' elif type_ == 1: tval = 'EOF_TYPE' elif type_ == 0: tval = 'INVALID_TYPE' elif type_ == -1: tval = 'SKIP' else: tval = type_ d = { 'text' : self.text, 'type' : tval, 'line' : self.line, 'colm' : self.col } fmt = '["%(text)s",<%(type)s>,line=%(line)s,col=%(colm)s]' return fmt % d __str__ = toString __repr__ = toString if __name__ == '__main__' : T = CommonToken() print T T = CommonToken(col=15,line=1,text="some text", type=5) print T T = CommonToken() T.setLine(1).setColumn(15).setText("some text").setType(5) print T print T.getLine() print T.getColumn() print T.getText() print T.getType() ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### CommonHiddenStreamToken ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### class CommonHiddenStreamToken(CommonToken): def __init__(self,*args): CommonToken.__init__(self,*args) self.hiddenBefore = None self.hiddenAfter = None def getHiddenAfter(self): return self.hiddenAfter def getHiddenBefore(self): return self.hiddenBefore def setHiddenAfter(self,t): self.hiddenAfter = t def setHiddenBefore(self, t): self.hiddenBefore = t ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### Queue ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ## Shall be a circular buffer on tokens .. 
class Queue(object): def __init__(self): self.buffer = [] # empty list def append(self,item): self.buffer.append(item) def elementAt(self,index): return self.buffer[index] def reset(self): self.buffer = [] def removeFirst(self): self.buffer.pop(0) def length(self): return len(self.buffer) def __str__(self): return str(self.buffer) ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### InputBuffer ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### class InputBuffer(object): def __init__(self): self.nMarkers = 0 self.markerOffset = 0 self.numToConsume = 0 self.queue = Queue() def __str__(self): return "(%s,%s,%s,%s)" % ( self.nMarkers, self.markerOffset, self.numToConsume, self.queue) def __repr__(self): return str(self) def commit(self): self.nMarkers -= 1 def consume(self) : self.numToConsume += 1 ## probably better to return a list of items ## because of unicode. Or return a unicode ## string .. def getLAChars(self) : i = self.markerOffset n = self.queue.length() s = '' while i<n: s += self.queue.elementAt(i) return s ## probably better to return a list of items ## because of unicode chars def getMarkedChars(self) : s = '' i = 0 n = self.markerOffset while i<n: s += self.queue.elementAt(i) return s def isMarked(self) : return self.nMarkers != 0 def fill(self,k): ### abstract method raise NotImplementedError() def LA(self,k) : self.fill(k) return self.queue.elementAt(self.markerOffset + k - 1) def mark(self) : self.syncConsume() self.nMarkers += 1 return self.markerOffset def rewind(self,mark) : self.syncConsume() self.markerOffset = mark self.nMarkers -= 1 def reset(self) : self.nMarkers = 0 self.markerOffset = 0 self.numToConsume = 0 self.queue.reset() def syncConsume(self) : while self.numToConsume > 0: if self.nMarkers > 0: # guess mode -- leave leading characters and bump offset. self.markerOffset += 1 else: # normal mode -- remove first character self.queue.removeFirst() self.numToConsume -= 1 ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### CharBuffer ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### class CharBuffer(InputBuffer): def __init__(self,reader): ##assert isinstance(reader,file) super(CharBuffer,self).__init__() ## a reader is supposed to be anything that has ## a method 'read(int)'. self.input = reader def __str__(self): base = super(CharBuffer,self).__str__() return "CharBuffer{%s,%s" % (base,str(input)) def fill(self,amount): try: self.syncConsume() while self.queue.length() < (amount + self.markerOffset) : ## retrieve just one char - what happend at end ## of input? c = self.input.read(1) ### python's behaviour is to return the empty string on ### EOF, ie. no exception whatsoever is thrown. An empty ### python string has the nice feature that it is of ### type 'str' and "not ''" would return true. Contrary, ### one can't do this: '' in 'abc'. This should return ### false, but all we get is then a TypeError as an ### empty string is not a character. ### Let's assure then that we have either seen a ### character or an empty string (EOF). assert len(c) == 0 or len(c) == 1 ### And it shall be of type string (ASCII or UNICODE). assert is_string_type(c) ### Just append EOF char to buffer. Note that buffer may ### contain then just more than one EOF char .. ### use unicode chars instead of ASCII .. self.queue.append(c) except Exception,e: raise CharStreamIOException(e) ##except: # (mk) Cannot happen ... 
##error ("unexpected exception caught ..") ##assert 0 ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### LexerSharedInputState ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### class LexerSharedInputState(object): def __init__(self,ibuf): assert isinstance(ibuf,InputBuffer) self.input = ibuf self.column = 1 self.line = 1 self.tokenStartColumn = 1 self.tokenStartLine = 1 self.guessing = 0 self.filename = None def reset(self): self.column = 1 self.line = 1 self.tokenStartColumn = 1 self.tokenStartLine = 1 self.guessing = 0 self.filename = None self.input.reset() def LA(self,k): return self.input.LA(k) ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### TokenStream ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### class TokenStream(object): def nextToken(self): pass def __iter__(self): return TokenStreamIterator(self) ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### TokenStreamIterator ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### class TokenStreamIterator(object): def __init__(self,inst): if isinstance(inst,TokenStream): self.inst = inst return raise TypeError("TokenStreamIterator requires TokenStream object") def next(self): assert self.inst item = self.inst.nextToken() if not item or item.isEOF(): raise StopIteration() return item ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### TokenStreamSelector ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### class TokenStreamSelector(TokenStream): def __init__(self): self._input = None self._stmap = {} self._stack = [] def addInputStream(self,stream,key): self._stmap[key] = stream def getCurrentStream(self): return self._input def getStream(self,sname): try: stream = self._stmap[sname] except: raise ValueError("TokenStream " + sname + " not found"); return stream; def nextToken(self): while 1: try: return self._input.nextToken() except TokenStreamRetryException,r: ### just retry "forever" pass def pop(self): stream = self._stack.pop(); self.select(stream); return stream; def push(self,arg): self._stack.append(self._input); self.select(arg) def retry(self): raise TokenStreamRetryException() def select(self,arg): if isinstance(arg,TokenStream): self._input = arg return if is_string_type(arg): self._input = self.getStream(arg) return raise TypeError("TokenStreamSelector.select requires " + "TokenStream or string argument") ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### TokenStreamBasicFilter ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### class TokenStreamBasicFilter(TokenStream): def __init__(self,input): self.input = input; self.discardMask = BitSet() def discard(self,arg): if isinstance(arg,int): self.discardMask.add(arg) return if isinstance(arg,BitSet): self.discardMark = arg return raise TypeError("TokenStreamBasicFilter.discard requires" + "integer or BitSet argument") def nextToken(self): tok = self.input.nextToken() while tok and self.discardMask.member(tok.getType()): tok = self.input.nextToken() return tok ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### TokenStreamHiddenTokenFilter ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### class TokenStreamHiddenTokenFilter(TokenStreamBasicFilter): def __init__(self,input): TokenStreamBasicFilter.__init__(self,input) self.hideMask = BitSet() self.nextMonitoredToken = None self.lastHiddenToken = 
None self.firstHidden = None def consume(self): self.nextMonitoredToken = self.input.nextToken() def consumeFirst(self): self.consume() p = None; while self.hideMask.member(self.LA(1).getType()) or \ self.discardMask.member(self.LA(1).getType()): if self.hideMask.member(self.LA(1).getType()): if not p: p = self.LA(1) else: p.setHiddenAfter(self.LA(1)) self.LA(1).setHiddenBefore(p) p = self.LA(1) self.lastHiddenToken = p if not self.firstHidden: self.firstHidden = p self.consume() def getDiscardMask(self): return self.discardMask def getHiddenAfter(self,t): return t.getHiddenAfter() def getHiddenBefore(self,t): return t.getHiddenBefore() def getHideMask(self): return self.hideMask def getInitialHiddenToken(self): return self.firstHidden def hide(self,m): if isinstance(m,int): self.hideMask.add(m) return if isinstance(m.BitMask): self.hideMask = m return def LA(self,i): return self.nextMonitoredToken def nextToken(self): if not self.LA(1): self.consumeFirst() monitored = self.LA(1) monitored.setHiddenBefore(self.lastHiddenToken) self.lastHiddenToken = None self.consume() p = monitored while self.hideMask.member(self.LA(1).getType()) or \ self.discardMask.member(self.LA(1).getType()): if self.hideMask.member(self.LA(1).getType()): p.setHiddenAfter(self.LA(1)) if p != monitored: self.LA(1).setHiddenBefore(p) p = self.lastHiddenToken = self.LA(1) self.consume() return monitored ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### StringBuffer ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### class StringBuffer: def __init__(self,string=None): if string: self.text = list(string) else: self.text = [] def setLength(self,sz): if not sz : self.text = [] return assert sz>0 if sz >= self.length(): return ### just reset to empty buffer self.text = self.text[0:sz] def length(self): return len(self.text) def append(self,c): self.text.append(c) ### return buffer as string. Arg 'a' is used as index ## into the buffer and 2nd argument shall be the length. ## If 2nd args is absent, we return chars till end of ## buffer starting with 'a'. def getString(self,a=None,length=None): if not a : a = 0 assert a>=0 if a>= len(self.text) : return "" if not length: ## no second argument L = self.text[a:] else: assert (a+length) <= len(self.text) b = a + length L = self.text[a:b] s = "" for x in L : s += x return s toString = getString ## alias def __str__(self): return str(self.text) ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### Reader ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ## When reading Japanese chars, it happens that a stream returns a ## 'char' of length 2. This looks like a bug in the appropriate ## codecs - but I'm rather unsure about this. Anyway, if this is ## the case, I'm going to split this string into a list of chars ## and put them on hold, ie. on a buffer. Next time when called ## we read from buffer until buffer is empty. ## wh: nov, 25th -> problem does not appear in Python 2.4.0.c1. class Reader(object): def __init__(self,stream): self.cin = stream self.buf = [] def read(self,num): assert num==1 if len(self.buf): return self.buf.pop() ## Read a char - this may return a string. ## Is this a bug in codecs/Python? c = self.cin.read(1) if not c or len(c)==1: return c L = list(c) L.reverse() for x in L: self.buf.append(x) ## read one char .. 
return self.read(1) ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### CharScanner ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### class CharScanner(TokenStream): ## class members NO_CHAR = 0 EOF_CHAR = '' ### EOF shall be the empty string. def __init__(self, *argv, **kwargs): super(CharScanner, self).__init__() self.saveConsumedInput = True self.tokenClass = None self.caseSensitive = True self.caseSensitiveLiterals = True self.literals = None self.tabsize = 8 self._returnToken = None self.commitToPath = False self.traceDepth = 0 self.text = StringBuffer() self.hashString = hash(self) self.setTokenObjectClass(CommonToken) self.setInput(*argv) def __iter__(self): return CharScannerIterator(self) def setInput(self,*argv): ## case 1: ## if there's no arg we default to read from ## standard input if not argv: import sys self.setInput(sys.stdin) return ## get 1st argument arg1 = argv[0] ## case 2: ## if arg1 is a string, we assume it's a file name ## and open a stream using 2nd argument as open ## mode. If there's no 2nd argument we fall back to ## mode '+rb'. if is_string_type(arg1): f = open(arg1,"rb") self.setInput(f) self.setFilename(arg1) return ## case 3: ## if arg1 is a file we wrap it by a char buffer ( ## some additional checks?? No, can't do this in ## general). if isinstance(arg1,file): self.setInput(CharBuffer(arg1)) return ## case 4: ## if arg1 is of type SharedLexerInputState we use ## argument as is. if isinstance(arg1,LexerSharedInputState): self.inputState = arg1 return ## case 5: ## check whether argument type is of type input ## buffer. If so create a SharedLexerInputState and ## go ahead. if isinstance(arg1,InputBuffer): self.setInput(LexerSharedInputState(arg1)) return ## case 6: ## check whether argument type has a method read(int) ## If so create CharBuffer ... 
try: if arg1.read: rd = Reader(arg1) cb = CharBuffer(rd) ss = LexerSharedInputState(cb) self.inputState = ss return except: pass ## case 7: ## raise wrong argument exception raise TypeError(argv) def setTabSize(self,size) : self.tabsize = size def getTabSize(self) : return self.tabsize def setCaseSensitive(self,t) : self.caseSensitive = t def setCommitToPath(self,commit) : self.commitToPath = commit def setFilename(self,f) : self.inputState.filename = f def setLine(self,line) : self.inputState.line = line def setText(self,s) : self.resetText() self.text.append(s) def getCaseSensitive(self) : return self.caseSensitive def getCaseSensitiveLiterals(self) : return self.caseSensitiveLiterals def getColumn(self) : return self.inputState.column def setColumn(self,c) : self.inputState.column = c def getCommitToPath(self) : return self.commitToPath def getFilename(self) : return self.inputState.filename def getInputBuffer(self) : return self.inputState.input def getInputState(self) : return self.inputState def setInputState(self,state) : assert isinstance(state,LexerSharedInputState) self.inputState = state def getLine(self) : return self.inputState.line def getText(self) : return str(self.text) def getTokenObject(self) : return self._returnToken def LA(self,i) : c = self.inputState.input.LA(i) if not self.caseSensitive: ### E0006 c = c.__class__.lower(c) return c def makeToken(self,type) : try: ## dynamically load a class assert self.tokenClass tok = self.tokenClass() tok.setType(type) tok.setColumn(self.inputState.tokenStartColumn) tok.setLine(self.inputState.tokenStartLine) return tok except: self.panic("unable to create new token") return Token.badToken def mark(self) : return self.inputState.input.mark() def _match_bitset(self,b) : if b.member(self.LA(1)): self.consume() else: raise MismatchedCharException(self.LA(1), b, False, self) def _match_string(self,s) : for c in s: if self.LA(1) == c: self.consume() else: raise MismatchedCharException(self.LA(1), c, False, self) def match(self,item): if is_string_type(item): return self._match_string(item) else: return self._match_bitset(item) def matchNot(self,c) : if self.LA(1) != c: self.consume() else: raise MismatchedCharException(self.LA(1), c, True, self) def matchRange(self,c1,c2) : if self.LA(1) < c1 or self.LA(1) > c2 : raise MismatchedCharException(self.LA(1), c1, c2, False, self) else: self.consume() def newline(self) : self.inputState.line += 1 self.inputState.column = 1 def tab(self) : c = self.getColumn() nc = ( ((c-1)/self.tabsize) + 1) * self.tabsize + 1 self.setColumn(nc) def panic(self,s='') : print "CharScanner: panic: " + s sys.exit(1) def reportError(self,ex) : print ex def reportError(self,s) : if not self.getFilename(): print "error: " + str(s) else: print self.getFilename() + ": error: " + str(s) def reportWarning(self,s) : if not self.getFilename(): print "warning: " + str(s) else: print self.getFilename() + ": warning: " + str(s) def resetText(self) : self.text.setLength(0) self.inputState.tokenStartColumn = self.inputState.column self.inputState.tokenStartLine = self.inputState.line def rewind(self,pos) : self.inputState.input.rewind(pos) def setTokenObjectClass(self,cl): self.tokenClass = cl def testForLiteral(self,token): if not token: return assert isinstance(token,Token) _type = token.getType() ## special tokens can't be literals if _type in [SKIP,INVALID_TYPE,EOF_TYPE,NULL_TREE_LOOKAHEAD] : return _text = token.getText() if not _text: return assert is_string_type(_text) _type = self.testLiteralsTable(_text,_type) 
token.setType(_type) return _type def testLiteralsTable(self,*args): if is_string_type(args[0]): s = args[0] i = args[1] else: s = self.text.getString() i = args[0] ## check whether integer has been given if not isinstance(i,int): assert isinstance(i,int) ## check whether we have a dict assert isinstance(self.literals,dict) try: ## E0010 if not self.caseSensitiveLiterals: s = s.__class__.lower(s) i = self.literals[s] except: pass return i def toLower(self,c): return c.__class__.lower() def traceIndent(self): print ' ' * self.traceDepth def traceIn(self,rname): self.traceDepth += 1 self.traceIndent() print "> lexer %s c== %s" % (rname,self.LA(1)) def traceOut(self,rname): self.traceIndent() print "< lexer %s c== %s" % (rname,self.LA(1)) self.traceDepth -= 1 def uponEOF(self): pass def append(self,c): if self.saveConsumedInput : self.text.append(c) def commit(self): self.inputState.input.commit() def consume(self): if not self.inputState.guessing: c = self.LA(1) if self.caseSensitive: self.append(c) else: # use input.LA(), not LA(), to get original case # CharScanner.LA() would toLower it. c = self.inputState.input.LA(1) self.append(c) if c and c in "\t": self.tab() else: self.inputState.column += 1 self.inputState.input.consume() ## Consume chars until one matches the given char def consumeUntil_char(self,c): while self.LA(1) != EOF_CHAR and self.LA(1) != c: self.consume() ## Consume chars until one matches the given set def consumeUntil_bitset(self,bitset): while self.LA(1) != EOF_CHAR and not self.set.member(self.LA(1)): self.consume() ### If symbol seen is EOF then generate and set token, otherwise ### throw exception. def default(self,la1): if not la1 : self.uponEOF() self._returnToken = self.makeToken(EOF_TYPE) else: self.raise_NoViableAlt(la1) def filterdefault(self,la1,*args): if not la1: self.uponEOF() self._returnToken = self.makeToken(EOF_TYPE) return if not args: self.consume() raise TryAgain() else: ### apply filter object self.commit(); try: func=args[0] args=args[1:] apply(func,args) except RecognitionException, e: ## catastrophic failure self.reportError(e); self.consume(); raise TryAgain() def raise_NoViableAlt(self,la1=None): if not la1: la1 = self.LA(1) fname = self.getFilename() line = self.getLine() col = self.getColumn() raise NoViableAltForCharException(la1,fname,line,col) def set_return_token(self,_create,_token,_ttype,_offset): if _create and not _token and (not _ttype == SKIP): string = self.text.getString(_offset) _token = self.makeToken(_ttype) _token.setText(string) self._returnToken = _token return _token ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### CharScannerIterator ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### class CharScannerIterator: def __init__(self,inst): if isinstance(inst,CharScanner): self.inst = inst return raise TypeError("CharScannerIterator requires CharScanner object") def next(self): assert self.inst item = self.inst.nextToken() if not item or item.isEOF(): raise StopIteration() return item ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### BitSet ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### I'm assuming here that a long is 64bits. It appears however, that ### a long is of any size. That means we can use a single long as the ### bitset (!), ie. Python would do almost all the work (TBD). 
class BitSet(object): BITS = 64 NIBBLE = 4 LOG_BITS = 6 MOD_MASK = BITS -1 def __init__(self,data=None): if not data: BitSet.__init__(self,[long(0)]) return if isinstance(data,int): BitSet.__init__(self,[long(data)]) return if isinstance(data,long): BitSet.__init__(self,[data]) return if not isinstance(data,list): raise TypeError("BitSet requires integer, long, or " + "list argument") for x in data: if not isinstance(x,long): raise TypeError(self,"List argument item is " + "not a long: %s" % (x)) self.data = data def __str__(self): bits = len(self.data) * BitSet.BITS s = "" for i in xrange(0,bits): if self.at(i): s += "1" else: s += "o" if not ((i+1) % 10): s += '|%s|' % (i+1) return s def __repr__(self): return str(self) def member(self,item): if not item: return False if isinstance(item,int): return self.at(item) if not is_string_type(item): raise TypeError(self,"char or unichar expected: %s" % (item)) ## char is a (unicode) string with at most lenght 1, ie. ## a char. if len(item) != 1: raise TypeError(self,"char expected: %s" % (item)) ### handle ASCII/UNICODE char num = ord(item) ### check whether position num is in bitset return self.at(num) def wordNumber(self,bit): return bit >> BitSet.LOG_BITS def bitMask(self,bit): pos = bit & BitSet.MOD_MASK ## bit mod BITS return (1L << pos) def set(self,bit,on=True): # grow bitset as required (use with care!) i = self.wordNumber(bit) mask = self.bitMask(bit) if i>=len(self.data): d = i - len(self.data) + 1 for x in xrange(0,d): self.data.append(0L) assert len(self.data) == i+1 if on: self.data[i] |= mask else: self.data[i] &= (~mask) ### make add an alias for set add = set def off(self,bit,off=True): self.set(bit,not off) def at(self,bit): i = self.wordNumber(bit) v = self.data[i] m = self.bitMask(bit) return v & m ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### some further funcs ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### def illegalarg_ex(func): raise ValueError( "%s is only valid if parser is built for debugging" % (func.func_name)) def runtime_ex(func): raise RuntimeException( "%s is only valid if parser is built for debugging" % (func.func_name)) ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### TokenBuffer ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### class TokenBuffer(object): def __init__(self,stream): self.input = stream self.nMarkers = 0 self.markerOffset = 0 self.numToConsume = 0 self.queue = Queue() def reset(self) : self.nMarkers = 0 self.markerOffset = 0 self.numToConsume = 0 self.queue.reset() def consume(self) : self.numToConsume += 1 def fill(self, amount): self.syncConsume() while self.queue.length() < (amount + self.markerOffset): self.queue.append(self.input.nextToken()) def getInput(self): return self.input def LA(self,k) : self.fill(k) return self.queue.elementAt(self.markerOffset + k - 1).type def LT(self,k) : self.fill(k) return self.queue.elementAt(self.markerOffset + k - 1) def mark(self) : self.syncConsume() self.nMarkers += 1 return self.markerOffset def rewind(self,mark) : self.syncConsume() self.markerOffset = mark self.nMarkers -= 1 def syncConsume(self) : while self.numToConsume > 0: if self.nMarkers > 0: # guess mode -- leave leading characters and bump offset. 
self.markerOffset += 1 else: # normal mode -- remove first character self.queue.removeFirst() self.numToConsume -= 1 def __str__(self): return "(%s,%s,%s,%s,%s)" % ( self.input, self.nMarkers, self.markerOffset, self.numToConsume, self.queue) def __repr__(self): return str(self) ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### ParserSharedInputState ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### class ParserSharedInputState(object): def __init__(self): self.input = None self.reset() def reset(self): self.guessing = 0 self.filename = None if self.input: self.input.reset() ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### Parser ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### class Parser(object): def __init__(self, *args, **kwargs): self.tokenNames = None self.returnAST = None self.astFactory = None self.tokenTypeToASTClassMap = {} self.ignoreInvalidDebugCalls = False self.traceDepth = 0 if not args: self.inputState = ParserSharedInputState() return arg0 = args[0] assert isinstance(arg0,ParserSharedInputState) self.inputState = arg0 return def getTokenTypeToASTClassMap(self): return self.tokenTypeToASTClassMap def addMessageListener(self, l): if not self.ignoreInvalidDebugCalls: illegalarg_ex(addMessageListener) def addParserListener(self,l) : if (not self.ignoreInvalidDebugCalls) : illegalarg_ex(addParserListener) def addParserMatchListener(self, l) : if (not self.ignoreInvalidDebugCalls) : illegalarg_ex(addParserMatchListener) def addParserTokenListener(self, l) : if (not self.ignoreInvalidDebugCalls): illegalarg_ex(addParserTokenListener) def addSemanticPredicateListener(self, l) : if (not self.ignoreInvalidDebugCalls): illegalarg_ex(addSemanticPredicateListener) def addSyntacticPredicateListener(self, l) : if (not self.ignoreInvalidDebugCalls): illegalarg_ex(addSyntacticPredicateListener) def addTraceListener(self, l) : if (not self.ignoreInvalidDebugCalls): illegalarg_ex(addTraceListener) def consume(self): raise NotImplementedError() def _consumeUntil_type(self,tokenType): while self.LA(1) != EOF_TYPE and self.LA(1) != tokenType: self.consume() def _consumeUntil_bitset(self, set): while self.LA(1) != EOF_TYPE and not set.member(self.LA(1)): self.consume() def consumeUntil(self,arg): if isinstance(arg,int): self._consumeUntil_type(arg) else: self._consumeUntil_bitset(arg) def defaultDebuggingSetup(self): pass def getAST(self) : return self.returnAST def getASTFactory(self) : return self.astFactory def getFilename(self) : return self.inputState.filename def getInputState(self) : return self.inputState def setInputState(self, state) : self.inputState = state def getTokenName(self,num) : return self.tokenNames[num] def getTokenNames(self) : return self.tokenNames def isDebugMode(self) : return self.false def LA(self, i): raise NotImplementedError() def LT(self, i): raise NotImplementedError() def mark(self): return self.inputState.input.mark() def _match_int(self,t): if (self.LA(1) != t): raise MismatchedTokenException( self.tokenNames, self.LT(1), t, False, self.getFilename()) else: self.consume() def _match_set(self, b): if (not b.member(self.LA(1))): raise MismatchedTokenException( self.tokenNames,self.LT(1), b, False, self.getFilename()) else: self.consume() def match(self,set) : if isinstance(set,int): self._match_int(set) return if isinstance(set,BitSet): self._match_set(set) return raise TypeError("Parser.match requires integer ot BitSet argument") def matchNot(self,t): if 
self.LA(1) == t: raise MismatchedTokenException( tokenNames, self.LT(1), t, True, self.getFilename()) else: self.consume() def removeMessageListener(self, l) : if (not self.ignoreInvalidDebugCalls): runtime_ex(removeMessageListener) def removeParserListener(self, l) : if (not self.ignoreInvalidDebugCalls): runtime_ex(removeParserListener) def removeParserMatchListener(self, l) : if (not self.ignoreInvalidDebugCalls): runtime_ex(removeParserMatchListener) def removeParserTokenListener(self, l) : if (not self.ignoreInvalidDebugCalls): runtime_ex(removeParserTokenListener) def removeSemanticPredicateListener(self, l) : if (not self.ignoreInvalidDebugCalls): runtime_ex(removeSemanticPredicateListener) def removeSyntacticPredicateListener(self, l) : if (not self.ignoreInvalidDebugCalls): runtime_ex(removeSyntacticPredicateListener) def removeTraceListener(self, l) : if (not self.ignoreInvalidDebugCalls): runtime_ex(removeTraceListener) def reportError(self,x) : fmt = "syntax error:" f = self.getFilename() if f: fmt = ("%s:" % f) + fmt if isinstance(x,Token): line = x.getColumn() col = x.getLine() text = x.getText() fmt = fmt + 'unexpected symbol at line %s (column %s) : "%s"' print >>sys.stderr, fmt % (line,col,text) else: print >>sys.stderr, fmt,str(x) def reportWarning(self,s): f = self.getFilename() if f: print "%s:warning: %s" % (f,str(x)) else: print "warning: %s" % (str(x)) def rewind(self, pos) : self.inputState.input.rewind(pos) def setASTFactory(self, f) : self.astFactory = f def setASTNodeClass(self, cl) : self.astFactory.setASTNodeType(cl) def setASTNodeType(self, nodeType) : self.setASTNodeClass(nodeType) def setDebugMode(self, debugMode) : if (not self.ignoreInvalidDebugCalls): runtime_ex(setDebugMode) def setFilename(self, f) : self.inputState.filename = f def setIgnoreInvalidDebugCalls(self, value) : self.ignoreInvalidDebugCalls = value def setTokenBuffer(self, t) : self.inputState.input = t def traceIndent(self): print " " * self.traceDepth def traceIn(self,rname): self.traceDepth += 1 self.trace("> ", rname) def traceOut(self,rname): self.trace("< ", rname) self.traceDepth -= 1 ### wh: moved from ASTFactory to Parser def addASTChild(self,currentAST, child): if not child: return if not currentAST.root: currentAST.root = child elif not currentAST.child: currentAST.root.setFirstChild(child) else: currentAST.child.setNextSibling(child) currentAST.child = child currentAST.advanceChildToEnd() ### wh: moved from ASTFactory to Parser def makeASTRoot(self,currentAST,root) : if root: ### Add the current root as a child of new root root.addChild(currentAST.root) ### The new current child is the last sibling of the old root currentAST.child = currentAST.root currentAST.advanceChildToEnd() ### Set the new root currentAST.root = root ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### LLkParser ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### class LLkParser(Parser): def __init__(self, *args, **kwargs): try: arg1 = args[0] except: arg1 = 1 if isinstance(arg1,int): super(LLkParser,self).__init__() self.k = arg1 return if isinstance(arg1,ParserSharedInputState): super(LLkParser,self).__init__(arg1) self.set_k(1,*args) return if isinstance(arg1,TokenBuffer): super(LLkParser,self).__init__() self.setTokenBuffer(arg1) self.set_k(1,*args) return if isinstance(arg1,TokenStream): super(LLkParser,self).__init__() tokenBuf = TokenBuffer(arg1) self.setTokenBuffer(tokenBuf) self.set_k(1,*args) return ### unknown argument raise TypeError("LLkParser 
requires integer, " + "ParserSharedInputStream or TokenStream argument") def consume(self): self.inputState.input.consume() def LA(self,i): return self.inputState.input.LA(i) def LT(self,i): return self.inputState.input.LT(i) def set_k(self,index,*args): try: self.k = args[index] except: self.k = 1 def trace(self,ee,rname): print type(self) self.traceIndent() guess = "" if self.inputState.guessing > 0: guess = " [guessing]" print(ee + rname + guess) for i in xrange(1,self.k+1): if i != 1: print(", ") if self.LT(i) : v = self.LT(i).getText() else: v = "null" print "LA(%s) == %s" % (i,v) print("\n") def traceIn(self,rname): self.traceDepth += 1; self.trace("> ", rname); def traceOut(self,rname): self.trace("< ", rname); self.traceDepth -= 1; ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### TreeParserSharedInputState ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### class TreeParserSharedInputState(object): def __init__(self): self.guessing = 0 ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### TreeParser ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### class TreeParser(object): def __init__(self, *args, **kwargs): self.inputState = TreeParserSharedInputState() self._retTree = None self.tokenNames = [] self.returnAST = None self.astFactory = ASTFactory() self.traceDepth = 0 def getAST(self): return self.returnAST def getASTFactory(self): return self.astFactory def getTokenName(self,num) : return self.tokenNames[num] def getTokenNames(self): return self.tokenNames def match(self,t,set) : assert isinstance(set,int) or isinstance(set,BitSet) if not t or t == ASTNULL: raise MismatchedTokenException(self.getTokenNames(), t,set, False) if isinstance(set,int) and t.getType() != set: raise MismatchedTokenException(self.getTokenNames(), t,set, False) if isinstance(set,BitSet) and not set.member(t.getType): raise MismatchedTokenException(self.getTokenNames(), t,set, False) def matchNot(self,t, ttype) : if not t or (t == ASTNULL) or (t.getType() == ttype): raise MismatchedTokenException(getTokenNames(), t, ttype, True) def reportError(self,ex): print >>sys.stderr,"error:",ex def reportWarning(self, s): print "warning:",s def setASTFactory(self,f): self.astFactory = f def setASTNodeType(self,nodeType): self.setASTNodeClass(nodeType) def setASTNodeClass(self,nodeType): self.astFactory.setASTNodeType(nodeType) def traceIndent(self): print " " * self.traceDepth def traceIn(self,rname,t): self.traceDepth += 1 self.traceIndent() print("> " + rname + "(" + ifelse(t,str(t),"null") + ")" + ifelse(self.inputState.guessing>0,"[guessing]","")) def traceOut(self,rname,t): self.traceIndent() print("< " + rname + "(" + ifelse(t,str(t),"null") + ")" + ifelse(self.inputState.guessing>0,"[guessing]","")) self.traceDepth -= 1 ### wh: moved from ASTFactory to TreeParser def addASTChild(self,currentAST, child): if not child: return if not currentAST.root: currentAST.root = child elif not currentAST.child: currentAST.root.setFirstChild(child) else: currentAST.child.setNextSibling(child) currentAST.child = child currentAST.advanceChildToEnd() ### wh: moved from ASTFactory to TreeParser def makeASTRoot(self,currentAST,root): if root: ### Add the current root as a child of new root root.addChild(currentAST.root) ### The new current child is the last sibling of the old root currentAST.child = currentAST.root currentAST.advanceChildToEnd() ### Set the new root currentAST.root = root 
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### funcs to work on trees ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### def rightmost(ast): if ast: while(ast.right): ast = ast.right return ast def cmptree(s,t,partial): while(s and t): ### as a quick optimization, check roots first. if not s.equals(t): return False ### if roots match, do full list match test on children. if not cmptree(s.getFirstChild(),t.getFirstChild(),partial): return False s = s.getNextSibling() t = t.getNextSibling() r = ifelse(partial,not t,not s and not t) return r ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### AST ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### class AST(object): def __init__(self): pass def addChild(self, c): pass def equals(self, t): return False def equalsList(self, t): return False def equalsListPartial(self, t): return False def equalsTree(self, t): return False def equalsTreePartial(self, t): return False def findAll(self, tree): return None def findAllPartial(self, subtree): return None def getFirstChild(self): return self def getNextSibling(self): return self def getText(self): return "" def getType(self): return INVALID_TYPE def getLine(self): return 0 def getColumn(self): return 0 def getNumberOfChildren(self): return 0 def initialize(self, t, txt): pass def initialize(self, t): pass def setFirstChild(self, c): pass def setNextSibling(self, n): pass def setText(self, text): pass def setType(self, ttype): pass def toString(self): self.getText() __str__ = toString def toStringList(self): return self.getText() def toStringTree(self): return self.getText() ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### ASTNULLType ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### There is only one instance of this class **/ class ASTNULLType(AST): def __init__(self): AST.__init__(self) pass def getText(self): return "<ASTNULL>" def getType(self): return NULL_TREE_LOOKAHEAD ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### BaseAST ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### class BaseAST(AST): verboseStringConversion = False tokenNames = None def __init__(self): self.down = None ## kid self.right = None ## sibling def addChild(self,node): if node: t = rightmost(self.down) if t: t.right = node else: assert not self.down self.down = node def getNumberOfChildren(self): t = self.down n = 0 while t: n += 1 t = t.right return n def doWorkForFindAll(self,v,target,partialMatch): sibling = self while sibling: c1 = partialMatch and sibling.equalsTreePartial(target) if c1: v.append(sibling) else: c2 = not partialMatch and sibling.equalsTree(target) if c2: v.append(sibling) ### regardless of match or not, check any children for matches if sibling.getFirstChild(): sibling.getFirstChild().doWorkForFindAll(v,target,partialMatch) sibling = sibling.getNextSibling() ### Is node t equal to 'self' in terms of token type and text? def equals(self,t): if not t: return False return self.getText() == t.getText() and self.getType() == t.getType() ### Is t an exact structural and equals() match of this tree. The ### 'self' reference is considered the start of a sibling list. ### def equalsList(self, t): return cmptree(self, t, partial=False) ### Is 't' a subtree of this list? ### The siblings of the root are NOT ignored. 
### def equalsListPartial(self,t): return cmptree(self,t,partial=True) ### Is tree rooted at 'self' equal to 't'? The siblings ### of 'self' are ignored. ### def equalsTree(self, t): return self.equals(t) and \ cmptree(self.getFirstChild(), t.getFirstChild(), partial=False) ### Is 't' a subtree of the tree rooted at 'self'? The siblings ### of 'self' are ignored. ### def equalsTreePartial(self, t): if not t: return True return self.equals(t) and cmptree( self.getFirstChild(), t.getFirstChild(), partial=True) ### Walk the tree looking for all exact subtree matches. Return ### an ASTEnumerator that lets the caller walk the list ### of subtree roots found herein. def findAll(self,target): roots = [] ### the empty tree cannot result in an enumeration if not target: return None # find all matches recursively self.doWorkForFindAll(roots, target, False) return roots ### Walk the tree looking for all subtrees. Return ### an ASTEnumerator that lets the caller walk the list ### of subtree roots found herein. def findAllPartial(self,sub): roots = [] ### the empty tree cannot result in an enumeration if not sub: return None self.doWorkForFindAll(roots, sub, True) ### find all matches recursively return roots ### Get the first child of this node None if not children def getFirstChild(self): return self.down ### Get the next sibling in line after this one def getNextSibling(self): return self.right ### Get the token text for this node def getText(self): return "" ### Get the token type for this node def getType(self): return 0 def getLine(self): return 0 def getColumn(self): return 0 ### Remove all children */ def removeChildren(self): self.down = None def setFirstChild(self,c): self.down = c def setNextSibling(self, n): self.right = n ### Set the token text for this node def setText(self, text): pass ### Set the token type for this node def setType(self, ttype): pass ### static def setVerboseStringConversion(verbose,names): verboseStringConversion = verbose tokenNames = names setVerboseStringConversion = staticmethod(setVerboseStringConversion) ### Return an array of strings that maps token ID to it's text. 
## @since 2.7.3 def getTokenNames(): return tokenNames def toString(self): return self.getText() ### return tree as lisp string - sibling included def toStringList(self): ts = self.toStringTree() sib = self.getNextSibling() if sib: ts += sib.toStringList() return ts __str__ = toStringList ### return tree as string - siblings ignored def toStringTree(self): ts = "" kid = self.getFirstChild() if kid: ts += " (" ts += " " + self.toString() if kid: ts += kid.toStringList() ts += " )" return ts ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### CommonAST ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### Common AST node implementation class CommonAST(BaseAST): def __init__(self,token=None): super(CommonAST,self).__init__() self.ttype = INVALID_TYPE self.text = "<no text>" self.line = 0 self.column= 0 self.initialize(token) #assert self.text ### Get the token text for this node def getText(self): return self.text ### Get the token type for this node def getType(self): return self.ttype ### Get the line for this node def getLine(self): return self.line ### Get the column for this node def getColumn(self): return self.column def initialize(self,*args): if not args: return arg0 = args[0] if isinstance(arg0,int): arg1 = args[1] self.setType(arg0) self.setText(arg1) return if isinstance(arg0,AST) or isinstance(arg0,Token): self.setText(arg0.getText()) self.setType(arg0.getType()) self.line = arg0.getLine() self.column = arg0.getColumn() return ### Set the token text for this node def setText(self,text_): assert is_string_type(text_) self.text = text_ ### Set the token type for this node def setType(self,ttype_): assert isinstance(ttype_,int) self.ttype = ttype_ ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### CommonASTWithHiddenTokens ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### class CommonASTWithHiddenTokens(CommonAST): def __init__(self,*args): CommonAST.__init__(self,*args) self.hiddenBefore = None self.hiddenAfter = None def getHiddenAfter(self): return self.hiddenAfter def getHiddenBefore(self): return self.hiddenBefore def initialize(self,*args): CommonAST.initialize(self,*args) if args and isinstance(args[0],Token): assert isinstance(args[0],CommonHiddenStreamToken) self.hiddenBefore = args[0].getHiddenBefore() self.hiddenAfter = args[0].getHiddenAfter() ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### ASTPair ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### class ASTPair(object): def __init__(self): self.root = None ### current root of tree self.child = None ### current child to which siblings are added ### Make sure that child is the last sibling */ def advanceChildToEnd(self): if self.child: while self.child.getNextSibling(): self.child = self.child.getNextSibling() ### Copy an ASTPair. 
Don't call it clone() because we want type-safety */ def copy(self): tmp = ASTPair() tmp.root = self.root tmp.child = self.child return tmp def toString(self): r = ifelse(not root,"null",self.root.getText()) c = ifelse(not child,"null",self.child.getText()) return "[%s,%s]" % (r,c) __str__ = toString __repr__ = toString ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### ASTFactory ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### class ASTFactory(object): def __init__(self,table=None): self._class = None self._classmap = ifelse(table,table,None) def create(self,*args): if not args: return self.create(INVALID_TYPE) arg0 = args[0] arg1 = None arg2 = None try: arg1 = args[1] arg2 = args[2] except: pass # ctor(int) if isinstance(arg0,int) and not arg2: ### get class for 'self' type c = self.getASTNodeType(arg0) t = self.create(c) if t: t.initialize(arg0, ifelse(arg1,arg1,"")) return t # ctor(int,something) if isinstance(arg0,int) and arg2: t = self.create(arg2) if t: t.initialize(arg0,arg1) return t # ctor(AST) if isinstance(arg0,AST): t = self.create(arg0.getType()) if t: t.initialize(arg0) return t # ctor(token) if isinstance(arg0,Token) and not arg1: ttype = arg0.getType() assert isinstance(ttype,int) t = self.create(ttype) if t: t.initialize(arg0) return t # ctor(token,class) if isinstance(arg0,Token) and arg1: assert isinstance(arg1,type) assert issubclass(arg1,AST) # this creates instance of 'arg1' using 'arg0' as # argument. Wow, that's magic! t = arg1(arg0) assert t and isinstance(t,AST) return t # ctor(class) if isinstance(arg0,type): ### next statement creates instance of type (!) t = arg0() assert isinstance(t,AST) return t def setASTNodeClass(self,className=None): if not className: return assert isinstance(className,type) assert issubclass(className,AST) self._class = className ### kind of misnomer - use setASTNodeClass instead. setASTNodeType = setASTNodeClass def getASTNodeClass(self): return self._class def getTokenTypeToASTClassMap(self): return self._classmap def setTokenTypeToASTClassMap(self,amap): self._classmap = amap def error(self, e): import sys print >> sys.stderr, e def setTokenTypeASTNodeType(self, tokenType, className): """ Specify a mapping between a token type and a (AST) class. """ if not self._classmap: self._classmap = {} if not className: try: del self._classmap[tokenType] except: pass else: ### here we should also perform actions to ensure that ### a. class can be loaded ### b. class is a subclass of AST ### assert isinstance(className,type) assert issubclass(className,AST) ## a & b ### enter the class self._classmap[tokenType] = className def getASTNodeType(self,tokenType): """ For a given token type return the AST node type. First we lookup a mapping table, second we try _class and finally we resolve to "antlr.CommonAST". """ # first if self._classmap: try: c = self._classmap[tokenType] if c: return c except: pass # second if self._class: return self._class # default return CommonAST ### methods that have been moved to file scope - just listed ### here to be somewhat consistent with original API def dup(self,t): return antlr.dup(t,self) def dupList(self,t): return antlr.dupList(t,self) def dupTree(self,t): return antlr.dupTree(t,self) ### methods moved to other classes ### 1. makeASTRoot -> Parser ### 2. 
addASTChild -> Parser ### non-standard: create alias for longish method name maptype = setTokenTypeASTNodeType ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### ASTVisitor ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### class ASTVisitor(object): def __init__(self,*args): pass def visit(self,ast): pass ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### static methods and variables ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ASTNULL = ASTNULLType() ### wh: moved from ASTFactory as there's nothing ASTFactory-specific ### in this method. def make(*nodes): if not nodes: return None for i in xrange(0,len(nodes)): node = nodes[i] if node: assert isinstance(node,AST) root = nodes[0] tail = None if root: root.setFirstChild(None) for i in xrange(1,len(nodes)): if not nodes[i]: continue if not root: root = tail = nodes[i] elif not tail: root.setFirstChild(nodes[i]) tail = root.getFirstChild() else: tail.setNextSibling(nodes[i]) tail = tail.getNextSibling() ### Chase tail to last sibling while tail.getNextSibling(): tail = tail.getNextSibling() return root def dup(t,factory): if not t: return None if factory: dup_t = factory.create(t.__class__) else: raise TypeError("dup function requires ASTFactory argument") dup_t.initialize(t) return dup_t def dupList(t,factory): result = dupTree(t,factory) nt = result while t: ## for each sibling of the root t = t.getNextSibling() nt.setNextSibling(dupTree(t,factory)) nt = nt.getNextSibling() return result def dupTree(t,factory): result = dup(t,factory) if t: result.setFirstChild(dupList(t.getFirstChild(),factory)) return result ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx ### $Id: antlr.py 3750 2009-02-13 00:13:04Z sjmachin $ # Local Variables: *** # mode: python *** # py-indent-offset: 4 *** # End: ***
mit
-8,496,658,699,182,714,000
28.297495
108
0.561205
false
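
The ANTLR runtime classes above build their ASTs as first-child/next-sibling linked trees (see Parser.addASTChild, TreeParser.addASTChild and BaseAST.addChild). Below is a minimal, self-contained sketch of that linking discipline; the Node class is a hypothetical stand-in kept only so the example runs without the antlr module, not the real BaseAST.

# Minimal sketch of the first-child/next-sibling linking used by
# Parser.addASTChild / BaseAST.addChild above. Node is a hypothetical
# stand-in class, not the real antlr BaseAST.
class Node(object):
    def __init__(self, text):
        self.text = text
        self.down = None   # first child
        self.right = None  # next sibling

    def add_child(self, node):
        # Append `node` as the last sibling of the first child.
        if self.down is None:
            self.down = node
        else:
            tail = self.down
            while tail.right:
                tail = tail.right
            tail.right = node

    def to_lisp(self):
        # Mirrors the shape of BaseAST.toStringTree: "(root child1 child2 ...)".
        parts, kid = [], self.down
        while kid:
            parts.append(kid.to_lisp())
            kid = kid.right
        return "(%s %s)" % (self.text, " ".join(parts)) if parts else self.text

root = Node("+")
root.add_child(Node("1"))
root.add_child(Node("2"))
print(root.to_lisp())   # -> (+ 1 2)
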
tmm1/pygments.rb
vendor/pygments-main/pygments/lexers/rust.py
1
8235
# -*- coding: utf-8 -*- """ pygments.lexers.rust ~~~~~~~~~~~~~~~~~~~~ Lexers for the Rust language. :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from pygments.lexer import RegexLexer, include, bygroups, words, default from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ Number, Punctuation, Whitespace __all__ = ['RustLexer'] class RustLexer(RegexLexer): """ Lexer for the Rust programming language (version 1.47). .. versionadded:: 1.6 """ name = 'Rust' filenames = ['*.rs', '*.rs.in'] aliases = ['rust', 'rs'] mimetypes = ['text/rust', 'text/x-rust'] keyword_types = (words(( 'u8', 'u16', 'u32', 'u64', 'u128', 'i8', 'i16', 'i32', 'i64', 'i128', 'usize', 'isize', 'f32', 'f64', 'char', 'str', 'bool', ), suffix=r'\b'), Keyword.Type) builtin_funcs_types = (words(( 'Copy', 'Send', 'Sized', 'Sync', 'Unpin', 'Drop', 'Fn', 'FnMut', 'FnOnce', 'drop', 'Box', 'ToOwned', 'Clone', 'PartialEq', 'PartialOrd', 'Eq', 'Ord', 'AsRef', 'AsMut', 'Into', 'From', 'Default', 'Iterator', 'Extend', 'IntoIterator', 'DoubleEndedIterator', 'ExactSizeIterator', 'Option', 'Some', 'None', 'Result', 'Ok', 'Err', 'String', 'ToString', 'Vec', ), suffix=r'\b'), Name.Builtin) builtin_macros = (words(( 'asm', 'assert', 'assert_eq', 'assert_ne', 'cfg', 'column', 'compile_error', 'concat', 'concat_idents', 'dbg', 'debug_assert', 'debug_assert_eq', 'debug_assert_ne', 'env', 'eprint', 'eprintln', 'file', 'format', 'format_args', 'format_args_nl', 'global_asm', 'include', 'include_bytes', 'include_str', 'is_aarch64_feature_detected', 'is_arm_feature_detected', 'is_mips64_feature_detected', 'is_mips_feature_detected', 'is_powerpc64_feature_detected', 'is_powerpc_feature_detected', 'is_x86_feature_detected', 'line', 'llvm_asm', 'log_syntax', 'macro_rules', 'matches', 'module_path', 'option_env', 'panic', 'print', 'println', 'stringify', 'thread_local', 'todo', 'trace_macros', 'unimplemented', 'unreachable', 'vec', 'write', 'writeln', ), suffix=r'!'), Name.Function.Magic) tokens = { 'root': [ # rust allows a file to start with a shebang, but if the first line # starts with #![ then it's not a shebang but a crate attribute. (r'#![^[\r\n].*$', Comment.Preproc), default('base'), ], 'base': [ # Whitespace and Comments (r'\n', Whitespace), (r'\s+', Whitespace), (r'//!.*?\n', String.Doc), (r'///(\n|[^/].*?\n)', String.Doc), (r'//(.*?)\n', Comment.Single), (r'/\*\*(\n|[^/*])', String.Doc, 'doccomment'), (r'/\*!', String.Doc, 'doccomment'), (r'/\*', Comment.Multiline, 'comment'), # Macro parameters (r"""\$([a-zA-Z_]\w*|\(,?|\),?|,?)""", Comment.Preproc), # Keywords (words(('as', 'async', 'await', 'box', 'const', 'crate', 'dyn', 'else', 'extern', 'for', 'if', 'impl', 'in', 'loop', 'match', 'move', 'mut', 'pub', 'ref', 'return', 'static', 'super', 'trait', 'unsafe', 'use', 'where', 'while'), suffix=r'\b'), Keyword), (words(('abstract', 'become', 'do', 'final', 'macro', 'override', 'priv', 'typeof', 'try', 'unsized', 'virtual', 'yield'), suffix=r'\b'), Keyword.Reserved), (r'(true|false)\b', Keyword.Constant), (r'self\b', Name.Builtin.Pseudo), (r'mod\b', Keyword, 'modname'), (r'let\b', Keyword.Declaration), (r'fn\b', Keyword, 'funcname'), (r'(struct|enum|type|union)\b', Keyword, 'typename'), (r'(default)(\s+)(type|fn)\b', bygroups(Keyword, Text, Keyword)), keyword_types, (r'[sS]elf\b', Name.Builtin.Pseudo), # Prelude (taken from Rust's src/libstd/prelude.rs) builtin_funcs_types, builtin_macros, # Path seperators, so types don't catch them. 
(r'::\b', Text), # Types in positions. (r'(?::|->)', Text, 'typename'), # Labels (r'(break|continue)(\s*)(\'[A-Za-z_]\w*)?', bygroups(Keyword, Text.Whitespace, Name.Label)), # Character literals (r"""'(\\['"\\nrt]|\\x[0-7][0-9a-fA-F]|\\0""" r"""|\\u\{[0-9a-fA-F]{1,6}\}|.)'""", String.Char), (r"""b'(\\['"\\nrt]|\\x[0-9a-fA-F]{2}|\\0""" r"""|\\u\{[0-9a-fA-F]{1,6}\}|.)'""", String.Char), # Binary literals (r'0b[01_]+', Number.Bin, 'number_lit'), # Octal literals (r'0o[0-7_]+', Number.Oct, 'number_lit'), # Hexadecimal literals (r'0[xX][0-9a-fA-F_]+', Number.Hex, 'number_lit'), # Decimal literals (r'[0-9][0-9_]*(\.[0-9_]+[eE][+\-]?[0-9_]+|' r'\.[0-9_]*(?!\.)|[eE][+\-]?[0-9_]+)', Number.Float, 'number_lit'), (r'[0-9][0-9_]*', Number.Integer, 'number_lit'), # String literals (r'b"', String, 'bytestring'), (r'"', String, 'string'), (r'b?r(#*)".*?"\1', String), # Lifetime names (r"'", Operator, 'lifetime'), # Operators and Punctuation (r'\.\.=?', Operator), (r'[{}()\[\],.;]', Punctuation), (r'[+\-*/%&|<>^!~@=:?]', Operator), # Identifiers (r'[a-zA-Z_]\w*', Name), # Raw identifiers (r'r#[a-zA-Z_]\w*', Name), # Attributes (r'#!?\[', Comment.Preproc, 'attribute['), ], 'comment': [ (r'[^*/]+', Comment.Multiline), (r'/\*', Comment.Multiline, '#push'), (r'\*/', Comment.Multiline, '#pop'), (r'[*/]', Comment.Multiline), ], 'doccomment': [ (r'[^*/]+', String.Doc), (r'/\*', String.Doc, '#push'), (r'\*/', String.Doc, '#pop'), (r'[*/]', String.Doc), ], 'modname': [ (r'\s+', Text), (r'[a-zA-Z_]\w*', Name.Namespace, '#pop'), default('#pop'), ], 'funcname': [ (r'\s+', Text), (r'[a-zA-Z_]\w*', Name.Function, '#pop'), default('#pop'), ], 'typename': [ (r'\s+', Text), (r'&', Keyword.Pseudo), (r"'", Operator, 'lifetime'), builtin_funcs_types, keyword_types, (r'[a-zA-Z_]\w*', Name.Class, '#pop'), default('#pop'), ], 'lifetime': [ (r"(static|_)", Name.Builtin), (r"[a-zA-Z_]+\w*", Name.Attribute), default('#pop'), ], 'number_lit': [ (r'[ui](8|16|32|64|size)', Keyword, '#pop'), (r'f(32|64)', Keyword, '#pop'), default('#pop'), ], 'string': [ (r'"', String, '#pop'), (r"""\\['"\\nrt]|\\x[0-7][0-9a-fA-F]|\\0""" r"""|\\u\{[0-9a-fA-F]{1,6}\}""", String.Escape), (r'[^\\"]+', String), (r'\\', String), ], 'bytestring': [ (r"""\\x[89a-fA-F][0-9a-fA-F]""", String.Escape), include('string'), ], 'attribute_common': [ (r'"', String, 'string'), (r'\[', Comment.Preproc, 'attribute['), (r'\(', Comment.Preproc, 'attribute('), ], 'attribute[': [ include('attribute_common'), (r'\];?', Comment.Preproc, '#pop'), (r'[^"\]]+', Comment.Preproc), ], 'attribute(': [ include('attribute_common'), (r'\);?', Comment.Preproc, '#pop'), (r'[^")]+', Comment.Preproc), ], }
mit
6,084,747,765,619,548,000
35.763393
79
0.433394
false
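
The RustLexer above plugs into Pygments' usual highlight pipeline. A short usage sketch follows, assuming the pygments package is installed; the 'rust' and 'rs' aliases declared above also work with pygments.lexers.get_lexer_by_name.

# Usage sketch for the lexer above (assumes pygments is installed).
from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers.rust import RustLexer

code = 'fn main() { println!("hello"); }'
print(highlight(code, RustLexer(), HtmlFormatter()))
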
UCL-RITS/django-shibboleth-remoteuser
shibboleth/views.py
10
2867
from django.conf import settings
from django.contrib import auth
from django.contrib.auth.decorators import login_required
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpResponse
from django.shortcuts import redirect
from django.utils.decorators import method_decorator
from django.views.generic import TemplateView

from urllib import quote

#Logout settings.
from shibboleth.app_settings import LOGOUT_URL, LOGOUT_REDIRECT_URL, LOGOUT_SESSION_KEY


class ShibbolethView(TemplateView):
    """
    This is here to offer a Shib protected page that we can
    route users through to login.
    """
    template_name = 'shibboleth/user_info.html'

    @method_decorator(login_required)
    def dispatch(self, request, *args, **kwargs):
        """
        Django docs say to decorate the dispatch method for
        class based views.
        https://docs.djangoproject.com/en/dev/topics/auth/
        """
        return super(ShibbolethView, self).dispatch(request, *args, **kwargs)

    def get(self, request, **kwargs):
        """Process the request."""
        next = self.request.GET.get('next', None)
        if next is not None:
            return redirect(next)
        return super(ShibbolethView, self).get(request)

    def get_context_data(self, **kwargs):
        context = super(ShibbolethView, self).get_context_data(**kwargs)
        context['user'] = self.request.user
        return context


class ShibbolethLoginView(TemplateView):
    """
    Pass the user to the Shibboleth login page.
    Some code borrowed from:
    https://github.com/stefanfoulis/django-class-based-auth-views.
    """
    redirect_field_name = "target"

    def get(self, *args, **kwargs):
        #Remove session value that is forcing Shibboleth reauthentication.
        self.request.session.pop(LOGOUT_SESSION_KEY, None)
        login = settings.LOGIN_URL + '?target=%s' % quote(self.request.GET.get(self.redirect_field_name))
        return redirect(login)


class ShibbolethLogoutView(TemplateView):
    """
    Pass the user to the Shibboleth logout page.
    Some code borrowed from:
    https://github.com/stefanfoulis/django-class-based-auth-views.
    """
    redirect_field_name = "target"

    def get(self, *args, **kwargs):
        #Log the user out.
        auth.logout(self.request)
        #Set session key that middleware will use to force
        #Shibboleth reauthentication.
        self.request.session[LOGOUT_SESSION_KEY] = True
        #Get target url in order of preference.
        target = LOGOUT_REDIRECT_URL or\
            quote(self.request.GET.get(self.redirect_field_name)) or\
            quote(self.request.build_absolute_uri())
        logout = LOGOUT_URL % target
        return redirect(logout)
mit
93,624,724,161,175,470
33.8375
105
0.657482
false
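
The three views above are meant to be wired into a project URLconf. A hedged sketch of that wiring follows, written in Django 1.x style to match the Python 2 code above; the route names and URL prefixes are illustrative assumptions, not something the package prescribes.

# Illustrative URLconf for the views above (Django 1.x style `url()` patterns;
# names and prefixes are assumptions).
from django.conf.urls import url

from shibboleth.views import (ShibbolethView, ShibbolethLoginView,
                              ShibbolethLogoutView)

urlpatterns = [
    url(r'^$', ShibbolethView.as_view(), name='info'),
    url(r'^login/$', ShibbolethLoginView.as_view(), name='login'),
    url(r'^logout/$', ShibbolethLogoutView.as_view(), name='logout'),
]
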
v-iam/azure-sdk-for-python
azure-mgmt-web/azure/mgmt/web/models/recover_response.py
3
1933
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from .resource import Resource


class RecoverResponse(Resource):
    """Response for an app recovery request.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: Resource Id.
    :vartype id: str
    :param name: Resource Name.
    :type name: str
    :param kind: Kind of resource.
    :type kind: str
    :param location: Resource Location.
    :type location: str
    :param type: Resource type.
    :type type: str
    :param tags: Resource tags.
    :type tags: dict
    :ivar operation_id: ID of the recovery operation. Can be used to check
     the status of the corresponding operation.
    :vartype operation_id: str
    """

    _validation = {
        'id': {'readonly': True},
        'location': {'required': True},
        'operation_id': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'kind': {'key': 'kind', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'operation_id': {'key': 'properties.operationId', 'type': 'str'},
    }

    def __init__(self, location, name=None, kind=None, type=None, tags=None):
        super(RecoverResponse, self).__init__(name=name, kind=kind, location=location, type=type, tags=tags)
        self.operation_id = None
mit
6,860,574,707,578,278,000
33.517857
108
0.566994
false
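
Per the _validation map above, location is the only required constructor argument and operation_id is read-only. A minimal construction sketch follows; the values are placeholders, the import path is inferred from the file's location in the package, and the surrounding azure-mgmt-web package is assumed to be installed.

# Hypothetical construction of the model above; only `location` is required,
# and operation_id stays None until the client deserializes a service
# response into this model.
from azure.mgmt.web.models import RecoverResponse

resp = RecoverResponse(location='westus', name='my-site', tags={'env': 'test'})
assert resp.operation_id is None
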
silviolima/EstudoAppengine
tekton/tekton-master/src/tekton/gae/middleware/email_errors.py
4
2552
# -*- coding: utf-8 -*- from __future__ import absolute_import, unicode_literals import json import logging import traceback import time from google.appengine.api import app_identity, mail, capabilities from google.appengine.runtime import DeadlineExceededError from tekton.router import PathNotFound def get_apis_statuses(e): if not isinstance(e, DeadlineExceededError): return {} t1 = time.time() statuses = { 'blobstore': capabilities.CapabilitySet('blobstore').is_enabled(), 'datastore_v3': capabilities.CapabilitySet('datastore_v3').is_enabled(), 'datastore_v3_write': capabilities.CapabilitySet('datastore_v3', ['write']).is_enabled(), 'images': capabilities.CapabilitySet('images').is_enabled(), 'mail': capabilities.CapabilitySet('mail').is_enabled(), 'memcache': capabilities.CapabilitySet('memcache').is_enabled(), 'taskqueue': capabilities.CapabilitySet('taskqueue').is_enabled(), 'urlfetch': capabilities.CapabilitySet('urlfetch').is_enabled(), } t2 = time.time() statuses['time'] = t2 - t1 return statuses def send_error_to_admins(exception, handler, write_tmpl): import settings # workaround. See https://github.com/renzon/zenwarch/issues/3 tb = traceback.format_exc() errmsg = exception.message logging.error(errmsg) logging.error(tb) write_tmpl("/templates/error.html") appid = app_identity.get_application_id() subject = 'ERROR in %s: [%s] %s' % (appid, handler.request.path, errmsg) body = """ ------------- request ------------ %s ---------------------------------- ------------- GET params --------- %s ---------------------------------- ----------- POST params ---------- %s ---------------------------------- ----------- traceback ------------ %s ---------------------------------- """ % (handler.request, handler.request.GET, handler.request.POST, tb) body += 'API statuses = ' + json.dumps(get_apis_statuses(exception), indent=4) mail.send_mail_to_admins(sender=settings.SENDER_EMAIL, subject=subject, body=body) def execute(next_process, handler, dependencies, **kwargs): try: next_process(dependencies, **kwargs) except PathNotFound, e: handler.response.set_status(404) send_error_to_admins(e, handler, dependencies['_write_tmpl']) except BaseException, e: handler.response.status_code = 400 send_error_to_admins(e, handler, dependencies['_write_tmpl'])
mit
-8,949,559,600,167,558,000
33.026667
97
0.605408
false
zielmicha/freeciv-android
lib/freeciv/maptiles.py
4
6047
import ui import graphics import time import contextlib from ui import stream from ui import ctrl from client import freeciv SELECT_POPUP = 0 class MapWidget(ui.Widget): def __init__(self, client): self.client = client self.size = (0, 0) self.drawer = TileDrawer(client) self.tile_size = 512 self.tile_storage = {} self.tile_client_cache = {} # corresponds to client's one self.tile_map_pos = {} self.tile_draw_time = {} self.screen_pos = (0, 0) self.screen_tiles = (2500 // self.tile_size + 1, 1800 // self.tile_size + 1) self.redraw_queue = set() ctrl.bind_event('tile_posnotify', self.pos_notify) ctrl.bind_event('tile_init', self.client_init) ctrl.bind_event('tile_getconfig', self.send_config) freeciv.register(self.global_update_tile) freeciv.register(self.global_set_mapview_center) freeciv.register(self.global_update_everything) def send_config(self, m): stream.add_message({'type': 'tile_config', 'tile_size': self.tile_size}) def back(self): self.client.escape() def event(self, ev): if ev.type in (graphics.const.KEYDOWN, graphics.const.KEYUP): self.client.key_event(ev.type, ev.key) elif ev.type == graphics.const.MOUSEBUTTONDOWN: try: pos = ev.data['tile_pos'] except (AttributeError, KeyError): pass else: self.click(pos) def click(self, pos): x, y = pos self.drawer.click(x, y) def draw(self, surf, pos): surf.draw_rect((255, 255, 255, 0), pos + self.size, blend=graphics.MODE_NONE) stream.add_message({'type': 'tile', 'draw_at': pos + self.size}) self.tick() ui.layer_hooks.execute(id='map', surf=None, pos=pos, offset=(0, 0), size=self.size) def tick(self): need_redraw = self.redraw_queue & set(self.get_screen_tiles()) can_redraw = 5 if self.redraw_queue: print 'queue', len(self.redraw_queue), 'need', len(need_redraw) for tile in list(need_redraw)[:can_redraw]: self.update_tile(*tile) can_redraw -= len(need_redraw) for tile in list(self.redraw_queue)[:can_redraw]: self.update_tile(*tile) for i, j in self.get_screen_tiles(): self.push_tile(i, j) def get_screen_tiles(self): tile_pos = self.screen_pos[0] // self.tile_size, \ self.screen_pos[1] // self.tile_size return [ (i * self.tile_size, j * self.tile_size) for i in range_around(tile_pos[0], self.screen_tiles[0]) for j in range_around(tile_pos[1], self.screen_tiles[1]) ] def global_update_tile(self, x, y): # find nearest tiles by_dist = sorted(self.tile_map_pos.items(), key=lambda (k, v): abs(v[0] - x) + abs(v[1] - y) if v else 100000) by_dist = by_dist[:5] print 'update', by_dist # and queue update for k, v in by_dist: self.redraw_queue.add(k) def global_update_everything(self): print 'update everything' self.redraw_queue |= set(self.tile_storage.keys()) def global_set_mapview_center(self, x, y): stream.add_message({'type': 'tiles_center_at', 'pos': (x, y)}) def push_tile(self, x, y): self.init_tile(x, y) new_data = self.tile_storage[x, y] if new_data != self.tile_client_cache.get((x, y)): self.tile_client_cache[x, y] = new_data stream.add_message({'type': 'tile', 'id': '%d,%d' % (x, y), 'data': new_data}) def init_tile(self, x, y): if (x, y) not in self.tile_storage: self.update_tile(x, y) def update_tile(self, x, y): start = time.time() img, tile_pos = self.drawer.draw_fragment((x, y, self.tile_size, self.tile_size)) print 'updated %s in %d ms' % ((x, y), (time.time() - start) * 1000) new_data = stream.get_texture_data(img) self.tile_storage[x, y] = new_data self.tile_map_pos[x, y] = tile_pos self.tile_draw_time[x, y] = time.time() self.redraw_queue -= {(x, y)} def client_init(self, message): self.tile_client_cache = {} def pos_notify(self, 
message): x, y = message['pos'] self.screen_pos = -x, -y def range_around(x, phi): return range(x - phi/2, x - phi/2 + phi) def nround(a, r): return int(a // r) * r class TileDrawer(object): def __init__(self, client): self.map_size = (100, 100) self.client = client def draw_fragment(self, rect): with self.save_state(): self.set_map_size((rect[2], rect[3])) self.set_map_origin(rect[0], rect[1]) surf = graphics.create_surface(rect[2], rect[3]) surf.fill((255, 0, 255, 255), blend=graphics.MODE_NONE) self.client.draw_map(surf, (0, 0)) tile_pos = freeciv.func.py_canvas_to_map(rect[2] / 2, rect[3] / 2) return surf, tile_pos def set_map_size(self, size): self.map_size = size self.client.set_map_size(size) def set_map_origin(self, x, y): freeciv.func.base_set_mapview_origin(x, y) def click(self, x, y): with self.save_state(): self.set_map_origin(x, y) freeciv.func.action_button_pressed(0, 0, SELECT_POPUP) @contextlib.contextmanager def save_state(self): origin = freeciv.func.get_map_view_origin() size = self.map_size try: yield finally: self.map_size = size freeciv.func.base_set_mapview_origin(origin[0], origin[1])
gpl-2.0
-5,600,320,362,428,581,000
32.782123
91
0.539937
false
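
The tile cache above keys tiles by pixel offsets that are multiples of tile_size, with range_around centering a window of tile indices on the current screen tile. The standalone sketch below re-states the helper so the indexing can be run without the freeciv client; the sample screen position is arbitrary.

# Standalone illustration of the tile-window indexing used by
# MapWidget.get_screen_tiles above; range_around is re-stated here
# (with // for Python 3 compatibility) so the sketch runs on its own.
def range_around(x, phi):
    return range(x - phi // 2, x - phi // 2 + phi)

tile_size = 512
screen_pos = (1300, 700)                                   # arbitrary sample
screen_tiles = (2500 // tile_size + 1, 1800 // tile_size + 1)  # (5, 4)

tile_pos = (screen_pos[0] // tile_size, screen_pos[1] // tile_size)  # (2, 1)
keys = [(i * tile_size, j * tile_size)
        for i in range_around(tile_pos[0], screen_tiles[0])
        for j in range_around(tile_pos[1], screen_tiles[1])]
print(keys[:3])   # -> [(0, -512), (0, 0), (0, 512)]
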
Distrotech/intellij-community
python/lib/Lib/site-packages/django/contrib/gis/tests/test_spatialrefsys.py
94
6686
from django.db import connection from django.contrib.gis.tests.utils import mysql, no_mysql, oracle, postgis, spatialite from django.utils import unittest test_srs = ({'srid' : 4326, 'auth_name' : ('EPSG', True), 'auth_srid' : 4326, 'srtext' : 'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],TOWGS84[0,0,0,0,0,0,0],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]]', 'srtext14' : 'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]]', 'proj4' : '+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs ', 'spheroid' : 'WGS 84', 'name' : 'WGS 84', 'geographic' : True, 'projected' : False, 'spatialite' : True, 'ellipsoid' : (6378137.0, 6356752.3, 298.257223563), # From proj's "cs2cs -le" and Wikipedia (semi-minor only) 'eprec' : (1, 1, 9), }, {'srid' : 32140, 'auth_name' : ('EPSG', False), 'auth_srid' : 32140, 'srtext' : 'PROJCS["NAD83 / Texas South Central",GEOGCS["NAD83",DATUM["North_American_Datum_1983",SPHEROID["GRS 1980",6378137,298.257222101,AUTHORITY["EPSG","7019"]],AUTHORITY["EPSG","6269"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4269"]],PROJECTION["Lambert_Conformal_Conic_2SP"],PARAMETER["standard_parallel_1",30.28333333333333],PARAMETER["standard_parallel_2",28.38333333333333],PARAMETER["latitude_of_origin",27.83333333333333],PARAMETER["central_meridian",-99],PARAMETER["false_easting",600000],PARAMETER["false_northing",4000000],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AUTHORITY["EPSG","32140"]]', 'srtext14': 'PROJCS["NAD83 / Texas South Central",GEOGCS["NAD83",DATUM["North_American_Datum_1983",SPHEROID["GRS 1980",6378137,298.257222101,AUTHORITY["EPSG","7019"]],AUTHORITY["EPSG","6269"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4269"]],UNIT["metre",1,AUTHORITY["EPSG","9001"]],PROJECTION["Lambert_Conformal_Conic_2SP"],PARAMETER["standard_parallel_1",30.28333333333333],PARAMETER["standard_parallel_2",28.38333333333333],PARAMETER["latitude_of_origin",27.83333333333333],PARAMETER["central_meridian",-99],PARAMETER["false_easting",600000],PARAMETER["false_northing",4000000],AUTHORITY["EPSG","32140"],AXIS["X",EAST],AXIS["Y",NORTH]]', 'proj4' : '+proj=lcc +lat_1=30.28333333333333 +lat_2=28.38333333333333 +lat_0=27.83333333333333 +lon_0=-99 +x_0=600000 +y_0=4000000 +ellps=GRS80 +datum=NAD83 +units=m +no_defs ', 'spheroid' : 'GRS 1980', 'name' : 'NAD83 / Texas South Central', 'geographic' : False, 'projected' : True, 'spatialite' : False, 'ellipsoid' : (6378137.0, 6356752.31414, 298.257222101), # From proj's "cs2cs -le" and Wikipedia (semi-minor only) 'eprec' : (1, 5, 10), }, ) if oracle: from django.contrib.gis.db.backends.oracle.models import SpatialRefSys elif postgis: from django.contrib.gis.db.backends.postgis.models import SpatialRefSys elif spatialite: from django.contrib.gis.db.backends.spatialite.models import SpatialRefSys class SpatialRefSysTest(unittest.TestCase): @no_mysql def test01_retrieve(self): "Testing retrieval of SpatialRefSys model objects." 
for sd in test_srs: srs = SpatialRefSys.objects.get(srid=sd['srid']) self.assertEqual(sd['srid'], srs.srid) # Some of the authority names are borked on Oracle, e.g., SRID=32140. # also, Oracle Spatial seems to add extraneous info to fields, hence the # the testing with the 'startswith' flag. auth_name, oracle_flag = sd['auth_name'] if postgis or (oracle and oracle_flag): self.assertEqual(True, srs.auth_name.startswith(auth_name)) self.assertEqual(sd['auth_srid'], srs.auth_srid) # No proj.4 and different srtext on oracle backends :( if postgis: if connection.ops.spatial_version >= (1, 4, 0): srtext = sd['srtext14'] else: srtext = sd['srtext'] self.assertEqual(srtext, srs.wkt) self.assertEqual(sd['proj4'], srs.proj4text) @no_mysql def test02_osr(self): "Testing getting OSR objects from SpatialRefSys model objects." for sd in test_srs: sr = SpatialRefSys.objects.get(srid=sd['srid']) self.assertEqual(True, sr.spheroid.startswith(sd['spheroid'])) self.assertEqual(sd['geographic'], sr.geographic) self.assertEqual(sd['projected'], sr.projected) if not (spatialite and not sd['spatialite']): # Can't get 'NAD83 / Texas South Central' from PROJ.4 string # on SpatiaLite self.assertEqual(True, sr.name.startswith(sd['name'])) # Testing the SpatialReference object directly. if postgis or spatialite: srs = sr.srs self.assertEqual(sd['proj4'], srs.proj4) # No `srtext` field in the `spatial_ref_sys` table in SpatiaLite if not spatialite: if connection.ops.spatial_version >= (1, 4, 0): srtext = sd['srtext14'] else: srtext = sd['srtext'] self.assertEqual(srtext, srs.wkt) @no_mysql def test03_ellipsoid(self): "Testing the ellipsoid property." for sd in test_srs: # Getting the ellipsoid and precision parameters. ellps1 = sd['ellipsoid'] prec = sd['eprec'] # Getting our spatial reference and its ellipsoid srs = SpatialRefSys.objects.get(srid=sd['srid']) ellps2 = srs.ellipsoid for i in range(3): param1 = ellps1[i] param2 = ellps2[i] self.assertAlmostEqual(ellps1[i], ellps2[i], prec[i]) def suite(): s = unittest.TestSuite() s.addTest(unittest.makeSuite(SpatialRefSysTest)) return s def run(verbosity=2): unittest.TextTestRunner(verbosity=verbosity).run(suite())
apache-2.0
-8,447,798,340,356,719,000
58.168142
737
0.615465
false
MelanieBittl/dolfin
demo/undocumented/functional/python/demo_functional.py
3
1638
"""This demo program computes the value of the functional M(v) = int v^2 + (grad v)^2 dx on the unit square for v = sin(x) + cos(y). The exact value of the functional is M(v) = 2 + 2*sin(1)*(1 - cos(1)) The functional M corresponds to the energy norm for a simple reaction-diffusion equation.""" # Copyright (C) 2007 Kristian B. Oelgaard # # This file is part of DOLFIN. # # DOLFIN is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # DOLFIN is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with DOLFIN. If not, see <http://www.gnu.org/licenses/>. # # Modified by Anders Logg, 2008. # # First added: 2007-11-14 # Last changed: 2012-11-12 from __future__ import print_function from dolfin import * # Create mesh and define function space mesh = UnitSquareMesh(16, 16) V = FunctionSpace(mesh, "CG", 2) # Define the function v v = Expression("sin(x[0]) + cos(x[1])", element=FiniteElement("CG", triangle, 2)) # Define functional M = (v*v + dot(grad(v), grad(v)))*dx(mesh) # Evaluate functional value = assemble(M) exact_value = 2.0 + 2.0*sin(1.0)*(1.0 - cos(1.0)) print("The energy norm of v is: %.15g" % value) print("It should be: %.15g" % exact_value)
gpl-3.0
-199,221,757,950,484,350
31.117647
81
0.704518
false
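
The exact value quoted in the demo's docstring follows from a short calculation; a sketch of it in LaTeX, using the same v(x, y) = sin(x) + cos(y) on the unit square as above:

% Derivation of the exact value quoted in the dolfin demo above.
\begin{align*}
  v^2 + |\nabla v|^2
    &= \sin^2 x + 2\sin x\cos y + \cos^2 y + \cos^2 x + \sin^2 y
     = 2 + 2\sin x \cos y, \\
  M(v) &= \int_0^1\!\!\int_0^1 \bigl(v^2 + |\nabla v|^2\bigr)\,dx\,dy
     = 2 + 2\left(\int_0^1 \sin x\,dx\right)\!\left(\int_0^1 \cos y\,dy\right)
     = 2 + 2\,(1-\cos 1)\,\sin 1 .
\end{align*}
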
CalSol/Impulse
Tracker/register.py
1
3076
# Copyright 2010 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Request handlers for the OAuth authorization process.""" __author__ = 'Ka-Ping Yee <[email protected]>' from google.appengine.ext import db import datetime import latitude import model import oauth import oauth_webapp import utils class RegisterHandler(utils.Handler): """Registration and Latitude API authorization for new users.""" def get(self): self.require_user() nickname = self.request.get('nickname', '') next = self.request.get('next', '') duration = utils.describe_delta( datetime.timedelta(0, int(self.request.get('duration', '0')))) if not nickname: self.render('templates/register.html', next=next, duration=duration, nickname=self.user.nickname().split('@')[0]) else: # Then proceed to the OAuth authorization page. parameters = { 'scope': latitude.LatitudeOAuthClient.SCOPE, 'domain': model.Config.get('oauth_consumer_key'), 'granularity': 'best', 'location': 'current' } callback_url = self.request.host_url + '/_oauth_callback?' + \ utils.urlencode(nickname=nickname, next=next) oauth_webapp.redirect_to_authorization_page( self, latitude.LatitudeOAuthClient(utils.oauth_consumer), callback_url, parameters) class OAuthCallbackHandler(utils.Handler): """Handler for the OAuth callback after a user has granted permission.""" def get(self): self.require_user() next = self.request.get('next', '') access_token = oauth_webapp.handle_authorization_finished( self, latitude.LatitudeOAuthClient(utils.oauth_consumer)) # Store a new Member object, including the user's current location. member = model.Member.create(self.user) member.nickname = self.request.get('nickname') member.latitude_key = access_token.key member.latitude_secret = access_token.secret member.location = utils.get_location(member) member.location_time = datetime.datetime.utcnow() if not member.location: raise utils.ErrorMessage(400, ''' Sorry, Google Latitude has no current location for you. ''') member.put() raise utils.Redirect(next or '/') if __name__ == '__main__': utils.run([ ('/_register', RegisterHandler), ('/_oauth_callback', OAuthCallbackHandler) ])
apache-2.0
-3,292,369,212,837,635,600
36.060241
80
0.645969
false
livc/Paddle
python/paddle/utils/preprocess_util.py
18
13149
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import math import cPickle as pickle import random import collections def save_file(data, filename): """ Save data into pickle format. data: the data to save. filename: the output filename. """ pickle.dump(data, open(filename, 'wb'), protocol=pickle.HIGHEST_PROTOCOL) def save_list(l, outfile): """ Save a list of string into a text file. There is one line for each string. l: the list of string to save outfile: the output file """ open(outfile, "w").write("\n".join(l)) def exclude_pattern(f): """ Return whether f is in the exlucde pattern. Exclude the files that starts with . or ends with ~. """ return f.startswith(".") or f.endswith("~") def list_dirs(path): """ Return a list of directories in path. Exclude all the directories that start with '.'. path: the base directory to search over. """ return [ os.path.join(path, d) for d in next(os.walk(path))[1] if not exclude_pattern(d) ] def list_images(path, exts=set(["jpg", "png", "bmp", "jpeg"])): """ Return a list of images in path. path: the base directory to search over. exts: the extensions of the images to find. """ return [os.path.join(path, d) for d in os.listdir(path) \ if os.path.isfile(os.path.join(path, d)) and not exclude_pattern(d)\ and os.path.splitext(d)[-1][1:] in exts] def list_files(path): """ Return a list of files in path. path: the base directory to search over. exts: the extensions of the images to find. """ return [os.path.join(path, d) for d in os.listdir(path) \ if os.path.isfile(os.path.join(path, d)) and not exclude_pattern(d)] def get_label_set_from_dir(path): """ Return a dictionary of the labels and label ids from a path. Assume each direcotry in the path corresponds to a unique label. The keys of the dictionary is the label name. The values of the dictionary is the label id. """ dirs = list_dirs(path) return dict([(os.path.basename(d), i) for i, d in enumerate(sorted(dirs))]) class Label: """ A class of label data. """ def __init__(self, label, name): """ label: the id of the label. name: the name of the label. """ self.label = label self.name = name def convert_to_paddle_format(self): """ convert the image into the paddle batch format. """ return int(self.label) def __hash__(self): return hash((self.label)) class Dataset: """ A class to represent a dataset. A dataset contains a set of items. Each item contains multiple slots of data. For example: in image classification dataset, each item contains two slot, The first slot is an image, and the second slot is a label. """ def __init__(self, data, keys): """ data: a list of data. Each data is a tuple containing multiple slots of data. Each slot is an object with convert_to_paddle_format function. keys: contains a list of keys for all the slots. """ self.data = data self.keys = keys def check_valid(self): for d in self.data: assert (len(d) == len(self.keys)) def permute(self, key_id, num_per_batch): """ Permuate data for batching. It supports two types now: 1. 
if key_id == None, the batching process is completely random. 2. if key_id is not None. The batching process Permuate the data so that the key specified by key_id are uniformly distributed in batches. See the comments of permute_by_key for details. """ if key_id is None: self.uniform_permute() else: self.permute_by_key(key_id, num_per_batch) def uniform_permute(self): """ Permuate the data randomly. """ random.shuffle(self.data) def permute_by_key(self, key_id, num_per_batch): """ Permuate the data so that the key specified by key_id are uniformly distributed in batches. For example: if we have three labels, and the number of data for each label are 100, 200, and 300, respectively. The number of batches is 4. Then, the number of data for these labels is 25, 50, and 75. """ # Store the indices of the data that has the key value # specified by key_id. keyvalue_indices = collections.defaultdict(list) for idx in range(len(self.data)): keyvalue_indices[self.data[idx][key_id].label].append(idx) for k in keyvalue_indices: random.shuffle(keyvalue_indices[k]) num_data_per_key_batch = \ math.ceil(num_per_batch / float(len(keyvalue_indices.keys()))) if num_data_per_key_batch < 2: raise Exception("The number of data in a batch is too small") permuted_data = [] keyvalue_readpointer = collections.defaultdict(int) while len(permuted_data) < len(self.data): for k in keyvalue_indices: begin_idx = keyvalue_readpointer[k] end_idx = int( min(begin_idx + num_data_per_key_batch, len(keyvalue_indices[k]))) print "begin_idx, end_idx" print begin_idx, end_idx for idx in range(begin_idx, end_idx): permuted_data.append(self.data[keyvalue_indices[k][idx]]) keyvalue_readpointer[k] = end_idx self.data = permuted_data class DataBatcher: """ A class that is used to create batches for both training and testing datasets. """ def __init__(self, train_data, test_data, label_set): """ train_data, test_data: Each one is a dataset object repesenting training and testing data, respectively. label_set: a dictionary storing the mapping from label name to label id. """ self.train_data = train_data self.test_data = test_data self.label_set = label_set self.num_per_batch = 5000 assert (self.train_data.keys == self.test_data.keys) def create_batches_and_list(self, output_path, train_list_name, test_list_name, label_set_name): """ Create batches for both training and testing objects. It also create train.list and test.list to indicate the list of the batch files for training and testing data, respectively. """ train_list = self.create_batches(self.train_data, output_path, "train_", self.num_per_batch) test_list = self.create_batches(self.test_data, output_path, "test_", self.num_per_batch) save_list(train_list, os.path.join(output_path, train_list_name)) save_list(test_list, os.path.join(output_path, test_list_name)) save_file(self.label_set, os.path.join(output_path, label_set_name)) def create_batches(self, data, output_path, prefix="", num_data_per_batch=5000): """ Create batches for a Dataset object. data: the Dataset object to process. output_path: the output path of the batches. prefix: the prefix of each batch. num_data_per_batch: number of data in each batch. 
""" num_batches = int(math.ceil(len(data.data) / float(num_data_per_batch))) batch_names = [] data.check_valid() num_slots = len(data.keys) for i in range(num_batches): batch_name = os.path.join(output_path, prefix + "batch_%03d" % i) out_data = dict([(k, []) for k in data.keys]) begin_idx = i * num_data_per_batch end_idx = min((i + 1) * num_data_per_batch, len(data.data)) for j in range(begin_idx, end_idx): for slot_id in range(num_slots): out_data[data.keys[slot_id]].\ append(data.data[j][slot_id].convert_to_paddle_format()) save_file(out_data, batch_name) batch_names.append(batch_name) return batch_names class DatasetCreater(object): """ A virtual class for creating datasets. The derived clasas needs to implemnt the following methods: - create_dataset() - create_meta_file() """ def __init__(self, data_path): """ data_path: the path to store the training data and batches. train_dir_name: relative training data directory. test_dir_name: relative testing data directory. batch_dir_name: relative batch directory. num_per_batch: the number of data in a batch. meta_filename: the filename of the meta file. train_list_name: training batch list name. test_list_name: testing batch list name. label_set: label set name. overwrite: whether to overwrite the files if the batches are already in the given path. """ self.data_path = data_path self.train_dir_name = 'train' self.test_dir_name = 'test' self.batch_dir_name = 'batches' self.num_per_batch = 50000 self.meta_filename = "batches.meta" self.train_list_name = "train.list" self.test_list_name = "test.list" self.label_set_name = "labels.pkl" self.output_path = os.path.join(self.data_path, self.batch_dir_name) self.overwrite = False self.permutate_key = "labels" self.from_list = False def create_meta_file(self, data): """ Create a meta file from training data. data: training data given in a Dataset format. """ raise NotImplementedError def create_dataset(self, path): """ Create a data set object from a path. It will use directory structure or a file list to determine dataset if self.from_list is True. Otherwise, it will uses a file list to determine the datset. path: the path of the dataset. return a tuple of Dataset object, and a mapping from lable set to label id. """ if self.from_list: return self.create_dataset_from_list(path) else: return self.create_dataset_from_dir(path) def create_dataset_from_list(self, path): """ Create a data set object from a path. It will uses a file list to determine the datset. path: the path of the dataset. return a tuple of Dataset object, and a mapping from lable set to label id """ raise NotImplementedError def create_dataset_from_dir(self, path): """ Create a data set object from a path. It will use directory structure or a file list to determine dataset if self.from_list is True. path: the path of the dataset. return a tuple of Dataset object, and a mapping from lable set to label id """ raise NotImplementedError def create_batches(self): """ create batches and meta file. 
""" train_path = os.path.join(self.data_path, self.train_dir_name) test_path = os.path.join(self.data_path, self.test_dir_name) out_path = os.path.join(self.data_path, self.batch_dir_name) if not os.path.exists(out_path): os.makedirs(out_path) if (self.overwrite or not os.path.exists( os.path.join(out_path, self.train_list_name))): train_data, train_label_set = \ self.create_dataset(train_path) test_data, test_label_set = \ self.create_dataset(test_path) train_data.permute( self.keys.index(self.permutate_key), self.num_per_batch) assert (train_label_set == test_label_set) data_batcher = DataBatcher(train_data, test_data, train_label_set) data_batcher.num_per_batch = self.num_per_batch data_batcher.create_batches_and_list( self.output_path, self.train_list_name, self.test_list_name, self.label_set_name) self.num_classes = len(train_label_set.keys()) self.create_meta_file(train_data) return out_path
apache-2.0
-5,976,977,793,880,472,000
35.323204
112
0.599437
false
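
Dataset.permute_by_key above interleaves data so that each label is spread roughly uniformly across batches. The standalone sketch below reproduces the counting scheme its docstring describes on plain lists; it is not the Paddle API, and the label counts and batch size are made up for illustration.

# Standalone sketch of the label-uniform batching described in
# Dataset.permute_by_key above; plain (label, value) tuples stand in
# for data items.
import collections
import math

items = [('a', i) for i in range(4)] + [('b', i) for i in range(8)]
num_per_batch = 6

by_label = collections.defaultdict(list)
for label, value in items:
    by_label[label].append((label, value))

# Each pass takes ceil(num_per_batch / num_labels) items of every label,
# mirroring num_data_per_key_batch in the code above.
per_label = int(math.ceil(num_per_batch / float(len(by_label))))  # 3

permuted = []
pointer = collections.defaultdict(int)
while len(permuted) < len(items):
    for label in sorted(by_label):
        begin = pointer[label]
        end = min(begin + per_label, len(by_label[label]))
        permuted.extend(by_label[label][begin:end])
        pointer[label] = end

print([label for label, _ in permuted])
# -> ['a', 'a', 'a', 'b', 'b', 'b', 'a', 'b', 'b', 'b', 'b', 'b']
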
erikr/django
django/db/migrations/operations/models.py
12
33007
from __future__ import unicode_literals from django.db import models from django.db.migrations.operations.base import Operation from django.db.migrations.state import ModelState from django.db.models.options import normalize_together from django.utils import six from django.utils.functional import cached_property from .fields import ( AddField, AlterField, FieldOperation, RemoveField, RenameField, ) def _check_for_duplicates(arg_name, objs): used_vals = set() for val in objs: if val in used_vals: raise ValueError( "Found duplicate value %s in CreateModel %s argument." % (val, arg_name) ) used_vals.add(val) class ModelOperation(Operation): def __init__(self, name): self.name = name @cached_property def name_lower(self): return self.name.lower() def references_model(self, name, app_label=None): return name.lower() == self.name_lower def reduce(self, operation, in_between, app_label=None): return ( super(ModelOperation, self).reduce(operation, in_between, app_label=app_label) or not operation.references_model(self.name, app_label) ) class CreateModel(ModelOperation): """ Create a model's table. """ serialization_expand_args = ['fields', 'options', 'managers'] def __init__(self, name, fields, options=None, bases=None, managers=None): self.fields = fields self.options = options or {} self.bases = bases or (models.Model,) self.managers = managers or [] super(CreateModel, self).__init__(name) # Sanity-check that there are no duplicated field names, bases, or # manager names _check_for_duplicates('fields', (name for name, _ in self.fields)) _check_for_duplicates('bases', ( base._meta.label_lower if hasattr(base, '_meta') else base.lower() if isinstance(base, six.string_types) else base for base in self.bases )) _check_for_duplicates('managers', (name for name, _ in self.managers)) def deconstruct(self): kwargs = { 'name': self.name, 'fields': self.fields, } if self.options: kwargs['options'] = self.options if self.bases and self.bases != (models.Model,): kwargs['bases'] = self.bases if self.managers and self.managers != [('objects', models.Manager())]: kwargs['managers'] = self.managers return ( self.__class__.__name__, [], kwargs ) def state_forwards(self, app_label, state): state.add_model(ModelState( app_label, self.name, list(self.fields), dict(self.options), tuple(self.bases), list(self.managers), )) def database_forwards(self, app_label, schema_editor, from_state, to_state): model = to_state.apps.get_model(app_label, self.name) if self.allow_migrate_model(schema_editor.connection.alias, model): schema_editor.create_model(model) def database_backwards(self, app_label, schema_editor, from_state, to_state): model = from_state.apps.get_model(app_label, self.name) if self.allow_migrate_model(schema_editor.connection.alias, model): schema_editor.delete_model(model) def describe(self): return "Create %smodel %s" % ("proxy " if self.options.get("proxy", False) else "", self.name) def references_model(self, name, app_label=None): name_lower = name.lower() if name_lower == self.name_lower: return True # Check we didn't inherit from the model models_to_check = [ base for base in self.bases if base is not models.Model and isinstance(base, (models.base.ModelBase, six.string_types)) ] # Check we have no FKs/M2Ms with it for fname, field in self.fields: if field.remote_field: models_to_check.append(field.remote_field.model) # Now go over all the models and check against them for model in models_to_check: model_app_label, model_name = self.model_to_key(model) if model_name.lower() == name_lower: if 
app_label is None or not model_app_label or model_app_label == app_label: return True return False def model_to_key(self, model): """ Take either a model class or an "app_label.ModelName" string and return (app_label, object_name). """ if isinstance(model, six.string_types): return model.split(".", 1) else: return model._meta.app_label, model._meta.object_name def reduce(self, operation, in_between, app_label=None): if (isinstance(operation, DeleteModel) and self.name_lower == operation.name_lower and not self.options.get("proxy", False)): return [] elif isinstance(operation, RenameModel) and self.name_lower == operation.old_name_lower: return [ CreateModel( operation.new_name, fields=self.fields, options=self.options, bases=self.bases, managers=self.managers, ), ] elif isinstance(operation, FieldOperation) and self.name_lower == operation.model_name_lower: if isinstance(operation, AddField): # Don't allow optimizations of FKs through models they reference if hasattr(operation.field, "remote_field") and operation.field.remote_field: for between in in_between: # Check that it doesn't point to the model app_label, object_name = self.model_to_key(operation.field.remote_field.model) if between.references_model(object_name, app_label): return False # Check that it's not through the model if getattr(operation.field.remote_field, "through", None): app_label, object_name = self.model_to_key(operation.field.remote_field.through) if between.references_model(object_name, app_label): return False return [ CreateModel( self.name, fields=self.fields + [(operation.name, operation.field)], options=self.options, bases=self.bases, managers=self.managers, ), ] elif isinstance(operation, AlterField): return [ CreateModel( self.name, fields=[ (n, operation.field if n == operation.name else v) for n, v in self.fields ], options=self.options, bases=self.bases, managers=self.managers, ), ] elif isinstance(operation, RemoveField): return [ CreateModel( self.name, fields=[ (n, v) for n, v in self.fields if n.lower() != operation.name_lower ], options=self.options, bases=self.bases, managers=self.managers, ), ] elif isinstance(operation, RenameField): return [ CreateModel( self.name, fields=[ (operation.new_name if n == operation.old_name else n, v) for n, v in self.fields ], options=self.options, bases=self.bases, managers=self.managers, ), ] return super(CreateModel, self).reduce(operation, in_between, app_label=app_label) class DeleteModel(ModelOperation): """ Drops a model's table. """ def deconstruct(self): kwargs = { 'name': self.name, } return ( self.__class__.__name__, [], kwargs ) def state_forwards(self, app_label, state): state.remove_model(app_label, self.name_lower) def database_forwards(self, app_label, schema_editor, from_state, to_state): model = from_state.apps.get_model(app_label, self.name) if self.allow_migrate_model(schema_editor.connection.alias, model): schema_editor.delete_model(model) def database_backwards(self, app_label, schema_editor, from_state, to_state): model = to_state.apps.get_model(app_label, self.name) if self.allow_migrate_model(schema_editor.connection.alias, model): schema_editor.create_model(model) def describe(self): return "Delete model %s" % (self.name, ) class RenameModel(ModelOperation): """ Renames a model. 
""" def __init__(self, old_name, new_name): self.old_name = old_name self.new_name = new_name super(RenameModel, self).__init__(old_name) @cached_property def old_name_lower(self): return self.old_name.lower() @cached_property def new_name_lower(self): return self.new_name.lower() def deconstruct(self): kwargs = { 'old_name': self.old_name, 'new_name': self.new_name, } return ( self.__class__.__name__, [], kwargs ) def state_forwards(self, app_label, state): # In cases where state doesn't have rendered apps, prevent subsequent # reload_model() calls from rendering models for performance # reasons. This method should be refactored to avoid relying on # state.apps (#27310). reset_apps = 'apps' not in state.__dict__ apps = state.apps model = apps.get_model(app_label, self.old_name) model._meta.apps = apps # Get all of the related objects we need to repoint all_related_objects = ( f for f in model._meta.get_fields(include_hidden=True) if f.auto_created and not f.concrete and (not f.hidden or f.many_to_many) ) if reset_apps: del state.__dict__['apps'] # Rename the model state.models[app_label, self.new_name_lower] = state.models[app_label, self.old_name_lower] state.models[app_label, self.new_name_lower].name = self.new_name state.remove_model(app_label, self.old_name_lower) # Repoint the FKs and M2Ms pointing to us for related_object in all_related_objects: if related_object.model is not model: # The model being renamed does not participate in this relation # directly. Rather, a superclass does. continue # Use the new related key for self referential related objects. if related_object.related_model == model: related_key = (app_label, self.new_name_lower) else: related_key = ( related_object.related_model._meta.app_label, related_object.related_model._meta.model_name, ) new_fields = [] for name, field in state.models[related_key].fields: if name == related_object.field.name: field = field.clone() field.remote_field.model = "%s.%s" % (app_label, self.new_name) new_fields.append((name, field)) state.models[related_key].fields = new_fields state.reload_model(*related_key) # Repoint M2Ms with through pointing to us related_models = { f.remote_field.model for f in model._meta.fields if getattr(f.remote_field, 'model', None) } model_name = '%s.%s' % (app_label, self.old_name) for related_model in related_models: if related_model == model: related_key = (app_label, self.new_name_lower) else: related_key = (related_model._meta.app_label, related_model._meta.model_name) new_fields = [] changed = False for name, field in state.models[related_key].fields: if field.is_relation and field.many_to_many and field.remote_field.through == model_name: field = field.clone() field.remote_field.through = '%s.%s' % (app_label, self.new_name) changed = True new_fields.append((name, field)) if changed: state.models[related_key].fields = new_fields state.reload_model(*related_key) state.reload_model(app_label, self.new_name_lower) def database_forwards(self, app_label, schema_editor, from_state, to_state): new_model = to_state.apps.get_model(app_label, self.new_name) if self.allow_migrate_model(schema_editor.connection.alias, new_model): old_model = from_state.apps.get_model(app_label, self.old_name) # Move the main table schema_editor.alter_db_table( new_model, old_model._meta.db_table, new_model._meta.db_table, ) # Alter the fields pointing to us for related_object in old_model._meta.related_objects: if related_object.related_model == old_model: model = new_model related_key = (app_label, self.new_name_lower) else: 
model = related_object.related_model related_key = ( related_object.related_model._meta.app_label, related_object.related_model._meta.model_name, ) to_field = to_state.apps.get_model( *related_key )._meta.get_field(related_object.field.name) schema_editor.alter_field( model, related_object.field, to_field, ) # Rename M2M fields whose name is based on this model's name. fields = zip(old_model._meta.local_many_to_many, new_model._meta.local_many_to_many) for (old_field, new_field) in fields: # Skip self-referential fields as these are renamed above. if new_field.model == new_field.related_model or not new_field.remote_field.through._meta.auto_created: continue # Rename the M2M table that's based on this model's name. old_m2m_model = old_field.remote_field.through new_m2m_model = new_field.remote_field.through schema_editor.alter_db_table( new_m2m_model, old_m2m_model._meta.db_table, new_m2m_model._meta.db_table, ) # Rename the column in the M2M table that's based on this # model's name. schema_editor.alter_field( new_m2m_model, old_m2m_model._meta.get_field(old_model._meta.model_name), new_m2m_model._meta.get_field(new_model._meta.model_name), ) def database_backwards(self, app_label, schema_editor, from_state, to_state): self.new_name_lower, self.old_name_lower = self.old_name_lower, self.new_name_lower self.new_name, self.old_name = self.old_name, self.new_name self.database_forwards(app_label, schema_editor, from_state, to_state) self.new_name_lower, self.old_name_lower = self.old_name_lower, self.new_name_lower self.new_name, self.old_name = self.old_name, self.new_name def references_model(self, name, app_label=None): return ( name.lower() == self.old_name_lower or name.lower() == self.new_name_lower ) def describe(self): return "Rename model %s to %s" % (self.old_name, self.new_name) def reduce(self, operation, in_between, app_label=None): if (isinstance(operation, RenameModel) and self.new_name_lower == operation.old_name_lower): return [ RenameModel( self.old_name, operation.new_name, ), ] # Skip `ModelOperation.reduce` as we want to run `references_model` # against self.new_name. 
return ( super(ModelOperation, self).reduce(operation, in_between, app_label=app_label) or not operation.references_model(self.new_name, app_label) ) class AlterModelTable(ModelOperation): """ Renames a model's table """ def __init__(self, name, table): self.table = table super(AlterModelTable, self).__init__(name) def deconstruct(self): kwargs = { 'name': self.name, 'table': self.table, } return ( self.__class__.__name__, [], kwargs ) def state_forwards(self, app_label, state): state.models[app_label, self.name_lower].options["db_table"] = self.table state.reload_model(app_label, self.name_lower) def database_forwards(self, app_label, schema_editor, from_state, to_state): new_model = to_state.apps.get_model(app_label, self.name) if self.allow_migrate_model(schema_editor.connection.alias, new_model): old_model = from_state.apps.get_model(app_label, self.name) schema_editor.alter_db_table( new_model, old_model._meta.db_table, new_model._meta.db_table, ) # Rename M2M fields whose name is based on this model's db_table for (old_field, new_field) in zip(old_model._meta.local_many_to_many, new_model._meta.local_many_to_many): if new_field.remote_field.through._meta.auto_created: schema_editor.alter_db_table( new_field.remote_field.through, old_field.remote_field.through._meta.db_table, new_field.remote_field.through._meta.db_table, ) def database_backwards(self, app_label, schema_editor, from_state, to_state): return self.database_forwards(app_label, schema_editor, from_state, to_state) def describe(self): return "Rename table for %s to %s" % ( self.name, self.table if self.table is not None else "(default)" ) def reduce(self, operation, in_between, app_label=None): if isinstance(operation, (AlterModelTable, DeleteModel)) and self.name_lower == operation.name_lower: return [operation] return super(AlterModelTable, self).reduce(operation, in_between, app_label=app_label) class ModelOptionOperation(ModelOperation): def reduce(self, operation, in_between, app_label=None): if isinstance(operation, (self.__class__, DeleteModel)) and self.name_lower == operation.name_lower: return [operation] return super(ModelOptionOperation, self).reduce(operation, in_between, app_label=app_label) class FieldRelatedOptionOperation(ModelOptionOperation): def reduce(self, operation, in_between, app_label=None): if (isinstance(operation, FieldOperation) and self.name_lower == operation.model_name_lower and not self.references_field(operation.model_name, operation.name)): return [operation, self] return super(FieldRelatedOptionOperation, self).reduce(operation, in_between, app_label=app_label) class AlterUniqueTogether(FieldRelatedOptionOperation): """ Changes the value of unique_together to the target one. Input value of unique_together must be a set of tuples. 
""" option_name = "unique_together" def __init__(self, name, unique_together): unique_together = normalize_together(unique_together) self.unique_together = set(tuple(cons) for cons in unique_together) super(AlterUniqueTogether, self).__init__(name) def deconstruct(self): kwargs = { 'name': self.name, 'unique_together': self.unique_together, } return ( self.__class__.__name__, [], kwargs ) def state_forwards(self, app_label, state): model_state = state.models[app_label, self.name_lower] model_state.options[self.option_name] = self.unique_together state.reload_model(app_label, self.name_lower) def database_forwards(self, app_label, schema_editor, from_state, to_state): new_model = to_state.apps.get_model(app_label, self.name) if self.allow_migrate_model(schema_editor.connection.alias, new_model): old_model = from_state.apps.get_model(app_label, self.name) schema_editor.alter_unique_together( new_model, getattr(old_model._meta, self.option_name, set()), getattr(new_model._meta, self.option_name, set()), ) def database_backwards(self, app_label, schema_editor, from_state, to_state): return self.database_forwards(app_label, schema_editor, from_state, to_state) def references_field(self, model_name, name, app_label=None): return ( self.references_model(model_name, app_label) and ( not self.unique_together or any((name in together) for together in self.unique_together) ) ) def describe(self): return "Alter %s for %s (%s constraint(s))" % (self.option_name, self.name, len(self.unique_together or '')) class AlterIndexTogether(FieldRelatedOptionOperation): """ Changes the value of index_together to the target one. Input value of index_together must be a set of tuples. """ option_name = "index_together" def __init__(self, name, index_together): index_together = normalize_together(index_together) self.index_together = set(tuple(cons) for cons in index_together) super(AlterIndexTogether, self).__init__(name) def deconstruct(self): kwargs = { 'name': self.name, 'index_together': self.index_together, } return ( self.__class__.__name__, [], kwargs ) def state_forwards(self, app_label, state): model_state = state.models[app_label, self.name_lower] model_state.options[self.option_name] = self.index_together state.reload_model(app_label, self.name_lower) def database_forwards(self, app_label, schema_editor, from_state, to_state): new_model = to_state.apps.get_model(app_label, self.name) if self.allow_migrate_model(schema_editor.connection.alias, new_model): old_model = from_state.apps.get_model(app_label, self.name) schema_editor.alter_index_together( new_model, getattr(old_model._meta, self.option_name, set()), getattr(new_model._meta, self.option_name, set()), ) def database_backwards(self, app_label, schema_editor, from_state, to_state): return self.database_forwards(app_label, schema_editor, from_state, to_state) def references_field(self, model_name, name, app_label=None): return ( self.references_model(model_name, app_label) and ( not self.index_together or any((name in together) for together in self.index_together) ) ) def describe(self): return "Alter %s for %s (%s constraint(s))" % (self.option_name, self.name, len(self.index_together or '')) class AlterOrderWithRespectTo(FieldRelatedOptionOperation): """ Represents a change with the order_with_respect_to option. 
""" def __init__(self, name, order_with_respect_to): self.order_with_respect_to = order_with_respect_to super(AlterOrderWithRespectTo, self).__init__(name) def deconstruct(self): kwargs = { 'name': self.name, 'order_with_respect_to': self.order_with_respect_to, } return ( self.__class__.__name__, [], kwargs ) def state_forwards(self, app_label, state): model_state = state.models[app_label, self.name_lower] model_state.options['order_with_respect_to'] = self.order_with_respect_to state.reload_model(app_label, self.name_lower) def database_forwards(self, app_label, schema_editor, from_state, to_state): to_model = to_state.apps.get_model(app_label, self.name) if self.allow_migrate_model(schema_editor.connection.alias, to_model): from_model = from_state.apps.get_model(app_label, self.name) # Remove a field if we need to if from_model._meta.order_with_respect_to and not to_model._meta.order_with_respect_to: schema_editor.remove_field(from_model, from_model._meta.get_field("_order")) # Add a field if we need to (altering the column is untouched as # it's likely a rename) elif to_model._meta.order_with_respect_to and not from_model._meta.order_with_respect_to: field = to_model._meta.get_field("_order") if not field.has_default(): field.default = 0 schema_editor.add_field( from_model, field, ) def database_backwards(self, app_label, schema_editor, from_state, to_state): self.database_forwards(app_label, schema_editor, from_state, to_state) def references_field(self, model_name, name, app_label=None): return ( self.references_model(model_name, app_label) and ( self.order_with_respect_to is None or name == self.order_with_respect_to ) ) def describe(self): return "Set order_with_respect_to on %s to %s" % (self.name, self.order_with_respect_to) class AlterModelOptions(ModelOptionOperation): """ Sets new model options that don't directly affect the database schema (like verbose_name, permissions, ordering). Python code in migrations may still need them. 
""" # Model options we want to compare and preserve in an AlterModelOptions op ALTER_OPTION_KEYS = [ "base_manager_name", "default_manager_name", "get_latest_by", "managed", "ordering", "permissions", "default_permissions", "select_on_save", "verbose_name", "verbose_name_plural", ] def __init__(self, name, options): self.options = options super(AlterModelOptions, self).__init__(name) def deconstruct(self): kwargs = { 'name': self.name, 'options': self.options, } return ( self.__class__.__name__, [], kwargs ) def state_forwards(self, app_label, state): model_state = state.models[app_label, self.name_lower] model_state.options = dict(model_state.options) model_state.options.update(self.options) for key in self.ALTER_OPTION_KEYS: if key not in self.options and key in model_state.options: del model_state.options[key] state.reload_model(app_label, self.name_lower) def database_forwards(self, app_label, schema_editor, from_state, to_state): pass def database_backwards(self, app_label, schema_editor, from_state, to_state): pass def describe(self): return "Change Meta options on %s" % (self.name, ) class AlterModelManagers(ModelOptionOperation): """ Alters the model's managers """ serialization_expand_args = ['managers'] def __init__(self, name, managers): self.managers = managers super(AlterModelManagers, self).__init__(name) def deconstruct(self): return ( self.__class__.__name__, [self.name, self.managers], {} ) def state_forwards(self, app_label, state): model_state = state.models[app_label, self.name_lower] model_state.managers = list(self.managers) state.reload_model(app_label, self.name_lower) def database_forwards(self, app_label, schema_editor, from_state, to_state): pass def database_backwards(self, app_label, schema_editor, from_state, to_state): pass def describe(self): return "Change managers on %s" % (self.name, ) class IndexOperation(Operation): option_name = 'indexes' @cached_property def model_name_lower(self): return self.model_name.lower() class AddIndex(IndexOperation): """ Add an index on a model. """ def __init__(self, model_name, index): self.model_name = model_name if not index.name: raise ValueError( "Indexes passed to AddIndex operations require a name " "argument. %r doesn't have one." % index ) self.index = index def state_forwards(self, app_label, state): model_state = state.models[app_label, self.model_name_lower] model_state.options[self.option_name].append(self.index) def database_forwards(self, app_label, schema_editor, from_state, to_state): model = to_state.apps.get_model(app_label, self.model_name) if self.allow_migrate_model(schema_editor.connection.alias, model): schema_editor.add_index(model, self.index) def database_backwards(self, app_label, schema_editor, from_state, to_state): model = from_state.apps.get_model(app_label, self.model_name) if self.allow_migrate_model(schema_editor.connection.alias, model): schema_editor.remove_index(model, self.index) def deconstruct(self): kwargs = { 'model_name': self.model_name, 'index': self.index, } return ( self.__class__.__name__, [], kwargs, ) def describe(self): return 'Create index %s on field(s) %s of model %s' % ( self.index.name, ', '.join(self.index.fields), self.model_name, ) class RemoveIndex(IndexOperation): """ Remove an index from a model. 
""" def __init__(self, model_name, name): self.model_name = model_name self.name = name def state_forwards(self, app_label, state): model_state = state.models[app_label, self.model_name_lower] indexes = model_state.options[self.option_name] model_state.options[self.option_name] = [idx for idx in indexes if idx.name != self.name] def database_forwards(self, app_label, schema_editor, from_state, to_state): model = from_state.apps.get_model(app_label, self.model_name) if self.allow_migrate_model(schema_editor.connection.alias, model): from_model_state = from_state.models[app_label, self.model_name_lower] index = from_model_state.get_index_by_name(self.name) schema_editor.remove_index(model, index) def database_backwards(self, app_label, schema_editor, from_state, to_state): model = to_state.apps.get_model(app_label, self.model_name) if self.allow_migrate_model(schema_editor.connection.alias, model): to_model_state = to_state.models[app_label, self.model_name_lower] index = to_model_state.get_index_by_name(self.name) schema_editor.add_index(model, index) def deconstruct(self): kwargs = { 'model_name': self.model_name, 'name': self.name, } return ( self.__class__.__name__, [], kwargs, ) def describe(self): return 'Remove index %s from %s' % (self.name, self.model_name)
bsd-3-clause
-6,790,830,960,815,898,000
37.604678
119
0.570879
false
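A minimal sketch of the optimizer behaviour implemented by CreateModel.reduce() in the Django migration operations module above: a trailing AddField on the same model collapses back into the CreateModel. It assumes the old-style reduce(operation, in_between) signature used in that file (a Django release that still imports six); the 'Author' model and its fields are hypothetical.

from django.db import models
from django.db.migrations.operations import AddField, CreateModel

# Hypothetical operations; 'Author' and its fields are illustrative only.
create = CreateModel(
    name='Author',
    fields=[
        ('id', models.AutoField(primary_key=True)),
        ('name', models.CharField(max_length=100)),
    ],
)
add_email = AddField(
    model_name='Author',
    name='email',
    field=models.EmailField(default=''),
)

# reduce() folds the AddField into the original CreateModel, returning a
# single operation whose field list now also contains 'email'.
reduced = create.reduce(add_email, in_between=[])
print([name for name, _ in reduced[0].fields])  # ['id', 'name', 'email']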
deniszgonjanin/ckanext-geojsonview
ckanext/geojsonview/plugin.py
1
1204
import ckan.plugins as plugins
import ckan.plugins.toolkit as toolkit
import ckanext.resourceproxy.plugin as proxy
import ckan.lib.datapreview as datapreview
from ckan.common import json


class GeojsonviewPlugin(plugins.SingletonPlugin):
    plugins.implements(plugins.IConfigurer, inherit=True)
    plugins.implements(plugins.IResourceView, inherit=True)

    # IConfigurer

    def update_config(self, config_):
        toolkit.add_template_directory(config_, 'templates')
        toolkit.add_public_directory(config_, 'public')
        toolkit.add_resource('public', 'ckanext-geojsonview')

    # IResourceView

    def info(self):
        return {
            'name': 'geojson_view',
            'title': 'Map View',
            'icon': 'globe',
            'iframed': True
        }

    def setup_template_variables(self, context, data_dict):
        proxified_url = proxy.get_proxified_resource_url(data_dict)
        return {
            'proxied_url': json.dumps(proxified_url)
        }

    def can_view(self, data_dict):
        return data_dict['resource'].get('format', '').lower() == 'geojson'

    def view_template(self, context, data_dict):
        return 'dataviewer/geojsonview.html'
agpl-3.0
114,481,911,625,623,890
30.684211
75
0.656146
false
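The plugin record above gates the map view on can_view(); a tiny standalone check of that rule (the resource dicts are made up):

def can_view(data_dict):
    # Same test as GeojsonviewPlugin.can_view(), pulled out for illustration.
    return data_dict['resource'].get('format', '').lower() == 'geojson'

print(can_view({'resource': {'format': 'GeoJSON'}}))  # True: case-insensitive match
print(can_view({'resource': {'format': 'csv'}}))      # False
print(can_view({'resource': {}}))                     # False: missing format defaults to ''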
cs591B1-Project/Social-Media-Impact-on-Stock-Market-and-Price
data/13 Honeywell/parseJSON.py
26
1412
def getSocialData(post):
    # Get Thread Object
    threadObject = post["thread"]

    domain_rank = threadObject["domain_rank"]  #domain_rank
    #print 'domain_rank:' + str(domain_rank)

    socialObject = threadObject["social"]  #social data object

    facebookData = socialObject["facebook"]  #facebook data
    #print 'facebook data:' + str(facebookData["likes"]) + ', ' + str(facebookData["comments"]) + ', ' + str(facebookData["shares"])
    fb_likes = facebookData["likes"]
    fb_comments = facebookData["comments"]
    fb_shares = facebookData["shares"]

    gplusData = socialObject["gplus"]  #gplus data
    #print 'gplus data:' + str(gplusData["shares"])
    g_shares = gplusData["shares"]

    pinterestData = socialObject["pinterest"]  #pinterest data
    #print 'pinterest data:' + str(pinterestData["shares"])
    pin_shares = pinterestData["shares"]

    linkedinData = socialObject["linkedin"]  #linkedin data
    #print 'linkedin data:' + str(linkedinData["shares"])
    linkedin_shares = linkedinData["shares"]

    stumbleduponData = socialObject["stumbledupon"]
    #print 'stumbleduponData:' + str(stumbleduponData["shares"])
    su_shares = stumbleduponData["shares"]

    vkData = socialObject["vk"]
    #print 'vkData:' + str(vkData["shares"])
    vk_shares = vkData["shares"]

    social_impact = (fb_likes + fb_comments + fb_shares + g_shares +
                     pin_shares + linkedin_shares + su_shares + vk_shares)
    #print str(social_impact)

    return social_impact
mit
6,337,773,367,350,701,000
34.325
129
0.706799
false
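A hypothetical post dictionary that exercises getSocialData() from the file above; it contains only the keys that function reads, and every count is invented:

sample_post = {
    "thread": {
        "domain_rank": 1200,
        "social": {
            "facebook": {"likes": 10, "comments": 2, "shares": 3},
            "gplus": {"shares": 1},
            "pinterest": {"shares": 0},
            "linkedin": {"shares": 4},
            "stumbledupon": {"shares": 0},
            "vk": {"shares": 1},
        },
    }
}

# social_impact is the plain sum of the engagement counts: 10+2+3+1+0+4+0+1
print(getSocialData(sample_post))  # 21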
mbareta/edx-platform-ft
common/lib/calc/calc/tests/test_preview.py
257
8723
# -*- coding: utf-8 -*- """ Unit tests for preview.py """ import unittest from calc import preview import pyparsing class LatexRenderedTest(unittest.TestCase): """ Test the initializing code for LatexRendered. Specifically that it stores the correct data and handles parens well. """ def test_simple(self): """ Test that the data values are stored without changing. """ math = 'x^2' obj = preview.LatexRendered(math, tall=True) self.assertEquals(obj.latex, math) self.assertEquals(obj.sans_parens, math) self.assertEquals(obj.tall, True) def _each_parens(self, with_parens, math, parens, tall=False): """ Helper method to test the way parens are wrapped. """ obj = preview.LatexRendered(math, parens=parens, tall=tall) self.assertEquals(obj.latex, with_parens) self.assertEquals(obj.sans_parens, math) self.assertEquals(obj.tall, tall) def test_parens(self): """ Test curvy parens. """ self._each_parens('(x+y)', 'x+y', '(') def test_brackets(self): """ Test brackets. """ self._each_parens('[x+y]', 'x+y', '[') def test_squiggles(self): """ Test curly braces. """ self._each_parens(r'\{x+y\}', 'x+y', '{') def test_parens_tall(self): """ Test curvy parens with the tall parameter. """ self._each_parens(r'\left(x^y\right)', 'x^y', '(', tall=True) def test_brackets_tall(self): """ Test brackets, also tall. """ self._each_parens(r'\left[x^y\right]', 'x^y', '[', tall=True) def test_squiggles_tall(self): """ Test tall curly braces. """ self._each_parens(r'\left\{x^y\right\}', 'x^y', '{', tall=True) def test_bad_parens(self): """ Check that we get an error with invalid parens. """ with self.assertRaisesRegexp(Exception, 'Unknown parenthesis'): preview.LatexRendered('x^2', parens='not parens') class LatexPreviewTest(unittest.TestCase): """ Run integrative tests for `latex_preview`. All functionality was tested `RenderMethodsTest`, but see if it combines all together correctly. """ def test_no_input(self): """ With no input (including just whitespace), see that no error is thrown. """ self.assertEquals('', preview.latex_preview('')) self.assertEquals('', preview.latex_preview(' ')) self.assertEquals('', preview.latex_preview(' \t ')) def test_number_simple(self): """ Simple numbers should pass through. """ self.assertEquals(preview.latex_preview('3.1415'), '3.1415') def test_number_suffix(self): """ Suffixes should be escaped. """ self.assertEquals(preview.latex_preview('1.618k'), r'1.618\text{k}') def test_number_sci_notation(self): """ Numbers with scientific notation should display nicely """ self.assertEquals( preview.latex_preview('6.0221413E+23'), r'6.0221413\!\times\!10^{+23}' ) self.assertEquals( preview.latex_preview('-6.0221413E+23'), r'-6.0221413\!\times\!10^{+23}' ) def test_number_sci_notation_suffix(self): """ Test numbers with both of these. """ self.assertEquals( preview.latex_preview('6.0221413E+23k'), r'6.0221413\!\times\!10^{+23}\text{k}' ) self.assertEquals( preview.latex_preview('-6.0221413E+23k'), r'-6.0221413\!\times\!10^{+23}\text{k}' ) def test_variable_simple(self): """ Simple valid variables should pass through. """ self.assertEquals(preview.latex_preview('x', variables=['x']), 'x') def test_greek(self): """ Variable names that are greek should be formatted accordingly. 
""" self.assertEquals(preview.latex_preview('pi'), r'\pi') def test_variable_subscript(self): """ Things like 'epsilon_max' should display nicely """ self.assertEquals( preview.latex_preview('epsilon_max', variables=['epsilon_max']), r'\epsilon_{max}' ) def test_function_simple(self): """ Valid function names should be escaped. """ self.assertEquals( preview.latex_preview('f(3)', functions=['f']), r'\text{f}(3)' ) def test_function_tall(self): r""" Functions surrounding a tall element should have \left, \right """ self.assertEquals( preview.latex_preview('f(3^2)', functions=['f']), r'\text{f}\left(3^{2}\right)' ) def test_function_sqrt(self): """ Sqrt function should be handled specially. """ self.assertEquals(preview.latex_preview('sqrt(3)'), r'\sqrt{3}') def test_function_log10(self): """ log10 function should be handled specially. """ self.assertEquals(preview.latex_preview('log10(3)'), r'\log_{10}(3)') def test_function_log2(self): """ log2 function should be handled specially. """ self.assertEquals(preview.latex_preview('log2(3)'), r'\log_2(3)') def test_power_simple(self): """ Powers should wrap the elements with braces correctly. """ self.assertEquals(preview.latex_preview('2^3^4'), '2^{3^{4}}') def test_power_parens(self): """ Powers should ignore the parenthesis of the last math. """ self.assertEquals(preview.latex_preview('2^3^(4+5)'), '2^{3^{4+5}}') def test_parallel(self): r""" Parallel items should combine with '\|'. """ self.assertEquals(preview.latex_preview('2||3'), r'2\|3') def test_product_mult_only(self): r""" Simple products should combine with a '\cdot'. """ self.assertEquals(preview.latex_preview('2*3'), r'2\cdot 3') def test_product_big_frac(self): """ Division should combine with '\frac'. """ self.assertEquals( preview.latex_preview('2*3/4/5'), r'\frac{2\cdot 3}{4\cdot 5}' ) def test_product_single_frac(self): """ Division should ignore parens if they are extraneous. """ self.assertEquals( preview.latex_preview('(2+3)/(4+5)'), r'\frac{2+3}{4+5}' ) def test_product_keep_going(self): """ Complex products/quotients should split into many '\frac's when needed. """ self.assertEquals( preview.latex_preview('2/3*4/5*6'), r'\frac{2}{3}\cdot \frac{4}{5}\cdot 6' ) def test_sum(self): """ Sums should combine its elements. """ # Use 'x' as the first term (instead of, say, '1'), so it can't be # interpreted as a negative number. self.assertEquals( preview.latex_preview('-x+2-3+4', variables=['x']), '-x+2-3+4' ) def test_sum_tall(self): """ A complicated expression should not hide the tallness. """ self.assertEquals( preview.latex_preview('(2+3^2)'), r'\left(2+3^{2}\right)' ) def test_complicated(self): """ Given complicated input, ensure that exactly the correct string is made. """ self.assertEquals( preview.latex_preview('11*f(x)+x^2*(3||4)/sqrt(pi)'), r'11\cdot \text{f}(x)+\frac{x^{2}\cdot (3\|4)}{\sqrt{\pi}}' ) self.assertEquals( preview.latex_preview('log10(1+3/4/Cos(x^2)*(x+1))', case_sensitive=True), (r'\log_{10}\left(1+\frac{3}{4\cdot \text{Cos}\left(x^{2}\right)}' r'\cdot (x+1)\right)') ) def test_syntax_errors(self): """ Test a lot of math strings that give syntax errors Rather than have a lot of self.assertRaises, make a loop and keep track of those that do not throw a `ParseException`, and assert at the end. """ bad_math_list = [ '11+', '11*', 'f((x)', 'sqrt(x^)', '3f(x)', # Not 3*f(x) '3|4', '3|||4' ] bad_exceptions = {} for math in bad_math_list: try: preview.latex_preview(math) except pyparsing.ParseException: pass # This is what we were expecting. 
(not excepting :P) except Exception as error: # pragma: no cover bad_exceptions[math] = error else: # pragma: no cover # If there is no exception thrown, this is a problem bad_exceptions[math] = None self.assertEquals({}, bad_exceptions)
agpl-3.0
3,517,765,545,005,773,300
33.752988
80
0.561848
false
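The expected strings in the test case above double as a compact usage reference for latex_preview; the calls below replay a few of those assertions, assuming the edX calc package is importable:

from calc import preview

print(preview.latex_preview('sqrt(3)'))                  # \sqrt{3}
print(preview.latex_preview('2*3/4/5'))                  # \frac{2\cdot 3}{4\cdot 5}
print(preview.latex_preview('f(3^2)', functions=['f']))  # \text{f}\left(3^{2}\right)
print(preview.latex_preview('pi'))                       # \pi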
nicanor-romero/OctoPrint
src/octoprint/printer/standard.py
7
31006
# coding=utf-8 """ This module holds the standard implementation of the :class:`PrinterInterface` and it helpers. """ from __future__ import absolute_import __author__ = "Gina Häußge <[email protected]>" __license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html' __copyright__ = "Copyright (C) 2014 The OctoPrint Project - Released under terms of the AGPLv3 License" import copy import logging import os import threading import time from octoprint import util as util from octoprint.events import eventManager, Events from octoprint.filemanager import FileDestinations from octoprint.plugin import plugin_manager, ProgressPlugin from octoprint.printer import PrinterInterface, PrinterCallback, UnknownScript from octoprint.printer.estimation import TimeEstimationHelper from octoprint.settings import settings from octoprint.util import comm as comm from octoprint.util import InvariantContainer class Printer(PrinterInterface, comm.MachineComPrintCallback): """ Default implementation of the :class:`PrinterInterface`. Manages the communication layer object and registers itself with it as a callback to react to changes on the communication layer. """ def __init__(self, fileManager, analysisQueue, printerProfileManager): from collections import deque self._logger = logging.getLogger(__name__) self._analysisQueue = analysisQueue self._fileManager = fileManager self._printerProfileManager = printerProfileManager # state # TODO do we really need to hold the temperature here? self._temp = None self._bedTemp = None self._targetTemp = None self._targetBedTemp = None self._temps = TemperatureHistory(cutoff=settings().getInt(["temperature", "cutoff"])*60) self._tempBacklog = [] self._latestMessage = None self._messages = deque([], 300) self._messageBacklog = [] self._latestLog = None self._log = deque([], 300) self._logBacklog = [] self._state = None self._currentZ = None self._progress = None self._printTime = None self._printTimeLeft = None self._printAfterSelect = False # sd handling self._sdPrinting = False self._sdStreaming = False self._sdFilelistAvailable = threading.Event() self._streamingFinishedCallback = None self._selectedFile = None self._timeEstimationData = None # comm self._comm = None # callbacks self._callbacks = [] # progress plugins self._lastProgressReport = None self._progressPlugins = plugin_manager().get_implementations(ProgressPlugin) self._stateMonitor = StateMonitor( interval=0.5, on_update=self._sendCurrentDataCallbacks, on_add_temperature=self._sendAddTemperatureCallbacks, on_add_log=self._sendAddLogCallbacks, on_add_message=self._sendAddMessageCallbacks ) self._stateMonitor.reset( state={"text": self.get_state_string(), "flags": self._getStateFlags()}, job_data={ "file": { "name": None, "size": None, "origin": None, "date": None }, "estimatedPrintTime": None, "lastPrintTime": None, "filament": { "length": None, "volume": None } }, progress={"completion": None, "filepos": None, "printTime": None, "printTimeLeft": None}, current_z=None ) eventManager().subscribe(Events.METADATA_ANALYSIS_FINISHED, self._on_event_MetadataAnalysisFinished) eventManager().subscribe(Events.METADATA_STATISTICS_UPDATED, self._on_event_MetadataStatisticsUpdated) #~~ handling of PrinterCallbacks def register_callback(self, callback): if not isinstance(callback, PrinterCallback): self._logger.warn("Registering an object as printer callback which doesn't implement the PrinterCallback interface") self._callbacks.append(callback) self._sendInitialStateUpdate(callback) def 
unregister_callback(self, callback): if callback in self._callbacks: self._callbacks.remove(callback) def _sendAddTemperatureCallbacks(self, data): for callback in self._callbacks: try: callback.on_printer_add_temperature(data) except: self._logger.exception("Exception while adding temperature data point") def _sendAddLogCallbacks(self, data): for callback in self._callbacks: try: callback.on_printer_add_log(data) except: self._logger.exception("Exception while adding communication log entry") def _sendAddMessageCallbacks(self, data): for callback in self._callbacks: try: callback.on_printer_add_message(data) except: self._logger.exception("Exception while adding printer message") def _sendCurrentDataCallbacks(self, data): for callback in self._callbacks: try: callback.on_printer_send_current_data(copy.deepcopy(data)) except: self._logger.exception("Exception while pushing current data") #~~ callback from metadata analysis event def _on_event_MetadataAnalysisFinished(self, event, data): if self._selectedFile: self._setJobData(self._selectedFile["filename"], self._selectedFile["filesize"], self._selectedFile["sd"]) def _on_event_MetadataStatisticsUpdated(self, event, data): self._setJobData(self._selectedFile["filename"], self._selectedFile["filesize"], self._selectedFile["sd"]) #~~ progress plugin reporting def _reportPrintProgressToPlugins(self, progress): if not progress or not self._selectedFile or not "sd" in self._selectedFile or not "filename" in self._selectedFile: return storage = "sdcard" if self._selectedFile["sd"] else "local" filename = self._selectedFile["filename"] def call_plugins(storage, filename, progress): for plugin in self._progressPlugins: try: plugin.on_print_progress(storage, filename, progress) except: self._logger.exception("Exception while sending print progress to plugin %s" % plugin._identifier) thread = threading.Thread(target=call_plugins, args=(storage, filename, progress)) thread.daemon = False thread.start() #~~ PrinterInterface implementation def connect(self, port=None, baudrate=None, profile=None): """ Connects to the printer. If port and/or baudrate is provided, uses these settings, otherwise autodetection will be attempted. """ if self._comm is not None: self._comm.close() self._printerProfileManager.select(profile) self._comm = comm.MachineCom(port, baudrate, callbackObject=self, printerProfileManager=self._printerProfileManager) def disconnect(self): """ Closes the connection to the printer. """ if self._comm is not None: self._comm.close() self._comm = None self._printerProfileManager.deselect() eventManager().fire(Events.DISCONNECTED) def get_transport(self): if self._comm is None: return None return self._comm.getTransport() getTransport = util.deprecated("getTransport has been renamed to get_transport", since="1.2.0-dev-590", includedoc="Replaced by :func:`get_transport`") def fake_ack(self): if self._comm is None: return self._comm.fakeOk() def commands(self, commands): """ Sends one or more gcode commands to the printer. 
""" if self._comm is None: return if not isinstance(commands, (list, tuple)): commands = [commands] for command in commands: self._comm.sendCommand(command) def script(self, name, context=None): if self._comm is None: return if name is None or not name: raise ValueError("name must be set") result = self._comm.sendGcodeScript(name, replacements=context) if not result: raise UnknownScript(name) def jog(self, axis, amount): if not isinstance(axis, (str, unicode)): raise ValueError("axis must be a string: {axis}".format(axis=axis)) axis = axis.lower() if not axis in PrinterInterface.valid_axes: raise ValueError("axis must be any of {axes}: {axis}".format(axes=", ".join(PrinterInterface.valid_axes), axis=axis)) if not isinstance(amount, (int, long, float)): raise ValueError("amount must be a valid number: {amount}".format(amount=amount)) printer_profile = self._printerProfileManager.get_current_or_default() movement_speed = printer_profile["axes"][axis]["speed"] self.commands(["G91", "G1 %s%.4f F%d" % (axis.upper(), amount, movement_speed), "G90"]) def home(self, axes): if not isinstance(axes, (list, tuple)): if isinstance(axes, (str, unicode)): axes = [axes] else: raise ValueError("axes is neither a list nor a string: {axes}".format(axes=axes)) validated_axes = filter(lambda x: x in PrinterInterface.valid_axes, map(lambda x: x.lower(), axes)) if len(axes) != len(validated_axes): raise ValueError("axes contains invalid axes: {axes}".format(axes=axes)) self.commands(["G91", "G28 %s" % " ".join(map(lambda x: "%s0" % x.upper(), validated_axes)), "G90"]) def extrude(self, amount): if not isinstance(amount, (int, long, float)): raise ValueError("amount must be a valid number: {amount}".format(amount=amount)) printer_profile = self._printerProfileManager.get_current_or_default() extrusion_speed = printer_profile["axes"]["e"]["speed"] self.commands(["G91", "G1 E%s F%d" % (amount, extrusion_speed), "G90"]) def change_tool(self, tool): if not PrinterInterface.valid_tool_regex.match(tool): raise ValueError("tool must match \"tool[0-9]+\": {tool}".format(tool=tool)) tool_num = int(tool[len("tool"):]) self.commands("T%d" % tool_num) def set_temperature(self, heater, value): if not PrinterInterface.valid_heater_regex.match(heater): raise ValueError("heater must match \"tool[0-9]+\" or \"bed\": {heater}".format(type=heater)) if not isinstance(value, (int, long, float)) or value < 0: raise ValueError("value must be a valid number >= 0: {value}".format(value=value)) if heater.startswith("tool"): printer_profile = self._printerProfileManager.get_current_or_default() extruder_count = printer_profile["extruder"]["count"] if extruder_count > 1: toolNum = int(heater[len("tool"):]) self.commands("M104 T%d S%f" % (toolNum, value)) else: self.commands("M104 S%f" % value) elif heater == "bed": self.commands("M140 S%f" % value) def set_temperature_offset(self, offsets=None): if offsets is None: offsets = dict() if not isinstance(offsets, dict): raise ValueError("offsets must be a dict") validated_keys = filter(lambda x: PrinterInterface.valid_heater_regex.match(x), offsets.keys()) validated_values = filter(lambda x: isinstance(x, (int, long, float)), offsets.values()) if len(validated_keys) != len(offsets): raise ValueError("offsets contains invalid keys: {offsets}".format(offsets=offsets)) if len(validated_values) != len(offsets): raise ValueError("offsets contains invalid values: {offsets}".format(offsets=offsets)) if self._comm is None: return self._comm.setTemperatureOffset(offsets) 
self._stateMonitor.set_temp_offsets(offsets) def _convert_rate_value(self, factor, min=0, max=200): if not isinstance(factor, (int, float, long)): raise ValueError("factor is not a number") if isinstance(factor, float): factor = int(factor * 100.0) if factor < min or factor > max: raise ValueError("factor must be a value between %f and %f" % (min, max)) return factor def feed_rate(self, factor): factor = self._convert_rate_value(factor, min=50, max=200) self.commands("M220 S%d" % factor) def flow_rate(self, factor): factor = self._convert_rate_value(factor, min=75, max=125) self.commands("M221 S%d" % factor) def select_file(self, path, sd, printAfterSelect=False): if self._comm is None or (self._comm.isBusy() or self._comm.isStreaming()): self._logger.info("Cannot load file: printer not connected or currently busy") return self._printAfterSelect = printAfterSelect self._comm.selectFile("/" + path if sd else path, sd) self._setProgressData(0, None, None, None) self._setCurrentZ(None) def unselect_file(self): if self._comm is not None and (self._comm.isBusy() or self._comm.isStreaming()): return self._comm.unselectFile() self._setProgressData(0, None, None, None) self._setCurrentZ(None) def start_print(self): """ Starts the currently loaded print job. Only starts if the printer is connected and operational, not currently printing and a printjob is loaded """ if self._comm is None or not self._comm.isOperational() or self._comm.isPrinting(): return if self._selectedFile is None: return rolling_window = None threshold = None countdown = None if self._selectedFile["sd"]: # we are interesting in a rolling window of roughly the last 15s, so the number of entries has to be derived # by that divided by the sd status polling interval rolling_window = 15 / settings().get(["serial", "timeout", "sdStatus"]) # we are happy if the average of the estimates stays within 60s of the prior one threshold = 60 # we are happy when one rolling window has been stable countdown = rolling_window self._timeEstimationData = TimeEstimationHelper(rolling_window=rolling_window, threshold=threshold, countdown=countdown) self._lastProgressReport = None self._setProgressData(0, None, None, None) self._setCurrentZ(None) self._comm.startPrint() def toggle_pause_print(self): """ Pause the current printjob. """ if self._comm is None: return self._comm.setPause(not self._comm.isPaused()) def cancel_print(self): """ Cancel the current printjob. """ if self._comm is None: return self._comm.cancelPrint() # reset progress, height, print time self._setCurrentZ(None) self._setProgressData(None, None, None, None) # mark print as failure if self._selectedFile is not None: self._fileManager.log_print(FileDestinations.SDCARD if self._selectedFile["sd"] else FileDestinations.LOCAL, self._selectedFile["filename"], time.time(), self._comm.getPrintTime(), False, self._printerProfileManager.get_current_or_default()["id"]) payload = { "file": self._selectedFile["filename"], "origin": FileDestinations.LOCAL } if self._selectedFile["sd"]: payload["origin"] = FileDestinations.SDCARD eventManager().fire(Events.PRINT_FAILED, payload) def get_state_string(self): """ Returns a human readable string corresponding to the current communication state. 
""" if self._comm is None: return "Offline" else: return self._comm.getStateString() def get_current_data(self): return self._stateMonitor.get_current_data() def get_current_job(self): currentData = self._stateMonitor.get_current_data() return currentData["job"] def get_current_temperatures(self): if self._comm is not None: offsets = self._comm.getOffsets() else: offsets = dict() result = {} if self._temp is not None: for tool in self._temp.keys(): result["tool%d" % tool] = { "actual": self._temp[tool][0], "target": self._temp[tool][1], "offset": offsets[tool] if tool in offsets and offsets[tool] is not None else 0 } if self._bedTemp is not None: result["bed"] = { "actual": self._bedTemp[0], "target": self._bedTemp[1], "offset": offsets["bed"] if "bed" in offsets and offsets["bed"] is not None else 0 } return result def get_temperature_history(self): return self._temps def get_current_connection(self): if self._comm is None: return "Closed", None, None, None port, baudrate = self._comm.getConnection() printer_profile = self._printerProfileManager.get_current_or_default() return self._comm.getStateString(), port, baudrate, printer_profile def is_closed_or_error(self): return self._comm is None or self._comm.isClosedOrError() def is_operational(self): return self._comm is not None and self._comm.isOperational() def is_printing(self): return self._comm is not None and self._comm.isPrinting() def is_paused(self): return self._comm is not None and self._comm.isPaused() def is_error(self): return self._comm is not None and self._comm.isError() def is_ready(self): return self.is_operational() and not self._comm.isStreaming() def is_sd_ready(self): if not settings().getBoolean(["feature", "sdSupport"]) or self._comm is None: return False else: return self._comm.isSdReady() #~~ sd file handling def get_sd_files(self): if self._comm is None or not self._comm.isSdReady(): return [] return map(lambda x: (x[0][1:], x[1]), self._comm.getSdFiles()) def add_sd_file(self, filename, absolutePath, streamingFinishedCallback): if not self._comm or self._comm.isBusy() or not self._comm.isSdReady(): self._logger.error("No connection to printer or printer is busy") return self._streamingFinishedCallback = streamingFinishedCallback self.refresh_sd_files(blocking=True) existingSdFiles = map(lambda x: x[0], self._comm.getSdFiles()) remoteName = util.get_dos_filename(filename, existing_filenames=existingSdFiles, extension="gco") self._timeEstimationData = TimeEstimationHelper() self._comm.startFileTransfer(absolutePath, filename, "/" + remoteName) return remoteName def delete_sd_file(self, filename): if not self._comm or not self._comm.isSdReady(): return self._comm.deleteSdFile("/" + filename) def init_sd_card(self): if not self._comm or self._comm.isSdReady(): return self._comm.initSdCard() def release_sd_card(self): if not self._comm or not self._comm.isSdReady(): return self._comm.releaseSdCard() def refresh_sd_files(self, blocking=False): """ Refreshs the list of file stored on the SD card attached to printer (if available and printer communication available). Optional blocking parameter allows making the method block (max 10s) until the file list has been received (and can be accessed via self._comm.getSdFiles()). Defaults to an asynchronous operation. 
""" if not self._comm or not self._comm.isSdReady(): return self._sdFilelistAvailable.clear() self._comm.refreshSdFiles() if blocking: self._sdFilelistAvailable.wait(10000) #~~ state monitoring def _setCurrentZ(self, currentZ): self._currentZ = currentZ self._stateMonitor.set_current_z(self._currentZ) def _setState(self, state): self._state = state self._stateMonitor.set_state({"text": self.get_state_string(), "flags": self._getStateFlags()}) def _addLog(self, log): self._log.append(log) self._stateMonitor.add_log(log) def _addMessage(self, message): self._messages.append(message) self._stateMonitor.add_message(message) def _estimateTotalPrintTime(self, progress, printTime): if not progress or not printTime or not self._timeEstimationData: return None else: newEstimate = printTime / progress self._timeEstimationData.update(newEstimate) result = None if self._timeEstimationData.is_stable(): result = self._timeEstimationData.average_total_rolling return result def _setProgressData(self, progress, filepos, printTime, cleanedPrintTime): estimatedTotalPrintTime = self._estimateTotalPrintTime(progress, cleanedPrintTime) totalPrintTime = estimatedTotalPrintTime if self._selectedFile and "estimatedPrintTime" in self._selectedFile and self._selectedFile["estimatedPrintTime"]: statisticalTotalPrintTime = self._selectedFile["estimatedPrintTime"] if progress and cleanedPrintTime: if estimatedTotalPrintTime is None: totalPrintTime = statisticalTotalPrintTime else: if progress < 0.5: sub_progress = progress * 2 else: sub_progress = 1.0 totalPrintTime = (1 - sub_progress) * statisticalTotalPrintTime + sub_progress * estimatedTotalPrintTime self._progress = progress self._printTime = printTime self._printTimeLeft = totalPrintTime - cleanedPrintTime if (totalPrintTime is not None and cleanedPrintTime is not None) else None self._stateMonitor.set_progress({ "completion": self._progress * 100 if self._progress is not None else None, "filepos": filepos, "printTime": int(self._printTime) if self._printTime is not None else None, "printTimeLeft": int(self._printTimeLeft) if self._printTimeLeft is not None else None }) if progress: progress_int = int(progress * 100) if self._lastProgressReport != progress_int: self._lastProgressReport = progress_int self._reportPrintProgressToPlugins(progress_int) def _addTemperatureData(self, temp, bedTemp): currentTimeUtc = int(time.time()) data = { "time": currentTimeUtc } for tool in temp.keys(): data["tool%d" % tool] = { "actual": temp[tool][0], "target": temp[tool][1] } if bedTemp is not None and isinstance(bedTemp, tuple): data["bed"] = { "actual": bedTemp[0], "target": bedTemp[1] } self._temps.append(data) self._temp = temp self._bedTemp = bedTemp self._stateMonitor.add_temperature(data) def _setJobData(self, filename, filesize, sd): if filename is not None: if sd: path_in_storage = filename path_on_disk = None else: path_in_storage = self._fileManager.path_in_storage(FileDestinations.LOCAL, filename) path_on_disk = self._fileManager.path_on_disk(FileDestinations.LOCAL, filename) self._selectedFile = { "filename": path_in_storage, "filesize": filesize, "sd": sd, "estimatedPrintTime": None } else: self._selectedFile = None self._stateMonitor.set_job_data({ "file": { "name": None, "origin": None, "size": None, "date": None }, "estimatedPrintTime": None, "averagePrintTime": None, "lastPrintTime": None, "filament": None, }) return estimatedPrintTime = None lastPrintTime = None averagePrintTime = None date = None filament = None if path_on_disk: # Use a string for mtime 
because it could be float and the # javascript needs to exact match if not sd: date = int(os.stat(path_on_disk).st_ctime) try: fileData = self._fileManager.get_metadata(FileDestinations.SDCARD if sd else FileDestinations.LOCAL, path_on_disk) except: fileData = None if fileData is not None: if "analysis" in fileData: if estimatedPrintTime is None and "estimatedPrintTime" in fileData["analysis"]: estimatedPrintTime = fileData["analysis"]["estimatedPrintTime"] if "filament" in fileData["analysis"].keys(): filament = fileData["analysis"]["filament"] if "statistics" in fileData: printer_profile = self._printerProfileManager.get_current_or_default()["id"] if "averagePrintTime" in fileData["statistics"] and printer_profile in fileData["statistics"]["averagePrintTime"]: averagePrintTime = fileData["statistics"]["averagePrintTime"][printer_profile] if "lastPrintTime" in fileData["statistics"] and printer_profile in fileData["statistics"]["lastPrintTime"]: lastPrintTime = fileData["statistics"]["lastPrintTime"][printer_profile] if averagePrintTime is not None: self._selectedFile["estimatedPrintTime"] = averagePrintTime elif estimatedPrintTime is not None: # TODO apply factor which first needs to be tracked! self._selectedFile["estimatedPrintTime"] = estimatedPrintTime self._stateMonitor.set_job_data({ "file": { "name": path_in_storage, "origin": FileDestinations.SDCARD if sd else FileDestinations.LOCAL, "size": filesize, "date": date }, "estimatedPrintTime": estimatedPrintTime, "averagePrintTime": averagePrintTime, "lastPrintTime": lastPrintTime, "filament": filament, }) def _sendInitialStateUpdate(self, callback): try: data = self._stateMonitor.get_current_data() data.update({ "temps": list(self._temps), "logs": list(self._log), "messages": list(self._messages) }) callback.on_printer_send_initial_data(data) except Exception, err: import sys sys.stderr.write("ERROR: %s\n" % str(err)) pass def _getStateFlags(self): return { "operational": self.is_operational(), "printing": self.is_printing(), "closedOrError": self.is_closed_or_error(), "error": self.is_error(), "paused": self.is_paused(), "ready": self.is_ready(), "sdReady": self.is_sd_ready() } #~~ comm.MachineComPrintCallback implementation def on_comm_log(self, message): """ Callback method for the comm object, called upon log output. """ self._addLog(message) def on_comm_temperature_update(self, temp, bedTemp): self._addTemperatureData(temp, bedTemp) def on_comm_state_change(self, state): """ Callback method for the comm object, called if the connection state changes. 
""" oldState = self._state # forward relevant state changes to gcode manager if oldState == comm.MachineCom.STATE_PRINTING: if self._selectedFile is not None: if state == comm.MachineCom.STATE_CLOSED or state == comm.MachineCom.STATE_ERROR or state == comm.MachineCom.STATE_CLOSED_WITH_ERROR: self._fileManager.log_print(FileDestinations.SDCARD if self._selectedFile["sd"] else FileDestinations.LOCAL, self._selectedFile["filename"], time.time(), self._comm.getPrintTime(), False, self._printerProfileManager.get_current_or_default()["id"]) self._analysisQueue.resume() # printing done, put those cpu cycles to good use elif state == comm.MachineCom.STATE_PRINTING: self._analysisQueue.pause() # do not analyse files while printing elif state == comm.MachineCom.STATE_CLOSED or state == comm.MachineCom.STATE_CLOSED_WITH_ERROR: if self._comm is not None: self._comm = None self._setProgressData(0, None, None, None) self._setCurrentZ(None) self._setJobData(None, None, None) self._setState(state) def on_comm_message(self, message): """ Callback method for the comm object, called upon message exchanges via serial. Stores the message in the message buffer, truncates buffer to the last 300 lines. """ self._addMessage(message) def on_comm_progress(self): """ Callback method for the comm object, called upon any change in progress of the printjob. Triggers storage of new values for printTime, printTimeLeft and the current progress. """ self._setProgressData(self._comm.getPrintProgress(), self._comm.getPrintFilepos(), self._comm.getPrintTime(), self._comm.getCleanedPrintTime()) def on_comm_z_change(self, newZ): """ Callback method for the comm object, called upon change of the z-layer. """ oldZ = self._currentZ if newZ != oldZ: # we have to react to all z-changes, even those that might "go backward" due to a slicer's retraction or # anti-backlash-routines. 
Event subscribes should individually take care to filter out "wrong" z-changes eventManager().fire(Events.Z_CHANGE, {"new": newZ, "old": oldZ}) self._setCurrentZ(newZ) def on_comm_sd_state_change(self, sdReady): self._stateMonitor.set_state({"text": self.get_state_string(), "flags": self._getStateFlags()}) def on_comm_sd_files(self, files): eventManager().fire(Events.UPDATED_FILES, {"type": "gcode"}) self._sdFilelistAvailable.set() def on_comm_file_selected(self, filename, filesize, sd): self._setJobData(filename, filesize, sd) self._stateMonitor.set_state({"text": self.get_state_string(), "flags": self._getStateFlags()}) if self._printAfterSelect: self.start_print() def on_comm_print_job_done(self): self._fileManager.log_print(FileDestinations.SDCARD if self._selectedFile["sd"] else FileDestinations.LOCAL, self._selectedFile["filename"], time.time(), self._comm.getPrintTime(), True, self._printerProfileManager.get_current_or_default()["id"]) self._setProgressData(1.0, self._selectedFile["filesize"], self._comm.getPrintTime(), 0) self._stateMonitor.set_state({"text": self.get_state_string(), "flags": self._getStateFlags()}) def on_comm_file_transfer_started(self, filename, filesize): self._sdStreaming = True self._setJobData(filename, filesize, True) self._setProgressData(0.0, 0, 0, None) self._stateMonitor.set_state({"text": self.get_state_string(), "flags": self._getStateFlags()}) def on_comm_file_transfer_done(self, filename): self._sdStreaming = False if self._streamingFinishedCallback is not None: # in case of SD files, both filename and absolutePath are the same, so we set the (remote) filename for # both parameters self._streamingFinishedCallback(filename, filename, FileDestinations.SDCARD) self._setCurrentZ(None) self._setJobData(None, None, None) self._setProgressData(None, None, None, None) self._stateMonitor.set_state({"text": self.get_state_string(), "flags": self._getStateFlags()}) def on_comm_force_disconnect(self): self.disconnect() class StateMonitor(object): def __init__(self, interval=0.5, on_update=None, on_add_temperature=None, on_add_log=None, on_add_message=None): self._interval = interval self._update_callback = on_update self._on_add_temperature = on_add_temperature self._on_add_log = on_add_log self._on_add_message = on_add_message self._state = None self._job_data = None self._gcode_data = None self._sd_upload_data = None self._current_z = None self._progress = None self._offsets = {} self._change_event = threading.Event() self._state_lock = threading.Lock() self._last_update = time.time() self._worker = threading.Thread(target=self._work) self._worker.daemon = True self._worker.start() def reset(self, state=None, job_data=None, progress=None, current_z=None): self.set_state(state) self.set_job_data(job_data) self.set_progress(progress) self.set_current_z(current_z) def add_temperature(self, temperature): self._on_add_temperature(temperature) self._change_event.set() def add_log(self, log): self._on_add_log(log) self._change_event.set() def add_message(self, message): self._on_add_message(message) self._change_event.set() def set_current_z(self, current_z): self._current_z = current_z self._change_event.set() def set_state(self, state): with self._state_lock: self._state = state self._change_event.set() def set_job_data(self, job_data): self._job_data = job_data self._change_event.set() def set_progress(self, progress): self._progress = progress self._change_event.set() def set_temp_offsets(self, offsets): self._offsets = offsets self._change_event.set() def 
_work(self): while True: self._change_event.wait() with self._state_lock: now = time.time() delta = now - self._last_update additional_wait_time = self._interval - delta if additional_wait_time > 0: time.sleep(additional_wait_time) data = self.get_current_data() self._update_callback(data) self._last_update = time.time() self._change_event.clear() def get_current_data(self): return { "state": self._state, "job": self._job_data, "currentZ": self._current_z, "progress": self._progress, "offsets": self._offsets } class TemperatureHistory(InvariantContainer): def __init__(self, cutoff=30 * 60): def temperature_invariant(data): data.sort(key=lambda x: x["time"]) now = int(time.time()) return [item for item in data if item["time"] >= now - cutoff] InvariantContainer.__init__(self, guarantee_invariant=temperature_invariant)
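The StateMonitor above funnels rapid state, job, and progress changes into a single rate-limited update callback: setters raise a threading.Event, and a daemon worker waits out the remainder of the interval before invoking the callback and clearing the event. The snippet below is a minimal, self-contained sketch of that coalescing pattern only; it is not OctoPrint's API, and the names CoalescingNotifier and show_update are illustrative.

# Illustrative sketch of the coalescing/rate-limiting pattern used by StateMonitor.
# Not part of OctoPrint; class and function names are hypothetical.
import threading
import time

class CoalescingNotifier(object):
    """Collects rapid updates and forwards them at most once per interval."""

    def __init__(self, interval, callback):
        self._interval = interval
        self._callback = callback
        self._lock = threading.Lock()
        self._payload = None
        self._last_update = time.time()
        self._changed = threading.Event()
        worker = threading.Thread(target=self._work)
        worker.daemon = True
        worker.start()

    def update(self, payload):
        with self._lock:
            self._payload = payload
        self._changed.set()

    def _work(self):
        while True:
            self._changed.wait()
            # Sleep long enough that callbacks fire at most once per interval.
            additional_wait = self._interval - (time.time() - self._last_update)
            if additional_wait > 0:
                time.sleep(additional_wait)
            with self._lock:
                payload = self._payload
            self._callback(payload)
            self._last_update = time.time()
            self._changed.clear()

def show_update(data):
    print("update: %r" % (data,))

if __name__ == "__main__":
    notifier = CoalescingNotifier(0.5, show_update)
    for i in range(20):
        notifier.update({"progress": i / 20.0})  # 20 rapid updates...
        time.sleep(0.05)
    time.sleep(1.0)  # ...collapse into only a handful of callbacks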
agpl-3.0
-1,232,424,329,876,951,300
31.670179
252
0.708005
false
stevekuznetsov/ansible
test/units/modules/network/nxos/test_nxos_system.py
51
6189
# # (c) 2016 Red Hat Inc. # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type import json from ansible.compat.tests.mock import patch from ansible.modules.network.nxos import nxos_system from .nxos_module import TestNxosModule, load_fixture, set_module_args class TestNxosSystemModule(TestNxosModule): module = nxos_system def setUp(self): self.mock_get_config = patch('ansible.modules.network.nxos.nxos_system.get_config') self.get_config = self.mock_get_config.start() self.mock_load_config = patch('ansible.modules.network.nxos.nxos_system.load_config') self.load_config = self.mock_load_config.start() def tearDown(self): self.mock_get_config.stop() self.mock_load_config.stop() def load_fixtures(self, commands=None): self.get_config.return_value = load_fixture('nxos_system_config.cfg') self.load_config.return_value = None def test_nxos_system_hostname_changed(self): set_module_args(dict(hostname='foo')) commands = ['hostname foo'] self.execute_module(changed=True, commands=commands) def test_nxos_system_domain_lookup(self): set_module_args(dict(domain_lookup=True)) commands = ['ip domain-lookup'] self.execute_module(changed=True, commands=commands) def test_nxos_system_missing_vrf(self): domain_name = dict(name='example.com', vrf='example') set_module_args(dict(domain_name=domain_name)) self.execute_module(failed=True) def test_nxos_system_domain_name(self): set_module_args(dict(domain_name=['example.net'])) commands = ['no ip domain-name ansible.com', 'vrf context management', 'no ip domain-name eng.ansible.com', 'exit', 'ip domain-name example.net'] self.execute_module(changed=True, commands=commands) def test_nxos_system_domain_name_complex(self): domain_name = dict(name='example.net', vrf='management') set_module_args(dict(domain_name=[domain_name])) commands = ['no ip domain-name ansible.com', 'vrf context management', 'no ip domain-name eng.ansible.com', 'exit', 'vrf context management', 'ip domain-name example.net', 'exit'] self.execute_module(changed=True, commands=commands) def test_nxos_system_domain_search(self): set_module_args(dict(domain_search=['example.net'])) commands = ['vrf context management', 'no ip domain-list ansible.com', 'exit', 'vrf context management', 'no ip domain-list redhat.com', 'exit', 'no ip domain-list ansible.com', 'no ip domain-list redhat.com', 'ip domain-list example.net'] self.execute_module(changed=True, commands=commands) def test_nxos_system_domain_search_complex(self): domain_search = dict(name='example.net', vrf='management') set_module_args(dict(domain_search=[domain_search])) commands = ['vrf context management', 'no ip domain-list ansible.com', 'exit', 'vrf context management', 'no ip domain-list redhat.com', 'exit', 'no ip domain-list ansible.com', 'no ip domain-list redhat.com', 'vrf context management', 'ip domain-list example.net', 
'exit'] self.execute_module(changed=True, commands=commands) def test_nxos_system_name_servers(self): set_module_args(dict(name_servers=['1.2.3.4', '8.8.8.8'])) commands = ['no ip name-server 172.26.1.1', 'vrf context management', 'no ip name-server 8.8.8.8', 'exit', 'vrf context management', 'no ip name-server 172.26.1.1', 'exit', 'ip name-server 1.2.3.4'] self.execute_module(changed=True, commands=commands) def test_nxos_system_name_servers_complex(self): name_servers = dict(server='1.2.3.4', vrf='management') set_module_args(dict(name_servers=[name_servers])) commands = ['no ip name-server 8.8.8.8', 'no ip name-server 172.26.1.1', 'vrf context management', 'no ip name-server 8.8.8.8', 'exit', 'vrf context management', 'no ip name-server 172.26.1.1', 'exit', 'vrf context management', 'ip name-server 1.2.3.4', 'exit'] self.execute_module(changed=True, commands=commands) def test_nxos_system_system_mtu(self): set_module_args(dict(system_mtu=2000)) commands = ['system jumbomtu 2000'] self.execute_module(changed=True, commands=commands) def test_nxos_system_state_absent(self): set_module_args(dict(state='absent')) commands = ['no hostname', 'no ip domain-name ansible.com', 'vrf context management', 'no ip domain-name eng.ansible.com', 'exit', 'no ip domain-list ansible.com', 'no ip domain-list redhat.com', 'vrf context management', 'no ip domain-list ansible.com', 'exit', 'vrf context management', 'no ip domain-list redhat.com', 'exit', 'no ip name-server 8.8.8.8', 'no ip name-server 172.26.1.1', 'vrf context management', 'no ip name-server 8.8.8.8', 'exit', 'vrf context management', 'no ip name-server 172.26.1.1', 'exit', 'no system jumbomtu'] self.execute_module(changed=True, commands=commands)
gpl-3.0
-6,410,508,891,350,388,000
46.607692
93
0.635482
false
vponomaryov/manila
manila/share/drivers/hpe/hpe_3par_mediator.py
1
70220
# Copyright 2015 Hewlett Packard Enterprise Development LP # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """HPE 3PAR Mediator for OpenStack Manila. This 'mediator' de-couples the 3PAR focused client from the OpenStack focused driver. """ from oslo_log import log from oslo_utils import importutils from oslo_utils import units import six from manila.data import utils as data_utils from manila import exception from manila import utils from manila.i18n import _, _LE, _LI, _LW hpe3parclient = importutils.try_import("hpe3parclient") if hpe3parclient: from hpe3parclient import file_client LOG = log.getLogger(__name__) MIN_CLIENT_VERSION = (4, 0, 0) DENY = '-' ALLOW = '+' OPEN_STACK_MANILA = 'OpenStack Manila' FULL = 1 THIN = 2 DEDUPE = 6 ENABLED = 1 DISABLED = 2 CACHE = 'cache' CONTINUOUS_AVAIL = 'continuous_avail' ACCESS_BASED_ENUM = 'access_based_enum' SMB_EXTRA_SPECS_MAP = { CACHE: CACHE, CONTINUOUS_AVAIL: 'ca', ACCESS_BASED_ENUM: 'abe', } IP_ALREADY_EXISTS = 'IP address %s already exists' USER_ALREADY_EXISTS = '"allow" permission already exists for "%s"' DOES_NOT_EXIST = 'does not exist, cannot' LOCAL_IP = '127.0.0.1' LOCAL_IP_RO = '127.0.0.2' SUPER_SHARE = 'OPENSTACK_SUPER_SHARE' TMP_RO_SNAP_EXPORT = "Temp RO snapshot export as source for creating RW share." class HPE3ParMediator(object): """3PAR client-facing code for the 3PAR driver. Version history: 1.0.0 - Begin Liberty development (post-Kilo) 1.0.1 - Report thin/dedup/hp_flash_cache capabilities 1.0.2 - Add share server/share network support 1.0.3 - Use hp3par prefix for share types and capabilities 2.0.0 - Rebranded HP to HPE 2.0.1 - Add access_level (e.g. 
read-only support) 2.0.2 - Add extend/shrink 2.0.3 - Fix SMB read-only access (added in 2.0.1) 2.0.4 - Remove file tree on delete when using nested shares #1538800 2.0.5 - Reduce the fsquota by share size when a share is deleted #1582931 2.0.6 - Read-write share from snapshot (using driver mount and copy) 2.0.7 - Add update_access support 2.0.8 - Multi pools support per backend 2.0.9 - Fix get_vfs() to correctly validate conf IP addresses at boot up #1621016 """ VERSION = "2.0.9" def __init__(self, **kwargs): self.hpe3par_username = kwargs.get('hpe3par_username') self.hpe3par_password = kwargs.get('hpe3par_password') self.hpe3par_api_url = kwargs.get('hpe3par_api_url') self.hpe3par_debug = kwargs.get('hpe3par_debug') self.hpe3par_san_ip = kwargs.get('hpe3par_san_ip') self.hpe3par_san_login = kwargs.get('hpe3par_san_login') self.hpe3par_san_password = kwargs.get('hpe3par_san_password') self.hpe3par_san_ssh_port = kwargs.get('hpe3par_san_ssh_port') self.hpe3par_san_private_key = kwargs.get('hpe3par_san_private_key') self.hpe3par_fstore_per_share = kwargs.get('hpe3par_fstore_per_share') self.hpe3par_require_cifs_ip = kwargs.get('hpe3par_require_cifs_ip') self.hpe3par_cifs_admin_access_username = ( kwargs.get('hpe3par_cifs_admin_access_username')) self.hpe3par_cifs_admin_access_password = ( kwargs.get('hpe3par_cifs_admin_access_password')) self.hpe3par_cifs_admin_access_domain = ( kwargs.get('hpe3par_cifs_admin_access_domain')) self.hpe3par_share_mount_path = kwargs.get('hpe3par_share_mount_path') self.my_ip = kwargs.get('my_ip') self.ssh_conn_timeout = kwargs.get('ssh_conn_timeout') self._client = None self.client_version = None @staticmethod def no_client(): return hpe3parclient is None def do_setup(self): if self.no_client(): msg = _('You must install hpe3parclient before using the 3PAR ' 'driver. Run "pip install --upgrade python-3parclient" ' 'to upgrade the hpe3parclient.') LOG.error(msg) raise exception.HPE3ParInvalidClient(message=msg) self.client_version = hpe3parclient.version_tuple if self.client_version < MIN_CLIENT_VERSION: msg = (_('Invalid hpe3parclient version found (%(found)s). ' 'Version %(minimum)s or greater required. 
Run "pip' ' install --upgrade python-3parclient" to upgrade' ' the hpe3parclient.') % {'found': '.'.join(map(six.text_type, self.client_version)), 'minimum': '.'.join(map(six.text_type, MIN_CLIENT_VERSION))}) LOG.error(msg) raise exception.HPE3ParInvalidClient(message=msg) try: self._client = file_client.HPE3ParFilePersonaClient( self.hpe3par_api_url) except Exception as e: msg = (_('Failed to connect to HPE 3PAR File Persona Client: %s') % six.text_type(e)) LOG.exception(msg) raise exception.ShareBackendException(message=msg) try: ssh_kwargs = {} if self.hpe3par_san_ssh_port: ssh_kwargs['port'] = self.hpe3par_san_ssh_port if self.ssh_conn_timeout: ssh_kwargs['conn_timeout'] = self.ssh_conn_timeout if self.hpe3par_san_private_key: ssh_kwargs['privatekey'] = self.hpe3par_san_private_key self._client.setSSHOptions( self.hpe3par_san_ip, self.hpe3par_san_login, self.hpe3par_san_password, **ssh_kwargs ) except Exception as e: msg = (_('Failed to set SSH options for HPE 3PAR File Persona ' 'Client: %s') % six.text_type(e)) LOG.exception(msg) raise exception.ShareBackendException(message=msg) LOG.info(_LI("HPE3ParMediator %(version)s, " "hpe3parclient %(client_version)s"), {"version": self.VERSION, "client_version": hpe3parclient.get_version_string()}) try: wsapi_version = self._client.getWsApiVersion()['build'] LOG.info(_LI("3PAR WSAPI %s"), wsapi_version) except Exception as e: msg = (_('Failed to get 3PAR WSAPI version: %s') % six.text_type(e)) LOG.exception(msg) raise exception.ShareBackendException(message=msg) if self.hpe3par_debug: self._client.debug_rest(True) # Includes SSH debug (setSSH above) def _wsapi_login(self): try: self._client.login(self.hpe3par_username, self.hpe3par_password) except Exception as e: msg = (_("Failed to Login to 3PAR (%(url)s) as %(user)s " "because: %(err)s") % {'url': self.hpe3par_api_url, 'user': self.hpe3par_username, 'err': six.text_type(e)}) LOG.error(msg) raise exception.ShareBackendException(msg=msg) def _wsapi_logout(self): try: self._client.http.unauthenticate() except Exception as e: msg = _LW("Failed to Logout from 3PAR (%(url)s) because %(err)s") LOG.warning(msg, {'url': self.hpe3par_api_url, 'err': six.text_type(e)}) # don't raise exception on logout() @staticmethod def build_export_locations(protocol, ips, path): if not ips: message = _('Failed to build export location due to missing IP.') raise exception.InvalidInput(reason=message) if not path: message = _('Failed to build export location due to missing path.') raise exception.InvalidInput(reason=message) share_proto = HPE3ParMediator.ensure_supported_protocol(protocol) if share_proto == 'nfs': return ['%s:%s' % (ip, path) for ip in ips] else: return [r'\\%s\%s' % (ip, path) for ip in ips] def get_provisioned_gb(self, fpg): total_mb = 0 try: result = self._client.getfsquota(fpg=fpg) except Exception as e: result = {'message': six.text_type(e)} error_msg = result.get('message') if error_msg: message = (_('Error while getting fsquotas for FPG ' '%(fpg)s: %(msg)s') % {'fpg': fpg, 'msg': error_msg}) LOG.error(message) raise exception.ShareBackendException(msg=message) for fsquota in result['members']: total_mb += float(fsquota['hardBlock']) return total_mb / units.Ki def get_fpg_status(self, fpg): """Get capacity and capabilities for FPG.""" try: result = self._client.getfpg(fpg) except Exception as e: msg = (_('Failed to get capacity for fpg %(fpg)s: %(e)s') % {'fpg': fpg, 'e': six.text_type(e)}) LOG.error(msg) raise exception.ShareBackendException(msg=msg) if result['total'] != 1: msg = (_('Failed 
to get capacity for fpg %s.') % fpg) LOG.error(msg) raise exception.ShareBackendException(msg=msg) member = result['members'][0] total_capacity_gb = float(member['capacityKiB']) / units.Mi free_capacity_gb = float(member['availCapacityKiB']) / units.Mi volumes = member['vvs'] if isinstance(volumes, list): volume = volumes[0] # Use first name from list else: volume = volumes # There is just a name self._wsapi_login() try: volume_info = self._client.getVolume(volume) volume_set = self._client.getVolumeSet(fpg) finally: self._wsapi_logout() provisioning_type = volume_info['provisioningType'] if provisioning_type not in (THIN, FULL, DEDUPE): msg = (_('Unexpected provisioning type for FPG %(fpg)s: ' '%(ptype)s.') % {'fpg': fpg, 'ptype': provisioning_type}) LOG.error(msg) raise exception.ShareBackendException(msg=msg) dedupe = provisioning_type == DEDUPE thin_provisioning = provisioning_type in (THIN, DEDUPE) flash_cache_policy = volume_set.get('flashCachePolicy', DISABLED) hpe3par_flash_cache = flash_cache_policy == ENABLED status = { 'pool_name': fpg, 'total_capacity_gb': total_capacity_gb, 'free_capacity_gb': free_capacity_gb, 'thin_provisioning': thin_provisioning, 'dedupe': dedupe, 'hpe3par_flash_cache': hpe3par_flash_cache, 'hp3par_flash_cache': hpe3par_flash_cache, } if thin_provisioning: status['provisioned_capacity_gb'] = self.get_provisioned_gb(fpg) return status @staticmethod def ensure_supported_protocol(share_proto): protocol = share_proto.lower() if protocol == 'cifs': protocol = 'smb' if protocol not in ['smb', 'nfs']: message = (_('Invalid protocol. Expected nfs or smb. Got %s.') % protocol) LOG.error(message) raise exception.InvalidShareAccess(reason=message) return protocol @staticmethod def other_protocol(share_proto): """Given 'nfs' or 'smb' (or equivalent) return the other one.""" protocol = HPE3ParMediator.ensure_supported_protocol(share_proto) return 'nfs' if protocol == 'smb' else 'smb' @staticmethod def ensure_prefix(uid, protocol=None, readonly=False): if uid.startswith('osf-'): return uid if protocol: proto = '-%s' % HPE3ParMediator.ensure_supported_protocol(protocol) else: proto = '' if readonly: ro = '-ro' else: ro = '' # Format is osf[-ro]-{nfs|smb}-uid return 'osf%s%s-%s' % (proto, ro, uid) @staticmethod def _get_nfs_options(extra_specs, readonly): """Validate the NFS extra_specs and return the options to use.""" nfs_options = extra_specs.get('hpe3par:nfs_options') if nfs_options is None: nfs_options = extra_specs.get('hp3par:nfs_options') if nfs_options: msg = _LW("hp3par:nfs_options is deprecated. Use " "hpe3par:nfs_options instead.") LOG.warning(msg) if nfs_options: options = nfs_options.split(',') else: options = [] # rw, ro, and (no)root_squash (in)secure options are not allowed in # extra_specs because they will be forcibly set below. # no_subtree_check and fsid are not allowed per 3PAR support. # Other strings will be allowed to be sent to the 3PAR which will do # further validation. options_not_allowed = ['ro', 'rw', 'no_root_squash', 'root_squash', 'secure', 'insecure', 'no_subtree_check', 'fsid'] invalid_options = [ option for option in options if option in options_not_allowed ] if invalid_options: raise exception.InvalidInput(_('Invalid hp3par:nfs_options or ' 'hpe3par:nfs_options in ' 'extra-specs. 
The following ' 'options are not allowed: %s') % invalid_options) options.append('ro' if readonly else 'rw') options.append('no_root_squash') options.append('insecure') return ','.join(options) def _build_createfshare_kwargs(self, protocol, fpg, fstore, readonly, sharedir, extra_specs, comment, client_ip=None): createfshare_kwargs = dict(fpg=fpg, fstore=fstore, sharedir=sharedir, comment=comment) if 'hp3par_flash_cache' in extra_specs: msg = _LW("hp3par_flash_cache is deprecated. Use " "hpe3par_flash_cache instead.") LOG.warning(msg) if protocol == 'nfs': if client_ip: createfshare_kwargs['clientip'] = client_ip else: # New NFS shares needs seed IP to prevent "all" access. # Readonly and readwrite NFS shares client IPs cannot overlap. if readonly: createfshare_kwargs['clientip'] = LOCAL_IP_RO else: createfshare_kwargs['clientip'] = LOCAL_IP options = self._get_nfs_options(extra_specs, readonly) createfshare_kwargs['options'] = options else: # To keep the original (Kilo, Liberty) behavior where CIFS IP # access rules were required in addition to user rules enable # this to use a seed IP instead of the default (all allowed). if self.hpe3par_require_cifs_ip: if client_ip: createfshare_kwargs['allowip'] = client_ip else: createfshare_kwargs['allowip'] = LOCAL_IP smb_opts = (ACCESS_BASED_ENUM, CONTINUOUS_AVAIL, CACHE) for smb_opt in smb_opts: opt_value = extra_specs.get('hpe3par:smb_%s' % smb_opt) if opt_value is None: opt_value = extra_specs.get('hp3par:smb_%s' % smb_opt) if opt_value: msg = _LW("hp3par:smb_* is deprecated. Use " "hpe3par:smb_* instead.") LOG.warning(msg) if opt_value: opt_key = SMB_EXTRA_SPECS_MAP[smb_opt] createfshare_kwargs[opt_key] = opt_value return createfshare_kwargs def _update_capacity_quotas(self, fstore, new_size, old_size, fpg, vfs): @utils.synchronized('hpe3par-update-quota-' + fstore) def _sync_update_capacity_quotas(fstore, new_size, old_size, fpg, vfs): """Update 3PAR quotas and return setfsquota output.""" if self.hpe3par_fstore_per_share: hcapacity = six.text_type(new_size * units.Ki) scapacity = hcapacity else: hard_size_mb = (new_size - old_size) * units.Ki soft_size_mb = hard_size_mb result = self._client.getfsquota( fpg=fpg, vfs=vfs, fstore=fstore) LOG.debug("getfsquota result=%s", result) quotas = result['members'] if len(quotas) == 1: hard_size_mb += int(quotas[0].get('hardBlock', '0')) soft_size_mb += int(quotas[0].get('softBlock', '0')) hcapacity = six.text_type(hard_size_mb) scapacity = six.text_type(soft_size_mb) return self._client.setfsquota(vfs, fpg=fpg, fstore=fstore, scapacity=scapacity, hcapacity=hcapacity) try: result = _sync_update_capacity_quotas( fstore, new_size, old_size, fpg, vfs) LOG.debug("setfsquota result=%s", result) except Exception as e: msg = (_('Failed to update capacity quota ' '%(size)s on %(fstore)s with exception: %(e)s') % {'size': new_size - old_size, 'fstore': fstore, 'e': six.text_type(e)}) LOG.error(msg) raise exception.ShareBackendException(msg=msg) # Non-empty result is an error message returned from the 3PAR if result: msg = (_('Failed to update capacity quota ' '%(size)s on %(fstore)s with error: %(error)s') % {'size': new_size - old_size, 'fstore': fstore, 'error': result}) LOG.error(msg) raise exception.ShareBackendException(msg=msg) def _create_share(self, project_id, share_id, protocol, extra_specs, fpg, vfs, fstore, sharedir, readonly, size, comment, client_ip=None): share_name = self.ensure_prefix(share_id, readonly=readonly) if not (sharedir or self.hpe3par_fstore_per_share): sharedir = share_name if fstore: 
use_existing_fstore = True else: use_existing_fstore = False if self.hpe3par_fstore_per_share: # Do not use -ro in the fstore name. fstore = self.ensure_prefix(share_id, readonly=False) else: fstore = self.ensure_prefix(project_id, protocol) createfshare_kwargs = self._build_createfshare_kwargs( protocol, fpg, fstore, readonly, sharedir, extra_specs, comment, client_ip=client_ip) if not use_existing_fstore: try: result = self._client.createfstore( vfs, fstore, fpg=fpg, comment=comment) LOG.debug("createfstore result=%s", result) except Exception as e: msg = (_('Failed to create fstore %(fstore)s: %(e)s') % {'fstore': fstore, 'e': six.text_type(e)}) LOG.exception(msg) raise exception.ShareBackendException(msg=msg) if size: self._update_capacity_quotas(fstore, size, 0, fpg, vfs) try: if readonly and protocol == 'nfs': # For NFS, RO is a 2nd 3PAR share pointing to same sharedir share_name = self.ensure_prefix(share_id, readonly=readonly) result = self._client.createfshare(protocol, vfs, share_name, **createfshare_kwargs) LOG.debug("createfshare result=%s", result) except Exception as e: msg = (_('Failed to create share %(share_name)s: %(e)s') % {'share_name': share_name, 'e': six.text_type(e)}) LOG.exception(msg) raise exception.ShareBackendException(msg=msg) try: result = self._client.getfshare( protocol, share_name, fpg=fpg, vfs=vfs, fstore=fstore) LOG.debug("getfshare result=%s", result) except Exception as e: msg = (_('Failed to get fshare %(share_name)s after creating it: ' '%(e)s') % {'share_name': share_name, 'e': six.text_type(e)}) LOG.exception(msg) raise exception.ShareBackendException(msg=msg) if result['total'] != 1: msg = (_('Failed to get fshare %(share_name)s after creating it. ' 'Expected to get 1 fshare. Got %(total)s.') % {'share_name': share_name, 'total': result['total']}) LOG.error(msg) raise exception.ShareBackendException(msg=msg) return result['members'][0] def create_share(self, project_id, share_id, share_proto, extra_specs, fpg, vfs, fstore=None, sharedir=None, readonly=False, size=None, comment=OPEN_STACK_MANILA, client_ip=None): """Create the share and return its path. This method can create a share when called by the driver or when called locally from create_share_from_snapshot(). The optional parameters allow re-use. :param project_id: The tenant ID. :param share_id: The share-id with or without osf- prefix. :param share_proto: The protocol (to map to smb or nfs) :param extra_specs: The share type extra-specs :param fpg: The file provisioning group :param vfs: The virtual file system :param fstore: (optional) The file store. When provided, an existing file store is used. Otherwise one is created. :param sharedir: (optional) Share directory. :param readonly: (optional) Create share as read-only. :param size: (optional) Size limit for file store if creating one. :param comment: (optional) Comment to set on the share. :param client_ip: (optional) IP address to give access to. 
:return: share path string """ protocol = self.ensure_supported_protocol(share_proto) share = self._create_share(project_id, share_id, protocol, extra_specs, fpg, vfs, fstore, sharedir, readonly, size, comment, client_ip=client_ip) if protocol == 'nfs': return share['sharePath'] else: return share['shareName'] def create_share_from_snapshot(self, share_id, share_proto, extra_specs, orig_project_id, orig_share_id, snapshot_id, fpg, vfs, ips, size=None, comment=OPEN_STACK_MANILA): protocol = self.ensure_supported_protocol(share_proto) snapshot_tag = self.ensure_prefix(snapshot_id) orig_share_name = self.ensure_prefix(orig_share_id) snapshot = self._find_fsnap(orig_project_id, orig_share_name, protocol, snapshot_tag, fpg, vfs) if not snapshot: msg = (_('Failed to create share from snapshot for ' 'FPG/VFS/tag %(fpg)s/%(vfs)s/%(tag)s. ' 'Snapshot not found.') % { 'fpg': fpg, 'vfs': vfs, 'tag': snapshot_tag}) LOG.error(msg) raise exception.ShareBackendException(msg=msg) fstore = snapshot['fstoreName'] if fstore == orig_share_name: # No subdir for original share created with fstore_per_share sharedir = '.snapshot/%s' % snapshot['snapName'] else: sharedir = '.snapshot/%s/%s' % (snapshot['snapName'], orig_share_name) if protocol == "smb" and (not self.hpe3par_cifs_admin_access_username or not self.hpe3par_cifs_admin_access_password): LOG.warning(_LW("hpe3par_cifs_admin_access_username and " "hpe3par_cifs_admin_access_password must be " "provided in order for CIFS shares created from " "snapshots to be writable.")) return self.create_share( orig_project_id, share_id, protocol, extra_specs, fpg, vfs, fstore=fstore, sharedir=sharedir, readonly=True, comment=comment, ) # Export the snapshot as read-only to copy from. temp = ' '.join((comment, TMP_RO_SNAP_EXPORT)) source_path = self.create_share( orig_project_id, share_id, protocol, extra_specs, fpg, vfs, fstore=fstore, sharedir=sharedir, readonly=True, comment=temp, client_ip=self.my_ip ) try: share_name = self.ensure_prefix(share_id) dest_path = self.create_share( orig_project_id, share_id, protocol, extra_specs, fpg, vfs, fstore=fstore, readonly=False, size=size, comment=comment, client_ip=','.join((self.my_ip, LOCAL_IP)) ) try: if protocol == 'smb': self._grant_admin_smb_access( protocol, fpg, vfs, fstore, comment, share=share_name) ro_share_name = self.ensure_prefix(share_id, readonly=True) self._grant_admin_smb_access( protocol, fpg, vfs, fstore, temp, share=ro_share_name) source_locations = self.build_export_locations( protocol, ips, source_path) dest_locations = self.build_export_locations( protocol, ips, dest_path) self._copy_share_data( share_id, source_locations[0], dest_locations[0], protocol) # Revoke the admin access that was needed to copy to the dest. 
if protocol == 'nfs': self._change_access(DENY, orig_project_id, share_id, protocol, 'ip', self.my_ip, 'rw', fpg, vfs) else: self._revoke_admin_smb_access( protocol, fpg, vfs, fstore, comment) except Exception as e: msg = _LE('Exception during mount and copy from RO snapshot ' 'to RW share: %s') LOG.error(msg, e) self._delete_share(share_name, protocol, fpg, vfs, fstore) raise finally: self._delete_ro_share( orig_project_id, share_id, protocol, fpg, vfs, fstore) return dest_path def _copy_share_data(self, dest_id, source_location, dest_location, protocol): mount_location = "%s%s" % (self.hpe3par_share_mount_path, dest_id) source_share_dir = '/'.join((mount_location, "source_snap")) dest_share_dir = '/'.join((mount_location, "dest_share")) dirs_to_remove = [] dirs_to_unmount = [] try: utils.execute('mkdir', '-p', source_share_dir, run_as_root=True) dirs_to_remove.append(source_share_dir) self._mount_share(protocol, source_location, source_share_dir) dirs_to_unmount.append(source_share_dir) utils.execute('mkdir', dest_share_dir, run_as_root=True) dirs_to_remove.append(dest_share_dir) self._mount_share(protocol, dest_location, dest_share_dir) dirs_to_unmount.append(dest_share_dir) self._copy_data(source_share_dir, dest_share_dir) finally: for d in dirs_to_unmount: self._unmount_share(d) if dirs_to_remove: dirs_to_remove.append(mount_location) utils.execute('rmdir', *dirs_to_remove, run_as_root=True) def _copy_data(self, source_share_dir, dest_share_dir): err_msg = None err_data = None try: copy = data_utils.Copy(source_share_dir, dest_share_dir, '') copy.run() progress = copy.get_progress()['total_progress'] if progress != 100: err_msg = _("Failed to copy data, reason: " "Total progress %d != 100.") err_data = progress except Exception as err: err_msg = _("Failed to copy data, reason: %s.") err_data = six.text_type(err) if err_msg: raise exception.ShareBackendException(msg=err_msg % err_data) def _delete_share(self, share_name, protocol, fpg, vfs, fstore): try: self._client.removefshare( protocol, vfs, share_name, fpg=fpg, fstore=fstore) except Exception as e: msg = (_('Failed to remove share %(share_name)s: %(e)s') % {'share_name': share_name, 'e': six.text_type(e)}) LOG.exception(msg) raise exception.ShareBackendException(msg=msg) def _delete_ro_share(self, project_id, share_id, protocol, fpg, vfs, fstore): share_name_ro = self.ensure_prefix(share_id, readonly=True) if not fstore: fstore = self._find_fstore(project_id, share_name_ro, protocol, fpg, vfs, allow_cross_protocol=True) if fstore: self._delete_share(share_name_ro, protocol, fpg, vfs, fstore) return fstore def delete_share(self, project_id, share_id, share_size, share_proto, fpg, vfs, share_ip): protocol = self.ensure_supported_protocol(share_proto) share_name = self.ensure_prefix(share_id) fstore = self._find_fstore(project_id, share_name, protocol, fpg, vfs, allow_cross_protocol=True) removed_writable = False if fstore: self._delete_share(share_name, protocol, fpg, vfs, fstore) removed_writable = True # Try to delete the read-only twin share, too. fstore = self._delete_ro_share( project_id, share_id, protocol, fpg, vfs, fstore) if fstore == share_name: try: self._client.removefstore(vfs, fstore, fpg=fpg) except Exception as e: msg = (_('Failed to remove fstore %(fstore)s: %(e)s') % {'fstore': fstore, 'e': six.text_type(e)}) LOG.exception(msg) raise exception.ShareBackendException(msg=msg) elif removed_writable: try: # Attempt to remove file tree on delete when using nested # shares. 
If the file tree cannot be removed for whatever # reason, we will not treat this as an error_deleting # issue. We will allow the delete to continue as requested. self._delete_file_tree( share_name, protocol, fpg, vfs, fstore, share_ip) # reduce the fsquota by share size when a tree is deleted. self._update_capacity_quotas( fstore, 0, share_size, fpg, vfs) except Exception as e: msg = _LW('Exception during cleanup of deleted ' 'share %(share)s in filestore %(fstore)s: %(e)s') data = { 'fstore': fstore, 'share': share_name, 'e': six.text_type(e), } LOG.warning(msg, data) def _delete_file_tree(self, share_name, protocol, fpg, vfs, fstore, share_ip): # If the share protocol is CIFS, we need to make sure the admin # provided the proper config values. If they have not, we can simply # return out and log a warning. if protocol == "smb" and (not self.hpe3par_cifs_admin_access_username or not self.hpe3par_cifs_admin_access_password): LOG.warning(_LW("hpe3par_cifs_admin_access_username and " "hpe3par_cifs_admin_access_password must be " "provided in order for the file tree to be " "properly deleted.")) return mount_location = "%s%s" % (self.hpe3par_share_mount_path, share_name) share_dir = mount_location + "/%s" % share_name # Create the super share. self._create_super_share(protocol, fpg, vfs, fstore) # Create the mount directory. self._create_mount_directory(mount_location) # Mount the super share. self._mount_super_share(protocol, mount_location, fpg, vfs, fstore, share_ip) # Delete the share from the super share. self._delete_share_directory(share_dir) # Unmount the super share. self._unmount_share(mount_location) # Delete the mount directory. self._delete_share_directory(mount_location) def _grant_admin_smb_access(self, protocol, fpg, vfs, fstore, comment, share=SUPER_SHARE): user = '+%s:fullcontrol' % self.hpe3par_cifs_admin_access_username setfshare_kwargs = { 'fpg': fpg, 'fstore': fstore, 'comment': comment, 'allowperm': user, } try: self._client.setfshare( protocol, vfs, share, **setfshare_kwargs) except Exception as err: raise exception.ShareBackendException( msg=_("There was an error adding permissions: %s") % err) def _revoke_admin_smb_access(self, protocol, fpg, vfs, fstore, comment, share=SUPER_SHARE): user = '-%s:fullcontrol' % self.hpe3par_cifs_admin_access_username setfshare_kwargs = { 'fpg': fpg, 'fstore': fstore, 'comment': comment, 'allowperm': user, } try: self._client.setfshare( protocol, vfs, share, **setfshare_kwargs) except Exception as err: raise exception.ShareBackendException( msg=_("There was an error revoking permissions: %s") % err) def _create_super_share(self, protocol, fpg, vfs, fstore, readonly=False): sharedir = '' extra_specs = {} comment = 'OpenStack super share used to delete nested shares.' createfshare_kwargs = self._build_createfshare_kwargs(protocol, fpg, fstore, readonly, sharedir, extra_specs, comment) # If the share is NFS, we need to give the host access to the share in # order to properly mount it. 
if protocol == 'nfs': createfshare_kwargs['clientip'] = self.my_ip else: createfshare_kwargs['allowip'] = self.my_ip try: result = self._client.createfshare(protocol, vfs, SUPER_SHARE, **createfshare_kwargs) LOG.debug("createfshare for %(name)s, result=%(result)s", {'name': SUPER_SHARE, 'result': result}) except Exception as e: msg = (_('Failed to create share %(share_name)s: %(e)s'), {'share_name': SUPER_SHARE, 'e': six.text_type(e)}) LOG.exception(msg) raise exception.ShareBackendException(msg=msg) # If the share is CIFS, we need to grant access to the specified admin. if protocol == 'smb': self._grant_admin_smb_access(protocol, fpg, vfs, fstore, comment) def _create_mount_directory(self, mount_location): try: utils.execute('mkdir', mount_location, run_as_root=True) except Exception as err: message = (_LW("There was an error creating mount directory: " "%s. The nested file tree will not be deleted."), six.text_type(err)) LOG.warning(message) def _mount_share(self, protocol, export_location, mount_dir): if protocol == 'nfs': cmd = ('mount', '-t', 'nfs', export_location, mount_dir) utils.execute(*cmd, run_as_root=True) else: export_location = export_location.replace('\\', '/') cred = ('username=' + self.hpe3par_cifs_admin_access_username + ',password=' + self.hpe3par_cifs_admin_access_password + ',domain=' + self.hpe3par_cifs_admin_access_domain) cmd = ('mount', '-t', 'cifs', export_location, mount_dir, '-o', cred) utils.execute(*cmd, run_as_root=True) def _mount_super_share(self, protocol, mount_dir, fpg, vfs, fstore, share_ip): try: mount_location = self._generate_mount_path( protocol, fpg, vfs, fstore, share_ip) self._mount_share(protocol, mount_location, mount_dir) except Exception as err: message = (_LW("There was an error mounting the super share: " "%s. The nested file tree will not be deleted."), six.text_type(err)) LOG.warning(message) def _unmount_share(self, mount_location): try: utils.execute('umount', mount_location, run_as_root=True) except Exception as err: message = _LW("There was an error unmounting the share at " "%(mount_location)s: %(error)s") msg_data = { 'mount_location': mount_location, 'error': six.text_type(err), } LOG.warning(message, msg_data) def _delete_share_directory(self, directory): try: utils.execute('rm', '-rf', directory, run_as_root=True) except Exception as err: message = (_LW("There was an error removing the share: " "%s. 
The nested file tree will not be deleted."), six.text_type(err)) LOG.warning(message) def _generate_mount_path(self, protocol, fpg, vfs, fstore, share_ip): path = None if protocol == 'nfs': path = (("%(share_ip)s:/%(fpg)s/%(vfs)s/%(fstore)s/") % {'share_ip': share_ip, 'fpg': fpg, 'vfs': vfs, 'fstore': fstore}) else: path = (("//%(share_ip)s/%(share_name)s/") % {'share_ip': share_ip, 'share_name': SUPER_SHARE}) return path def get_vfs(self, fpg, vfs=None): """Get the VFS or raise an exception.""" try: result = self._client.getvfs(fpg=fpg, vfs=vfs) except Exception as e: msg = (_('Exception during getvfs %(vfs)s: %(e)s') % {'vfs': vfs, 'e': six.text_type(e)}) LOG.exception(msg) raise exception.ShareBackendException(msg=msg) if result['total'] != 1: error_msg = result.get('message') if error_msg: message = (_('Error while validating FPG/VFS ' '(%(fpg)s/%(vfs)s): %(msg)s') % {'fpg': fpg, 'vfs': vfs, 'msg': error_msg}) LOG.error(message) raise exception.ShareBackendException(msg=message) else: message = (_('Error while validating FPG/VFS ' '(%(fpg)s/%(vfs)s): Expected 1, ' 'got %(total)s.') % {'fpg': fpg, 'vfs': vfs, 'total': result['total']}) LOG.error(message) raise exception.ShareBackendException(msg=message) value = result['members'][0] if isinstance(value['vfsip'], dict): # This is for 3parclient returning only one VFS entry LOG.debug("3parclient version up to 4.2.1 is in use. Client " "upgrade may be needed if using a VFS with multiple " "IP addresses.") value['vfsip']['address'] = [value['vfsip']['address']] else: # This is for 3parclient returning list of VFS entries # Format get_vfs ret value to combine all IP addresses discovered_vfs_ips = [] for vfs_entry in value['vfsip']: if vfs_entry['address']: discovered_vfs_ips.append(vfs_entry['address']) value['vfsip'] = value['vfsip'][0] value['vfsip']['address'] = discovered_vfs_ips return value @staticmethod def _is_share_from_snapshot(fshare): path = fshare.get('shareDir') if path: return '.snapshot' in path.split('/') path = fshare.get('sharePath') return path and '.snapshot' in path.split('/') def create_snapshot(self, orig_project_id, orig_share_id, orig_share_proto, snapshot_id, fpg, vfs): """Creates a snapshot of a share.""" fshare = self._find_fshare(orig_project_id, orig_share_id, orig_share_proto, fpg, vfs) if not fshare: msg = (_('Failed to create snapshot for FPG/VFS/fshare ' '%(fpg)s/%(vfs)s/%(fshare)s: Failed to find fshare.') % {'fpg': fpg, 'vfs': vfs, 'fshare': orig_share_id}) LOG.error(msg) raise exception.ShareBackendException(msg=msg) if self._is_share_from_snapshot(fshare): msg = (_('Failed to create snapshot for FPG/VFS/fshare ' '%(fpg)s/%(vfs)s/%(fshare)s: Share is a read-only ' 'share of an existing snapshot.') % {'fpg': fpg, 'vfs': vfs, 'fshare': orig_share_id}) LOG.error(msg) raise exception.ShareBackendException(msg=msg) fstore = fshare.get('fstoreName') snapshot_tag = self.ensure_prefix(snapshot_id) try: result = self._client.createfsnap( vfs, fstore, snapshot_tag, fpg=fpg) LOG.debug("createfsnap result=%s", result) except Exception as e: msg = (_('Failed to create snapshot for FPG/VFS/fstore ' '%(fpg)s/%(vfs)s/%(fstore)s: %(e)s') % {'fpg': fpg, 'vfs': vfs, 'fstore': fstore, 'e': six.text_type(e)}) LOG.exception(msg) raise exception.ShareBackendException(msg=msg) def delete_snapshot(self, orig_project_id, orig_share_id, orig_proto, snapshot_id, fpg, vfs): """Deletes a snapshot of a share.""" snapshot_tag = self.ensure_prefix(snapshot_id) snapshot = self._find_fsnap(orig_project_id, orig_share_id, orig_proto, 
snapshot_tag, fpg, vfs) if not snapshot: return fstore = snapshot.get('fstoreName') for protocol in ('nfs', 'smb'): try: shares = self._client.getfshare(protocol, fpg=fpg, vfs=vfs, fstore=fstore) except Exception as e: msg = (_('Unexpected exception while getting share list. ' 'Cannot delete snapshot without checking for ' 'dependent shares first: %s') % six.text_type(e)) LOG.exception(msg) raise exception.ShareBackendException(msg=msg) for share in shares['members']: if protocol == 'nfs': path = share['sharePath'][1:].split('/') dot_snapshot_index = 3 else: if share['shareDir']: path = share['shareDir'].split('/') else: path = None dot_snapshot_index = 0 snapshot_index = dot_snapshot_index + 1 if path and len(path) > snapshot_index: if (path[dot_snapshot_index] == '.snapshot' and path[snapshot_index].endswith(snapshot_tag)): msg = (_('Cannot delete snapshot because it has a ' 'dependent share.')) raise exception.Invalid(msg) snapname = snapshot['snapName'] try: result = self._client.removefsnap( vfs, fstore, snapname=snapname, fpg=fpg) LOG.debug("removefsnap result=%s", result) except Exception as e: msg = (_('Failed to delete snapshot for FPG/VFS/fstore/snapshot ' '%(fpg)s/%(vfs)s/%(fstore)s/%(snapname)s: %(e)s') % { 'fpg': fpg, 'vfs': vfs, 'fstore': fstore, 'snapname': snapname, 'e': six.text_type(e)}) LOG.exception(msg) raise exception.ShareBackendException(msg=msg) # Try to reclaim the space try: self._client.startfsnapclean(fpg, reclaimStrategy='maxspeed') except Exception: # Remove already happened so only log this. LOG.exception(_LE('Unexpected exception calling startfsnapclean ' 'for FPG %(fpg)s.'), {'fpg': fpg}) @staticmethod def _validate_access_type(protocol, access_type): if access_type not in ('ip', 'user'): msg = (_("Invalid access type. Expected 'ip' or 'user'. " "Actual '%s'.") % access_type) LOG.error(msg) raise exception.InvalidInput(reason=msg) if protocol == 'nfs' and access_type != 'ip': msg = (_("Invalid NFS access type. HPE 3PAR NFS supports 'ip'. " "Actual '%s'.") % access_type) LOG.error(msg) raise exception.HPE3ParInvalid(err=msg) return protocol @staticmethod def _validate_access_level(protocol, access_type, access_level, fshare): readonly = access_level == 'ro' snapshot = HPE3ParMediator._is_share_from_snapshot(fshare) if snapshot and not readonly: reason = _('3PAR shares from snapshots require read-only access') LOG.error(reason) raise exception.InvalidShareAccess(reason=reason) if protocol == 'smb' and access_type == 'ip' and snapshot != readonly: msg = (_("Invalid CIFS access rule. HPE 3PAR optionally supports " "IP access rules for CIFS shares, but they must be " "read-only for shares from snapshots and read-write for " "other shares. Use the required CIFS 'user' access rules " "to refine access.")) LOG.error(msg) raise exception.InvalidShareAccess(reason=msg) @staticmethod def ignore_benign_access_results(plus_or_minus, access_type, access_to, result): # TODO(markstur): Remove the next line when hpe3parclient is fixed. result = [x for x in result if x != '\r'] if result: if plus_or_minus == DENY: if DOES_NOT_EXIST in result[0]: return None else: if access_type == 'user': if USER_ALREADY_EXISTS % access_to in result[0]: return None elif IP_ALREADY_EXISTS % access_to in result[0]: return None return result def _change_access(self, plus_or_minus, project_id, share_id, share_proto, access_type, access_to, access_level, fpg, vfs, extra_specs=None): """Allow or deny access to a share. 
Plus_or_minus character indicates add to allow list (+) or remove from allow list (-). """ readonly = access_level == 'ro' protocol = self.ensure_supported_protocol(share_proto) try: self._validate_access_type(protocol, access_type) except Exception: if plus_or_minus == DENY: # Catch invalid rules for deny. Allow them to be deleted. return else: raise fshare = self._find_fshare(project_id, share_id, protocol, fpg, vfs, readonly=readonly) if not fshare: # Change access might apply to the share with the name that # does not match the access_level prefix. other_fshare = self._find_fshare(project_id, share_id, protocol, fpg, vfs, readonly=not readonly) if other_fshare: if plus_or_minus == DENY: # Try to deny rule from 'other' share for SMB or legacy. fshare = other_fshare elif self._is_share_from_snapshot(other_fshare): # Found a share-from-snapshot from before # "-ro" was added to the name. Use it. fshare = other_fshare elif protocol == 'nfs': # We don't have the RO|RW share we need, but the # opposite one already exists. It is OK to create # the one we need for ALLOW with NFS (not from snapshot). fstore = other_fshare.get('fstoreName') sharedir = other_fshare.get('shareDir') comment = other_fshare.get('comment') fshare = self._create_share(project_id, share_id, protocol, extra_specs, fpg, vfs, fstore=fstore, sharedir=sharedir, readonly=readonly, size=None, comment=comment) else: # SMB only has one share for RO and RW. Try to use it. fshare = other_fshare if not fshare: msg = _('Failed to change (%(change)s) access ' 'to FPG/share %(fpg)s/%(share)s ' 'for %(type)s %(to)s %(level)s): ' 'Share does not exist on 3PAR.') msg_data = { 'change': plus_or_minus, 'fpg': fpg, 'share': share_id, 'type': access_type, 'to': access_to, 'level': access_level, } if plus_or_minus == DENY: LOG.warning(msg, msg_data) return else: raise exception.HPE3ParInvalid(err=msg % msg_data) try: self._validate_access_level( protocol, access_type, access_level, fshare) except exception.InvalidShareAccess as e: if plus_or_minus == DENY: # Allow invalid access rules to be deleted. 
msg = _('Ignoring deny invalid access rule ' 'for FPG/share %(fpg)s/%(share)s ' 'for %(type)s %(to)s %(level)s): %(e)s') msg_data = { 'change': plus_or_minus, 'fpg': fpg, 'share': share_id, 'type': access_type, 'to': access_to, 'level': access_level, 'e': six.text_type(e), } LOG.info(msg, msg_data) return else: raise share_name = fshare.get('shareName') setfshare_kwargs = { 'fpg': fpg, 'fstore': fshare.get('fstoreName'), 'comment': fshare.get('comment'), } if protocol == 'nfs': access_change = '%s%s' % (plus_or_minus, access_to) setfshare_kwargs['clientip'] = access_change elif protocol == 'smb': if access_type == 'ip': access_change = '%s%s' % (plus_or_minus, access_to) setfshare_kwargs['allowip'] = access_change else: access_str = 'read' if readonly else 'fullcontrol' perm = '%s%s:%s' % (plus_or_minus, access_to, access_str) setfshare_kwargs['allowperm'] = perm try: result = self._client.setfshare( protocol, vfs, share_name, **setfshare_kwargs) result = self.ignore_benign_access_results( plus_or_minus, access_type, access_to, result) except Exception as e: result = six.text_type(e) LOG.debug("setfshare result=%s", result) if result: msg = (_('Failed to change (%(change)s) access to FPG/share ' '%(fpg)s/%(share)s for %(type)s %(to)s %(level)s: ' '%(error)s') % {'change': plus_or_minus, 'fpg': fpg, 'share': share_id, 'type': access_type, 'to': access_to, 'level': access_level, 'error': result}) raise exception.ShareBackendException(msg=msg) def _find_fstore(self, project_id, share_id, share_proto, fpg, vfs, allow_cross_protocol=False): share = self._find_fshare(project_id, share_id, share_proto, fpg, vfs, allow_cross_protocol=allow_cross_protocol) return share.get('fstoreName') if share else None def _find_fshare(self, project_id, share_id, share_proto, fpg, vfs, allow_cross_protocol=False, readonly=False): share = self._find_fshare_with_proto(project_id, share_id, share_proto, fpg, vfs, readonly=readonly) if not share and allow_cross_protocol: other_proto = self.other_protocol(share_proto) share = self._find_fshare_with_proto(project_id, share_id, other_proto, fpg, vfs, readonly=readonly) return share def _find_fshare_with_proto(self, project_id, share_id, share_proto, fpg, vfs, readonly=False): protocol = self.ensure_supported_protocol(share_proto) share_name = self.ensure_prefix(share_id, readonly=readonly) project_fstore = self.ensure_prefix(project_id, share_proto) search_order = [ {'fpg': fpg, 'vfs': vfs, 'fstore': project_fstore}, {'fpg': fpg, 'vfs': vfs, 'fstore': share_name}, {'fpg': fpg}, {} ] try: for search_params in search_order: result = self._client.getfshare(protocol, share_name, **search_params) shares = result.get('members', []) if len(shares) == 1: return shares[0] except Exception as e: msg = (_('Unexpected exception while getting share list: %s') % six.text_type(e)) raise exception.ShareBackendException(msg=msg) def _find_fsnap(self, project_id, share_id, orig_proto, snapshot_tag, fpg, vfs): share_name = self.ensure_prefix(share_id) osf_project_id = self.ensure_prefix(project_id, orig_proto) pattern = '*_%s' % self.ensure_prefix(snapshot_tag) search_order = [ {'pat': True, 'fpg': fpg, 'vfs': vfs, 'fstore': osf_project_id}, {'pat': True, 'fpg': fpg, 'vfs': vfs, 'fstore': share_name}, {'pat': True, 'fpg': fpg}, {'pat': True}, ] try: for search_params in search_order: result = self._client.getfsnap(pattern, **search_params) snapshots = result.get('members', []) if len(snapshots) == 1: return snapshots[0] except Exception as e: msg = (_('Unexpected exception while getting 
snapshots: %s') % six.text_type(e)) raise exception.ShareBackendException(msg=msg) def update_access(self, project_id, share_id, share_proto, extra_specs, access_rules, add_rules, delete_rules, fpg, vfs): """Update access to a share.""" protocol = self.ensure_supported_protocol(share_proto) if not (delete_rules or add_rules): # We need to re add all the rules. Check with 3PAR on it's current # list and only add the deltas. share = self._find_fshare(project_id, share_id, share_proto, fpg, vfs) ref_users = [] ro_ref_rules = [] if protocol == 'nfs': ref_rules = share['clients'] # Check for RO rules. ro_share = self._find_fshare(project_id, share_id, share_proto, fpg, vfs, readonly=True) if ro_share: ro_ref_rules = ro_share['clients'] else: ref_rules = [x[0] for x in share['allowPerm']] ref_users = ref_rules[:] # Get IP access as well ips = share['allowIP'] if not isinstance(ips, list): # If there is only one IP, the API returns a string # rather than a list. We need to account for that. ips = [ips] ref_rules += ips # Retrieve base rules. base_rules = [] for rule in access_rules: base_rules.append(rule['access_to']) # Check if we need to remove any rules from 3PAR. for rule in ref_rules: if rule in ref_users: rule_type = 'user' else: rule_type = 'ip' if rule not in base_rules + [LOCAL_IP, LOCAL_IP_RO]: self._change_access(DENY, project_id, share_id, share_proto, rule_type, rule, None, fpg, vfs) # Check to see if there are any RO rules to remove. for rule in ro_ref_rules: if rule not in base_rules + [LOCAL_IP, LOCAL_IP_RO]: self._change_access(DENY, project_id, share_id, share_proto, rule_type, rule, 'ro', fpg, vfs) # Check the rules we need to add. for rule in access_rules: if rule['access_to'] not in ref_rules and ( rule['access_to'] not in ro_ref_rules): # Rule does not exist, we need to add it self._change_access(ALLOW, project_id, share_id, share_proto, rule['access_type'], rule['access_to'], rule['access_level'], fpg, vfs, extra_specs=extra_specs) else: # We have deltas of the rules that need to be added and deleted. for rule in delete_rules: self._change_access(DENY, project_id, share_id, share_proto, rule['access_type'], rule['access_to'], rule['access_level'], fpg, vfs) for rule in add_rules: self._change_access(ALLOW, project_id, share_id, share_proto, rule['access_type'], rule['access_to'], rule['access_level'], fpg, vfs, extra_specs=extra_specs) def resize_share(self, project_id, share_id, share_proto, new_size, old_size, fpg, vfs): """Extends or shrinks size of existing share.""" share_name = self.ensure_prefix(share_id) fstore = self._find_fstore(project_id, share_name, share_proto, fpg, vfs, allow_cross_protocol=False) if not fstore: msg = (_('Cannot resize share because it was not found.')) raise exception.InvalidShare(reason=msg) self._update_capacity_quotas(fstore, new_size, old_size, fpg, vfs) def fsip_exists(self, fsip): """Try to get FSIP. Return True if it exists.""" vfs = fsip['vfs'] fpg = fsip['fspool'] try: result = self._client.getfsip(vfs, fpg=fpg) LOG.debug("getfsip result: %s", result) except Exception: msg = (_('Failed to get FSIPs for FPG/VFS %(fspool)s/%(vfs)s.') % fsip) LOG.exception(msg) raise exception.ShareBackendException(msg=msg) for member in result['members']: if all(item in member.items() for item in fsip.items()): return True return False def create_fsip(self, ip, subnet, vlantag, fpg, vfs): vlantag_str = six.text_type(vlantag) if vlantag else '0' # Try to create it. It's OK if it already exists. 
try: result = self._client.createfsip(ip, subnet, vfs, fpg=fpg, vlantag=vlantag_str) LOG.debug("createfsip result: %s", result) except Exception: msg = (_('Failed to create FSIP for %s') % ip) LOG.exception(msg) raise exception.ShareBackendException(msg=msg) # Verify that it really exists. fsip = { 'fspool': fpg, 'vfs': vfs, 'address': ip, 'prefixLen': subnet, 'vlanTag': vlantag_str, } if not self.fsip_exists(fsip): msg = (_('Failed to get FSIP after creating it for ' 'FPG/VFS/IP/subnet/VLAN ' '%(fspool)s/%(vfs)s/' '%(address)s/%(prefixLen)s/%(vlanTag)s.') % fsip) LOG.error(msg) raise exception.ShareBackendException(msg=msg) def remove_fsip(self, ip, fpg, vfs): if not (vfs and ip): # If there is no VFS and/or IP, then there is no FSIP to remove. return try: result = self._client.removefsip(vfs, ip, fpg=fpg) LOG.debug("removefsip result: %s", result) except Exception: msg = (_('Failed to remove FSIP %s') % ip) LOG.exception(msg) raise exception.ShareBackendException(msg=msg) # Verify that it really no longer exists. fsip = { 'fspool': fpg, 'vfs': vfs, 'address': ip, } if self.fsip_exists(fsip): msg = (_('Failed to remove FSIP for FPG/VFS/IP ' '%(fspool)s/%(vfs)s/%(address)s.') % fsip) LOG.error(msg) raise exception.ShareBackendException(msg=msg)
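Two conventions used throughout the mediator above are easy to lose in the volume of code: share names carry an osf prefix (ensure_prefix) and export locations are formatted differently for NFS and SMB (build_export_locations). Below is a dependency-free sketch of just those two rules as they appear above; protocol normalization and error handling are omitted, and the sample values are invented.

# Hedged sketch of the share-naming and export-location rules used above.
# Validation (ensure_supported_protocol, empty-argument checks) is omitted.
def ensure_prefix(uid, protocol=None, readonly=False):
    """Mirror of the naming rule: osf[-<proto>][-ro]-<uid>."""
    if uid.startswith('osf-'):
        return uid
    proto = '-%s' % protocol if protocol else ''
    ro = '-ro' if readonly else ''
    return 'osf%s%s-%s' % (proto, ro, uid)

def build_export_locations(protocol, ips, path):
    # NFS exports are "ip:path"; SMB exports are UNC-style paths.
    if protocol == 'nfs':
        return ['%s:%s' % (ip, path) for ip in ips]
    return [r'\\%s\%s' % (ip, path) for ip in ips]

print(ensure_prefix('share-uid-123', protocol='nfs', readonly=True))
# -> osf-nfs-ro-share-uid-123
print(build_export_locations('nfs', ['10.0.0.5'], '/fpg1/vfs1/osf-project/share'))
# -> ['10.0.0.5:/fpg1/vfs1/osf-project/share']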
apache-2.0
-3,297,285,315,249,804,300
39.636574
79
0.48358
false
Opentaste/bombolone
bombolone/routes/content.py
1
5760
# -*- coding: utf-8 -*-
"""
content.py
~~~~~~

:copyright: (c) 2014 by @zizzamia
:license: BSD (See LICENSE for details)
"""
from flask import (Blueprint, abort, request, session, g,
                   current_app, render_template, send_from_directory)

# Imports inside Bombolone
import bombolone.model.pages
from bombolone.core.utils import get_content_dict

content = Blueprint('content', __name__)


def get_page_content(code_lan, num_of_path, path):
    """ Given the language code and path, returns the page content object. """
    # Inside any page the path is saved with this format
    url = "url_{}.{}".format(num_of_path, code_lan)

    # Create a list of pages
    list_pages = [page for page in bombolone.model.pages.find(field=url, field_value={"$exists": True})]

    for page in list_pages:
        count = 0
        # Any time the "path" segment is the same, or we have some
        # value like "<i_am_variable>", increase the counter
        for i in range(num_of_path):
            print page["url_"+str(num_of_path)]
            page_by_lang = page["url_"+str(num_of_path)].get(code_lan, None)
            if page_by_lang:
                word = page_by_lang[i]
                if path[i] == word:
                    count += 1
                #if word[0] == '<' and word[-1] == '>':
                #    count += 1

        # If the counter equals num_of_path, it means
        # we found the page we need
        if count == num_of_path:
            return page

    return None


def render_content_page(num_of_path, path):
    """
    Using the path of the URL, look inside the collection of pages
    for a page that matches. If one matches, it is rendered.

    The main for loop searches for the "page_document" by
    language "code_lan"; inside every page we search for the
    kind of url with a specific "num_of_path", like url_1.en or url_2.it

    {
        "_id" : ObjectId("123456"),
        ...
        "url" : {
            "en" : "about/story",
            "it" : "chi_siamo/storia"
        },
        "url_2" : {
            "en" : [ "about", "story" ],
            "it" : [ "chi_siamo", "storia" ]
        },
        ...
    }
    """
    languages = g.languages_object.available_lang_code

    # Retrieve page document by g.lang
    code = g.lang
    page_document = get_page_content(code, num_of_path, path)

    if page_document is None:
        # Retrieve page document by one of the available languages
        for code_lan in languages:
            code = code_lan
            page_document = get_page_content(code, num_of_path, path)
            if page_document is not None:
                break

    # If page_document is None then no page
    # exists for that url
    if page_document is None:
        abort(404)
    else:
        # 1) dynamic page
        # ===============================================================
        page_from = page_document['from']
        page_import = page_document['import']
        if page_from and page_import:
            page_from = "pages."+page_from
            modules = page_from.split(".")
            if len(modules) == 1:
                module = __import__(page_from, globals(), locals(), [], -1)
                method_to_call = getattr(module, page_import)
            else:
                module = __import__(page_from, globals(), locals(), [], -1)
                module_called = getattr(module, modules[1])
                method_to_call = getattr(module_called, page_import)
            return method_to_call(page_document, path, code)

        # 2) static page
        # ===============================================================
        title = page_document['title'].get(code, '')
        description = page_document['description'].get(code, '')
        content = {}
        if page_document['content']:
            content = get_content_dict(page_document, code)

        # For every page you must specify the file where you want
        # to use the contents stored in the database.
        template_file = 'pages/{0}.html'.format(page_document['file'])
        return render_template(template_file, **locals())


@content.route('/api/1.0/<three>/', methods=['POST', 'GET'])
@content.route('/api/1.0/<three>/<four>', methods=['POST', 'GET'])
def api_404(three, four):
    abort(404)


@content.route('/', methods=['POST', 'GET'])
def home():
    """Path home page level deep"""
    path = ['']
    return render_content_page(1, path)


@content.route('/robots.txt/')
@content.route('/sitemap.xml/')
@content.route('/favicon.ico/')
def static_from_root():
    return send_from_directory(current_app.static_folder, request.path[1:])


@content.route('/<regex("((?!static).*)"):one>/', methods=['POST', 'GET'])
def one(one):
    """Path one level deep"""
    path = [one]
    return render_content_page(1, path)


@content.route('/<regex("((?!static).*)"):one>/<two>/', methods=['POST', 'GET'])
def two(one, two):
    """Path two level deep"""
    path = [one, two]
    return render_content_page(2, path)


@content.route('/<regex("((?!static).*)"):one>/<two>/<three>/', methods=['POST', 'GET'])
def three(one, two, three):
    """Path three level deep"""
    path = [one, two, three]
    return render_content_page(3, path)


@content.route('/<regex("((?!static).*)"):one>/<two>/<three>/<four>/', methods=['POST', 'GET'])
def four(one, two, three, four):
    """Path four level deep"""
    path = [one, two, three, four]
    return render_content_page(4, path)
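The render_content_page docstring above describes how a request path is matched against the per-language url_N lists stored on each page document. The following is a self-contained sketch of that matching rule applied to plain dicts instead of MongoDB documents; the sample pages and the match_page helper are hypothetical.

# Hedged sketch of the per-language path matching described in the docstring above.
# The page dicts below are made-up stand-ins for MongoDB page documents.
pages = [
    {"url_2": {"en": ["about", "story"], "it": ["chi_siamo", "storia"]}},
    {"url_2": {"en": ["about", "team"], "it": ["chi_siamo", "squadra"]}},
]


def match_page(code_lan, num_of_path, path, pages):
    key = "url_{}".format(num_of_path)
    for page in pages:
        segments = page.get(key, {}).get(code_lan)
        if (segments and len(segments) == num_of_path and
                all(path[i] == segments[i] for i in range(num_of_path))):
            return page
    return None


print(match_page("it", 2, ["chi_siamo", "storia"], pages))  # first page document
print(match_page("en", 2, ["about", "missing"], pages))     # None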
bsd-3-clause
-3,717,585,810,710,158,300
34.226415
108
0.530556
false
fujunwei/chromium-crosswalk
tools/resources/find_used_resources.py
24
2073
#!/usr/bin/env python # Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import argparse import re import sys USAGE = """find_used_resources.py [-h] [-i INPUT] [-o OUTPUT] Outputs the sorted list of resource ids that are part of unknown pragma warning in the given build log. This script is used to find the resources that are actually compiled in Chrome in order to only include the needed strings/images in Chrome PAK files. The script parses out the list of used resource ids. These resource ids show up in the build output after building Chrome with gyp variable enable_resource_whitelist_generation set to 1. This gyp flag causes the compiler to print out a UnknownPragma message every time a resource id is used. E.g.: foo.cc:22:0: warning: ignoring #pragma whitelisted_resource_12345 [-Wunknown-pragmas] On Windows, the message is simply a message via __pragma(message(...)). """ def GetResourceIdsInPragmaWarnings(input): """Returns sorted set of resource ids that are inside unknown pragma warnings for the given input. """ used_resources = set() unknown_pragma_warning_pattern = re.compile( 'whitelisted_resource_(?P<resource_id>[0-9]+)') for ln in input: match = unknown_pragma_warning_pattern.search(ln) if match: resource_id = int(match.group('resource_id')) used_resources.add(resource_id) return sorted(used_resources) def Main(): parser = argparse.ArgumentParser(usage=USAGE) parser.add_argument( '-i', '--input', type=argparse.FileType('r'), default=sys.stdin, help='The build log to read (default stdin)') parser.add_argument( '-o', '--output', type=argparse.FileType('w'), default=sys.stdout, help='The resource list path to write (default stdout)') args = parser.parse_args() used_resources = GetResourceIdsInPragmaWarnings(args.input) for resource_id in used_resources: args.output.write('%d\n' % resource_id) if __name__ == '__main__': Main()
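# Editor's illustration (not part of the original script): feeding a couple of sample
# warning lines through the same extraction logic used above. The log lines here are
# made up for demonstration; only the pragma format comes from the script's docstring.
import re

sample_log = [
    "foo.cc:22:0: warning: ignoring #pragma whitelisted_resource_12345 [-Wunknown-pragmas]",
    "bar.cc:7:0: warning: ignoring #pragma whitelisted_resource_67890 [-Wunknown-pragmas]",
    "unrelated build output line",
]

pattern = re.compile('whitelisted_resource_(?P<resource_id>[0-9]+)')
ids = sorted({int(m.group('resource_id'))
              for line in sample_log
              for m in [pattern.search(line)] if m})
print(ids)  # [12345, 67890]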
bsd-3-clause
-8,252,867,062,735,365,000
34.135593
80
0.723589
false
mavenlin/tensorflow
tensorflow/contrib/slim/python/slim/data/data_decoder.py
146
2302
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Contains helper functions and classes necessary for decoding data. While data providers read data from disk, sstables or other formats, data decoders decode the data (if necessary). A data decoder is provided with a serialized or encoded piece of data as well as a list of items and returns a set of tensors, each of which correspond to the requested list of items extracted from the data: def Decode(self, data, items): ... For example, if data is a compressed map, the implementation might be: def Decode(self, data, items): decompressed_map = _Decompress(data) outputs = [] for item in items: outputs.append(decompressed_map[item]) return outputs. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import abc class DataDecoder(object): """An abstract class which is used to decode data for a provider.""" __metaclass__ = abc.ABCMeta @abc.abstractmethod def decode(self, data, items): """Decodes the data to returns the tensors specified by the list of items. Args: data: A possibly encoded data format. items: A list of strings, each of which indicate a particular data type. Returns: A list of `Tensors`, whose length matches the length of `items`, where each `Tensor` corresponds to each item. Raises: ValueError: If any of the items cannot be satisfied. """ pass @abc.abstractmethod def list_items(self): """Lists the names of the items that the decoder can decode. Returns: A list of string names. """ pass
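# Editor's illustration (not part of the original module): a minimal concrete decoder
# that follows the interface documented above. It is written standalone (no TensorFlow
# import) purely to show the decode()/list_items() contract; a real slim decoder would
# subclass DataDecoder and return Tensors rather than plain Python values.
class DictDecoder(object):
  """Decodes 'data' that is already a plain Python dict."""

  def decode(self, data, items):
    # Return one value per requested item, in order, as the interface requires.
    return [data[item] for item in items]

  def list_items(self):
    return ['image', 'label']

decoder = DictDecoder()
print(decoder.decode({'image': 'raw-bytes', 'label': 3}, ['label', 'image']))
# [3, 'raw-bytes']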
apache-2.0
3,815,528,316,752,580,000
30.972222
80
0.694179
false
BryceBrown/LinkstrDjango
rest_framework/tests/hyperlinkedserializers.py
1
9456
from __future__ import unicode_literals import json from django.test import TestCase from django.test.client import RequestFactory from rest_framework import generics, status, serializers from rest_framework.compat import patterns, url from rest_framework.tests.models import Anchor, BasicModel, ManyToManyModel, BlogPost, BlogPostComment, Album, Photo, OptionalRelationModel factory = RequestFactory() class BlogPostCommentSerializer(serializers.ModelSerializer): url = serializers.HyperlinkedIdentityField(view_name='blogpostcomment-detail') text = serializers.CharField() blog_post_url = serializers.HyperlinkedRelatedField(source='blog_post', view_name='blogpost-detail') class Meta: model = BlogPostComment fields = ('text', 'blog_post_url', 'url') class PhotoSerializer(serializers.Serializer): description = serializers.CharField() album_url = serializers.HyperlinkedRelatedField(source='album', view_name='album-detail', queryset=Album.objects.all(), slug_field='title', slug_url_kwarg='title') def restore_object(self, attrs, instance=None): return Photo(**attrs) class BasicList(generics.ListCreateAPIView): model = BasicModel model_serializer_class = serializers.HyperlinkedModelSerializer class BasicDetail(generics.RetrieveUpdateDestroyAPIView): model = BasicModel model_serializer_class = serializers.HyperlinkedModelSerializer class AnchorDetail(generics.RetrieveAPIView): model = Anchor model_serializer_class = serializers.HyperlinkedModelSerializer class ManyToManyList(generics.ListAPIView): model = ManyToManyModel model_serializer_class = serializers.HyperlinkedModelSerializer class ManyToManyDetail(generics.RetrieveAPIView): model = ManyToManyModel model_serializer_class = serializers.HyperlinkedModelSerializer class BlogPostCommentListCreate(generics.ListCreateAPIView): model = BlogPostComment serializer_class = BlogPostCommentSerializer class BlogPostCommentDetail(generics.RetrieveAPIView): model = BlogPostComment serializer_class = BlogPostCommentSerializer class BlogPostDetail(generics.RetrieveAPIView): model = BlogPost class PhotoListCreate(generics.ListCreateAPIView): model = Photo model_serializer_class = PhotoSerializer class AlbumDetail(generics.RetrieveAPIView): model = Album class OptionalRelationDetail(generics.RetrieveUpdateDestroyAPIView): model = OptionalRelationModel model_serializer_class = serializers.HyperlinkedModelSerializer urlpatterns = patterns('', url(r'^basic/$', BasicList.as_view(), name='basicmodel-list'), url(r'^basic/(?P<pk>\d+)/$', BasicDetail.as_view(), name='basicmodel-detail'), url(r'^anchor/(?P<pk>\d+)/$', AnchorDetail.as_view(), name='anchor-detail'), url(r'^manytomany/$', ManyToManyList.as_view(), name='manytomanymodel-list'), url(r'^manytomany/(?P<pk>\d+)/$', ManyToManyDetail.as_view(), name='manytomanymodel-detail'), url(r'^posts/(?P<pk>\d+)/$', BlogPostDetail.as_view(), name='blogpost-detail'), url(r'^comments/$', BlogPostCommentListCreate.as_view(), name='blogpostcomment-list'), url(r'^comments/(?P<pk>\d+)/$', BlogPostCommentDetail.as_view(), name='blogpostcomment-detail'), url(r'^albums/(?P<title>\w[\w-]*)/$', AlbumDetail.as_view(), name='album-detail'), url(r'^photos/$', PhotoListCreate.as_view(), name='photo-list'), url(r'^optionalrelation/(?P<pk>\d+)/$', OptionalRelationDetail.as_view(), name='optionalrelationmodel-detail'), ) class TestBasicHyperlinkedView(TestCase): urls = 'rest_framework.tests.hyperlinkedserializers' def setUp(self): """ Create 3 BasicModel instances. 
""" items = ['foo', 'bar', 'baz'] for item in items: BasicModel(text=item).save() self.objects = BasicModel.objects self.data = [ {'url': 'http://testserver/basic/%d/' % obj.id, 'text': obj.text} for obj in self.objects.all() ] self.list_view = BasicList.as_view() self.detail_view = BasicDetail.as_view() def test_get_list_view(self): """ GET requests to ListCreateAPIView should return list of objects. """ request = factory.get('/basic/') response = self.list_view(request).render() self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data, self.data) def test_get_detail_view(self): """ GET requests to ListCreateAPIView should return list of objects. """ request = factory.get('/basic/1') response = self.detail_view(request, pk=1).render() self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data, self.data[0]) class TestManyToManyHyperlinkedView(TestCase): urls = 'rest_framework.tests.hyperlinkedserializers' def setUp(self): """ Create 3 BasicModel instances. """ items = ['foo', 'bar', 'baz'] anchors = [] for item in items: anchor = Anchor(text=item) anchor.save() anchors.append(anchor) manytomany = ManyToManyModel() manytomany.save() manytomany.rel.add(*anchors) self.data = [{ 'url': 'http://testserver/manytomany/1/', 'rel': [ 'http://testserver/anchor/1/', 'http://testserver/anchor/2/', 'http://testserver/anchor/3/', ] }] self.list_view = ManyToManyList.as_view() self.detail_view = ManyToManyDetail.as_view() def test_get_list_view(self): """ GET requests to ListCreateAPIView should return list of objects. """ request = factory.get('/manytomany/') response = self.list_view(request) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data, self.data) def test_get_detail_view(self): """ GET requests to ListCreateAPIView should return list of objects. 
""" request = factory.get('/manytomany/1/') response = self.detail_view(request, pk=1) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data, self.data[0]) class TestCreateWithForeignKeys(TestCase): urls = 'rest_framework.tests.hyperlinkedserializers' def setUp(self): """ Create a blog post """ self.post = BlogPost.objects.create(title="Test post") self.create_view = BlogPostCommentListCreate.as_view() def test_create_comment(self): data = { 'text': 'A test comment', 'blog_post_url': 'http://testserver/posts/1/' } request = factory.post('/comments/', data=data) response = self.create_view(request) self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertEqual(response['Location'], 'http://testserver/comments/1/') self.assertEqual(self.post.blogpostcomment_set.count(), 1) self.assertEqual(self.post.blogpostcomment_set.all()[0].text, 'A test comment') class TestCreateWithForeignKeysAndCustomSlug(TestCase): urls = 'rest_framework.tests.hyperlinkedserializers' def setUp(self): """ Create an Album """ self.post = Album.objects.create(title='test-album') self.list_create_view = PhotoListCreate.as_view() def test_create_photo(self): data = { 'description': 'A test photo', 'album_url': 'http://testserver/albums/test-album/' } request = factory.post('/photos/', data=data) response = self.list_create_view(request) self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertNotIn('Location', response, msg='Location should only be included if there is a "url" field on the serializer') self.assertEqual(self.post.photo_set.count(), 1) self.assertEqual(self.post.photo_set.all()[0].description, 'A test photo') class TestOptionalRelationHyperlinkedView(TestCase): urls = 'rest_framework.tests.hyperlinkedserializers' def setUp(self): """ Create 1 OptionalRelationModel instances. """ OptionalRelationModel().save() self.objects = OptionalRelationModel.objects self.detail_view = OptionalRelationDetail.as_view() self.data = {"url": "http://testserver/optionalrelation/1/", "other": None} def test_get_detail_view(self): """ GET requests to RetrieveAPIView with optional relations should return None for non existing relations. """ request = factory.get('/optionalrelationmodel-detail/1') response = self.detail_view(request, pk=1) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data, self.data) def test_put_detail_view(self): """ PUT requests to RetrieveUpdateDestroyAPIView with optional relations should accept None for non existing relations. """ response = self.client.put('/optionalrelation/1/', data=json.dumps(self.data), content_type='application/json') self.assertEqual(response.status_code, status.HTTP_200_OK)
apache-2.0
5,860,538,968,587,696,000
34.954373
167
0.662119
false
bisphon/pontiac
settings.py
1
5654
import multiprocessing from six.moves import queue DEBUG = True LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'formatters': { 'simple': { 'format': '%(levelname)s %(message)s' }, 'standard': { 'format': '[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s', # 'datefmt': '%Y-%m-%d %H:%M:%S.%f %z' }, 'verbose': { 'format': '%(levelname)s %(asctime)s %(module)s(%(name)s:%(lineno)s) P %(process)d (%(processName)s) T %(thread)d (%(threadName)s) %(message)s' }, 'email': { 'format': 'Timestamp: %(asctime)s\nModule: %(module)s\nLine: %(lineno)d\nMessage: %(message)s', }, }, 'filters': { 'require_debug_true': { '()': 'log_utils.RequireDebugTrue', }, 'require_debug_false': { '()': 'log_utils.RequireDebugFalse' }, }, 'handlers': { 'null': { 'level': 'DEBUG', 'class': 'logging.NullHandler', }, 'stderr': { 'level': 'DEBUG', 'class': 'logging.StreamHandler', 'formatter': 'simple', # 'filters': ['require_debug_true'], }, 'file_watched': { 'level': 'DEBUG', 'class': 'logging.handlers.WatchedFileHandler', 'filename': './logs/pontiac.log', 'formatter': 'verbose', }, 'file_rotating': { 'level': 'DEBUG', 'class': 'logging.handlers.RotatingFileHandler', 'filename': './logs/pontiac.log', 'maxBytes': 1024 * 1024, 'backupCount': 5, 'formatter': 'verbose', }, 'socket_tcp': { 'level': 'DEBUG', 'class': 'logging.handlers.SocketHandler', 'host': 'localhost', 'port': '12345', 'formatter': 'standard', 'filters': ['require_debug_false'], }, 'syslog': { 'level': 'DEBUG', 'class': 'logging.handlers.SysLogHandler', 'address': '/dev/log', 'facility': 'LOG_USER', 'formatter': 'standard', 'filters': ['require_debug_false'], }, 'smtp': { 'level': 'DEBUG', 'class': 'logging.handlers.SMTPHandler', 'mailhost': '(localhost, 25)', 'fromaddr': '[email protected]', 'toaddrs': ['[email protected]'], 'subject': 'Pontiac Message', 'credentials': '(username, password)', 'formatter': 'email', 'filters': ['require_debug_false'], }, 'http': { 'level': 'DEBUG', 'class': 'logging.handlers.HTTPHandler', 'host': 'localhost', 'url': '/log', 'method': 'GET', 'filters': ['require_debug_false'], }, # 'queue': { # only available on python 3.2+ # 'level': 'DEBUG', # 'class': 'logging.handlers.QueueHandler', # 'filters': ['require_debug_false'], # }, 'logutils_queue': { 'level': 'DEBUG', 'class': 'logutils.queue.QueueHandler', 'queue': queue.Queue() }, # 'logutils_redis': { # 'level': 'DEBUG', # 'class': 'logutils.redis.RedisQueueHandler', # 'key': 'pontiac.logging', # }, 'redis': { 'level': 'DEBUG', 'class': 'log_utils.RedisLogHandler', 'host': 'localhost', 'port': 6379, 'log_key': 'pontiac.logging', }, 'rlog_redis': { 'level': 'DEBUG', 'class': 'rlog.RedisHandler', 'host': 'localhost', 'password': 'password', 'port': 6379, 'channel': 'pontiac_logs' }, 'logstash': { 'level': 'DEBUG', 'class': 'logstash.LogstashHandler', 'host': 'localhost', 'port': 5959, 'version': 1, 'message_type': 'logstash', 'fqdn': False, 'tags': None, }, }, 'loggers': { '': { 'handlers': ['stderr'], 'level': 'DEBUG', 'propagate': True }, 'webservice': { 'handlers': ['stderr', 'file_watched'], 'level': 'DEBUG', 'propagate': False }, 'notifier': { 'handlers': ['stderr', 'file_watched'], 'level': 'DEBUG', 'propagate': False, }, }, 'root': { 'handlers': ['stderr', 'file_watched'], 'level': 'NOTSET', } } HTTP_SOCKET = { 'host': '0.0.0.0', 'port': 1234 } SCHEMA = { 'NOTIFICATION': 'schemas/notification.schema.json', } QUEUE_MAX_SIZE = 1000000 REDIS = { 'host': 'localhost', 'port': 6379, 'password': '', # empty string or None disables 'db': 0, 'max_size': 0, # 
0 disables 'expires': 300 # in seconds } try: CPU_COUNT = multiprocessing.cpu_count() except NotImplementedError: CPU_COUNT = 1 THREAD_COUNT = { 'WEBSERVICE': 1, 'NOTIFICATION': CPU_COUNT * 2, } FCM = { #'proxy': 'http://localhost:8000', 'api_key': 'AIzaSyDKsu9nrr9YRVzwNPw7XamW1x6zoYkIjBo', 'proto': 'xmpp', # low_priority # delay_while_idle # time_to_live # restricted_package_name # dry_run } APNS = { #'proxy': 'http://localhost:8000', 'cert': 'certs/cert.pem', 'key': 'certs/key.pem', 'dist': False, }
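# Editor's illustration (not part of the original settings module): how a config dict
# like LOGGING is typically applied. The full dict above references optional packages
# (logutils, rlog, logstash, a custom log_utils module), so this sketch applies only a
# trimmed subset that needs nothing beyond the standard library.
import logging
import logging.config

MINIMAL_LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'simple': {'format': '%(levelname)s %(message)s'},
    },
    'handlers': {
        'stderr': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'simple',
        },
    },
    'root': {'handlers': ['stderr'], 'level': 'DEBUG'},
}

logging.config.dictConfig(MINIMAL_LOGGING)
logging.getLogger('notifier').debug('logging configured')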
mit
-2,026,968,043,772,551,200
26.990099
155
0.454192
false
sontek/rethinkdb
test/interface/table_wait.py
13
5807
#!/usr/bin/env python # Copyright 2014 RethinkDB, all rights reserved. """The `interface.table_wait` test checks that waiting for a table returns when the table is available for writing.""" from __future__ import print_function import multiprocessing, os, sys, time, traceback, pprint startTime = time.time() sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, 'common'))) import driver, scenario_common, utils, vcoptparse r = utils.import_python_driver() op = vcoptparse.OptParser() scenario_common.prepare_option_parser_mode_flags(op) _, command_prefix, serve_options = scenario_common.parse_mode_flags(op.parse(sys.argv)) db = "test" tables = ["table1", "table2", "table3"] delete_table = "delete" def check_table_states(conn, ready): statuses = r.expr(tables).map(r.db(db).table(r.row).status()).run(conn) return all(map(lambda s: (s["status"]['ready_for_writes'] == ready), statuses)) def wait_for_table_states(conn, ready): while not check_table_states(conn, ready=ready): time.sleep(0.1) def create_tables(conn): r.expr(tables).for_each(r.db(db).table_create(r.row)).run(conn) r.db(db).table_create(delete_table).run(conn) # An extra table to be deleted during a wait r.db(db).table_list().for_each(r.db(db).table(r.row).insert(r.range(200).map(lambda i: {'id':i}))).run(conn) r.db(db).reconfigure(shards=2, replicas=2).run(conn) r.db(db).wait().run(conn) assert check_table_states(conn, ready=True), \ "Wait after reconfigure returned before tables were ready, statuses: %s" % str(statuses) def spawn_table_wait(port, tbl): def do_table_wait(port, tbl, done_event): conn = r.connect("localhost", port) try: if tbl is None: r.db(db).wait().run(conn) else: res = r.db(db).table(tbl).wait().run(conn) assert res["ready"] == 1 finally: done_event.set() def do_post_write(port, tbl, start_event): conn = r.connect("localhost", port) start_event.wait() if tbl is None: r.db(db).table_list().for_each(r.db(db).table(r.row).insert({})).run(conn) else: r.db(db).table(tbl).insert({}).run(conn) sync_event = multiprocessing.Event() wait_proc = multiprocessing.Process(target=do_table_wait, args=(port, tbl, sync_event)) write_proc = multiprocessing.Process(target=do_post_write, args=(port, tbl, sync_event)) wait_proc.start() write_proc.start() return write_proc print("Spinning up two servers (%.2fs)" % (time.time() - startTime)) with driver.Cluster(initial_servers=['a', 'b'], output_folder='.', command_prefix=command_prefix, extra_options=serve_options) as cluster: cluster.check() proc1 = cluster[0] proc2 = cluster[1] files2 = proc2.files print("Establishing ReQL connection (%.2fs)" % (time.time() - startTime)) conn = r.connect("localhost", proc1.driver_port) if db not in r.db_list().run(conn): print("Creating db (%.2fs)" % (time.time() - startTime)) r.db_create(db).run(conn) print("Testing simple table (several times) (%.2fs)" % (time.time() - startTime)) for i in xrange(5): res = r.db(db).table_create("simple").run(conn) assert res["tables_created"] == 1 r.db(db).table("simple").reconfigure(shards=12, replicas=1).run(conn) r.db(db).table("simple").wait().run(conn) count = r.db(db).table("simple").count().run(conn) assert count == 0 res = r.db(db).table_drop("simple").run(conn) assert res["tables_dropped"] == 1 print("Creating %d tables (%.2fs)" % (len(tables) + 1, time.time() - startTime)) create_tables(conn) print("Killing second server (%.2fs)" % (time.time() - startTime)) proc2.close() wait_for_table_states(conn, ready=False) print("Spawning waiters (%.2fs)" % (time.time() - startTime)) 
waiter_procs = [ spawn_table_wait(proc1.driver_port, tables[0]), spawn_table_wait(proc1.driver_port, tables[1]), spawn_table_wait(proc1.driver_port, None) # Wait on all tables ] print("Waiting on a deleted table (%.2fs)" % (time.time() - startTime)) def wait_for_deleted_table(port, db, table): c = r.connect("localhost", port) try: r.db(db).table(table).wait().run(c) raise RuntimeError("`table_wait` did not error when waiting on a deleted table.") except r.ReqlRuntimeError as ex: assert ex.message == "Table `%s.%s` does not exist." % (db, table), \ "Unexpected error when waiting for a deleted table: %s" % ex.message error_wait_proc = multiprocessing.Process(target=wait_for_deleted_table, args=(proc1.driver_port, db, delete_table)) error_wait_proc.start() r.db(db).table_drop(delete_table).run(conn) error_wait_proc.join() print("Waiting 15 seconds (%.2fs)" % (time.time() - startTime)) # Wait some time to make sure the wait doesn't return early waiter_procs[0].join(15) assert all(map(lambda w: w.is_alive(), waiter_procs)), "Wait returned while a server was still down." print("Restarting second server (%.2fs)" % (time.time() - startTime)) proc2 = driver.Process(cluster, files2, console_output=True, command_prefix=command_prefix, extra_options=serve_options) proc2.wait_until_started_up() print("Waiting for table readiness (%.2fs)" % (time.time() - startTime)) map(lambda w: w.join(), waiter_procs) assert check_table_states(conn, ready=True), "`wait` returned, but not all tables are ready" print("Cleaning up (%.2fs)" % (time.time() - startTime)) print("Done. (%.2fs)" % (time.time() - startTime))
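# Editor's illustration (not part of the original test): the core pattern this test
# exercises, reduced to a few lines. It assumes a RethinkDB server on localhost:28015
# and the same Python driver version the test tree loads via utils.import_python_driver().
import rethinkdb as r

conn = r.connect("localhost", 28015)
r.db("test").table_create("example").run(conn)
r.db("test").table("example").wait().run(conn)        # returns once writes are possible
status = r.db("test").table("example").status().run(conn)
print(status["status"]["ready_for_writes"])           # True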
agpl-3.0
7,869,045,691,783,210,000
40.184397
138
0.632685
false
Impactstory/sherlockoa
endpoint.py
1
25387
import datetime import json import os from random import random from time import sleep from time import time import requests import shortuuid from sickle import Sickle, oaiexceptions from sickle.iterator import OAIItemIterator from sickle.models import ResumptionToken from sickle.oaiexceptions import NoRecordsMatch, BadArgument from sickle.response import OAIResponse from sqlalchemy import or_ import pmh_record from app import db from app import logger from http_cache import request_ua_headers from repository import Repository from util import elapsed from util import safe_commit def lookup_endpoint_by_pmh_url(pmh_url_query=None): endpoints = Endpoint.query.filter(Endpoint.pmh_url.ilike(u"%{}%".format(pmh_url_query))).all() return endpoints class Endpoint(db.Model): id = db.Column(db.Text, primary_key=True) id_old = db.Column(db.Text) repo_unique_id = db.Column(db.Text, db.ForeignKey(Repository.id)) pmh_url = db.Column(db.Text) pmh_set = db.Column(db.Text) last_harvest_started = db.Column(db.DateTime) last_harvest_finished = db.Column(db.DateTime) most_recent_year_harvested = db.Column(db.DateTime) earliest_timestamp = db.Column(db.DateTime) email = db.Column(db.Text) # to help us figure out what kind of repo it is error = db.Column(db.Text) repo_request_id = db.Column(db.Text) harvest_identify_response = db.Column(db.Text) harvest_test_recent_dates = db.Column(db.Text) sample_pmh_record = db.Column(db.Text) contacted = db.Column(db.DateTime) contacted_text = db.Column(db.Text) policy_promises_no_submitted = db.Column(db.Boolean) policy_promises_no_submitted_evidence = db.Column(db.Text) ready_to_run = db.Column(db.Boolean) metadata_prefix = db.Column(db.Text) retry_interval = db.Column(db.Interval) retry_at = db.Column(db.DateTime) meta = db.relationship( 'Repository', lazy='subquery', cascade="all", backref=db.backref("endpoints", lazy="subquery") ) def __init__(self, **kwargs): super(self.__class__, self).__init__(**kwargs) if not self.id: self.id = shortuuid.uuid()[0:20].lower() if not self.metadata_prefix: self.metadata_prefix = 'oai_dc' @property def repo(self): return self.meta def run_diagnostics(self): response = test_harvest_url(self.pmh_url) self.harvest_identify_response = response["harvest_identify_response"] # self.harvest_test_initial_dates = response["harvest_test_initial_dates"] self.harvest_test_recent_dates = response["harvest_test_recent_dates"] self.sample_pmh_record = response["sample_pmh_record"] def harvest(self): if not self.harvest_identify_response or not self.harvest_test_recent_dates: self.set_identify_and_initial_query() today = datetime.datetime.utcnow().date() tomorrow = today + datetime.timedelta(days=1) yesterday = today - datetime.timedelta(days=1) first = (self.most_recent_year_harvested or datetime.datetime(2000, 1, 1)).date() first = min(first, yesterday) if self.id_old in ['citeseerx.ist.psu.edu/oai2', 'europepmc.org/oai.cgi']: first_plus_delta = first + datetime.timedelta(days=1) elif self.id_old in ['www.ncbi.nlm.nih.gov/pmc/oai/oai.cgi', 'www.ncbi.nlm.nih.gov/pmc/oai/oai.cgi2']: first_plus_delta = first + datetime.timedelta(days=7) elif self.id == '4bd6f8f5107c0df6f48': first_plus_delta = first + datetime.timedelta(days=1) elif self.id == '0d27b133730393e00e1': first_plus_delta = first + datetime.timedelta(days=1) elif self.id == 'jmpfmmfru5pzhy4lbrdm': first_plus_delta = first + datetime.timedelta(days=1) elif self.id_old in ['export.arxiv.org/oai2']: first_plus_delta = first + datetime.timedelta(days=1) elif 'osti.gov/oai' in self.pmh_url: 
first_plus_delta = first + datetime.timedelta(days=1) elif 'share.osf.io' in self.pmh_url: first_plus_delta = first + datetime.timedelta(days=1) else: first_plus_delta = first + datetime.timedelta(days=7) last = min(first_plus_delta, tomorrow) # now do the harvesting self.call_pmh_endpoint(first=first, last=last) # if success, update so we start at next point next time base_retry_interval = datetime.timedelta(minutes=5) if self.error: logger.info(u"error so not saving finished info: {}".format(self.error)) retry_interval = self.retry_interval or base_retry_interval self.retry_at = datetime.datetime.utcnow() + retry_interval self.retry_interval = retry_interval * 2 self.last_harvest_started = None else: logger.info(u"success! saving info") self.last_harvest_finished = datetime.datetime.utcnow().isoformat() self.most_recent_year_harvested = min(yesterday, last) self.last_harvest_started = None self.retry_at = None self.retry_interval = base_retry_interval def get_pmh_record(self, record_id): my_sickle = _get_my_sickle(self.pmh_url) pmh_input_record = my_sickle.GetRecord(identifier=record_id, metadataPrefix=self.metadata_prefix) my_pmh_record = pmh_record.PmhRecord() my_pmh_record.populate(self.id, pmh_input_record, metadata_prefix=self.metadata_prefix) my_pmh_record.repo_id = self.id_old # delete once endpoint_id is populated return my_pmh_record def set_identify_and_initial_query(self): if not self.pmh_url: self.harvest_identify_response = u"error, no pmh_url given" return my_sickle = None try: # set timeout quick... if it can't do this quickly, won't be good for harvesting logger.debug(u"getting my_sickle for {}".format(self)) my_sickle = _get_my_sickle(self.pmh_url, timeout=10) my_sickle.Identify() self.harvest_identify_response = "SUCCESS!" except Exception as e: logger.exception(u"in set_identify_and_initial_query") self.error = u"error in calling identify: {} {}".format( e.__class__.__name__, unicode(e.message).encode("utf-8")) if my_sickle: self.error += u" calling {}".format(my_sickle.get_http_response_url()) self.harvest_identify_response = self.error self.sample_pmh_record = None try: sample_pmh_record = self.get_recent_pmh_record() if sample_pmh_record: self.harvest_test_recent_dates = "SUCCESS!" 
self.sample_pmh_record = json.dumps(sample_pmh_record.metadata) else: self.harvest_test_recent_dates = "error, no pmh_input_records returned" except Exception as e: self.error = u"error in get_recent_pmh_record: {} {}".format( e.__class__.__name__, unicode(e.message).encode("utf-8")) self.harvest_test_recent_dates = self.error def get_recent_pmh_record(self): last = datetime.datetime.utcnow() first = last - datetime.timedelta(days=30) args = {'metadataPrefix': self.metadata_prefix} my_sickle = _get_my_sickle(self.pmh_url) logger.info(u"connected to sickle with {}".format(self.pmh_url)) args['from'] = first.isoformat()[0:10] args["until"] = last.isoformat()[0:10] if self.pmh_set: args["set"] = self.pmh_set logger.info(u"calling ListIdentifiers with {} {}".format(self.pmh_url, args)) try: pmh_identifiers = my_sickle.ListIdentifiers(ignore_deleted=True, **args) pmh_identifier = self.safe_get_next_record(pmh_identifiers) if pmh_identifier: return my_sickle.GetRecord(identifier=pmh_identifier.identifier, metadataPrefix=self.metadata_prefix) else: return None except NoRecordsMatch: logger.info(u"no records with {} {}".format(self.pmh_url, args)) return None def get_pmh_input_record(self, first, last, use_date_default_format=True): args = {'metadataPrefix': self.metadata_prefix} pmh_records = [] self.error = None my_sickle = _get_my_sickle(self.pmh_url) logger.info(u"connected to sickle with {}".format(self.pmh_url)) args['from'] = first.isoformat()[0:10] if not use_date_default_format: args['from'] += "T00:00:00Z" if last: args["until"] = last.isoformat()[0:10] if not use_date_default_format: args['until'] += "T00:00:00Z" if self.pmh_set: args["set"] = self.pmh_set logger.info(u"calling ListRecords with {} {}".format(self.pmh_url, args)) try: try: pmh_records = my_sickle.ListRecords(ignore_deleted=True, **args) pmh_input_record = self.safe_get_next_record(pmh_records) except NoRecordsMatch: logger.info(u"no records with {} {}".format(self.pmh_url, args)) pmh_input_record = None except BadArgument as e: if use_date_default_format: return self.get_pmh_input_record(first, last, use_date_default_format=False) else: raise e except Exception as e: logger.exception(u"error with {} {}".format(self.pmh_url, args)) pmh_input_record = None self.error = u"error in get_pmh_input_record: {} {}".format( e.__class__.__name__, unicode(e.message).encode("utf-8")) if my_sickle: self.error += u" calling {}".format(my_sickle.get_http_response_url()) return pmh_input_record, pmh_records, self.error def call_pmh_endpoint(self, first=None, last=None, chunk_size=50, scrape=False): start_time = time() records_to_save = [] num_records_updated = 0 loop_counter = 0 self.error = None (pmh_input_record, pmh_records, error) = self.get_pmh_input_record(first, last) if error: self.error = u"error in get_pmh_input_record: {}".format(error) return while pmh_input_record: loop_counter += 1 # create the record my_pmh_record = pmh_record.PmhRecord() # set its vars my_pmh_record.repo_id = self.id_old # delete once endpoint_ids are all populated my_pmh_record.rand = random() my_pmh_record.populate(self.id, pmh_input_record, metadata_prefix=self.metadata_prefix) if is_complete(my_pmh_record): my_pages = my_pmh_record.mint_pages() my_pmh_record.pages = my_pages if scrape: for my_page in my_pages: my_page.scrape_if_matches_pub() records_to_save.append(my_pmh_record) my_pmh_record.delete_old_record() db.session.merge(my_pmh_record) else: logger.info(u"pmh record is not complete") # print my_pmh_record pass if len(records_to_save) >= 
chunk_size: num_records_updated += len(records_to_save) safe_commit(db) records_to_save = [] if loop_counter % 100 == 0: logger.info(u"iterated through 100 more items, loop_counter={} for {}".format(loop_counter, self.id)) pmh_input_record = self.safe_get_next_record(pmh_records) # make sure to get the last ones if records_to_save: num_records_updated += len(records_to_save) last_record = records_to_save[-1] logger.info(u"saving {} last ones, last record saved: {} for {}, loop_counter={}".format( len(records_to_save), last_record.id, self.id, loop_counter)) safe_commit(db) else: logger.info(u"finished loop, but no records to save, loop_counter={}".format(loop_counter)) logger.info(u"updated {} PMH records for endpoint_id={}, took {} seconds".format( num_records_updated, self.id, elapsed(start_time, 2))) def safe_get_next_record(self, current_record, tries=3): self.error = None try: next_record = current_record.next() except (requests.exceptions.HTTPError, requests.exceptions.SSLError) as e: if tries > 0: logger.info(u"requests exception! trying again {}".format(e)) return self.safe_get_next_record(current_record, tries-1) else: logger.info(u"requests exception! skipping {}".format(e)) self.error = u"requests error in safe_get_next_record; try again" return None except (KeyboardInterrupt, SystemExit): # done return None except StopIteration: logger.info(u"stop iteration! stopping") return None except Exception as e: logger.exception(u"misc exception!: {} skipping".format(e)) self.error = u"error in safe_get_next_record" return None return next_record def get_num_pmh_records(self): from pmh_record import PmhRecord num = db.session.query(PmhRecord.id).filter(PmhRecord.endpoint_id == self.id).count() return num def get_num_pages(self): from page import PageNew num = db.session.query(PageNew.id).filter(PageNew.endpoint_id == self.id).count() return num def get_num_open_with_dois(self): from page import PageNew num = db.session.query(PageNew.id).\ distinct(PageNew.normalized_title).\ filter(PageNew.endpoint_id == self.id).\ filter(PageNew.num_pub_matches.isnot(None), PageNew.num_pub_matches >= 1).\ filter(or_(PageNew.scrape_pdf_url.isnot(None), PageNew.scrape_metadata_url.isnot(None))).\ count() return num def get_num_title_matching_dois(self): from page import PageNew num = db.session.query(PageNew.id).\ distinct(PageNew.normalized_title).\ filter(PageNew.endpoint_id == self.id).\ filter(PageNew.num_pub_matches.isnot(None), PageNew.num_pub_matches >= 1).\ count() return num def get_open_pages(self, limit=10): from page import PageNew pages = db.session.query(PageNew).\ distinct(PageNew.normalized_title).\ filter(PageNew.endpoint_id == self.id).\ filter(PageNew.num_pub_matches.isnot(None), PageNew.num_pub_matches >= 1).\ filter(or_(PageNew.scrape_pdf_url.isnot(None), PageNew.scrape_metadata_url.isnot(None))).\ limit(limit).all() return [(p.id, p.url, p.normalized_title, p.pub.url, p.pub.unpaywall_api_url, p.scrape_version) for p in pages] def get_closed_pages(self, limit=10): from page import PageNew pages = db.session.query(PageNew).\ distinct(PageNew.normalized_title).\ filter(PageNew.endpoint_id == self.id).\ filter(PageNew.num_pub_matches.isnot(None), PageNew.num_pub_matches >= 1).\ filter(PageNew.scrape_updated.isnot(None), PageNew.scrape_pdf_url.is_(None), PageNew.scrape_metadata_url .is_(None)).\ limit(limit).all() return [(p.id, p.url, p.normalized_title, p.pub.url, p.pub.unpaywall_api_url, p.scrape_updated) for p in pages] def get_num_pages_still_processing(self): from page import 
PageNew num = db.session.query(PageNew.id).filter(PageNew.endpoint_id == self.id, PageNew.num_pub_matches.is_(None)).count() return num def __repr__(self): return u"<Endpoint ( {} ) {}>".format(self.id, self.pmh_url) def to_dict(self): response = { "_endpoint_id": self.id, "_pmh_url": self.pmh_url, "num_pmh_records": self.get_num_pmh_records(), "num_pages": self.get_num_pages(), "num_open_with_dois": self.get_num_open_with_dois(), "num_title_matching_dois": self.get_num_title_matching_dois(), "num_pages_still_processing": self.get_num_pages_still_processing(), "pages_open": u"{}/debug/repo/{}/examples/open".format("http://localhost:5000", self.repo_unique_id), # self.get_open_pages(), "pages_closed": u"{}/debug/repo/{}/examples/closed".format("http://localhost:5000", self.repo_unique_id), # self.get_closed_pages(), "metadata": {} } if self.meta: response.update({ "metadata": { "home_page": self.repo.home_page, "institution_name": self.repo.institution_name, "repository_name": self.repo.repository_name } }) return response def to_dict_status(self): response = { "results": {}, "metadata": {} } for field in ["id", "repo_unique_id", "pmh_url", "email"]: response[field] = getattr(self, field) for field in ["harvest_identify_response", "harvest_test_recent_dates", "sample_pmh_record"]: response["results"][field] = getattr(self, field) if self.meta: for field in ["home_page", "institution_name", "repository_name"]: response["metadata"][field] = getattr(self.meta, field) return response def to_dict_repo_pulse(self): return { "metadata": { "endpoint_id": self.id, "repository_name": self.repo.repository_name, "institution_name": self.repo.institution_name, "pmh_url": self.pmh_url }, "status": { "check0_identify_status": self.harvest_identify_response, "check1_query_status": self.harvest_test_recent_dates, "num_pmh_records": None, "last_harvest": self.most_recent_year_harvested, "num_pmh_records_matching_dois": None, "num_pmh_records_matching_dois_with_fulltext": None }, "by_version_distinct_pmh_records_matching_dois": {} } def test_harvest_url(pmh_url): response = {} temp_endpoint = Endpoint() temp_endpoint.pmh_url = pmh_url temp_endpoint.set_identify_and_initial_query() response["harvest_identify_response"] = temp_endpoint.harvest_identify_response response["sample_pmh_record"] = temp_endpoint.sample_pmh_record response["harvest_test_recent_dates"] = temp_endpoint.harvest_test_recent_dates return response def is_complete(record): if not record.pmh_id: return False if not record.title: return False if not record.urls: return False if record.oa == "0": logger.info(u"record {} is closed access. 
skipping.".format(record["id"])) return False return True class MyOAIItemIterator(OAIItemIterator): def _get_resumption_token(self): """Extract and store the resumptionToken from the last response.""" resumption_token_element = self.oai_response.xml.find( './/' + self.sickle.oai_namespace + 'resumptionToken') if resumption_token_element is None: return None token = resumption_token_element.text cursor = resumption_token_element.attrib.get('cursor', None) complete_list_size = resumption_token_element.attrib.get( 'completeListSize', None) expiration_date = resumption_token_element.attrib.get( 'expirationDate', None) resumption_token = ResumptionToken( token=token, cursor=cursor, complete_list_size=complete_list_size, expiration_date=expiration_date ) return resumption_token def get_complete_list_size(self): """Extract and store the resumptionToken from the last response.""" resumption_token_element = self.oai_response.xml.find( './/' + self.sickle.oai_namespace + 'resumptionToken') if resumption_token_element is None: return None complete_list_size = resumption_token_element.attrib.get( 'completeListSize', None) if complete_list_size: return int(complete_list_size) return complete_list_size class OSTIOAIItemIterator(MyOAIItemIterator): def _next_response(self): """Get the next response from the OAI server. Copy-pasted from OAIItemIterator._next_response but adds metadataPrefix to params. """ params = self.params if self.resumption_token: params = { 'resumptionToken': self.resumption_token.token, 'verb': self.verb, 'metadataPrefix': params.get('metadataPrefix') } self.oai_response = self.sickle.harvest(**params) error = self.oai_response.xml.find( './/' + self.sickle.oai_namespace + 'error') if error is not None: code = error.attrib.get('code', 'UNKNOWN') description = error.text or '' try: raise getattr( oaiexceptions, code[0].upper() + code[1:])(description) except AttributeError: raise oaiexceptions.OAIError(description) self.resumption_token = self._get_resumption_token() self._items = self.oai_response.xml.iterfind( './/' + self.sickle.oai_namespace + self.element) def _get_my_sickle(repo_pmh_url, timeout=120): if not repo_pmh_url: return None proxy_url = None if any(fragment in repo_pmh_url for fragment in ["citeseerx"]): proxy_url = os.getenv("STATIC_IP_PROXY") elif any(fragment in repo_pmh_url for fragment in ["pure.coventry.ac.uk"]): proxy_url = os.getenv("VERY_STATIC_IP_PROXY") if proxy_url: proxies = {"https": proxy_url, "http": proxy_url} else: proxies = {} iterator = OSTIOAIItemIterator if 'osti.gov/oai' in repo_pmh_url else MyOAIItemIterator sickle = EuropePMCSickle if 'europepmc.org' in repo_pmh_url else MySickle my_sickle = sickle(repo_pmh_url, proxies=proxies, timeout=timeout, iterator=iterator) return my_sickle # subclass so we can customize the number of retry seconds class MySickle(Sickle): RETRY_SECONDS = 120 def __init__(self, *args, **kwargs): self.http_response_url = None super(MySickle, self).__init__(*args, **kwargs) def get_http_response_url(self): if hasattr(self, "http_response_url"): return self.http_response_url return None def _massage_http_response(self, http_response): return http_response def harvest(self, **kwargs): # pragma: no cover """Make HTTP requests to the OAI server. :param kwargs: OAI HTTP parameters. 
:rtype: :class:`sickle.OAIResponse` """ start_time = time() verify = not self.endpoint.startswith(u'https://rcin.org.pl') for _ in range(self.max_retries): if self.http_method == 'GET': payload_str = "&".join("%s=%s" % (k, v) for k, v in kwargs.items()) url_without_encoding = u"{}?{}".format(self.endpoint, payload_str) http_response = requests.get(url_without_encoding, headers=request_ua_headers(), verify=verify, **self.request_args) self.http_response_url = http_response.url else: http_response = requests.post(self.endpoint, headers=request_ua_headers(), data=kwargs, **self.request_args) self.http_response_url = http_response.url if http_response.status_code == 503: retry_after = self.RETRY_SECONDS logger.info("HTTP 503! Retrying after %d seconds..." % retry_after) sleep(retry_after) else: logger.info("took {} seconds to call pmh url: {}".format(elapsed(start_time), http_response.url)) http_response = self._massage_http_response(http_response) http_response.raise_for_status() if self.encoding: http_response.encoding = self.encoding return OAIResponse(http_response, params=kwargs) class EuropePMCSickle(MySickle): def _massage_http_response(self, http_response): # server returns a 404 with NoRecordsMatch responses # treat this as a successful http request and handle the OAI-PMH error further up the stack if http_response.status_code == 404: http_response.status_code = 200 return http_response
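# Editor's illustration (not part of the original module): the bare-bones sickle calls
# that Endpoint.get_pmh_input_record() wraps with retries, proxies and error handling.
# The endpoint URL below is a placeholder; 'from'/'until' use the same YYYY-MM-DD
# slices as the code above.
from sickle import Sickle

sickle = Sickle("https://example.org/oai")            # hypothetical OAI-PMH endpoint
records = sickle.ListRecords(
    metadataPrefix="oai_dc",
    ignore_deleted=True,
    **{"from": "2020-01-01", "until": "2020-01-08"}
)
first_record = next(iter(records))
print(first_record.header.identifier)
print(first_record.metadata.get("title"))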
mit
-3,842,098,944,671,516,000
39.749599
145
0.596486
false
xavfernandez/pip
tests/functional/test_search.py
2
5507
import logging import pretend import pytest from pip._internal.cli.status_codes import NO_MATCHES_FOUND, SUCCESS from pip._internal.commands import create_command from pip._internal.commands.search import ( highest_version, print_results, transform_hits, ) from tests.lib import pyversion if pyversion >= '3': VERBOSE_FALSE = False else: VERBOSE_FALSE = 0 def test_version_compare(): """ Test version comparison. """ assert highest_version(['1.0', '2.0', '0.1']) == '2.0' assert highest_version(['1.0a1', '1.0']) == '1.0' def test_pypi_xml_transformation(): """ Test transformation of data structures (PyPI xmlrpc to custom list). """ pypi_hits = [ { 'name': 'foo', 'summary': 'foo summary', 'version': '1.0', }, { 'name': 'foo', 'summary': 'foo summary v2', 'version': '2.0', }, { '_pypi_ordering': 50, 'name': 'bar', 'summary': 'bar summary', 'version': '1.0', }, ] expected = [ { 'versions': ['1.0', '2.0'], 'name': 'foo', 'summary': 'foo summary v2', }, { 'versions': ['1.0'], 'name': 'bar', 'summary': 'bar summary', }, ] assert transform_hits(pypi_hits) == expected @pytest.mark.network def test_basic_search(script): """ End to end test of search command. """ output = script.pip('search', 'pip') assert ( 'The PyPA recommended tool for installing ' 'Python packages.' in output.stdout ) @pytest.mark.network @pytest.mark.skip( reason=("Warehouse search behavior is different and no longer returns " "multiple results. See " "https://github.com/pypa/warehouse/issues/3717 for more " "information."), ) def test_multiple_search(script): """ Test searching for multiple packages at once. """ output = script.pip('search', 'pip', 'INITools') assert ( 'The PyPA recommended tool for installing ' 'Python packages.' in output.stdout ) assert 'Tools for parsing and using INI-style files' in output.stdout def test_search_missing_argument(script): """ Test missing required argument for search """ result = script.pip('search', expect_error=True) assert 'ERROR: Missing required argument (search query).' 
in result.stderr @pytest.mark.network def test_run_method_should_return_success_when_find_packages(): """ Test SearchCommand.run for found package """ command = create_command('search') cmdline = "--index=https://pypi.org/pypi pip" with command.main_context(): options, args = command.parse_args(cmdline.split()) status = command.run(options, args) assert status == SUCCESS @pytest.mark.network def test_run_method_should_return_no_matches_found_when_does_not_find_pkgs(): """ Test SearchCommand.run for no matches """ command = create_command('search') cmdline = "--index=https://pypi.org/pypi nonexistentpackage" with command.main_context(): options, args = command.parse_args(cmdline.split()) status = command.run(options, args) assert status == NO_MATCHES_FOUND @pytest.mark.network def test_search_should_exit_status_code_zero_when_find_packages(script): """ Test search exit status code for package found """ result = script.pip('search', 'pip') assert result.returncode == SUCCESS @pytest.mark.network def test_search_exit_status_code_when_finds_no_package(script): """ Test search exit status code for no matches """ result = script.pip('search', 'nonexistentpackage', expect_error=True) assert result.returncode == NO_MATCHES_FOUND, result.returncode def test_latest_prerelease_install_message(caplog, monkeypatch): """ Test documentation for installing pre-release packages is displayed """ hits = [ { 'name': 'ni', 'summary': 'For knights who say Ni!', 'versions': ['1.0.0', '1.0.1a'] } ] installed_package = pretend.stub(project_name="ni") monkeypatch.setattr("pip._vendor.pkg_resources.working_set", [installed_package]) dist = pretend.stub(version="1.0.0") get_dist = pretend.call_recorder(lambda x: dist) monkeypatch.setattr("pip._vendor.pkg_resources.get_distribution", get_dist) with caplog.at_level(logging.INFO): print_results(hits) message = caplog.records[-1].getMessage() assert 'pre-release; install with "pip install --pre"' in message assert get_dist.calls == [pretend.call('ni')] def test_search_print_results_should_contain_latest_versions(caplog): """ Test that printed search results contain the latest package versions """ hits = [ { 'name': 'testlib1', 'summary': 'Test library 1.', 'versions': ['1.0.5', '1.0.3'] }, { 'name': 'testlib2', 'summary': 'Test library 1.', 'versions': ['2.0.1', '2.0.3'] } ] with caplog.at_level(logging.INFO): print_results(hits) log_messages = sorted([r.getMessage() for r in caplog.records]) assert log_messages[0].startswith('testlib1 (1.0.5)') assert log_messages[1].startswith('testlib2 (2.0.3)')
mit
6,978,525,318,228,855,000
26.262376
79
0.595606
false
kbrebanov/ansible
lib/ansible/modules/monitoring/honeybadger_deployment.py
49
3829
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright 2014 Benjamin Curtis <[email protected]> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: honeybadger_deployment author: "Benjamin Curtis (@stympy)" version_added: "2.2" short_description: Notify Honeybadger.io about app deployments description: - Notify Honeybadger.io about app deployments (see http://docs.honeybadger.io/article/188-deployment-tracking) options: token: description: - API token. required: true environment: description: - The environment name, typically 'production', 'staging', etc. required: true user: description: - The username of the person doing the deployment required: false default: None repo: description: - URL of the project repository required: false default: None revision: description: - A hash, number, tag, or other identifier showing what revision was deployed required: false default: None url: description: - Optional URL to submit the notification to. required: false default: "https://api.honeybadger.io/v1/deploys" validate_certs: description: - If C(no), SSL certificates for the target url will not be validated. This should only be used on personally controlled sites using self-signed certificates. required: false default: 'yes' choices: ['yes', 'no'] requirements: [] ''' EXAMPLES = ''' - honeybadger_deployment: token: AAAAAA environment: staging user: ansible revision: b6826b8 repo: '[email protected]:user/repo.git' ''' RETURN = '''# ''' import traceback from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.six.moves.urllib.parse import urlencode from ansible.module_utils._text import to_native from ansible.module_utils.urls import fetch_url # =========================================== # Module execution. # def main(): module = AnsibleModule( argument_spec=dict( token=dict(required=True, no_log=True), environment=dict(required=True), user=dict(required=False), repo=dict(required=False), revision=dict(required=False), url=dict(required=False, default='https://api.honeybadger.io/v1/deploys'), validate_certs=dict(default='yes', type='bool'), ), supports_check_mode=True ) params = {} if module.params["environment"]: params["deploy[environment]"] = module.params["environment"] if module.params["user"]: params["deploy[local_username]"] = module.params["user"] if module.params["repo"]: params["deploy[repository]"] = module.params["repo"] if module.params["revision"]: params["deploy[revision]"] = module.params["revision"] params["api_key"] = module.params["token"] url = module.params.get('url') # If we're in check mode, just exit pretending like we succeeded if module.check_mode: module.exit_json(changed=True) try: data = urlencode(params) response, info = fetch_url(module, url, data=data) except Exception as e: module.fail_json(msg='Unable to notify Honeybadger: %s' % to_native(e), exception=traceback.format_exc()) else: if info['status'] == 201: module.exit_json(changed=True) else: module.fail_json(msg="HTTP result code: %d connecting to %s" % (info['status'], url)) if __name__ == '__main__': main()
gpl-3.0
6,300,777,613,414,374,000
27.154412
113
0.640376
false
youdonghai/intellij-community
python/lib/Lib/posixfile.py
87
7843
"""Extended file operations available in POSIX. f = posixfile.open(filename, [mode, [bufsize]]) will create a new posixfile object f = posixfile.fileopen(fileobject) will create a posixfile object from a builtin file object f.file() will return the original builtin file object f.dup() will return a new file object based on a new filedescriptor f.dup2(fd) will return a new file object based on the given filedescriptor f.flags(mode) will turn on the associated flag (merge) mode can contain the following characters: (character representing a flag) a append only flag c close on exec flag n no delay flag s synchronization flag (modifiers) ! turn flags 'off' instead of default 'on' = copy flags 'as is' instead of default 'merge' ? return a string in which the characters represent the flags that are set note: - the '!' and '=' modifiers are mutually exclusive. - the '?' modifier will return the status of the flags after they have been changed by other characters in the mode string f.lock(mode [, len [, start [, whence]]]) will (un)lock a region mode can contain the following characters: (character representing type of lock) u unlock r read lock w write lock (modifiers) | wait until the lock can be granted ? return the first lock conflicting with the requested lock or 'None' if there is no conflict. The lock returned is in the format (mode, len, start, whence, pid) where mode is a character representing the type of lock ('r' or 'w') note: - the '?' modifier prevents a region from being locked; it is query only """ class _posixfile_: """File wrapper class that provides extra POSIX file routines.""" states = ['open', 'closed'] # # Internal routines # def __repr__(self): file = self._file_ return "<%s posixfile '%s', mode '%s' at %s>" % \ (self.states[file.closed], file.name, file.mode, \ hex(id(self))[2:]) # # Initialization routines # def open(self, name, mode='r', bufsize=-1): import __builtin__ return self.fileopen(__builtin__.open(name, mode, bufsize)) def fileopen(self, file): import types if repr(type(file)) != "<type 'file'>": raise TypeError, 'posixfile.fileopen() arg must be file object' self._file_ = file # Copy basic file methods for maybemethod in dir(file): if not maybemethod.startswith('_'): attr = getattr(file, maybemethod) if isinstance(attr, types.BuiltinMethodType): setattr(self, maybemethod, attr) return self # # New methods # def file(self): return self._file_ def dup(self): import posix if not hasattr(posix, 'fdopen'): raise AttributeError, 'dup() method unavailable' return posix.fdopen(posix.dup(self._file_.fileno()), self._file_.mode) def dup2(self, fd): import posix if not hasattr(posix, 'fdopen'): raise AttributeError, 'dup() method unavailable' posix.dup2(self._file_.fileno(), fd) return posix.fdopen(fd, self._file_.mode) def flags(self, *which): import fcntl, os if which: if len(which) > 1: raise TypeError, 'Too many arguments' which = which[0] else: which = '?' l_flags = 0 if 'n' in which: l_flags = l_flags | os.O_NDELAY if 'a' in which: l_flags = l_flags | os.O_APPEND if 's' in which: l_flags = l_flags | os.O_SYNC file = self._file_ if '=' not in which: cur_fl = fcntl.fcntl(file.fileno(), fcntl.F_GETFL, 0) if '!' in which: l_flags = cur_fl & ~ l_flags else: l_flags = cur_fl | l_flags l_flags = fcntl.fcntl(file.fileno(), fcntl.F_SETFL, l_flags) if 'c' in which: arg = ('!' not in which) # 0 is don't, 1 is do close on exec l_flags = fcntl.fcntl(file.fileno(), fcntl.F_SETFD, arg) if '?' 
in which: which = '' # Return current flags l_flags = fcntl.fcntl(file.fileno(), fcntl.F_GETFL, 0) if os.O_APPEND & l_flags: which = which + 'a' if fcntl.fcntl(file.fileno(), fcntl.F_GETFD, 0) & 1: which = which + 'c' if os.O_NDELAY & l_flags: which = which + 'n' if os.O_SYNC & l_flags: which = which + 's' return which def lock(self, how, *args): import struct, fcntl if 'w' in how: l_type = fcntl.F_WRLCK elif 'r' in how: l_type = fcntl.F_RDLCK elif 'u' in how: l_type = fcntl.F_UNLCK else: raise TypeError, 'no type of lock specified' if '|' in how: cmd = fcntl.F_SETLKW elif '?' in how: cmd = fcntl.F_GETLK else: cmd = fcntl.F_SETLK l_whence = 0 l_start = 0 l_len = 0 if len(args) == 1: l_len = args[0] elif len(args) == 2: l_len, l_start = args elif len(args) == 3: l_len, l_start, l_whence = args elif len(args) > 3: raise TypeError, 'too many arguments' # Hack by [email protected] to get locking to go on freebsd; # additions for AIX by [email protected] import sys, os if sys.platform in ('netbsd1', 'openbsd2', 'freebsd2', 'freebsd3', 'freebsd4', 'freebsd5', 'freebsd6', 'freebsd7', 'bsdos2', 'bsdos3', 'bsdos4'): flock = struct.pack('lxxxxlxxxxlhh', \ l_start, l_len, os.getpid(), l_type, l_whence) elif sys.platform in ('aix3', 'aix4'): flock = struct.pack('hhlllii', \ l_type, l_whence, l_start, l_len, 0, 0, 0) else: flock = struct.pack('hhllhh', \ l_type, l_whence, l_start, l_len, 0, 0) flock = fcntl.fcntl(self._file_.fileno(), cmd, flock) if '?' in how: if sys.platform in ('netbsd1', 'openbsd2', 'freebsd2', 'freebsd3', 'freebsd4', 'freebsd5', 'bsdos2', 'bsdos3', 'bsdos4'): l_start, l_len, l_pid, l_type, l_whence = \ struct.unpack('lxxxxlxxxxlhh', flock) elif sys.platform in ('aix3', 'aix4'): l_type, l_whence, l_start, l_len, l_sysid, l_pid, l_vfs = \ struct.unpack('hhlllii', flock) elif sys.platform == "linux2": l_type, l_whence, l_start, l_len, l_pid, l_sysid = \ struct.unpack('hhllhh', flock) else: l_type, l_whence, l_start, l_len, l_sysid, l_pid = \ struct.unpack('hhllhh', flock) if l_type != fcntl.F_UNLCK: if l_type == fcntl.F_RDLCK: return 'r', l_len, l_start, l_whence, l_pid else: return 'w', l_len, l_start, l_whence, l_pid def open(name, mode='r', bufsize=-1): """Public routine to open a file as a posixfile object.""" return _posixfile_().open(name, mode, bufsize) def fileopen(file): """Public routine to get a posixfile object from a Python file object.""" return _posixfile_().fileopen(file) # # Constants # SEEK_SET = 0 SEEK_CUR = 1 SEEK_END = 2 # # End of posixfile.py #
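# Editor's illustration (not part of the original module): a short usage sketch built
# from the conventions in the module docstring. posixfile is a Python 2, Unix-only
# module, so this is Python 2 syntax and requires fcntl support.
import posixfile

f = posixfile.open('/tmp/posixfile_demo.txt', 'w')
f.lock('w|')                 # blocking write lock on the whole file
f.write('locked write\n')
print f.flags('?')           # string of flags currently set, e.g. '' or 'c'
f.lock('u')                  # release the lock
f.close()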
apache-2.0
-2,616,030,543,840,185,000
32.374468
79
0.531429
false
Distrotech/qtwebkit
Tools/Scripts/webkitpy/w3c/test_importer_unittest.py
115
3011
#!/usr/bin/env python # Copyright (C) 2013 Adobe Systems Incorporated. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above # copyright notice, this list of conditions and the following # disclaimer. # 2. Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials # provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER "AS IS" AND ANY # EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, # OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR # TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF # THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF # SUCH DAMAGE. import optparse import shutil import tempfile import unittest2 as unittest from webkitpy.common.host_mock import MockHost from webkitpy.common.system.filesystem_mock import MockFileSystem from webkitpy.common.system.executive_mock import MockExecutive2, ScriptError from webkitpy.common.system.outputcapture import OutputCapture from webkitpy.w3c.test_importer import TestImporter FAKE_SOURCE_DIR = '/blink/w3c' FAKE_REPO_DIR = '/blink' FAKE_FILES = { '/blink/w3c/empty_dir/README.txt': '', '/mock-checkout/LayoutTests/w3c/README.txt': '', } class TestImporterTest(unittest.TestCase): def test_import_dir_with_no_tests_and_no_hg(self): host = MockHost() host.executive = MockExecutive2(exception=OSError()) host.filesystem = MockFileSystem(files=FAKE_FILES) importer = TestImporter(host, FAKE_SOURCE_DIR, FAKE_REPO_DIR, optparse.Values({"overwrite": False})) oc = OutputCapture() oc.capture_output() try: importer.do_import() finally: oc.restore_output() def test_import_dir_with_no_tests(self): host = MockHost() host.executive = MockExecutive2(exception=ScriptError("abort: no repository found in '/Volumes/Source/src/wk/Tools/Scripts/webkitpy/w3c' (.hg not found)!")) host.filesystem = MockFileSystem(files=FAKE_FILES) importer = TestImporter(host, FAKE_SOURCE_DIR, FAKE_REPO_DIR, optparse.Values({"overwrite": False})) oc = OutputCapture() oc.capture_output() try: importer.do_import() finally: oc.restore_output() # FIXME: Needs more tests.
lgpl-3.0
-5,907,856,606,055,863,000
37.113924
164
0.724676
false
jaehyuk/High-Frequency-Trading-Model-with-IB
params/ib_data_types.py
7
1835
""" Author: James Ma Email stuff here: [email protected] """ """ API doumentation: https://www.interactivebrokers.com/en/software/api/apiguide/java/reqhistoricaldata.htm https://www.interactivebrokers.com/en/software/api/apiguide/tables/tick_types.htm """ FIELD_BID_SIZE = 0 FIELD_BID_PRICE = 1 FIELD_ASK_PRICE = 2 FIELD_ASK_SIZE = 3 FIELD_LAST_PRICE = 4 FIELD_LAST_SIZE = 5 FIELD_HIGH = 6 FIELD_LOW = 7 FIELD_VOLUME = 8 FIELD_CLOSE_PRICE = 9 FIELD_AVG_VOLUME = 21 FIELD_BID_EXCH = 32 FIELD_ASK_EXCH = 33 FIELD_AUCTION_VOLUME = 34 FIELD_AUCTION_PRICE = 35 FIELD_LAST_TIMESTAMP = 45 FIELD_HALTED = 49 FIELD_TRADE_COUNT = 54 FIELD_TRADE_RATE = 55 FIELD_VOLUME_RATE = 56 FIELD_HALTED_NOT_HALTED = 0 FIELD_HALTED_IS_HALTED = 1 FIELD_HALTED_BY_VOLATILITY = 2 DURATION_1_HR = '3600 S' DURATION_1_MIN = "60 S" DURATION_1_DAY = '1 D' BAR_SIZE_5_SEC = '5 secs' BAR_SIZE_1_MIN = '1 min' RTH_ALL = 0 RTH_ONLY_TRADING_HRS = 1 WHAT_TO_SHOW_TRADES = "TRADES" WHAT_TO_SHOW_MID_PT = "MIDPOINT" WHAT_TO_SHOW_BID = "BID" WHAT_TO_SHOW_ASK = "ASK" WHAT_TO_SHOW_BID_ASK = "BID_ASK" WHAT_TO_SHOW_HVOL = "HISTORICAL_VOLATILITY" WHAT_TO_SHOW_OPT_IMPV = "OPTION_IMPLIED_VOLATILITY" DATEFORMAT_STRING = 1 DATEFORMAT_UNIX_TS = 2 MSG_TYPE_HISTORICAL_DATA = "historicalData" MSG_TYPE_UPDATE_PORTFOLIO = "updatePortfolio" MSG_TYPE_MANAGED_ACCOUNTS = "managedAccounts" MSG_TYPE_NEXT_ORDER_ID = "nextValidId" MSG_TYPE_TICK_PRICE = "tickPrice" MSG_TYPE_TICK_STRING = "tickString" MSG_TYPE_STICK_SIZE = "tickSize" DATE_TIME_FORMAT = "%Y%m%d %H:%M:%S" DATE_TIME_FORMAT_LONG = "%Y-%m-%d %H:%M:%S" DATE_TIME_FORMAT_LONG_MILLISECS = "%Y-%m-%d %H:%M:%S.%f" GENERIC_TICKS_NONE = '' GENERIC_TICKS_RTVOLUME = "233" SNAPSHOT_NONE = False SNAPSHOT_TRUE = True ORDER_TYPE_MARKET = "MKT" ORDER_TYPE_LIMIT = "LMT" ORDER_ACTION_SELL = "SELL" ORDER_ACTION_BUY = "BUY"
mit
5,982,893,194,443,235,000
21.9375
86
0.711172
false
arante/pyloc
microblog/flask/lib/python3.5/site-packages/whoosh/support/base85.py
95
2473
""" This module contains generic base85 encoding and decoding functions. The whoosh.util.numeric module contains faster variants for encoding and decoding integers. Modified from: http://paste.lisp.org/display/72815 """ import struct from whoosh.compat import xrange # Instead of using the character set from the ascii85 algorithm, I put the # characters in order so that the encoded text sorts properly (my life would be # a lot easier if they had just done that from the start) b85chars = ("!$%&*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ" "^_abcdefghijklmnopqrstuvwxyz{|}~") b85dec = {} for i in range(len(b85chars)): b85dec[b85chars[i]] = i # Integer encoding and decoding functions def to_base85(x, islong=False): "Encodes the given integer using base 85." size = 10 if islong else 5 rems = "" for i in xrange(size): rems = b85chars[x % 85] + rems x //= 85 return rems def from_base85(text): "Decodes the given base 85 text into an integer." acc = 0 for c in text: acc = acc * 85 + b85dec[c] return acc # Bytes encoding and decoding functions def b85encode(text, pad=False): l = len(text) r = l % 4 if r: text += '\0' * (4 - r) longs = len(text) >> 2 out = [] words = struct.unpack('>' + 'L' * longs, text[0:longs * 4]) for word in words: rems = [0, 0, 0, 0, 0] for i in range(4, -1, -1): rems[i] = b85chars[word % 85] word /= 85 out.extend(rems) out = ''.join(out) if pad: return out # Trim padding olen = l % 4 if olen: olen += 1 olen += l / 4 * 5 return out[0:olen] def b85decode(text): l = len(text) out = [] for i in range(0, len(text), 5): chunk = text[i:i + 5] acc = 0 for j in range(len(chunk)): try: acc = acc * 85 + b85dec[chunk[j]] except KeyError: raise TypeError('Bad base85 character at byte %d' % (i + j)) if acc > 4294967295: raise OverflowError('Base85 overflow in hunk starting at byte %d' % i) out.append(acc) # Pad final chunk if necessary cl = l % 5 if cl: acc *= 85 ** (5 - cl) if cl > 1: acc += 0xffffff >> (cl - 2) * 8 out[-1] = acc out = struct.pack('>' + 'L' * ((l + 4) / 5), *out) if cl: out = out[:-(5 - cl)] return out
gpl-3.0
-2,040,205,077,659,435,800
23.009709
82
0.556814
false
freedesktop-unofficial-mirror/gstreamer__sdk__cerbero
cerbero/ide/xcode/fwlib.py
13
8433
#!/usr/bin/env python # cerbero - a multi-platform build system for Open Source software # Copyright (C) 2012 Andoni Morales Alastruey <[email protected]> # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Library General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Library General Public License for more details. # # You should have received a copy of the GNU Library General Public # License along with this library; if not, write to the # Free Software Foundation, Inc., 59 Temple Place - Suite 330, # Boston, MA 02111-1307, USA. import os import tempfile import shutil from collections import defaultdict from cerbero.config import Architecture from cerbero.ide.pkgconfig import PkgConfig from cerbero.utils import shell from cerbero.utils import messages as m class FrameworkLibrary(object): ''' Combine several shared library into a single shared library to be used as a Framework. The complete list of shared libraries needed are guessed with pkg-config but full paths can be used too with use_pkgconfig=False ''' def __init__(self, libname, install_name, libraries, arch): self.libname = libname self.install_name = install_name self.libraries = libraries self.arch = arch self.use_pkgconfig = True self.universal_archs = None def create(self): if self.arch == Architecture.X86: self.arch = 'i386' if self.use_pkgconfig: libraries = self._libraries_paths(self.libraries) else: libraries = self.libraries self._create_framework_library(libraries) def _libraries_paths(self, libraries): pkgconfig = PkgConfig(libraries) libdirs = pkgconfig.libraries_dirs() libs = pkgconfig.libraries() libspaths = [] for lib in libs: for libdir in libdirs: libpath = os.path.join(libdir, self._get_lib_file_name (lib)) if not os.path.exists(libpath): continue libspaths.append(os.path.realpath(libpath)) break return libspaths def _create_framework_library(self, libraries): raise NotImplemented def _get_lib_file_name(self, lib): return lib class DynamicFrameworkLibrary(FrameworkLibrary): def _create_framework_library(self, libraries): libraries = ' '.join(['-Wl,-reexport_library %s' % x for x in libraries]) shell.call('clang -dynamiclib -o %s -arch %s -install_name %s %s' % (self.libname, self.arch, self.install_name, libraries)) def _get_lib_file_name(self, lib): return 'lib%s.dylib' % lib class StaticFrameworkLibrary(FrameworkLibrary): def _get_lib_file_name(self, lib): return 'lib%s.a' % lib def _split_static_lib(self, lib, thin_arch=None): '''Splits the static lib @lib into its object files Splits the static lib @lib into its object files and returns a new temporary directory where the .o files should be found. if @thin_arch was provided, it considers the @lib to be a fat binary and takes its thin version for the @thin_arch specified before retrieving the object files. 
''' lib_tmpdir = tempfile.mkdtemp() shutil.copy(lib, lib_tmpdir) tmplib = os.path.join(lib_tmpdir, os.path.basename(lib)) if thin_arch: #should be a fat file, split only to the arch we want newname = '%s_%s' % (thin_arch, os.path.basename(lib)) shell.call('lipo %s -thin %s -output %s' % (tmplib, thin_arch, newname), lib_tmpdir) tmplib = os.path.join (lib_tmpdir, newname) shell.call('ar -x %s' % tmplib, lib_tmpdir) # object files with the same name in an archive are overwritten # when they are extracted. osx's ar does not support the N count # modifier so after extracting all the files we remove them from # the archive to extract those with duplicated names. # eg: # ar t libavcodec.a -> mlpdsp.o mlpdsp.o (2 objects with the same name) # ar d libavcodec.a mlpdsp.o (we remove the first one) # ar t libavcodec.a -> mlpdsp.o (we only the second one now) files = shell.check_call('ar -t %s' % tmplib, lib_tmpdir).split('\n') # FIXME: We should use collections.Count but it's only available in # python 2.7+ dups = defaultdict(int) for f in files: dups[f] += 1 for f in dups: if dups[f] <= 1: continue for x in range(dups[f]): path = os.path.join(lib_tmpdir, f) new_path = os.path.join(lib_tmpdir, 'dup%d_' % x + f) # The duplicated overwrote the first one, so extract it again shell.call('ar -x %s %s' % (tmplib, f), lib_tmpdir) shutil.move (path, new_path) shell.call('ar -d %s %s' % (tmplib, f), lib_tmpdir) return lib_tmpdir def _check_duplicated_symbols(self, files, tmpdir): for f in files: syms = defaultdict(list) symbols = shell.check_call('nm -UA %s' % f, tmpdir).split('\n') # nm output is: test.o: 00000000 T _gzwrite # (filename, address, symbol type, symbols_name) for s in symbols: s = s.split(' ') if len(s) == 4 and s[2] == 'T': syms[s[3]].append(s) dups = {} for k,v in syms.iteritems(): if len(v) > 1: dups[k] = v if dups: m.warning ("The static library contains duplicated symbols") for k, v in dups.iteritems(): m.message (k) # symbol name for l in v: m.message (" %s" % l[0]) # file def _create_framework_library(self, libraries): tmpdir = tempfile.mkdtemp() libname = os.path.basename (self.libname) # just to make sure if self.arch == Architecture.UNIVERSAL: archs = self.universal_archs else: archs = [self.arch] archs = [a if a != Architecture.X86 else 'i386' for a in archs] for thin_arch in archs: object_files_md5 = [] shell.call ('mkdir -p %s' % thin_arch, tmpdir) tmpdir_thinarch = os.path.join(tmpdir, thin_arch) for lib in libraries: libprefix = os.path.split(lib)[-1].replace('.', '_') if len(archs) > 1: #should be a fat file, split only to the arch we want libprefix += '_%s_' % thin_arch lib_tmpdir = self._split_static_lib(lib, thin_arch) else: lib_tmpdir = self._split_static_lib(lib) obj_files = shell.ls_files(['*.o'], lib_tmpdir) for obj_f in obj_files: obj_path = os.path.join(lib_tmpdir, obj_f) md5 = shell.check_call('md5 -q %s' % obj_path).split('\n')[0] md5 = '%s-%s' % (md5, os.path.getsize(obj_path)) if md5 not in object_files_md5: shell.call('cp %s %s' % (obj_path, '%s-%s' % (libprefix, obj_f)), tmpdir_thinarch) shell.call('ar -cqS %s %s-%s' % (libname, libprefix, obj_f), tmpdir_thinarch) object_files_md5.append(md5) shutil.rmtree(lib_tmpdir) shell.call('ar -s %s' % (libname), tmpdir_thinarch) files = [os.path.join(tmpdir, arch, libname) for arch in archs] self._check_duplicated_symbols(files, tmpdir) if len(archs) > 1: #merge the final libs into a fat file again shell.call('lipo %s -create -output %s' % (' '.join(files), self.install_name), tmpdir) else: shell.call('cp %s %s' % 
(os.path.join(tmpdir, self.arch, libname), self.install_name), tmpdir) shutil.rmtree(tmpdir)
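# Sketch of the duplicate-member check performed above, written with
# collections.Counter (the FIXME in _split_static_lib mentions Counter, which
# needs Python 2.7+).  It lists object names that appear more than once in a
# static archive; illustrative only, and assumes the `ar` binary is on PATH.
import subprocess
from collections import Counter


def duplicated_members(archive_path):
    names = subprocess.check_output(['ar', '-t', archive_path]).decode().split()
    return sorted(name for name, count in Counter(names).items() if count > 1)


# e.g. duplicated_members('libavcodec.a') might return ['mlpdsp.o']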
lgpl-2.1
-7,344,319,975,056,721,000
39.936893
106
0.583304
false
robertostling/hnmt
hnmt/bleu.py
1
3963
#!/usr/bin/env python3 """calculate BLEU scores script taken from https://github.com/vikasnar/Bleu and adjusted by Jörg Tiedemann """ import sys import codecs import os import math import operator import json import functools def fetch_data(cand, ref): """ Store each reference and candidate sentences as a list """ references = [] if os.path.isdir(ref): for root, dirs, files in os.walk(ref): for f in files: reference_file = codecs.open(os.path.join(root, f), 'r', 'utf-8') references.append(reference_file.readlines()) else: reference_file = codecs.open(ref, 'r', 'utf-8') references.append(reference_file.readlines()) candidate_file = codecs.open(cand, 'r', 'utf-8') candidate = candidate_file.readlines() return candidate, references def count_ngram(candidate, references, n, lowercase): clipped_count = 0 count = 0 r = 0 c = 0 for si in range(len(candidate)): # Calculate precision for each sentence ref_counts = [] ref_lengths = [] # Build dictionary of ngram counts for reference in references: ref_sentence = reference[si] ngram_d = {} words = ref_sentence.strip().split() ref_lengths.append(len(words)) limits = len(words) - n + 1 # loop through the sentance consider the ngram length for i in range(limits): ngram = ' '.join(words[i:i+n]) if lowercase: ngram = ngram.lower() if ngram in ngram_d.keys(): ngram_d[ngram] += 1 else: ngram_d[ngram] = 1 ref_counts.append(ngram_d) # candidate cand_sentence = candidate[si] cand_dict = {} words = cand_sentence.strip().split() limits = len(words) - n + 1 for i in range(0, limits): ngram = ' '.join(words[i:i + n]) if lowercase: ngram = ngram.lower() if ngram in cand_dict: cand_dict[ngram] += 1 else: cand_dict[ngram] = 1 clipped_count += clip_count(cand_dict, ref_counts) count += limits r += best_length_match(ref_lengths, len(words)) c += len(words) if clipped_count == 0: pr = 0 else: pr = float(clipped_count) / count bp = brevity_penalty(c, r) return pr, bp def clip_count(cand_d, ref_ds): """Count the clip count for each ngram considering all references""" count = 0 for m in cand_d.keys(): m_w = cand_d[m] m_max = 0 for ref in ref_ds: if m in ref: m_max = max(m_max, ref[m]) m_w = min(m_w, m_max) count += m_w return count def best_length_match(ref_l, cand_l): """Find the closest length of reference to that of candidate""" least_diff = abs(cand_l-ref_l[0]) best = ref_l[0] for ref in ref_l: if abs(cand_l-ref) < least_diff: least_diff = abs(cand_l-ref) best = ref return best def brevity_penalty(c, r): if c > r: bp = 1 elif c == 0: bp = 0 else: bp = math.exp(1-(float(r)/c)) return bp def geometric_mean(precisions): return (functools.reduce(operator.mul, precisions)) ** (1.0 / len(precisions)) def BLEU(candidate, references, lowercase=False): precisions = [] for i in range(4): pr, bp = count_ngram(candidate, references, i+1, lowercase) precisions.append(pr) bleu = geometric_mean(precisions) * bp return bleu, precisions[0], precisions[1], precisions[2], precisions[3], bp if __name__ == "__main__": candidate, references = fetch_data(sys.argv[1], sys.argv[2]) bleu = BLEU(candidate, references) print('BLEU = %.4f (%.3f, %.3f, %.3f, %.3f, BP = %.3f)' % (bleu))
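# Minimal usage sketch for the BLEU() helper above (assumed importable as
# hnmt.bleu in this repository).  `candidate` is a list of hypothesis
# sentences; `references` is a list of reference translations, each itself a
# list of sentences aligned with the candidate.
from hnmt.bleu import BLEU

candidate = ['the cat sat on the mat', 'there is a dog in the garden']
references = [['the cat sat on the mat', 'a dog is in the garden']]

bleu, p1, p2, p3, p4, bp = BLEU(candidate, references, lowercase=True)
print('BLEU = %.4f (%.3f/%.3f/%.3f/%.3f, BP = %.3f)' % (bleu, p1, p2, p3, p4, bp))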
gpl-3.0
-7,621,562,214,709,178,000
27.919708
82
0.551237
false
philsch/ansible
lib/ansible/modules/cloud/cloudstack/cs_loadbalancer_rule_member.py
11
10209
#!/usr/bin/python # -*- coding: utf-8 -*- # # (c) 2015, Darren Worrall <[email protected]> # (c) 2015, René Moser <[email protected]> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ANSIBLE_METADATA = {'metadata_version': '1.0', 'status': ['stableinterface'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: cs_loadbalancer_rule_member short_description: Manages load balancer rule members on Apache CloudStack based clouds. description: - Add and remove load balancer rule members. version_added: '2.0' author: - "Darren Worrall (@dazworrall)" - "René Moser (@resmo)" options: name: description: - The name of the load balancer rule. required: true ip_address: description: - Public IP address from where the network traffic will be load balanced from. - Only needed to find the rule if C(name) is not unique. required: false default: null aliases: [ 'public_ip' ] vms: description: - List of VMs to assign to or remove from the rule. required: true aliases: [ 'vm' ] state: description: - Should the VMs be present or absent from the rule. required: false default: 'present' choices: [ 'present', 'absent' ] project: description: - Name of the project the firewall rule is related to. required: false default: null domain: description: - Domain the rule is related to. required: false default: null account: description: - Account the rule is related to. required: false default: null zone: description: - Name of the zone in which the rule should be located. - If not set, default zone is used. required: false default: null extends_documentation_fragment: cloudstack ''' EXAMPLES = ''' # Add VMs to an existing load balancer - local_action: module: cs_loadbalancer_rule_member name: balance_http vms: - web01 - web02 # Remove a VM from an existing load balancer - local_action: module: cs_loadbalancer_rule_member name: balance_http vms: - web01 - web02 state: absent # Rolling upgrade of hosts - hosts: webservers serial: 1 pre_tasks: - name: Remove from load balancer local_action: module: cs_loadbalancer_rule_member name: balance_http vm: "{{ ansible_hostname }}" state: absent tasks: # Perform update post_tasks: - name: Add to load balancer local_action: module: cs_loadbalancer_rule_member name: balance_http vm: "{{ ansible_hostname }}" state: present ''' RETURN = ''' --- id: description: UUID of the rule. returned: success type: string sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f zone: description: Name of zone the rule is related to. returned: success type: string sample: ch-gva-2 project: description: Name of project the rule is related to. returned: success type: string sample: Production account: description: Account the rule is related to. returned: success type: string sample: example account domain: description: Domain the rule is related to. returned: success type: string sample: example domain algorithm: description: Load balancer algorithm used. 
returned: success type: string sample: "source" cidr: description: CIDR to forward traffic from. returned: success type: string sample: "" name: description: Name of the rule. returned: success type: string sample: "http-lb" description: description: Description of the rule. returned: success type: string sample: "http load balancer rule" protocol: description: Protocol of the rule. returned: success type: string sample: "tcp" public_port: description: Public port. returned: success type: string sample: 80 private_port: description: Private IP address. returned: success type: string sample: 80 public_ip: description: Public IP address. returned: success type: string sample: "1.2.3.4" vms: description: Rule members. returned: success type: list sample: '[ "web01", "web02" ]' tags: description: List of resource tags associated with the rule. returned: success type: dict sample: '[ { "key": "foo", "value": "bar" } ]' state: description: State of the rule. returned: success type: string sample: "Add" ''' from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.cloudstack import ( AnsibleCloudStack, CloudStackException, cs_argument_spec, cs_required_together, ) class AnsibleCloudStackLBRuleMember(AnsibleCloudStack): def __init__(self, module): super(AnsibleCloudStackLBRuleMember, self).__init__(module) self.returns = { 'publicip': 'public_ip', 'algorithm': 'algorithm', 'cidrlist': 'cidr', 'protocol': 'protocol', } # these values will be casted to int self.returns_to_int = { 'publicport': 'public_port', 'privateport': 'private_port', } def get_rule(self): args = self._get_common_args() args.update({ 'name': self.module.params.get('name'), 'zoneid': self.get_zone(key='id') if self.module.params.get('zone') else None, }) if self.module.params.get('ip_address'): args['publicipid'] = self.get_ip_address(key='id') rules = self.cs.listLoadBalancerRules(**args) if rules: if len(rules['loadbalancerrule']) > 1: self.module.fail_json(msg="More than one rule having name %s. Please pass 'ip_address' as well." 
% args['name']) return rules['loadbalancerrule'][0] return None def _get_common_args(self): return { 'account': self.get_account(key='name'), 'domainid': self.get_domain(key='id'), 'projectid': self.get_project(key='id'), } def _get_members_of_rule(self, rule): res = self.cs.listLoadBalancerRuleInstances(id=rule['id']) if 'errortext' in res: self.module.fail_json(msg="Failed: '%s'" % res['errortext']) return res.get('loadbalancerruleinstance', []) def _ensure_members(self, operation): if operation not in ['add', 'remove']: self.module.fail_json(msg="Bad operation: %s" % operation) rule = self.get_rule() if not rule: self.module.fail_json(msg="Unknown rule: %s" % self.module.params.get('name')) existing = {} for vm in self._get_members_of_rule(rule=rule): existing[vm['name']] = vm['id'] wanted_names = self.module.params.get('vms') if operation == 'add': cs_func = self.cs.assignToLoadBalancerRule to_change = set(wanted_names) - set(existing.keys()) else: cs_func = self.cs.removeFromLoadBalancerRule to_change = set(wanted_names) & set(existing.keys()) if not to_change: return rule args = self._get_common_args() vms = self.cs.listVirtualMachines(**args) to_change_ids = [] for name in to_change: for vm in vms.get('virtualmachine', []): if vm['name'] == name: to_change_ids.append(vm['id']) break else: self.module.fail_json(msg="Unknown VM: %s" % name) if to_change_ids: self.result['changed'] = True if to_change_ids and not self.module.check_mode: res = cs_func( id=rule['id'], virtualmachineids=to_change_ids, ) if 'errortext' in res: self.module.fail_json(msg="Failed: '%s'" % res['errortext']) poll_async = self.module.params.get('poll_async') if poll_async: self.poll_job(res) rule = self.get_rule() return rule def add_members(self): return self._ensure_members('add') def remove_members(self): return self._ensure_members('remove') def get_result(self, rule): super(AnsibleCloudStackLBRuleMember, self).get_result(rule) if rule: self.result['vms'] = [] for vm in self._get_members_of_rule(rule=rule): self.result['vms'].append(vm['name']) return self.result def main(): argument_spec = cs_argument_spec() argument_spec.update(dict( name=dict(required=True), ip_address=dict(aliases=['public_ip']), vms=dict(required=True, aliases=['vm'], type='list'), state=dict(choices=['present', 'absent'], default='present'), zone=dict(), domain=dict(), project=dict(), account=dict(), poll_async=dict(type='bool', default=True), )) module = AnsibleModule( argument_spec=argument_spec, required_together=cs_required_together(), supports_check_mode=True ) try: acs_lb_rule_member = AnsibleCloudStackLBRuleMember(module) state = module.params.get('state') if state in ['absent']: rule = acs_lb_rule_member.remove_members() else: rule = acs_lb_rule_member.add_members() result = acs_lb_rule_member.get_result(rule) except CloudStackException as e: module.fail_json(msg='CloudStackException: %s' % str(e)) module.exit_json(**result) if __name__ == '__main__': main()
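# Standalone illustration of the membership arithmetic in _ensure_members()
# above: an 'add' only touches VMs not yet behind the rule, a 'remove' only
# touches VMs that actually are behind it.  Names and ids are made up.
existing = {'web01': 'uuid-1', 'web02': 'uuid-2'}  # name -> id, as built above
wanted = ['web02', 'web03']

to_add = set(wanted) - set(existing.keys())        # {'web03'}
to_remove = set(wanted) & set(existing.keys())     # {'web02'}
print(sorted(to_add), sorted(to_remove))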
gpl-3.0
-5,395,444,621,496,917,000
26.964384
128
0.616146
false
GeyerA/android_external_chromium_org
tools/gdb/gdb_chrome.py
30
10090
# Copyright (c) 2011 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """GDB support for Chrome types. Add this to your gdb by amending your ~/.gdbinit as follows: python import sys sys.path.insert(0, "/path/to/tools/gdb/") import gdb_chrome end This module relies on the WebKit gdb module already existing in your Python path. Use (gdb) p /r any_variable to print |any_variable| without using any printers. """ import datetime import gdb import webkit # When debugging this module, set the below variable to True, and then use # (gdb) python del sys.modules['gdb_chrome'] # (gdb) python import gdb_chrome # to reload. _DEBUGGING = False pp_set = gdb.printing.RegexpCollectionPrettyPrinter("chromium") def typed_ptr(ptr): """Prints a pointer along with its exact type. By default, gdb would print just the address, which takes more steps to interpret. """ # Returning this as a cast expression surrounded by parentheses # makes it easier to cut+paste inside of gdb. return '((%s)%s)' % (ptr.dynamic_type, ptr) class Printer(object): def __init__(self, val): self.val = val class StringPrinter(Printer): def display_hint(self): return 'string' class String16Printer(StringPrinter): def to_string(self): return webkit.ustring_to_string(self.val['_M_dataplus']['_M_p']) pp_set.add_printer( 'string16', '^string16|std::basic_string<(unsigned short|char16|base::char16).*>$', String16Printer); class GURLPrinter(StringPrinter): def to_string(self): return self.val['spec_'] pp_set.add_printer('GURL', '^GURL$', GURLPrinter) class FilePathPrinter(StringPrinter): def to_string(self): return self.val['path_']['_M_dataplus']['_M_p'] pp_set.add_printer('FilePath', '^FilePath$', FilePathPrinter) class SizePrinter(Printer): def to_string(self): return '%sx%s' % (self.val['width_'], self.val['height_']) pp_set.add_printer('gfx::Size', '^gfx::(Size|SizeF|SizeBase<.*>)$', SizePrinter) class PointPrinter(Printer): def to_string(self): return '%s,%s' % (self.val['x_'], self.val['y_']) pp_set.add_printer('gfx::Point', '^gfx::(Point|PointF|PointBase<.*>)$', PointPrinter) class RectPrinter(Printer): def to_string(self): return '%s %s' % (self.val['origin_'], self.val['size_']) pp_set.add_printer('gfx::Rect', '^gfx::(Rect|RectF|RectBase<.*>)$', RectPrinter) class SmartPtrPrinter(Printer): def to_string(self): return '%s%s' % (self.typename, typed_ptr(self.ptr())) class ScopedRefPtrPrinter(SmartPtrPrinter): typename = 'scoped_refptr' def ptr(self): return self.val['ptr_'] pp_set.add_printer('scoped_refptr', '^scoped_refptr<.*>$', ScopedRefPtrPrinter) class LinkedPtrPrinter(SmartPtrPrinter): typename = 'linked_ptr' def ptr(self): return self.val['value_'] pp_set.add_printer('linked_ptr', '^linked_ptr<.*>$', LinkedPtrPrinter) class WeakPtrPrinter(SmartPtrPrinter): typename = 'base::WeakPtr' def ptr(self): flag = ScopedRefPtrPrinter(self.val['ref_']['flag_']).ptr() if flag and flag['is_valid_']: return self.val['ptr_'] return gdb.Value(0).cast(self.val['ptr_'].type) pp_set.add_printer('base::WeakPtr', '^base::WeakPtr<.*>$', WeakPtrPrinter) class CallbackPrinter(Printer): """Callbacks provide no usable information so reduce the space they take.""" def to_string(self): return '...' 
pp_set.add_printer('base::Callback', '^base::Callback<.*>$', CallbackPrinter) class LocationPrinter(Printer): def to_string(self): return '%s()@%s:%s' % (self.val['function_name_'].string(), self.val['file_name_'].string(), self.val['line_number_']) pp_set.add_printer('tracked_objects::Location', '^tracked_objects::Location$', LocationPrinter) class LockPrinter(Printer): def to_string(self): try: if self.val['owned_by_thread_']: return 'Locked by thread %s' % self.val['owning_thread_id_'] else: return 'Unlocked' except gdb.error: return 'Unknown state' pp_set.add_printer('base::Lock', '^base::Lock$', LockPrinter) class TimeDeltaPrinter(object): def __init__(self, val): self._timedelta = datetime.timedelta(microseconds=int(val['delta_'])) def timedelta(self): return self._timedelta def to_string(self): return str(self._timedelta) pp_set.add_printer('base::TimeDelta', '^base::TimeDelta$', TimeDeltaPrinter) class TimeTicksPrinter(TimeDeltaPrinter): def __init__(self, val): self._timedelta = datetime.timedelta(microseconds=int(val['ticks_'])) pp_set.add_printer('base::TimeTicks', '^base::TimeTicks$', TimeTicksPrinter) class TimePrinter(object): def __init__(self, val): timet_offset = gdb.parse_and_eval( 'base::Time::kTimeTToMicrosecondsOffset') self._datetime = (datetime.datetime.fromtimestamp(0) + datetime.timedelta(microseconds= int(val['us_'] - timet_offset))) def datetime(self): return self._datetime def to_string(self): return str(self._datetime) pp_set.add_printer('base::Time', '^base::Time$', TimePrinter) class IpcMessagePrinter(Printer): def header(self): return self.val['header_'].cast( gdb.lookup_type('IPC::Message::Header').pointer()) def to_string(self): message_type = self.header()['type'] return '%s of kind %s line %s' % ( self.val.dynamic_type, (message_type >> 16).cast(gdb.lookup_type('IPCMessageStart')), message_type & 0xffff) def children(self): yield ('header_', self.header().dereference()) yield ('capacity_', self.val['capacity_']) yield ('variable_buffer_offset_', self.val['variable_buffer_offset_']) for field in self.val.type.fields(): if field.is_base_class: continue yield (field.name, self.val[field.name]) pp_set.add_printer('IPC::Message', '^IPC::Message$', IpcMessagePrinter) class NotificationRegistrarPrinter(Printer): def to_string(self): try: registrations = self.val['registered_'] vector_finish = registrations['_M_impl']['_M_finish'] vector_start = registrations['_M_impl']['_M_start'] if vector_start == vector_finish: return 'Not watching notifications' if vector_start.dereference().type.sizeof == 0: # Incomplete type: b/8242773 return 'Watching some notifications' return ('Watching %s notifications; ' 'print %s->registered_ for details') % ( int(vector_finish - vector_start), typed_ptr(self.val.address)) except gdb.error: return 'NotificationRegistrar' pp_set.add_printer('content::NotificationRegistrar', '^content::NotificationRegistrar$', NotificationRegistrarPrinter) class SiteInstanceImplPrinter(object): def __init__(self, val): self.val = val.cast(val.dynamic_type) def to_string(self): return 'SiteInstanceImpl@%s for %s' % ( self.val.address, self.val['site_']) def children(self): yield ('id_', self.val['id_']) yield ('has_site_', self.val['has_site_']) if self.val['browsing_instance_']['ptr_']: yield ('browsing_instance_', self.val['browsing_instance_']['ptr_']) if self.val['process_']: yield ('process_', typed_ptr(self.val['process_'])) if self.val['render_process_host_factory_']: yield ('render_process_host_factory_', 
self.val['render_process_host_factory_']) pp_set.add_printer('content::SiteInstanceImpl', '^content::SiteInstanceImpl$', SiteInstanceImplPrinter) class RenderProcessHostImplPrinter(object): def __init__(self, val): self.val = val.cast(val.dynamic_type) def to_string(self): pid = '' try: child_process_launcher_ptr = ( self.val['child_process_launcher_']['impl_']['data_']['ptr']) if child_process_launcher_ptr: context = (child_process_launcher_ptr['context_']['ptr_']) if context: pid = ' PID %s' % str(context['process_']['process_']) except gdb.error: # The definition of the Context type may not be available. # b/8242773 pass return 'RenderProcessHostImpl@%s%s' % (self.val.address, pid) def children(self): yield ('id_', self.val['id_']) yield ('render_widget_hosts_', self.val['render_widget_hosts_']['data_']) yield ('fast_shutdown_started_', self.val['fast_shutdown_started_']) yield ('deleting_soon_', self.val['deleting_soon_']) yield ('pending_views_', self.val['pending_views_']) yield ('visible_widgets_', self.val['visible_widgets_']) yield ('backgrounded_', self.val['backgrounded_']) yield ('widget_helper_', self.val['widget_helper_']) yield ('is_initialized_', self.val['is_initialized_']) yield ('browser_context_', typed_ptr(self.val['browser_context_'])) yield ('sudden_termination_allowed_', self.val['sudden_termination_allowed_']) yield ('ignore_input_events_', self.val['ignore_input_events_']) yield ('is_guest_', self.val['is_guest_']) pp_set.add_printer('content::RenderProcessHostImpl', '^content::RenderProcessHostImpl$', RenderProcessHostImplPrinter) gdb.printing.register_pretty_printer(gdb, pp_set, replace=_DEBUGGING)
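# Sketch of adding one more printer in the style of the module above.  It only
# runs inside gdb's embedded Python (the `gdb` module is not importable
# elsewhere), and `example::Point` with its x_/y_ members is a placeholder
# type, not a real Chrome class.  It would sit alongside the other printers,
# before the final register_pretty_printer() call.
class ExamplePointPrinter(Printer):
    def to_string(self):
        return '%s,%s' % (self.val['x_'], self.val['y_'])

pp_set.add_printer('example::Point', '^example::Point$', ExamplePointPrinter)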
bsd-3-clause
3,970,690,216,523,493,000
33.087838
80
0.606739
false
bplancher/odoo
addons/l10n_be_invoice_bba/invoice.py
8
11056
# -*- encoding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. # Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved. import re, time, random from openerp import api from openerp.osv import fields, osv from openerp.tools.translate import _ import logging from openerp.exceptions import UserError _logger = logging.getLogger(__name__) """ account.invoice object: - Add support for Belgian structured communication - Rename 'reference' field labels to 'Communication' """ class account_invoice(osv.osv): _inherit = 'account.invoice' @api.cr_uid_context def _get_reference_type(self, cursor, user, context=None): """Add BBA Structured Communication Type and change labels from 'reference' into 'communication' """ res = super(account_invoice, self)._get_reference_type(cursor, user, context=context) res[[i for i,x in enumerate(res) if x[0] == 'none'][0]] = \ ('none', _('Free Communication')) res.append(('bba', _('BBA Structured Communication'))) #l_logger.warning('reference_type = %s' %res ) return res def check_bbacomm(self, val): supported_chars = '0-9+*/ ' pattern = re.compile('[^' + supported_chars + ']') if pattern.findall(val or ''): return False bbacomm = re.sub('\D', '', val or '') if len(bbacomm) == 12: base = int(bbacomm[:10]) mod = base % 97 or 97 if mod == int(bbacomm[-2:]): return True return False def _check_communication(self, cr, uid, ids): for inv in self.browse(cr, uid, ids): if inv.reference_type == 'bba': return self.check_bbacomm(inv.reference) return True @api.onchange('partner_id') def _onchange_partner_id(self): result = super(account_invoice, self)._onchange_partner_id() # reference_type = self.default_get(cr, uid, ['reference_type'])['reference_type'] # _logger.warning('partner_id %s' % partner_id) reference = False reference_type = 'none' if self.partner_id: if (self.type == 'out_invoice'): reference_type = self.partner_id.out_inv_comm_type if reference_type: reference = self.generate_bbacomm(self.type, reference_type, self.partner_id.id, '')['value']['reference'] self.reference_type = reference_type or 'none' self.reference = reference return result def generate_bbacomm(self, cr, uid, ids, type, reference_type, partner_id, reference, context=None): partner_obj = self.pool.get('res.partner') reference = reference or '' algorithm = False if partner_id: algorithm = partner_obj.browse(cr, uid, partner_id, context=context).out_inv_comm_algorithm algorithm = algorithm or 'random' if (type == 'out_invoice'): if reference_type == 'bba': if algorithm == 'date': if not self.check_bbacomm(reference): doy = time.strftime('%j') year = time.strftime('%Y') seq = '001' seq_ids = self.search(cr, uid, [('type', '=', 'out_invoice'), ('reference_type', '=', 'bba'), ('reference', 'like', '+++%s/%s/%%' % (doy, year))], order='reference') if seq_ids: prev_seq = int(self.browse(cr, uid, seq_ids[-1]).reference[12:15]) if prev_seq < 999: seq = '%03d' % (prev_seq + 1) else: raise UserError(_('The daily maximum of outgoing invoices with an automatically generated BBA Structured Communications has been exceeded!' 
\ '\nPlease create manually a unique BBA Structured Communication.')) bbacomm = doy + year + seq base = int(bbacomm) mod = base % 97 or 97 reference = '+++%s/%s/%s%02d+++' % (doy, year, seq, mod) elif algorithm == 'partner_ref': if not self.check_bbacomm(reference): partner_ref = self.pool.get('res.partner').browse(cr, uid, partner_id).ref partner_ref_nr = re.sub('\D', '', partner_ref or '') if (len(partner_ref_nr) < 3) or (len(partner_ref_nr) > 7): raise UserError(_('The Partner should have a 3-7 digit Reference Number for the generation of BBA Structured Communications!' \ '\nPlease correct the Partner record.')) else: partner_ref_nr = partner_ref_nr.ljust(7, '0') seq = '001' seq_ids = self.search(cr, uid, [('type', '=', 'out_invoice'), ('reference_type', '=', 'bba'), ('reference', 'like', '+++%s/%s/%%' % (partner_ref_nr[:3], partner_ref_nr[3:]))], order='reference') if seq_ids: prev_seq = int(self.browse(cr, uid, seq_ids[-1]).reference[12:15]) if prev_seq < 999: seq = '%03d' % (prev_seq + 1) else: raise UserError(_('The daily maximum of outgoing invoices with an automatically generated BBA Structured Communications has been exceeded!' \ '\nPlease create manually a unique BBA Structured Communication.')) bbacomm = partner_ref_nr + seq base = int(bbacomm) mod = base % 97 or 97 reference = '+++%s/%s/%s%02d+++' % (partner_ref_nr[:3], partner_ref_nr[3:], seq, mod) elif algorithm == 'random': if not self.check_bbacomm(reference): base = random.randint(1, 9999999999) bbacomm = str(base).rjust(10, '0') base = int(bbacomm) mod = base % 97 or 97 mod = str(mod).rjust(2, '0') reference = '+++%s/%s/%s%s+++' % (bbacomm[:3], bbacomm[3:7], bbacomm[7:], mod) else: raise UserError(_("Unsupported Structured Communication Type Algorithm '%s' !" \ "\nPlease contact your Odoo support channel.") % algorithm) return {'value': {'reference': reference}} def create(self, cr, uid, vals, context=None): reference = vals.get('reference', False) reference_type = vals.get('reference_type', False) if vals.get('type') == 'out_invoice' and not reference_type: # fallback on default communication type for partner reference_type = self.pool.get('res.partner').browse(cr, uid, vals['partner_id']).out_inv_comm_type if reference_type == 'bba': reference = self.generate_bbacomm(cr, uid, [], vals['type'], reference_type, vals['partner_id'], '', context={})['value']['reference'] vals.update({ 'reference_type': reference_type or 'none', 'reference': reference, }) if reference_type == 'bba': if not reference: raise UserError(_('Empty BBA Structured Communication!' \ '\nPlease fill in a unique BBA Structured Communication.')) if self.check_bbacomm(reference): reference = re.sub('\D', '', reference) vals['reference'] = '+++' + reference[0:3] + '/' + reference[3:7] + '/' + reference[7:] + '+++' same_ids = self.search(cr, uid, [('type', '=', 'out_invoice'), ('reference_type', '=', 'bba'), ('reference', '=', vals['reference'])]) if same_ids: raise UserError(_('The BBA Structured Communication has already been used!' 
\ '\nPlease create manually a unique BBA Structured Communication.')) return super(account_invoice, self).create(cr, uid, vals, context=context) def write(self, cr, uid, ids, vals, context=None): if isinstance(ids, (int, long)): ids = [ids] for inv in self.browse(cr, uid, ids, context): if vals.has_key('reference_type'): reference_type = vals['reference_type'] else: reference_type = inv.reference_type or '' if reference_type == 'bba' and 'reference' in vals: if self.check_bbacomm(vals['reference']): reference = re.sub('\D', '', vals['reference']) vals['reference'] = '+++' + reference[0:3] + '/' + reference[3:7] + '/' + reference[7:] + '+++' same_ids = self.search(cr, uid, [('id', '!=', inv.id), ('type', '=', 'out_invoice'), ('reference_type', '=', 'bba'), ('reference', '=', vals['reference'])]) if same_ids: raise UserError(_('The BBA Structured Communication has already been used!' \ '\nPlease create manually a unique BBA Structured Communication.')) return super(account_invoice, self).write(cr, uid, ids, vals, context) def copy(self, cr, uid, id, default=None, context=None): default = default or {} invoice = self.browse(cr, uid, id, context=context) if invoice.type in ['out_invoice']: reference_type = invoice.reference_type or 'none' default['reference_type'] = reference_type if reference_type == 'bba': partner = invoice.partner_id default['reference'] = self.generate_bbacomm(cr, uid, id, invoice.type, reference_type, partner.id, '', context=context)['value']['reference'] return super(account_invoice, self).copy(cr, uid, id, default, context=context) _columns = { 'reference_type': fields.selection(_get_reference_type, 'Payment Reference', required=True, readonly=True), } _constraints = [ (_check_communication, 'Invalid BBA Structured Communication !', ['reference', 'reference_type']), ] account_invoice()
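# Standalone sketch of the Belgian "BBA" structured-communication rule enforced
# by check_bbacomm()/generate_bbacomm() above: the last two digits are the
# checksum of the first ten, computed as `base % 97`, with 97 substituted when
# the remainder is 0.
import re


def make_bba(base10):
    """base10: string of exactly 10 digits; returns '+++xxx/xxxx/xxxxx+++'."""
    mod = int(base10) % 97 or 97
    full = '%s%02d' % (base10, mod)
    return '+++%s/%s/%s+++' % (full[0:3], full[3:7], full[7:])


def is_valid_bba(comm):
    digits = re.sub(r'\D', '', comm or '')
    if len(digits) != 12:
        return False
    return (int(digits[:10]) % 97 or 97) == int(digits[-2:])


ref = make_bba('1234567890')
print(ref, is_valid_bba(ref))  # +++123/4567/89002+++ True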
agpl-3.0
-1,151,263,108,695,639,000
51.669903
177
0.499819
false
laurentgo/pants
src/python/pants/backend/jvm/repository.py
17
1337
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).

from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
                        unicode_literals, with_statement)

import os


class Repository(object):
  """An artifact repository, such as a maven repo."""

  def __init__(self, name=None, url=None, push_db_basedir=None, **kwargs):
    """
    :param string name: An identifying name for the repository.
    :param string url: Optional URL of the repository.
    :param string push_db_basedir: Push history file base directory.
    """
    self.name = name
    self.url = url
    self.push_db_basedir = push_db_basedir

  def push_db(self, target):
    return os.path.join(self.push_db_basedir, target.provides.org, target.provides.name,
                        'publish.properties')

  def __eq__(self, other):
    return (
      isinstance(other, Repository) and
      (self.name, self.url, self.push_db_basedir) == (other.name, other.url, other.push_db_basedir)
    )

  def __hash__(self):
    return hash((self.name, self.url, self.push_db_basedir))

  def __ne__(self, other):
    return not self.__eq__(other)

  def __repr__(self):
    return "{} -> {} ({})".format(self.name, self.url, self.push_db_basedir)
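# Illustrative only: how push_db() composes its path.  Repository is assumed
# importable per this file's path (pants.backend.jvm.repository); FakeTarget
# stands in for a real JVM target, and the URL/directories are made up.
from collections import namedtuple

from pants.backend.jvm.repository import Repository

Provides = namedtuple('Provides', ['org', 'name'])
FakeTarget = namedtuple('FakeTarget', ['provides'])

repo = Repository(name='public', url='https://repo.example.com/maven2',
                  push_db_basedir='/tmp/pushdb')
target = FakeTarget(provides=Provides(org='com.example', name='mylib'))
print(repo.push_db(target))  # /tmp/pushdb/com.example/mylib/publish.properties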
apache-2.0
-3,409,492,094,387,366,000
30.833333
99
0.622289
false
tanium/pytan
BUILD/doc/source/examples/ask_saved_question_by_name_sse_code.py
1
3070
# import the basic python packages we need import os import sys import tempfile import pprint import traceback # disable python from generating a .pyc file sys.dont_write_bytecode = True # change me to the path of pytan if this script is not running from EXAMPLES/PYTAN_API pytan_loc = "~/gh/pytan" pytan_static_path = os.path.join(os.path.expanduser(pytan_loc), 'lib') # Determine our script name, script dir my_file = os.path.abspath(sys.argv[0]) my_dir = os.path.dirname(my_file) # try to automatically determine the pytan lib directory by assuming it is in '../../lib/' parent_dir = os.path.dirname(my_dir) pytan_root_dir = os.path.dirname(parent_dir) lib_dir = os.path.join(pytan_root_dir, 'lib') # add pytan_loc and lib_dir to the PYTHONPATH variable path_adds = [lib_dir, pytan_static_path] [sys.path.append(aa) for aa in path_adds if aa not in sys.path] # import pytan import pytan # create a dictionary of arguments for the pytan handler handler_args = {} # establish our connection info for the Tanium Server handler_args['username'] = "Administrator" handler_args['password'] = "Tanium2015!" handler_args['host'] = "10.0.1.240" handler_args['port'] = "443" # optional # optional, level 0 is no output except warnings/errors # level 1 through 12 are more and more verbose handler_args['loglevel'] = 1 # optional, use a debug format for the logging output (uses two lines per log entry) handler_args['debugformat'] = False # optional, this saves all response objects to handler.session.ALL_REQUESTS_RESPONSES # very useful for capturing the full exchange of XML requests and responses handler_args['record_all_requests'] = True # instantiate a handler using all of the arguments in the handler_args dictionary print "...CALLING: pytan.handler() with args: {}".format(handler_args) handler = pytan.Handler(**handler_args) # print out the handler string print "...OUTPUT: handler string: {}".format(handler) # setup the arguments for the handler() class kwargs = {} kwargs["sse"] = True kwargs["qtype"] = u'saved' kwargs["name"] = u'Installed Applications' print "...CALLING: handler.ask with args: {}".format(kwargs) response = handler.ask(**kwargs) print "...OUTPUT: Type of response: ", type(response) print "...OUTPUT: Pretty print of response:" print pprint.pformat(response) print "...OUTPUT: Equivalent Question if it were to be asked in the Tanium Console: " print response['question_object'].query_text if response['question_results']: # call the export_obj() method to convert response to CSV and store it in out export_kwargs = {} export_kwargs['obj'] = response['question_results'] export_kwargs['export_format'] = 'csv' print "...CALLING: handler.export_obj() with args {}".format(export_kwargs) out = handler.export_obj(**export_kwargs) # trim the output if it is more than 15 lines long if len(out.splitlines()) > 15: out = out.splitlines()[0:15] out.append('..trimmed for brevity..') out = '\n'.join(out) print "...OUTPUT: CSV Results of response: " print out
mit
4,185,640,925,806,190,600
32.736264
90
0.71759
false
erjohnso/ansible
lib/ansible/modules/network/junos/junos_config.py
8
13496
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2017, Ansible by Red Hat, inc # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'network'} DOCUMENTATION = """ --- module: junos_config version_added: "2.1" author: "Peter Sprygada (@privateip)" short_description: Manage configuration on devices running Juniper JUNOS description: - This module provides an implementation for working with the active configuration running on Juniper JUNOS devices. It provides a set of arguments for loading configuration, performing rollback operations and zeroing the active configuration on the device. extends_documentation_fragment: junos options: lines: description: - This argument takes a list of C(set) or C(delete) configuration lines to push into the remote device. Each line must start with either C(set) or C(delete). This argument is mutually exclusive with the I(src) argument. required: false default: null src: description: - The I(src) argument provides a path to the configuration file to load into the remote system. The path can either be a full system path to the configuration file if the value starts with / or relative to the root of the implemented role or playbook. This argument is mutually exclusive with the I(lines) argument. required: false default: null version_added: "2.2" src_format: description: - The I(src_format) argument specifies the format of the configuration found int I(src). If the I(src_format) argument is not provided, the module will attempt to determine the format of the configuration file specified in I(src). required: false default: null choices: ['xml', 'set', 'text', 'json'] version_added: "2.2" rollback: description: - The C(rollback) argument instructs the module to rollback the current configuration to the identifier specified in the argument. If the specified rollback identifier does not exist on the remote device, the module will fail. To rollback to the most recent commit, set the C(rollback) argument to 0. required: false default: null zeroize: description: - The C(zeroize) argument is used to completely sanitize the remote device configuration back to initial defaults. This argument will effectively remove all current configuration statements on the remote device. required: false default: null confirm: description: - The C(confirm) argument will configure a time out value for the commit to be confirmed before it is automatically rolled back. If the C(confirm) argument is set to False, this argument is silently ignored. If the value for this argument is set to 0, the commit is confirmed immediately. required: false default: 0 comment: description: - The C(comment) argument specifies a text string to be used when committing the configuration. If the C(confirm) argument is set to False, this argument is silently ignored. required: false default: configured by junos_config replace: description: - The C(replace) argument will instruct the remote device to replace the current configuration hierarchy with the one specified in the corresponding hierarchy of the source configuration loaded from this module. - Note this argument should be considered deprecated. To achieve the equivalent, set the I(update) argument to C(replace). This argument will be removed in a future release. The C(replace) and C(update) argument is mutually exclusive. 
required: false choices: ['yes', 'no'] default: false backup: description: - This argument will cause the module to create a full backup of the current C(running-config) from the remote device before any changes are made. The backup file is written to the C(backup) folder in the playbook root directory. If the directory does not exist, it is created. required: false default: no choices: ['yes', 'no'] version_added: "2.2" update: description: - This argument will decide how to load the configuration data particulary when the candidate configuration and loaded configuration contain conflicting statements. Following are accepted values. C(merge) combines the data in the loaded configuration with the candidate configuration. If statements in the loaded configuration conflict with statements in the candidate configuration, the loaded statements replace the candidate ones. C(override) discards the entire candidate configuration and replaces it with the loaded configuration. C(replace) substitutes each hierarchy level in the loaded configuration for the corresponding level. required: false default: merge choices: ['merge', 'override', 'replace'] version_added: "2.3" confirm_commit: description: - This argument will execute commit operation on remote device. It can be used to confirm a previous commit. required: false default: no choices: ['yes', 'no'] version_added: "2.4" requirements: - ncclient (>=v0.5.2) notes: - This module requires the netconf system service be enabled on the remote device being managed. - Loading JSON-formatted configuration I(json) is supported starting in Junos OS Release 16.1 onwards. - Tested against vSRX JUNOS version 15.1X49-D15.4, vqfx-10000 JUNOS Version 15.1X53-D60.4. """ EXAMPLES = """ - name: load configure file into device junos_config: src: srx.cfg comment: update config provider: "{{ netconf }}" - name: load configure lines into device junos_config: lines: - set interfaces ge-0/0/1 unit 0 description "Test interface" - set vlans vlan01 description "Test vlan" comment: update config provider: "{{ netconf }}" - name: rollback the configuration to id 10 junos_config: rollback: 10 provider: "{{ netconf }}" - name: zero out the current configuration junos_config: zeroize: yes provider: "{{ netconf }}" - name: confirm a previous commit junos_config: confirm_commit: yes provider: "{{ netconf }}" """ RETURN = """ backup_path: description: The full path to the backup file returned: when backup is yes type: string sample: /playbooks/ansible/backup/config.2016-07-16@22:28:34 """ import re import json from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.junos import get_diff, load_config, get_configuration from ansible.module_utils.junos import commit_configuration, discard_changes, locked_config from ansible.module_utils.junos import junos_argument_spec, load_configuration from ansible.module_utils.junos import check_args as junos_check_args from ansible.module_utils.netconf import send_request from ansible.module_utils.six import string_types from ansible.module_utils._text import to_native try: from lxml.etree import Element, fromstring except ImportError: from xml.etree.ElementTree import Element, fromstring try: from lxml.etree import ParseError except ImportError: try: from xml.etree.ElementTree import ParseError except ImportError: # for Python < 2.7 from xml.parsers.expat import ExpatError ParseError = ExpatError USE_PERSISTENT_CONNECTION = True DEFAULT_COMMENT = 'configured by junos_config' def check_args(module, warnings): 
junos_check_args(module, warnings) if module.params['replace'] is not None: module.fail_json(msg='argument replace is deprecated, use update') def zeroize(ele): return send_request(ele, Element('request-system-zeroize')) def rollback(ele, id='0'): return get_diff(ele, id) def guess_format(config): try: json.loads(config) return 'json' except ValueError: pass try: fromstring(config) return 'xml' except ParseError: pass if config.startswith('set') or config.startswith('delete'): return 'set' return 'text' def filter_delete_statements(module, candidate): reply = get_configuration(module, format='set') match = reply.find('.//configuration-set') if match is None: # Could not find configuration-set in reply, perhaps device does not support it? return candidate config = to_native(match.text, encoding='latin-1') modified_candidate = candidate[:] for index, line in reversed(list(enumerate(candidate))): if line.startswith('delete'): newline = re.sub('^delete', 'set', line) if newline not in config: del modified_candidate[index] return modified_candidate def configure_device(module, warnings, candidate): kwargs = {} config_format = None if module.params['src']: config_format = module.params['src_format'] or guess_format(str(candidate)) if config_format == 'set': kwargs.update({'format': 'text', 'action': 'set'}) else: kwargs.update({'format': config_format, 'action': module.params['update']}) if isinstance(candidate, string_types): candidate = candidate.split('\n') # this is done to filter out `delete ...` statements which map to # nothing in the config as that will cause an exception to be raised if any((module.params['lines'], config_format == 'set')): candidate = filter_delete_statements(module, candidate) kwargs['format'] = 'text' kwargs['action'] = 'set' return load_config(module, candidate, warnings, **kwargs) def main(): """ main entry point for module execution """ argument_spec = dict( lines=dict(type='list'), src=dict(type='path'), src_format=dict(choices=['xml', 'text', 'set', 'json']), # update operations update=dict(default='merge', choices=['merge', 'override', 'replace', 'update']), # deprecated replace in Ansible 2.3 replace=dict(type='bool'), confirm=dict(default=0, type='int'), comment=dict(default=DEFAULT_COMMENT), confirm_commit=dict(type='bool', default=False), # config operations backup=dict(type='bool', default=False), rollback=dict(type='int'), zeroize=dict(default=False, type='bool'), ) argument_spec.update(junos_argument_spec) mutually_exclusive = [('lines', 'src', 'rollback', 'zeroize')] module = AnsibleModule(argument_spec=argument_spec, mutually_exclusive=mutually_exclusive, supports_check_mode=True) warnings = list() check_args(module, warnings) candidate = module.params['lines'] or module.params['src'] commit = not module.check_mode result = {'changed': False, 'warnings': warnings} if module.params['backup']: for conf_format in ['set', 'text']: reply = get_configuration(module, format=conf_format) match = reply.find('.//configuration-%s' % conf_format) if match is not None: break else: module.fail_json(msg='unable to retrieve device configuration') result['__backup__'] = match.text.strip() rollback_id = module.params['rollback'] if rollback_id: diff = rollback(module, rollback_id) if commit: kwargs = { 'comment': module.params['comment'] } with locked_config(module): load_configuration(module, rollback=rollback_id) commit_configuration(module, **kwargs) if module._diff: result['diff'] = {'prepared': diff} result['changed'] = True elif module.params['zeroize']: if 
commit: zeroize(module) result['changed'] = True else: if candidate: with locked_config(module): diff = configure_device(module, warnings, candidate) if diff: if commit: kwargs = { 'comment': module.params['comment'] } if module.params['confirm'] > 0: kwargs.update({ 'confirm': True, 'confirm_timeout': module.params['confirm'] }) commit_configuration(module, **kwargs) else: discard_changes(module) result['changed'] = True if module._diff: result['diff'] = {'prepared': diff} elif module.params['confirm_commit']: with locked_config(module): # confirm a previous commit commit_configuration(module) result['changed'] = True module.exit_json(**result) if __name__ == '__main__': main()
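# The format detection in guess_format() above, exercised on a few snippets.
# This assumes the helper is in scope (copied out or imported); the expected
# classifications are shown as comments.
print(guess_format('{"system": {"host-name": "r1"}}'))           # json
print(guess_format('<configuration><system/></configuration>'))  # xml
print(guess_format('set system host-name r1'))                   # set
print(guess_format('system { host-name r1; }'))                  # text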
gpl-3.0
2,989,866,363,501,046,300
32.572139
92
0.642042
false
vilorious/pyload
module/network/Browser.py
40
4190
#!/usr/bin/env python # -*- coding: utf-8 -*- from logging import getLogger from HTTPRequest import HTTPRequest from HTTPDownload import HTTPDownload class Browser(object): __slots__ = ("log", "options", "bucket", "cj", "_size", "http", "dl") def __init__(self, bucket=None, options={}): self.log = getLogger("log") self.options = options #holds pycurl options self.bucket = bucket self.cj = None # needs to be setted later self._size = 0 self.renewHTTPRequest() self.dl = None def renewHTTPRequest(self): if hasattr(self, "http"): self.http.close() self.http = HTTPRequest(self.cj, self.options) def setLastURL(self, val): self.http.lastURL = val # tunnel some attributes from HTTP Request to Browser lastEffectiveURL = property(lambda self: self.http.lastEffectiveURL) lastURL = property(lambda self: self.http.lastURL, setLastURL) code = property(lambda self: self.http.code) cookieJar = property(lambda self: self.cj) def setCookieJar(self, cj): self.cj = cj self.http.cj = cj @property def speed(self): if self.dl: return self.dl.speed return 0 @property def size(self): if self._size: return self._size if self.dl: return self.dl.size return 0 @property def arrived(self): if self.dl: return self.dl.arrived return 0 @property def percent(self): if not self.size: return 0 return (self.arrived * 100) / self.size def clearCookies(self): if self.cj: self.cj.clear() self.http.clearCookies() def clearReferer(self): self.http.lastURL = None def abortDownloads(self): self.http.abort = True if self.dl: self._size = self.dl.size self.dl.abort = True def httpDownload(self, url, filename, get={}, post={}, ref=True, cookies=True, chunks=1, resume=False, progressNotify=None, disposition=False): """ this can also download ftp """ self._size = 0 self.dl = HTTPDownload(url, filename, get, post, self.lastEffectiveURL if ref else None, self.cj if cookies else None, self.bucket, self.options, progressNotify, disposition) name = self.dl.download(chunks, resume) self._size = self.dl.size self.dl = None return name def load(self, *args, **kwargs): """ retrieves page """ return self.http.load(*args, **kwargs) def putHeader(self, name, value): """ add a header to the request """ self.http.putHeader(name, value) def addAuth(self, pwd): """Adds user and pw for http auth :param pwd: string, user:password """ self.options["auth"] = pwd self.renewHTTPRequest() #we need a new request def removeAuth(self): if "auth" in self.options: del self.options["auth"] self.renewHTTPRequest() def setOption(self, name, value): """Adds an option to the request, see HTTPRequest for existing ones""" self.options[name] = value def deleteOption(self, name): if name in self.options: del self.options[name] def clearHeaders(self): self.http.clearHeaders() def close(self): """ cleanup """ if hasattr(self, "http"): self.http.close() del self.http if hasattr(self, "dl"): del self.dl if hasattr(self, "cj"): del self.cj if __name__ == "__main__": browser = Browser()#proxies={"socks5": "localhost:5000"}) ip = "http://www.whatismyip.com/automation/n09230945.asp" #browser.getPage("http://google.com/search?q=bar") #browser.getPage("https://encrypted.google.com/") #print browser.getPage(ip) #print browser.getRedirectLocation("http://google.com/") #browser.getPage("https://encrypted.google.com/") #browser.getPage("http://google.com/search?q=bar") browser.httpDownload("http://speedtest.netcologne.de/test_10mb.bin", "test_10mb.bin")
gpl-3.0
-3,494,078,334,252,524,500
27.69863
106
0.597375
false
gregdek/ansible
lib/ansible/modules/network/aci/aci_contract.py
12
8772
#!/usr/bin/python # -*- coding: utf-8 -*- # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'certified'} DOCUMENTATION = r''' --- module: aci_contract short_description: Manage contract resources (vz:BrCP) description: - Manage Contract resources on Cisco ACI fabrics. notes: - This module does not manage Contract Subjects, see M(aci_contract_subject) to do this. Contract Subjects can still be removed using this module. - The C(tenant) used must exist before using this module in your playbook. The M(aci_tenant) module can be used for this. seealso: - module: aci_contract_subject - module: aci_tenant - name: APIC Management Information Model reference description: More information about the internal APIC class B(vz:BrCP). link: https://developer.cisco.com/docs/apic-mim-ref/ author: - Dag Wieers (@dagwieers) version_added: '2.4' options: contract: description: - The name of the contract. type: str required: yes aliases: [ contract_name, name ] description: description: - Description for the contract. type: str aliases: [ descr ] tenant: description: - The name of the tenant. type: str required: yes aliases: [ tenant_name ] scope: description: - The scope of a service contract. - The APIC defaults to C(context) when unset during creation. type: str choices: [ application-profile, context, global, tenant ] priority: description: - The desired QoS class to be used. - The APIC defaults to C(unspecified) when unset during creation. type: str choices: [ level1, level2, level3, unspecified ] dscp: description: - The target Differentiated Service (DSCP) value. - The APIC defaults to C(unspecified) when unset during creation. type: str choices: [ AF11, AF12, AF13, AF21, AF22, AF23, AF31, AF32, AF33, AF41, AF42, AF43, CS0, CS1, CS2, CS3, CS4, CS5, CS6, CS7, EF, VA, unspecified ] aliases: [ target ] state: description: - Use C(present) or C(absent) for adding or removing. - Use C(query) for listing an object or multiple objects. 
type: str choices: [ absent, present, query ] default: present extends_documentation_fragment: aci ''' EXAMPLES = r''' - name: Add a new contract aci_contract: host: apic username: admin password: SomeSecretPassword tenant: production contract: web_to_db description: Communication between web-servers and database scope: application-profile state: present delegate_to: localhost - name: Remove an existing contract aci_contract: host: apic username: admin password: SomeSecretPassword tenant: production contract: web_to_db state: absent delegate_to: localhost - name: Query a specific contract aci_contract: host: apic username: admin password: SomeSecretPassword tenant: production contract: web_to_db state: query delegate_to: localhost register: query_result - name: Query all contracts aci_contract: host: apic username: admin password: SomeSecretPassword state: query delegate_to: localhost register: query_result ''' RETURN = r''' current: description: The existing configuration from the APIC after the module has finished returned: success type: list sample: [ { "fvTenant": { "attributes": { "descr": "Production environment", "dn": "uni/tn-production", "name": "production", "nameAlias": "", "ownerKey": "", "ownerTag": "" } } } ] error: description: The error information as returned from the APIC returned: failure type: dict sample: { "code": "122", "text": "unknown managed object class foo" } raw: description: The raw output returned by the APIC REST API (xml or json) returned: parse error type: str sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>' sent: description: The actual/minimal configuration pushed to the APIC returned: info type: list sample: { "fvTenant": { "attributes": { "descr": "Production environment" } } } previous: description: The original configuration from the APIC before the module has started returned: info type: list sample: [ { "fvTenant": { "attributes": { "descr": "Production", "dn": "uni/tn-production", "name": "production", "nameAlias": "", "ownerKey": "", "ownerTag": "" } } } ] proposed: description: The assembled configuration from the user-provided parameters returned: info type: dict sample: { "fvTenant": { "attributes": { "descr": "Production environment", "name": "production" } } } filter_string: description: The filter string used for the request returned: failure or debug type: str sample: ?rsp-prop-include=config-only method: description: The HTTP method used for the request to the APIC returned: failure or debug type: str sample: POST response: description: The HTTP response from the APIC returned: failure or debug type: str sample: OK (30 bytes) status: description: The HTTP status from the APIC returned: failure or debug type: int sample: 200 url: description: The HTTP url used for the request to the APIC returned: failure or debug type: str sample: https://10.11.12.13/api/mo/uni/tn-production.json ''' from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec from ansible.module_utils.basic import AnsibleModule def main(): argument_spec = aci_argument_spec() argument_spec.update( contract=dict(type='str', required=False, aliases=['contract_name', 'name']), # Not required for querying all objects tenant=dict(type='str', required=False, aliases=['tenant_name']), # Not required for querying all objects description=dict(type='str', aliases=['descr']), scope=dict(type='str', choices=['application-profile', 'context', 'global', 'tenant']), 
priority=dict(type='str', choices=['level1', 'level2', 'level3', 'unspecified']), # No default provided on purpose dscp=dict(type='str', choices=['AF11', 'AF12', 'AF13', 'AF21', 'AF22', 'AF23', 'AF31', 'AF32', 'AF33', 'AF41', 'AF42', 'AF43', 'CS0', 'CS1', 'CS2', 'CS3', 'CS4', 'CS5', 'CS6', 'CS7', 'EF', 'VA', 'unspecified'], aliases=['target']), # No default provided on purpose state=dict(type='str', default='present', choices=['absent', 'present', 'query']), ) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True, required_if=[ ['state', 'absent', ['contract', 'tenant']], ['state', 'present', ['contract', 'tenant']], ], ) contract = module.params['contract'] description = module.params['description'] scope = module.params['scope'] priority = module.params['priority'] dscp = module.params['dscp'] state = module.params['state'] tenant = module.params['tenant'] aci = ACIModule(module) aci.construct_url( root_class=dict( aci_class='fvTenant', aci_rn='tn-{0}'.format(tenant), module_object=tenant, target_filter={'name': tenant}, ), subclass_1=dict( aci_class='vzBrCP', aci_rn='brc-{0}'.format(contract), module_object=contract, target_filter={'name': contract}, ), ) aci.get_existing() if state == 'present': aci.payload( aci_class='vzBrCP', class_config=dict( name=contract, descr=description, scope=scope, prio=priority, targetDscp=dscp, ), ) aci.get_diff(aci_class='vzBrCP') aci.post_config() elif state == 'absent': aci.delete_config() aci.exit_json() if __name__ == "__main__": main()
gpl-3.0
3,903,769,901,249,731,600
27.666667
148
0.603397
false
atsaki/libcloud
example_loadbalancer.py
58
2483
#!/usr/bin/env python # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import time from libcloud.loadbalancer.base import Member, Algorithm from libcloud.loadbalancer.types import Provider, State from libcloud.loadbalancer.providers import get_driver def main(): cls = get_driver(Provider.RACKSPACE) driver = cls('username', 'api key', region='ord') balancers = driver.list_balancers() print(balancers) # creating a balancer which balances traffic across two # nodes: 192.168.86.1:80 and 192.168.86.2:8080. Balancer # itself listens on port 80/tcp new_balancer_name = 'testlb' + os.urandom(4).encode('hex') members = (Member(None, '192.168.86.1', 80), Member(None, '192.168.86.2', 8080)) new_balancer = driver.create_balancer(name=new_balancer_name, algorithm=Algorithm.ROUND_ROBIN, port=80, protocol='http', members=members) print(new_balancer) # wait for balancer to become ready # NOTE: in real life code add timeout to not end up in # endless loop when things go wrong on provider side while True: balancer = driver.get_balancer(balancer_id=new_balancer.id) if balancer.state == State.RUNNING: break print('sleeping for 30 seconds for balancers to become ready') time.sleep(30) # fetch list of members members = balancer.list_members() print(members) # remove first member balancer.detach_member(members[0]) # remove the balancer driver.destroy_balancer(new_balancer) if __name__ == '__main__': main()
apache-2.0
-4,751,802,172,801,826,000
33.971831
74
0.662908
false
varunarya10/basicdb
basicdb/utils.py
3
4735
import re BATCH_QUERY_REGEX = re.compile(r'Item\.(\d+)\.(.*)') PUT_ATTRIBUTE_QUERY_REGEX = re.compile(r'Attribute\.(\d+)\.(Name|Value|Replace)') DELETE_QUERY_ARG_REGEX = re.compile(r'Attribute\.(\d+)\.(Name|Value)') EXPECTED_QUERY_ARG_REGEX = re.compile(r'Expected\.(\d+)\.(Name|Value|Exists)') def extract_numbered_args(regex, params): attrs = {} for (k, v) in params.iteritems(): match = regex.match(k) if not match: continue idx, elem = match.groups() if idx not in attrs: attrs[idx] = {} attrs[idx][elem] = v return attrs def extract_batch_additions_and_replacements_from_query_params(req): args = extract_numbered_args(BATCH_QUERY_REGEX, req._params) additions = {} replacements = {} for data in args.values(): if 'ItemName' in data: item_name = data['ItemName'] subargs = extract_numbered_args(PUT_ATTRIBUTE_QUERY_REGEX, data) for subdata in subargs.values(): if 'Name' in subdata and 'Value' in subdata: attr_name = subdata['Name'] attr_value = subdata['Value'] if 'Replace' in subdata and subdata['Replace'] == 'true': if item_name not in replacements: replacements[item_name] = {} if attr_name not in replacements[item_name]: replacements[item_name][attr_name] = set() replacements[item_name][attr_name].add(attr_value) else: if item_name not in additions: additions[item_name] = {} if attr_name not in additions[item_name]: additions[item_name][attr_name] = set() additions[item_name][attr_name].add(attr_value) return additions, replacements def extract_batch_deletions_from_query_params(req): args = extract_numbered_args(BATCH_QUERY_REGEX, req._params) deletions = {} for data in args.values(): if 'ItemName' in data: item_name = data['ItemName'] subargs = extract_numbered_args(DELETE_QUERY_ARG_REGEX, data) for subdata in subargs.values(): if 'Name' not in subdata: continue attr_name = subdata['Name'] if item_name not in deletions: deletions[item_name] = {} if attr_name not in deletions[item_name]: deletions[item_name][attr_name] = set() if 'Value' in subdata: deletions[item_name][attr_name].add(subdata['Value']) else: import basicdb deletions[item_name][attr_name].add(basicdb.AllAttributes) return deletions def extract_additions_and_replacements_from_query_params(req): args = extract_numbered_args(PUT_ATTRIBUTE_QUERY_REGEX, req._params) additions = {} replacements = {} for idx, data in args.iteritems(): if 'Name' in args[idx] and 'Value' in args[idx]: name = args[idx]['Name'] value = args[idx]['Value'] if 'Replace' in args[idx] and args[idx]['Replace'] == 'true': if name not in replacements: replacements[name] = set() replacements[name].add(value) else: if name not in additions: additions[name] = set() additions[name].add(value) return additions, replacements def extract_expectations_from_query_params(req): args = extract_numbered_args(EXPECTED_QUERY_ARG_REGEX, req._params) expectations = set() for data in args.values(): if 'Name' in data: if 'Value' in data: expected_value = data['Value'] elif 'Exists' in data: val = data['Exists'] expected_value = not (val == 'false') expectations.add((data['Name'], expected_value)) return expectations def extract_deletions_from_query_params(req): args = extract_numbered_args(DELETE_QUERY_ARG_REGEX, req._params) deletions = {} for data in args.values(): if 'Name' not in data: continue attr_name = data['Name'] if attr_name not in deletions: deletions[attr_name] = set() if 'Value' in data: deletions[attr_name].add(data['Value']) else: import basicdb deletions[attr_name].add(basicdb.AllAttributes) return deletions
apache-2.0
1,404,700,660,640,621,300
36.88
81
0.540655
false
mfherbst/spack
var/spack/repos/builtin/packages/pigz/package.py
4
1886
############################################################################## # Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. # # This file is part of Spack. # Created by Todd Gamblin, [email protected], All rights reserved. # LLNL-CODE-647188 # # For details, see https://github.com/spack/spack # Please also see the NOTICE and LICENSE files for our notice and the LGPL. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License (as # published by the Free Software Foundation) version 2.1, February 1999. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and # conditions of the GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## from spack import * class Pigz(MakefilePackage): """A parallel implementation of gzip for modern multi-processor, multi-core machines.""" homepage = "http://zlib.net/pigz/" url = "https://github.com/madler/pigz/archive/v2.3.4.tar.gz" version('2.4', '3c8a601db141d3013ef9fe5f2daaf73f') version('2.3.4', 'c109057050b15edf3eb9bb4d0805235e') depends_on('zlib') def build(self, spec, prefix): make() def install(self, spec, prefix): mkdirp(prefix.bin) mkdirp(prefix.man.man1) install('pigz', "%s/pigz" % prefix.bin) install('pigz.1', "%s/pigz.1" % prefix.man.man1)
lgpl-2.1
8,219,573,443,659,824,000
39.12766
78
0.657476
false
liberatorqjw/scikit-learn
sklearn/tests/test_multiclass.py
8
21910
import numpy as np import scipy.sparse as sp from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_false from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_warns from sklearn.utils.testing import ignore_warnings from sklearn.utils.testing import assert_greater from sklearn.multiclass import OneVsRestClassifier from sklearn.multiclass import OneVsOneClassifier from sklearn.multiclass import OutputCodeClassifier from sklearn.multiclass import fit_ovr from sklearn.multiclass import fit_ovo from sklearn.multiclass import fit_ecoc from sklearn.multiclass import predict_ovr from sklearn.multiclass import predict_ovo from sklearn.multiclass import predict_ecoc from sklearn.multiclass import predict_proba_ovr from sklearn.metrics import precision_score from sklearn.metrics import recall_score from sklearn.preprocessing import LabelBinarizer from sklearn.svm import LinearSVC, SVC from sklearn.naive_bayes import MultinomialNB from sklearn.linear_model import (LinearRegression, Lasso, ElasticNet, Ridge, Perceptron, LogisticRegression) from sklearn.tree import DecisionTreeClassifier from sklearn.grid_search import GridSearchCV from sklearn.pipeline import Pipeline from sklearn import svm from sklearn import datasets from sklearn.externals.six.moves import zip iris = datasets.load_iris() rng = np.random.RandomState(0) perm = rng.permutation(iris.target.size) iris.data = iris.data[perm] iris.target = iris.target[perm] n_classes = 3 def test_ovr_exceptions(): ovr = OneVsRestClassifier(LinearSVC(random_state=0)) assert_raises(ValueError, ovr.predict, []) with ignore_warnings(): assert_raises(ValueError, predict_ovr, [LinearSVC(), MultinomialNB()], LabelBinarizer(), []) # Fail on multioutput data assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit, np.array([[1, 0], [0, 1]]), np.array([[1, 2], [3, 1]])) assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit, np.array([[1, 0], [0, 1]]), np.array([[1.5, 2.4], [3.1, 0.8]])) def test_ovr_fit_predict(): # A classifier which implements decision_function. ovr = OneVsRestClassifier(LinearSVC(random_state=0)) pred = ovr.fit(iris.data, iris.target).predict(iris.data) assert_equal(len(ovr.estimators_), n_classes) clf = LinearSVC(random_state=0) pred2 = clf.fit(iris.data, iris.target).predict(iris.data) assert_equal(np.mean(iris.target == pred), np.mean(iris.target == pred2)) # A classifier which implements predict_proba. 
ovr = OneVsRestClassifier(MultinomialNB()) pred = ovr.fit(iris.data, iris.target).predict(iris.data) assert_greater(np.mean(iris.target == pred), 0.65) def test_ovr_fit_predict_sparse(): for sparse in [sp.csr_matrix, sp.csc_matrix, sp.coo_matrix, sp.dok_matrix, sp.lil_matrix]: base_clf = MultinomialNB(alpha=1) X, Y = datasets.make_multilabel_classification(n_samples=100, n_features=20, n_classes=5, n_labels=3, length=50, allow_unlabeled=True, return_indicator=True, random_state=0) X_train, Y_train = X[:80], Y[:80] X_test, Y_test = X[80:], Y[80:] clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train) Y_pred = clf.predict(X_test) clf_sprs = OneVsRestClassifier(base_clf).fit(X_train, sparse(Y_train)) Y_pred_sprs = clf_sprs.predict(X_test) assert_true(clf.multilabel_) assert_true(sp.issparse(Y_pred_sprs)) assert_array_equal(Y_pred_sprs.toarray(), Y_pred) # Test predict_proba Y_proba = clf_sprs.predict_proba(X_test) # predict assigns a label if the probability that the # sample has the label is greater than 0.5. pred = Y_proba > .5 assert_array_equal(pred, Y_pred_sprs.toarray()) # Test decision_function clf_sprs = OneVsRestClassifier(svm.SVC()).fit(X_train, sparse(Y_train)) dec_pred = (clf_sprs.decision_function(X_test) > 0).astype(int) assert_array_equal(dec_pred, clf_sprs.predict(X_test).toarray()) def test_ovr_always_present(): """Test that ovr works with classes that are always present or absent.""" # Note: tests is the case where _ConstantPredictor is utilised X = np.ones((10, 2)) X[:5, :] = 0 # Build an indicator matrix where two features are always on. # As list of lists, it would be: [[int(i >= 5), 2, 3] for i in range(10)] y = np.zeros((10, 3)) y[5:, 0] = 1 y[:, 1] = 1 y[:, 2] = 1 ovr = OneVsRestClassifier(LogisticRegression()) assert_warns(UserWarning, ovr.fit, X, y) y_pred = ovr.predict(X) assert_array_equal(np.array(y_pred), np.array(y)) y_pred = ovr.decision_function(X) assert_equal(np.unique(y_pred[:, -2:]), 1) y_pred = ovr.predict_proba(X) assert_array_equal(y_pred[:, -1], np.ones(X.shape[0])) # y has a constantly absent label y = np.zeros((10, 2)) y[5:, 0] = 1 # variable label ovr = OneVsRestClassifier(LogisticRegression()) assert_warns(UserWarning, ovr.fit, X, y) y_pred = ovr.predict_proba(X) assert_array_equal(y_pred[:, -1], np.zeros(X.shape[0])) def test_ovr_multiclass(): # Toy dataset where features correspond directly to labels. X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]]) y = ["eggs", "spam", "ham", "eggs", "ham"] Y = np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0], [0, 0, 1], [1, 0, 0]]) classes = set("ham eggs spam".split()) for base_clf in (MultinomialNB(), LinearSVC(random_state=0), LinearRegression(), Ridge(), ElasticNet()): clf = OneVsRestClassifier(base_clf).fit(X, y) assert_equal(set(clf.classes_), classes) y_pred = clf.predict(np.array([[0, 0, 4]]))[0] assert_equal(set(y_pred), set("eggs")) # test input as label indicator matrix clf = OneVsRestClassifier(base_clf).fit(X, Y) y_pred = clf.predict([[0, 0, 4]])[0] assert_array_equal(y_pred, [0, 0, 1]) def test_ovr_binary(): # Toy dataset where features correspond directly to labels. 
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]]) y = ["eggs", "spam", "spam", "eggs", "spam"] Y = np.array([[0, 1, 1, 0, 1]]).T classes = set("eggs spam".split()) def conduct_test(base_clf, test_predict_proba=False): clf = OneVsRestClassifier(base_clf).fit(X, y) assert_equal(set(clf.classes_), classes) y_pred = clf.predict(np.array([[0, 0, 4]]))[0] assert_equal(set(y_pred), set("eggs")) if test_predict_proba: X_test = np.array([[0, 0, 4]]) probabilities = clf.predict_proba(X_test) assert_equal(2, len(probabilities[0])) assert_equal(clf.classes_[np.argmax(probabilities, axis=1)], clf.predict(X_test)) # test input as label indicator matrix clf = OneVsRestClassifier(base_clf).fit(X, Y) y_pred = clf.predict([[3, 0, 0]])[0] assert_equal(y_pred, 1) for base_clf in (LinearSVC(random_state=0), LinearRegression(), Ridge(), ElasticNet()): conduct_test(base_clf) for base_clf in (MultinomialNB(), SVC(probability=True), LogisticRegression()): conduct_test(base_clf, test_predict_proba=True) @ignore_warnings def test_ovr_multilabel(): # Toy dataset where features correspond directly to labels. X = np.array([[0, 4, 5], [0, 5, 0], [3, 3, 3], [4, 0, 6], [6, 0, 0]]) y = [["spam", "eggs"], ["spam"], ["ham", "eggs", "spam"], ["ham", "eggs"], ["ham"]] # y = [[1, 2], [1], [0, 1, 2], [0, 2], [0]] Y = np.array([[0, 1, 1], [0, 1, 0], [1, 1, 1], [1, 0, 1], [1, 0, 0]]) classes = set("ham eggs spam".split()) for base_clf in (MultinomialNB(), LinearSVC(random_state=0), LinearRegression(), Ridge(), ElasticNet(), Lasso(alpha=0.5)): # test input as lists of tuples clf = assert_warns(DeprecationWarning, OneVsRestClassifier(base_clf).fit, X, y) assert_equal(set(clf.classes_), classes) y_pred = clf.predict([[0, 4, 4]])[0] assert_equal(set(y_pred), set(["spam", "eggs"])) assert_true(clf.multilabel_) # test input as label indicator matrix clf = OneVsRestClassifier(base_clf).fit(X, Y) y_pred = clf.predict([[0, 4, 4]])[0] assert_array_equal(y_pred, [0, 1, 1]) assert_true(clf.multilabel_) def test_ovr_fit_predict_svc(): ovr = OneVsRestClassifier(svm.SVC()) ovr.fit(iris.data, iris.target) assert_equal(len(ovr.estimators_), 3) assert_greater(ovr.score(iris.data, iris.target), .9) def test_ovr_multilabel_dataset(): base_clf = MultinomialNB(alpha=1) for au, prec, recall in zip((True, False), (0.51, 0.66), (0.51, 0.80)): X, Y = datasets.make_multilabel_classification(n_samples=100, n_features=20, n_classes=5, n_labels=2, length=50, allow_unlabeled=au, return_indicator=True, random_state=0) X_train, Y_train = X[:80], Y[:80] X_test, Y_test = X[80:], Y[80:] clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train) Y_pred = clf.predict(X_test) assert_true(clf.multilabel_) assert_almost_equal(precision_score(Y_test, Y_pred, average="micro"), prec, decimal=2) assert_almost_equal(recall_score(Y_test, Y_pred, average="micro"), recall, decimal=2) def test_ovr_multilabel_predict_proba(): base_clf = MultinomialNB(alpha=1) for au in (False, True): X, Y = datasets.make_multilabel_classification(n_samples=100, n_features=20, n_classes=5, n_labels=3, length=50, allow_unlabeled=au, return_indicator=True, random_state=0) X_train, Y_train = X[:80], Y[:80] X_test, Y_test = X[80:], Y[80:] clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train) # decision function only estimator. Fails in current implementation. decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train) assert_raises(AttributeError, decision_only.predict_proba, X_test) # Estimator with predict_proba disabled, depending on parameters. 
decision_only = OneVsRestClassifier(svm.SVC(probability=False)) decision_only.fit(X_train, Y_train) assert_raises(AttributeError, decision_only.predict_proba, X_test) Y_pred = clf.predict(X_test) Y_proba = clf.predict_proba(X_test) # predict assigns a label if the probability that the # sample has the label is greater than 0.5. pred = Y_proba > .5 assert_array_equal(pred, Y_pred) def test_ovr_single_label_predict_proba(): base_clf = MultinomialNB(alpha=1) X, Y = iris.data, iris.target X_train, Y_train = X[:80], Y[:80] X_test, Y_test = X[80:], Y[80:] clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train) # decision function only estimator. Fails in current implementation. decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train) assert_raises(AttributeError, decision_only.predict_proba, X_test) Y_pred = clf.predict(X_test) Y_proba = clf.predict_proba(X_test) assert_almost_equal(Y_proba.sum(axis=1), 1.0) # predict assigns a label if the probability that the # sample has the label is greater than 0.5. pred = np.array([l.argmax() for l in Y_proba]) assert_false((pred - Y_pred).any()) def test_ovr_multilabel_decision_function(): X, Y = datasets.make_multilabel_classification(n_samples=100, n_features=20, n_classes=5, n_labels=3, length=50, allow_unlabeled=True, return_indicator=True, random_state=0) X_train, Y_train = X[:80], Y[:80] X_test, Y_test = X[80:], Y[80:] clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train) assert_array_equal((clf.decision_function(X_test) > 0).astype(int), clf.predict(X_test)) def test_ovr_single_label_decision_function(): X, Y = datasets.make_classification(n_samples=100, n_features=20, random_state=0) X_train, Y_train = X[:80], Y[:80] X_test, Y_test = X[80:], Y[80:] clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train) assert_array_equal(clf.decision_function(X_test).ravel() > 0, clf.predict(X_test)) def test_ovr_gridsearch(): ovr = OneVsRestClassifier(LinearSVC(random_state=0)) Cs = [0.1, 0.5, 0.8] cv = GridSearchCV(ovr, {'estimator__C': Cs}) cv.fit(iris.data, iris.target) best_C = cv.best_estimator_.estimators_[0].C assert_true(best_C in Cs) def test_ovr_pipeline(): # Test with pipeline of length one # This test is needed because the multiclass estimators may fail to detect # the presence of predict_proba or decision_function. clf = Pipeline([("tree", DecisionTreeClassifier())]) ovr_pipe = OneVsRestClassifier(clf) ovr_pipe.fit(iris.data, iris.target) ovr = OneVsRestClassifier(DecisionTreeClassifier()) ovr.fit(iris.data, iris.target) assert_array_equal(ovr.predict(iris.data), ovr_pipe.predict(iris.data)) def test_ovr_coef_(): ovr = OneVsRestClassifier(LinearSVC(random_state=0)) ovr.fit(iris.data, iris.target) shape = ovr.coef_.shape assert_equal(shape[0], n_classes) assert_equal(shape[1], iris.data.shape[1]) def test_ovr_coef_exceptions(): # Not fitted exception! ovr = OneVsRestClassifier(LinearSVC(random_state=0)) # lambda is needed because we don't want coef_ to be evaluated right away assert_raises(ValueError, lambda x: ovr.coef_, None) # Doesn't have coef_ exception! ovr = OneVsRestClassifier(DecisionTreeClassifier()) ovr.fit(iris.data, iris.target) assert_raises(AttributeError, lambda x: ovr.coef_, None) def test_ovo_exceptions(): ovo = OneVsOneClassifier(LinearSVC(random_state=0)) assert_raises(ValueError, ovo.predict, []) def test_ovo_fit_predict(): # A classifier which implements decision_function. 
ovo = OneVsOneClassifier(LinearSVC(random_state=0)) ovo.fit(iris.data, iris.target).predict(iris.data) assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2) # A classifier which implements predict_proba. ovo = OneVsOneClassifier(MultinomialNB()) ovo.fit(iris.data, iris.target).predict(iris.data) assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2) def test_ovo_gridsearch(): ovo = OneVsOneClassifier(LinearSVC(random_state=0)) Cs = [0.1, 0.5, 0.8] cv = GridSearchCV(ovo, {'estimator__C': Cs}) cv.fit(iris.data, iris.target) best_C = cv.best_estimator_.estimators_[0].C assert_true(best_C in Cs) def test_ovo_ties(): # test that ties are broken using the decision function, not defaulting to # the smallest label X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]]) y = np.array([2, 0, 1, 2]) multi_clf = OneVsOneClassifier(Perceptron()) ovo_prediction = multi_clf.fit(X, y).predict(X) # recalculate votes to make sure we have a tie predictions = np.vstack([clf.predict(X) for clf in multi_clf.estimators_]) scores = np.vstack([clf.decision_function(X) for clf in multi_clf.estimators_]) # classifiers are in order 0-1, 0-2, 1-2 # aggregate votes: votes = np.zeros((4, 3)) votes[np.arange(4), predictions[0]] += 1 votes[np.arange(4), 2 * predictions[1]] += 1 votes[np.arange(4), 1 + predictions[2]] += 1 # for the first point, there is one vote per class assert_array_equal(votes[0, :], 1) # for the rest, there is no tie and the prediction is the argmax assert_array_equal(np.argmax(votes[1:], axis=1), ovo_prediction[1:]) # for the tie, the prediction is the class with the highest score assert_equal(ovo_prediction[0], 0) # in the zero-one classifier, the score for 0 is greater than the score for # one. assert_greater(scores[0][0], scores[0][1]) # score for one is greater than score for zero assert_greater(scores[2, 0] - scores[0, 0], scores[0, 0] + scores[1, 0]) # score for one is greater than score for two assert_greater(scores[2, 0] - scores[0, 0], -scores[1, 0] - scores[2, 0]) def test_ovo_ties2(): # test that ties can not only be won by the first two labels X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]]) y_ref = np.array([2, 0, 1, 2]) # cycle through labels so that each label wins once for i in range(3): y = (y_ref + i) % 3 multi_clf = OneVsOneClassifier(Perceptron()) ovo_prediction = multi_clf.fit(X, y).predict(X) assert_equal(ovo_prediction[0], i % 3) def test_ovo_string_y(): "Test that the OvO doesn't screw the encoding of string labels" X = np.eye(4) y = np.array(['a', 'b', 'c', 'd']) svc = LinearSVC() ovo = OneVsOneClassifier(svc) ovo.fit(X, y) assert_array_equal(y, ovo.predict(X)) def test_ecoc_exceptions(): ecoc = OutputCodeClassifier(LinearSVC(random_state=0)) assert_raises(ValueError, ecoc.predict, []) def test_ecoc_fit_predict(): # A classifier which implements decision_function. ecoc = OutputCodeClassifier(LinearSVC(random_state=0), code_size=2, random_state=0) ecoc.fit(iris.data, iris.target).predict(iris.data) assert_equal(len(ecoc.estimators_), n_classes * 2) # A classifier which implements predict_proba. 
ecoc = OutputCodeClassifier(MultinomialNB(), code_size=2, random_state=0) ecoc.fit(iris.data, iris.target).predict(iris.data) assert_equal(len(ecoc.estimators_), n_classes * 2) def test_ecoc_gridsearch(): ecoc = OutputCodeClassifier(LinearSVC(random_state=0), random_state=0) Cs = [0.1, 0.5, 0.8] cv = GridSearchCV(ecoc, {'estimator__C': Cs}) cv.fit(iris.data, iris.target) best_C = cv.best_estimator_.estimators_[0].C assert_true(best_C in Cs) @ignore_warnings def test_deprecated(): base_estimator = DecisionTreeClassifier(random_state=0) X, Y = iris.data, iris.target X_train, Y_train = X[:80], Y[:80] X_test, Y_test = X[80:], Y[80:] all_metas = [ (OneVsRestClassifier, fit_ovr, predict_ovr, predict_proba_ovr), (OneVsOneClassifier, fit_ovo, predict_ovo, None), (OutputCodeClassifier, fit_ecoc, predict_ecoc, None), ] for MetaEst, fit_func, predict_func, proba_func in all_metas: try: meta_est = MetaEst(base_estimator, random_state=0).fit(X_train, Y_train) fitted_return = fit_func(base_estimator, X_train, Y_train, random_state=0) except TypeError: meta_est = MetaEst(base_estimator).fit(X_train, Y_train) fitted_return = fit_func(base_estimator, X_train, Y_train) if len(fitted_return) == 2: estimators_, classes_or_lb = fitted_return assert_almost_equal(predict_func(estimators_, classes_or_lb, X_test), meta_est.predict(X_test)) if proba_func is not None: assert_almost_equal(proba_func(estimators_, X_test, is_multilabel=False), meta_est.predict_proba(X_test)) else: estimators_, classes_or_lb, codebook = fitted_return assert_almost_equal(predict_func(estimators_, classes_or_lb, codebook, X_test), meta_est.predict(X_test)) if __name__ == "__main__": import nose nose.runmodule()
bsd-3-clause
6,644,685,680,957,347,000
38.265233
81
0.57654
false
HackLinux/python-adb
adb_test.py
3
6340
# Copyright 2014 Google Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for adb.""" import cStringIO import struct import unittest from adb import adb_commands from adb import adb_protocol import common_stub BANNER = 'blazetest' LOCAL_ID = 1 REMOTE_ID = 2 class BaseAdbTest(unittest.TestCase): @classmethod def _ExpectWrite(cls, usb, command, arg0, arg1, data): usb.ExpectWrite(cls._MakeHeader(command, arg0, arg1, data)) usb.ExpectWrite(data) if command == 'WRTE': cls._ExpectRead(usb, 'OKAY', 0, 0) @classmethod def _ExpectRead(cls, usb, command, arg0, arg1, data=''): usb.ExpectRead(cls._MakeHeader(command, arg0, arg1, data)) if data: usb.ExpectRead(data) if command == 'WRTE': cls._ExpectWrite(usb, 'OKAY', LOCAL_ID, REMOTE_ID, '') @classmethod def _ConvertCommand(cls, command): return sum(ord(c) << (i * 8) for i, c in enumerate(command)) @classmethod def _MakeHeader(cls, command, arg0, arg1, data): command = cls._ConvertCommand(command) magic = command ^ 0xFFFFFFFF checksum = adb_protocol.AdbMessage.CalculateChecksum(data) return struct.pack('<6I', command, arg0, arg1, len(data), checksum, magic) @classmethod def _ExpectConnection(cls, usb): cls._ExpectWrite(usb, 'CNXN', 0x01000000, 4096, 'host::%s\0' % BANNER) cls._ExpectRead(usb, 'CNXN', 0, 0, 'device::\0') @classmethod def _ExpectOpen(cls, usb, service): cls._ExpectWrite(usb, 'OPEN', LOCAL_ID, 0, service) cls._ExpectRead(usb, 'OKAY', REMOTE_ID, LOCAL_ID) @classmethod def _ExpectClose(cls, usb): cls._ExpectRead(usb, 'CLSE', REMOTE_ID, 0) cls._ExpectWrite(usb, 'CLSE', LOCAL_ID, REMOTE_ID, '') @classmethod def _Connect(cls, usb): return adb_commands.AdbCommands.Connect(usb, BANNER) class AdbTest(BaseAdbTest): @classmethod def _ExpectCommand(cls, service, command, *responses): usb = common_stub.StubUsb() cls._ExpectConnection(usb) cls._ExpectOpen(usb, '%s:%s\0' % (service, command)) for response in responses: cls._ExpectRead(usb, 'WRTE', REMOTE_ID, 0, response) cls._ExpectClose(usb) return usb def testConnect(self): usb = common_stub.StubUsb() self._ExpectConnection(usb) adb_commands.AdbCommands.Connect(usb, BANNER) def testSmallResponseShell(self): command = 'keepin it real' response = 'word.' usb = self._ExpectCommand('shell', command, response) adb_commands = self._Connect(usb) self.assertEqual(response, adb_commands.Shell(command)) def testBigResponseShell(self): command = 'keepin it real big' # The data doesn't have to be big, the point is that it just concatenates # the data from different WRTEs together. 
responses = ['other stuff, ', 'and some words.'] usb = self._ExpectCommand('shell', command, *responses) adb_commands = self._Connect(usb) self.assertEqual(''.join(responses), adb_commands.Shell(command)) def testStreamingResponseShell(self): command = 'keepin it real big' # expect multiple lines responses = ['other stuff, ', 'and some words.'] usb = self._ExpectCommand('shell', command, *responses) adb_commands = self._Connect(usb) response_count = 0 for (expected,actual) in zip(responses, adb_commands.StreamingShell(command)): self.assertEqual(expected, actual) response_count = response_count + 1 self.assertEqual(len(responses), response_count) def testReboot(self): usb = self._ExpectCommand('reboot', '', '') adb_commands = self._Connect(usb) adb_commands.Reboot() def testRebootBootloader(self): usb = self._ExpectCommand('reboot', 'bootloader', '') adb_commands = self._Connect(usb) adb_commands.RebootBootloader() def testRemount(self): usb = self._ExpectCommand('remount', '', '') adb_commands = self._Connect(usb) adb_commands.Remount() def testRoot(self): usb = self._ExpectCommand('root', '', '') adb_commands = self._Connect(usb) adb_commands.Root() class FilesyncAdbTest(BaseAdbTest): @classmethod def _MakeSyncHeader(cls, command, *int_parts): command = cls._ConvertCommand(command) return struct.pack('<%dI' % (len(int_parts) + 1), command, *int_parts) @classmethod def _MakeWriteSyncPacket(cls, command, data='', size=None): return cls._MakeSyncHeader(command, size or len(data)) + data @classmethod def _ExpectSyncCommand(cls, write_commands, read_commands): usb = common_stub.StubUsb() cls._ExpectConnection(usb) cls._ExpectOpen(usb, 'sync:\0') while write_commands or read_commands: if write_commands: command = write_commands.pop(0) cls._ExpectWrite(usb, 'WRTE', LOCAL_ID, REMOTE_ID, command) if read_commands: command = read_commands.pop(0) cls._ExpectRead(usb, 'WRTE', REMOTE_ID, LOCAL_ID, command) cls._ExpectClose(usb) return usb def testPush(self): filedata = 'alo there, govnah' mtime = 100 send = [ self._MakeWriteSyncPacket('SEND', '/data,33272'), self._MakeWriteSyncPacket('DATA', filedata), self._MakeWriteSyncPacket('DONE', size=mtime), ] data = 'OKAY\0\0\0\0' usb = self._ExpectSyncCommand([''.join(send)], [data]) adb_commands = self._Connect(usb) adb_commands.Push(cStringIO.StringIO(filedata), '/data', mtime=mtime) def testPull(self): filedata = "g'ddayta, govnah" recv = self._MakeWriteSyncPacket('RECV', '/data') data = [ self._MakeWriteSyncPacket('DATA', filedata), self._MakeWriteSyncPacket('DONE'), ] usb = self._ExpectSyncCommand([recv], [''.join(data)]) adb_commands = self._Connect(usb) self.assertEqual(filedata, adb_commands.Pull('/data')) if __name__ == '__main__': unittest.main()
apache-2.0
2,538,067,745,087,401,000
29.334928
82
0.671767
false
304471720/mongrel2
examples/ws/python/echo.py
55
3488
import simplejson as json from mongrel2 import handler import wsutil import sys import time import re sender_id = "82209006-86FF-4982-B5EA-D1E29E55D480" conn = handler.Connection(sender_id, "tcp://127.0.0.1:9990", "tcp://127.0.0.1:9989") CONNECTION_TIMEOUT=5 closingMessages={} badUnicode=re.compile(u'[\ud800-\udfff]') logf=open('echo.log','wb') #logf=open('/dev/null','wb') #logf=sys.stdout def abortConnection(conn,req,reason='none',code=None): #print 'abort',conn,req,reason,code if code is not None: #print "Closing cleanly\n" conn.reply_websocket(req,code+reason,opcode=wsutil.OP_CLOSE) closingMessages[req.conn_id]=(time.time(),req.sender) else: conn.reply(req,'') print >>logf,'abort',code,reason while True: now=time.time() logf.flush() for k,(t,uuid) in closingMessages.items(): if now > t+CONNECTION_TIMEOUT: conn.send(uuid,k,'') try: req = conn.recv() except: print "FAILED RECV" continue if req.is_disconnect(): #print "DISCONNECTED", req.conn_id continue if req.headers.get('METHOD') == 'WEBSOCKET_HANDSHAKE': #print "HANDSHAKE" conn.reply(req, '\r\n'.join([ "HTTP/1.1 101 Switching Protocols", "Upgrade: websocket", "Connection: Upgrade", "Sec-WebSocket-Accept: %s\r\n\r\n"])%req.body) continue if req.headers.get('METHOD') != 'WEBSOCKET': print 'METHOD is Not WEBSOCKET:',req.headers#,req.body conn.reply(req,'') continue try: #print 'headers',req.headers flags = int(req.headers.get('FLAGS'),16) fin = flags&0x80==0x80 rsvd=flags & 0x70 opcode=flags & 0xf wsdata = req.body #print fin,rsvd,opcode,len(wsdata),wsdata #logf.write('\n') except: #print "Unable to decode FLAGS" abortConnection(conn,req,'WS decode failed') #continue if rsvd != 0: abortConnection(conn,req,'reserved non-zero', wsutil.CLOSE_PROTOCOL_ERROR) continue if opcode == wsutil.OP_CLOSE: if req.conn_id in closingMessages: del closingMessages[req.conn_id] conn.reply(req,'') else: conn.reply_websocket(req,wsdata,opcode) conn.reply(req,'') continue if req.conn_id in closingMessages: continue if opcode not in wsutil.opcodes: abortConnection(conn,req,'Unknown opcode', wsutil.CLOSE_PROTOCOL_ERROR) continue if (opcode & 0x8) != 0: if opcode ==wsutil.OP_PING: opcode = wsutil.OP_PONG conn.reply_websocket(req,wsdata,opcode) continue if opcode == wsutil.OP_PONG: continue # We don't send pings, so ignore pongs if(opcode == wsutil.OP_TEXT): try: x=wsdata.decode('utf-8') #Thank you for not fixing python issue8271 in 2.x :( if badUnicode.search(x): raise UnicodeError('Surrogates not allowed') #for c in x: #if (0xd800 <= ord(c) <= 0xdfff): #raise UnicodeError('Surrogates not allowed') except: abortConnection(conn,req,'invalid UTF', wsutil.CLOSE_BAD_DATA) continue conn.reply_websocket(req,wsdata,opcode)
bsd-3-clause
7,351,966,733,813,497,000
27.826446
74
0.571961
false
kapilrastogi/Impala
tests/common/impala_cluster.py
1
8569
# Copyright (c) 2012 Cloudera, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Basic object model of an Impala cluster (set of Impala processes). # import logging import psutil import socket from getpass import getuser from random import choice from tests.common.impala_service import * from tests.util.shell_util import exec_process_async, exec_process from time import sleep logging.basicConfig(level=logging.ERROR, format='%(threadName)s: %(message)s') LOG = logging.getLogger('impala_cluster') LOG.setLevel(level=logging.DEBUG) # Represents a set of Impala processes. Each Impala process must be created with # a basic set of command line options (beeswax_port, webserver_port, etc) class ImpalaCluster(object): def __init__(self): self.__impalads, self.__statestoreds, self.__catalogd =\ self.__build_impala_process_lists() LOG.info("Found %d impalad/%d statestored/%d catalogd process(es)" %\ (len(self.__impalads), len(self.__statestoreds), 1 if self.__catalogd else 0)) def refresh(self): """ Re-loads the impalad/statestored/catalogd processes if they exist. Helpful to confirm that processes have been killed. """ self.__impalads, self.__statestoreds, self.__catalogd =\ self.__build_impala_process_lists() @property def statestored(self): """ Returns the statestore process Note: Currently we expectly a single statestore process, in the future this might change in which case this should return the "active" statestore. """ # If no statestored process exists, return None. return self.__statestoreds[0] if len(self.__statestoreds) > 0 else None @property def impalads(self): """Returns a list of the known impalad processes""" return self.__impalads @property def catalogd(self): """Returns the catalogd process, or None if no catalogd process was found""" return self.__catalogd def get_first_impalad(self): return self.impalads[0] def get_any_impalad(self): """Selects a random impalad from the list of known processes""" return choice(self.impalads) def get_different_impalad(self, other_impalad): """Selects an impalad that is different from the given impalad""" if len(self.impalads) <= 1: assert 0, "Only %d impalads available to choose from" % len(self.impalads) LOG.info("other_impalad: " + str(other_impalad)) LOG.info("Cluster: " + str(len(self.impalads))) LOG.info("Cluster: " + str(self.impalads)) return choice([impalad for impalad in self.impalads if impalad != other_impalad]) def __build_impala_process_lists(self): """ Gets all the running Impala procs (with start arguments) on the machine. Note: This currently only works for the local case. To support running in a cluster environment this would need to enumerate each machine in the cluster. """ impalads = list() statestored = list() catalogd = None for pid in psutil.get_pid_list(): try: process = psutil.Process(pid) except psutil.NoSuchProcess, e: # A process from get_pid_list() no longer exists, continue. 
LOG.info(e) continue try: if process.username != getuser(): continue except KeyError, e: if "uid not found" in str(e): continue raise if process.name == 'impalad' and len(process.cmdline) >= 1: impalads.append(ImpaladProcess(process.cmdline)) elif process.name == 'statestored' and len(process.cmdline) >= 1: statestored.append(StateStoreProcess(process.cmdline)) elif process.name == 'catalogd' and len(process.cmdline) >=1: catalogd = CatalogdProcess(process.cmdline) return impalads, statestored, catalogd # Represents a process running on a machine and common actions that can be performed # on a process such as restarting or killing. class Process(object): def __init__(self, cmd): self.cmd = cmd assert cmd is not None and len(cmd) >= 1,\ 'Process object must be created with valid command line argument list' def get_pid(self): """Gets the pid of the process. Returns None if the PID cannot be determined""" LOG.info("Attempting to find PID for %s" % ' '.join(self.cmd)) for pid in psutil.get_pid_list(): try: process = psutil.Process(pid) if set(self.cmd) == set(process.cmdline): return pid except psutil.NoSuchProcess, e: # A process from get_pid_list() no longer exists, continue. LOG.info(e) LOG.info("No PID found for process cmdline: %s. Process is dead?" % self.cmd) return None def start(self): LOG.info("Starting process: %s" % ' '.join(self.cmd)) self.process = exec_process_async(' '.join(self.cmd)) def wait(self): """Wait until the current process has exited, and returns (return code, stdout, stderr)""" LOG.info("Waiting for process: %s" % ' '.join(self.cmd)) stdout, stderr = self.process.communicate() return self.process.returncode, stdout, stderr def kill(self): """ Kills the given processes. Returns the PID that was killed or None of no PID was found (process not running) """ pid = self.get_pid() if pid is None: assert 0, "No processes %s found" % self.cmd LOG.info('Killing: %s (PID: %d)' % (' '.join(self.cmd), pid)) exec_process("kill -9 %d" % pid) return pid def restart(self): """Kills and restarts the process""" self.kill() # Wait for a bit so the ports will be released. sleep(1) self.start() def __str__(self): return "Command: %s PID: %s" % (self.cmd, self.get_pid()) # Base class for all Impala processes class BaseImpalaProcess(Process): def __init__(self, cmd, hostname): super(BaseImpalaProcess, self).__init__(cmd) self.hostname = hostname def _get_webserver_port(self, default=None): return int(self._get_arg_value('webserver_port', default)) def _get_arg_value(self, arg_name, default=None): """Gets the argument value for given argument name""" for arg in self.cmd: if ('%s=' % arg_name) in arg.strip().lstrip('-'): return arg.split('=')[1] if default is None: assert 0, "No command line argument '%s' found." 
% arg_name return default # Represents an impalad process class ImpaladProcess(BaseImpalaProcess): def __init__(self, cmd): super(ImpaladProcess, self).__init__(cmd, socket.gethostname()) self.service = ImpaladService(self.hostname, self._get_webserver_port(default=25000), self.__get_beeswax_port(default=21000), self.__get_be_port(default=22000), self.__get_hs2_port(default=21050)) def __get_beeswax_port(self, default=None): return int(self._get_arg_value('beeswax_port', default)) def __get_be_port(self, default=None): return int(self._get_arg_value('be_port', default)) def __get_hs2_port(self, default=None): return int(self._get_arg_value('hs2_port', default)) def start(self, wait_until_ready=True): """Starts the impalad and waits until the service is ready to accept connections.""" super(ImpaladProcess, self).start() self.service.wait_for_metric_value('impala-server.ready', expected_value=1, timeout=30) # Represents a statestored process class StateStoreProcess(BaseImpalaProcess): def __init__(self, cmd): super(StateStoreProcess, self).__init__(cmd, socket.gethostname()) self.service =\ StateStoredService(self.hostname, self._get_webserver_port(default=25010)) # Represents a catalogd process class CatalogdProcess(BaseImpalaProcess): def __init__(self, cmd): super(CatalogdProcess, self).__init__(cmd, socket.gethostname()) self.service = CatalogdService(self.hostname, self._get_webserver_port(default=25020), self.__get_port(default=26000)) def __get_port(self, default=None): return int(self._get_arg_value('catalog_service_port', default))
apache-2.0
7,098,882,221,587,213,000
35.619658
89
0.673241
false
z01nl1o02/tests
cnn_layer_size/show_conv_dim.py
1
1705
import os,sys,pdb import argparse ap = argparse.ArgumentParser() ap.add_argument('--height','-H',help='input height',type=int) ap.add_argument('--width','-W',help='input width',type=int) ap.add_argument('--layers','-F',help='layer info txt with each line for one layer') ap.add_argument('--deconv','-D',help='0 for conv 1 for deconv',type=int,default=0) args = ap.parse_args() def conv(CKSP,HW): C,K,S,P = CKSP H,W = HW H = int((H - K + 2*P)/S + 1) W = int((W - K + 2*P)/S + 1) return H,W def deconv(CKSP,HW): C,K,S,P = CKSP H,W = HW H = int((H - 1) * S + K - 2*P) W = int((W - 1) * S + K - 2*P) return H,W def calc_conv(layers,HW): output = [HW] for CKSP in layers: HW = conv(CKSP,HW) output.append(HW) return output def calc_deconv(layers,HW): output = [HW] for CKSP in layers: HW = deconv(CKSP,HW) output.append(HW) return output def parse_layers(filepath): layers = [] with open(filepath,'rb') as f: for line in f: line = line.strip() if line == "": continue C,K,S,P = [int(x) for x in line.split(',')] #channel number, kernel size, stride, padding layers.append( (C,K,S,P) ) return layers layers = parse_layers(args.layers) if args.deconv == 0: HW = calc_conv(layers, (args.height,args.width)) print 'conv...' else: HW = calc_deconv(layers, (args.height,args.width)) print 'deconv...' print('input size (h,w) = (%d,%d)'%(args.height,args.width)) for (H,W),(C,K,S,P) in zip(HW[1:],layers): print('(channel,kernel,stride,padding)=(%d,%d,%d,%d) (h,w) = (%d,%d)'%(C,K,S,P,H,W))
gpl-2.0
8,830,490,057,427,566,000
23.014085
101
0.557771
false
cretingame/Yarr-fw
script/debug.py
1
5687
import os import subprocess script_path = os.getcwd() + "/" + os.path.splitext(__file__)[0] + ".tcl" script_file = open(script_path, "w+") os.chdir("..") project_path = os.getcwd() script_file.write( "######################################################\n" + "# Generated file to open the virtual logic analyyer\n" + "######################################################\n" + "\n\n" + "#Run " + __file__+ " to generate this file\n\n") ltx_files = [] ltx_file = None cmds_debug=( "start_gui\n" + "open_hw\n" + "connect_hw_server\n" + "open_hw_target\n" + "current_hw_device [lindex [get_hw_devices] 1]\n" + "refresh_hw_device -update_hw_probes false [lindex [get_hw_devices] 1]\n" + "set_property PROBES.FILE {}{}{} [lindex [get_hw_devices] 1]\n" + "refresh_hw_device [lindex [get_hw_devices] 1]\n" + "display_hw_ila_data [ get_hw_ila_data *]\n" + "\n" ) cmds_post_gui=( "set_property TRIGGER_COMPARE_VALUE eq1'b1 [get_hw_probes app_0/dma_ctrl_start_l2p_s_1 -of_objects [get_hw_ilas -of_objects [get_hw_devices xc7k160t_1] -filter {CELL_NAME=~\"app_0/dbg_0.axis_debug\"}]]\n" + "set_property TRIGGER_COMPARE_VALUE eq1'b1 [get_hw_probes app_0/dma_ctrl_start_p2l_s_1 -of_objects [get_hw_ilas -of_objects [get_hw_devices xc7k160t_1] -filter {CELL_NAME=~\"app_0/dbg_0.axis_debug\"}]]\n" + "set_property CONTROL.TRIGGER_CONDITION OR [get_hw_ilas -of_objects [get_hw_devices xc7k160t_1] -filter {CELL_NAME=~\"app_0/dbg_0.axis_debug\"}]\n" + "set_property TRIGGER_COMPARE_VALUE eq1'b1 [get_hw_probes app_0/dma_ctrl_start_l2p_s -of_objects [get_hw_ilas -of_objects [get_hw_devices xc7k160t_1] -filter {CELL_NAME=~\"app_0/dbg_2.pipelined_wishbone_debug\"}]]\n" + "set_property TRIGGER_COMPARE_VALUE eq1'b1 [get_hw_probes app_0/dma_ctrl_start_p2l_s -of_objects [get_hw_ilas -of_objects [get_hw_devices xc7k160t_1] -filter {CELL_NAME=~\"app_0/dbg_2.pipelined_wishbone_debug\"}]]\n" + "set_property CONTROL.TRIGGER_CONDITION OR [get_hw_ilas -of_objects [get_hw_devices xc7k160t_1] -filter {CELL_NAME=~\"app_0/dbg_2.pipelined_wishbone_debug\"}]\n" + "set_property TRIGGER_COMPARE_VALUE eq1'b1 [get_hw_probes app_0/ddr_app_cmd_en_s -of_objects [get_hw_ilas -of_objects [get_hw_devices xc7k160t_1] -filter {CELL_NAME=~\"app_0/dbg_5.ddr_debug\"}]]\n" + "#set_property TRIGGER_COMPARE_VALUE eq1'b1 [get_hw_probes app_0/dma_ctrl_start_l2p_s -of_objects [get_hw_ilas -of_objects [get_hw_devices xc7k160t_1] -filter {CELL_NAME=~\"app_0/dbg_4.l2p_debug\"}]]\n"+ #"set_property TRIGGER_COMPARE_VALUE eq3'h1 [get_hw_probes app_0/ddr_app_cmd_s -of_objects [get_hw_ilas -of_objects [get_hw_devices xc7k160t_1] -filter {CELL_NAME=~\"app_0/dbg_5.ddr_debug\"}]]\n"+ #"set_property TRIGGER_COMPARE_VALUE eq29'h0000_2001 [get_hw_probes app_0/count_s -of_objects [get_hw_ilas -of_objects [get_hw_devices xc7k160t_1] -filter {CELL_NAME=~\"app_0/dbg_0.axis_debug\"}]]\n"+ #"set_property TRIGGER_COMPARE_VALUE eq29'h0000_2001 [get_hw_probes app_0/gray_count_s -of_objects [get_hw_ilas -of_objects [get_hw_devices xc7k160t_1] -filter {CELL_NAME=~\"app_0/dbg_2.pipelined_wishbone_debug\"}]]\n" + #"set_property TRIGGER_COMPARE_VALUE eq29'h0000_2001 [get_hw_probes app_0/ddr_count_s -of_objects [get_hw_ilas -of_objects [get_hw_devices xc7k160t_1] -filter {CELL_NAME=~\"app_0/dbg_5.ddr_debug\"}]]\n" + "set root /home/asautaux/Yarr-fw/ila/\n"+ "set ilafile1 ila_axis_data\n"+ "set ilafile2 ila_wb_data\n"+ "#set ilafile3 ila_l2p_data\n"+ "set ilafile3 ila_ram_data\n"+ "\n\n" + 'for {set i 0} {$i < 10000} {incr i} {\n'+ ' run_hw_ila [get_hw_ilas -of_objects [get_hw_devices 
xc7k160t_1]]\n'+ ' wait_on_hw_ila -timeout 1 [get_hw_ilas -of_objects [get_hw_devices xc7k160t_1]]\n'+ ' display_hw_ila_data [upload_hw_ila_data [get_hw_ilas -of_objects [get_hw_devices xc7k160t_1] -filter {CELL_NAME=~"app_0/dbg_0.axis_debug"}]]\n'+ ' display_hw_ila_data [upload_hw_ila_data [get_hw_ilas -of_objects [get_hw_devices xc7k160t_1] -filter {CELL_NAME=~"app_0/dbg_2.pipelined_wishbone_debug"}]]\n'+ ' #display_hw_ila_data [upload_hw_ila_data [get_hw_ilas -of_objects [get_hw_devices xc7k160t_1] -filter {CELL_NAME=~"app_0/dbg_4.l2p_debug"}]]\n'+ ' display_hw_ila_data [upload_hw_ila_data [get_hw_ilas -of_objects [get_hw_devices xc7k160t_1] -filter {CELL_NAME=~"app_0/dbg_5.ddr_debug"}]]\n'+ ' write_hw_ila_data $root$i-$ilafile1.ila hw_ila_data_1\n'+ ' write_hw_ila_data $root$i-$ilafile2.ila hw_ila_data_2\n'+ ' write_hw_ila_data $root$i-$ilafile3.ila hw_ila_data_3\n'+ ' #write_hw_ila_data $root$i-$ilafile4.ila hw_ila_data_4\n'+ '}\n' "\n" ) for root, dirs, files in os.walk(project_path): for file in files: if file.endswith(".ltx"): ltx_file = os.path.join(root, file) ltx_files.append(ltx_file) #print "Bitfile found : " + ltx_file if len(ltx_files) == 0 : print("No debug file found !\n") elif len(ltx_files) == 1: print("Debug file found : " + ltx_files[0]) ok = raw_input ("Will you debug with this file [Y/n] ?") if ok[0].lower() == 'y': ltx_file = ltx_files[0] nb = 0 else: ltx_file = None else: print("Several debug files found: ") i = 0 for ltx_file in ltx_files: print (str(i) + ": " + ltx_file) i = i + 1 try: nb = input("Choose a file by typing a number: ") int(nb) except: print("You didn't enter a valid number") ltx_file = None else: if nb >= len(ltx_files) or nb < 0 : print("You didn't enter a valid number") ltx_file = None if (ltx_file != None): ltx_file = ltx_files[nb] cmds = cmds_debug.format('{',ltx_file,'}') #+ cmds_post_gui script_file.write(cmds) script_file.flush() subprocess.call(["vivado", "-mode", "batch","-source", script_path]) else: print "No debug file found !" script_file.close()
gpl-3.0
-2,069,367,810,578,305,000
46.789916
220
0.657113
false
Gloomymoon/SecKill
manage.py
1
1215
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import os
from app import create_app, db
from app.models import User, Role, Permission, Coupon, SecKill, Datemark
from flask_script import Manager, Shell
from flask_migrate import Migrate, MigrateCommand

app = create_app(os.getenv('ATH_CONFIG') or 'default')
manager = Manager(app)
migrate = Migrate(app, db)  # bind Flask-Migrate so the "db" command registered below can run


def make_shell_context():
    return dict(app=app, db=db, User=User, Role=Role)
manager.add_command("shell", Shell(make_context=make_shell_context))
manager.add_command("db", MigrateCommand)


@manager.command
def test():
    """Run the unit tests."""
    import unittest
    tests = unittest.TestLoader().discover('tests')
    unittest.TextTestRunner(verbosity=2).run(tests)


def init_app_data():
    db.drop_all()
    db.create_all()
    Coupon.insert_coupons()
    Role.insert_roles()
    u = User(ip="127.0.0.1", name="Administrator", role=Role.query.filter_by(permissions=0xff).first())
    db.session.add(u)
    db.session.commit()


def calculated():
    sk = SecKill.query.filter_by(win=False).filter_by(datemark=Datemark.today()).order_by(SecKill.kill_time).all()
    sk1 = sk[0]
    db.session.add(sk1)
    db.session.commit()


if __name__ == '__main__':
    manager.run()
gpl-3.0
-8,661,893,249,062,246,000
25.434783
111
0.68642
false
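In the manage.py above, init_app_data() and calculated() are defined below the `if __name__ == '__main__': manager.run()` guard and are never registered with the Manager, so they cannot be reached from the command line. A hedged sketch of how such helpers are usually exposed with Flask-Script, using stand-in bodies rather than the real database calls:

from flask import Flask
from flask_script import Manager

app = Flask(__name__)
manager = Manager(app)

@manager.command
def initdata():
    """Stand-in for init_app_data(): drop, recreate and seed the database."""
    print("init_app_data() would run here")

@manager.command
def recalc():
    """Stand-in for calculated(): re-rank today's seckill entries."""
    print("calculated() would run here")

if __name__ == "__main__":
    # Invoked as:  python thisfile.py initdata   /   python thisfile.py recalc
    manager.run()

In the real file the decorated wrappers would simply call init_app_data() and calculated(), and they would have to sit above the __main__ guard so they are registered before manager.run() dispatches.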
xzturn/tensorflow
tensorflow/python/compiler/tensorrt/test/biasadd_matmul_test.py
3
4713
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Model script to test TF-TensorRT integration.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.compiler.tensorrt.test import tf_trt_integration_test_base as trt_test from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.ops import array_ops from tensorflow.python.ops import gen_array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn from tensorflow.python.platform import test class BiasaddMatMulTest(trt_test.TfTrtIntegrationTestBase): """Testing conversion of BiasAdd MatMul in TF-TRT conversion.""" def _ConstOp(self, shape): return constant_op.constant(np.random.randn(*shape), dtype=dtypes.float32) def GraphFn(self, x): input_matrix_rows = 4 input_matrix_columns = 144 b = self._ConstOp((input_matrix_columns, 4)) x1 = math_ops.matmul(x, b) b = self._ConstOp((1, 4)) x1 = x1 + b b = self._ConstOp((input_matrix_rows, 144)) x2 = self.trt_incompatible_op(x) x2 = math_ops.matmul(x2, b, transpose_a=True) x2 = gen_array_ops.reshape(x2, [4, -1]) x2 = self.trt_incompatible_op(x2) b = self._ConstOp((4, input_matrix_columns)) x3 = math_ops.matmul(x, b, transpose_b=True) b = self._ConstOp((16, input_matrix_rows)) x4 = self.trt_incompatible_op(x) x4 = math_ops.matmul(x4, b, transpose_b=True, transpose_a=True) x4 = gen_array_ops.reshape(x4, [4, -1]) x4 = self.trt_incompatible_op(x4) # Note that tf.nn.bias_add supports up to 5 dimensions. 
b = self._ConstOp((input_matrix_columns, 48)) x5 = math_ops.matmul(x, b) b = self._ConstOp((48,)) x5 = nn.bias_add(x5, b) x5 = gen_array_ops.reshape(x5, [4, -1]) x6 = gen_array_ops.reshape(x, [4, 24, 6]) b = self._ConstOp((6,)) x6 = nn.bias_add(x6, b, data_format="NHWC") x6 = gen_array_ops.reshape(x6, [4, -1]) x7 = gen_array_ops.reshape(x, [4, 12, 4, 3]) b = self._ConstOp((3,)) x7 = nn.bias_add(x7, b, data_format="NHWC") x7 = gen_array_ops.reshape(x7, [4, -1]) x8 = gen_array_ops.reshape(x, [4, 4, 3, 2, 6]) b = self._ConstOp((6,)) x8 = nn.bias_add(x8, b, data_format="NHWC") x8 = gen_array_ops.reshape(x8, [4, -1]) x9 = gen_array_ops.reshape(x, [4, 12, 3, 2, 2]) b = self._ConstOp((12,)) x9 = nn.bias_add(x9, b, data_format="NCHW") x9 = gen_array_ops.reshape(x9, [4, -1]) x10 = gen_array_ops.reshape(x, [4, 3, 4, 12]) b = self._ConstOp((3,)) x10 = nn.bias_add(x10, b, data_format="NCHW") x10 = gen_array_ops.reshape(x10, [4, -1]) x11 = gen_array_ops.reshape(x, [4, 6, 24]) b = self._ConstOp((6,)) x11 = nn.bias_add(x11, b, data_format="NCHW") x11 = gen_array_ops.reshape(x11, [4, -1]) out = array_ops.concat([x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11], axis=-1) return array_ops.squeeze(out, name="output_0") def GetParams(self): return self.BuildParams(self.GraphFn, dtypes.float32, [[4, 144]], [[4, 6680]]) def GetConversionParams(self, run_params): """Return a ConversionParams for test.""" conversion_params = super(BiasaddMatMulTest, self).GetConversionParams(run_params) conversion_params._replace(max_batch_size=4, maximum_cached_engines=1) rewrite_config_with_trt = self.GetTrtRewriterConfig( run_params=run_params, conversion_params=conversion_params, # Disable layout optimizer, since it will convert BiasAdd with NHWC # format to NCHW format under four dimensional input. disable_non_trt_optimizers=True) return conversion_params._replace( rewriter_config_template=rewrite_config_with_trt) def ExpectedEnginesToBuild(self, run_params): """Return the expected engines to build.""" return ["TRTEngineOp_0"] if __name__ == "__main__": test.main()
apache-2.0
3,452,165,737,496,780,300
35.820313
93
0.643115
false
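One detail in the test above worth flagging: GetConversionParams calls `conversion_params._replace(max_batch_size=4, maximum_cached_engines=1)` without binding the result. `_replace` on a namedtuple-style object returns a new instance and leaves the original untouched, so as written that line looks like a no-op (the later `return conversion_params._replace(...)` does use the return value). A tiny standalone demonstration, reusing the two field names from that call:

import collections

Params = collections.namedtuple("Params", ["max_batch_size", "maximum_cached_engines"])
p = Params(max_batch_size=1, maximum_cached_engines=1)

p._replace(max_batch_size=4)          # return value dropped: p is unchanged
print(p.max_batch_size)               # -> 1

p = p._replace(max_batch_size=4)      # rebinding is what actually takes effect
print(p.max_batch_size)               # -> 4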
beeftornado/sentry
src/sentry/api/endpoints/project_key_stats.py
3
2056
from __future__ import absolute_import import six from collections import OrderedDict from django.db.models import F from rest_framework.response import Response from sentry import tsdb from sentry.api.base import StatsMixin from sentry.api.bases.project import ProjectEndpoint from sentry.api.exceptions import ResourceDoesNotExist from sentry.models import ProjectKey class ProjectKeyStatsEndpoint(ProjectEndpoint, StatsMixin): def get(self, request, project, key_id): try: key = ProjectKey.objects.get( project=project, public_key=key_id, roles=F("roles").bitor(ProjectKey.roles.store) ) except ProjectKey.DoesNotExist: raise ResourceDoesNotExist try: stat_args = self._parse_args(request) except ValueError: return Response({"detail": "Invalid request data"}, status=400) stats = OrderedDict() for model, name in ( (tsdb.models.key_total_received, "total"), (tsdb.models.key_total_blacklisted, "filtered"), (tsdb.models.key_total_rejected, "dropped"), ): # XXX (alex, 08/05/19) key stats were being stored under either key_id or str(key_id) # so merge both of those back into one stats result. result = tsdb.get_range(model=model, keys=[key.id, six.text_type(key.id)], **stat_args) for key_id, points in six.iteritems(result): for ts, count in points: bucket = stats.setdefault(int(ts), {}) bucket.setdefault(name, 0) bucket[name] += count return Response( [ { "ts": ts, "total": data["total"], "dropped": data["dropped"], "filtered": data["filtered"], "accepted": data["total"] - data["dropped"] - data["filtered"], } for ts, data in six.iteritems(stats) ] )
bsd-3-clause
8,782,102,129,794,222,000
35.714286
99
0.57393
false
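The stats loop above folds results stored under both the integer and string form of the key id into per-timestamp buckets. A self-contained sketch of just that setdefault/accumulate step, with made-up sample points instead of the tsdb call:

from collections import OrderedDict

# The same logical key appears twice, once as an int and once as a str,
# which is exactly the situation the endpoint's comment describes.
result = {
    123: [(1500000000, 2), (1500000010, 3)],
    "123": [(1500000000, 5)],
}

stats = OrderedDict()
for _key_id, points in result.items():
    for ts, count in points:
        bucket = stats.setdefault(int(ts), {})
        bucket.setdefault("total", 0)
        bucket["total"] += count

# Both entries for ts 1500000000 are merged: total 7 there, 3 at 1500000010.
print(dict(stats))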
javachengwc/hue
desktop/core/ext-py/tablib-0.10.0/tablib/packages/openpyxl/shared/password_hasher.py
118
1850
# file openpyxl/shared/password_hasher.py # Copyright (c) 2010 openpyxl # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # # @license: http://www.opensource.org/licenses/mit-license.php # @author: Eric Gazoni """Basic password hashing.""" def hash_password(plaintext_password=''): """Create a password hash from a given string. This method is based on the algorithm provided by Daniel Rentz of OpenOffice and the PEAR package Spreadsheet_Excel_Writer by Xavier Noguer <[email protected]>. """ password = 0x0000 i = 1 for char in plaintext_password: value = ord(char) << i rotated_bits = value >> 15 value &= 0x7fff password ^= (value | rotated_bits) i += 1 password ^= len(plaintext_password) password ^= 0xCE4B return str(hex(password)).upper()[2:]
apache-2.0
8,326,501,430,527,015,000
38.361702
79
0.727027
false
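hash_password above is a pure function, so it is easy to exercise directly. A short usage sketch; the import path is an assumption based on the record's file path, and the expected 'CBEB' value was traced by hand through the loop above rather than taken from a reference, so treat it as a sanity check rather than gospel:

# Assumed import path (mirrors tablib/packages/openpyxl/shared/password_hasher.py).
from tablib.packages.openpyxl.shared.password_hasher import hash_password

h = hash_password("test")
print(h)                              # hand-tracing the loop gives 'CBEB' for "test"
assert h == hash_password("test")     # deterministic: same plaintext, same hash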
skycucumber/Messaging-Gateway
webapp/venv/lib/python2.7/site-packages/twisted/python/release.py
52
1107
# Copyright (c) Twisted Matrix Laboratories. # See LICENSE for details. """ A release-automation toolkit. Don't use this outside of Twisted. Maintainer: Christopher Armstrong """ import os # errors class DirectoryExists(OSError): """ Some directory exists when it shouldn't. """ pass class DirectoryDoesntExist(OSError): """ Some directory doesn't exist when it should. """ pass class CommandFailed(OSError): pass # utilities def sh(command, null=True, prompt=False): """ I'll try to execute C{command}, and if C{prompt} is true, I'll ask before running it. If the command returns something other than 0, I'll raise C{CommandFailed(command)}. """ print "--$", command if prompt: if raw_input("run ?? ").startswith('n'): return if null: command = "%s > /dev/null" % command if os.system(command) != 0: raise CommandFailed(command) def runChdirSafe(f, *args, **kw): origdir = os.path.abspath('.') try: return f(*args, **kw) finally: os.chdir(origdir)
gpl-2.0
-5,842,044,035,875,407,000
16.571429
66
0.61879
false
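runChdirSafe above is the one piece of this module that is handy outside release automation: it restores the working directory no matter what the wrapped callable does. A small hedged usage sketch; the directory is a placeholder and the import assumes a stock Twisted install, where this module lives at twisted.python.release:

import os
import tempfile
from twisted.python.release import runChdirSafe

def wander_off():
    # A callable that rudely changes the working directory.
    os.chdir(tempfile.gettempdir())
    return os.getcwd()

before = os.getcwd()
visited = runChdirSafe(wander_off)
print(visited)                        # the temp directory the callable moved into
assert os.getcwd() == before          # but we are back where we started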
leekchan/django_test
django/contrib/admindocs/utils.py
23
4114
"Misc. utility functions/classes for admin documentation generator." import re from email.parser import HeaderParser from email.errors import HeaderParseError from django.utils.safestring import mark_safe from django.core.urlresolvers import reverse from django.utils.encoding import force_bytes try: import docutils.core import docutils.nodes import docutils.parsers.rst.roles except ImportError: docutils_is_available = False else: docutils_is_available = True def trim_docstring(docstring): """ Uniformly trim leading/trailing whitespace from docstrings. Based on https://www.python.org/dev/peps/pep-0257/#handling-docstring-indentation """ if not docstring or not docstring.strip(): return '' # Convert tabs to spaces and split into lines lines = docstring.expandtabs().splitlines() indent = min(len(line) - len(line.lstrip()) for line in lines if line.lstrip()) trimmed = [lines[0].lstrip()] + [line[indent:].rstrip() for line in lines[1:]] return "\n".join(trimmed).strip() def parse_docstring(docstring): """ Parse out the parts of a docstring. Return (title, body, metadata). """ docstring = trim_docstring(docstring) parts = re.split(r'\n{2,}', docstring) title = parts[0] if len(parts) == 1: body = '' metadata = {} else: parser = HeaderParser() try: metadata = parser.parsestr(parts[-1]) except HeaderParseError: metadata = {} body = "\n\n".join(parts[1:]) else: metadata = dict(metadata.items()) if metadata: body = "\n\n".join(parts[1:-1]) else: body = "\n\n".join(parts[1:]) return title, body, metadata def parse_rst(text, default_reference_context, thing_being_parsed=None): """ Convert the string from reST to an XHTML fragment. """ overrides = { 'doctitle_xform': True, 'inital_header_level': 3, "default_reference_context": default_reference_context, "link_base": reverse('django-admindocs-docroot').rstrip('/') } if thing_being_parsed: thing_being_parsed = force_bytes("<%s>" % thing_being_parsed) # Wrap ``text`` in some reST that sets the default role to ``cmsreference``, # then restores it. source = """ .. default-role:: cmsreference %s .. default-role:: """ parts = docutils.core.publish_parts(source % text, source_path=thing_being_parsed, destination_path=None, writer_name='html', settings_overrides=overrides) return mark_safe(parts['fragment']) # # reST roles # ROLES = { 'model': '%s/models/%s/', 'view': '%s/views/%s/', 'template': '%s/templates/%s/', 'filter': '%s/filters/#%s', 'tag': '%s/tags/#%s', } def create_reference_role(rolename, urlbase): def _role(name, rawtext, text, lineno, inliner, options=None, content=None): if options is None: options = {} if content is None: content = [] node = docutils.nodes.reference( rawtext, text, refuri=(urlbase % ( inliner.document.settings.link_base, text.lower(), )), **options ) return [node], [] docutils.parsers.rst.roles.register_canonical_role(rolename, _role) def default_reference_role(name, rawtext, text, lineno, inliner, options=None, content=None): if options is None: options = {} if content is None: content = [] context = inliner.document.settings.default_reference_context node = docutils.nodes.reference( rawtext, text, refuri=(ROLES[context] % ( inliner.document.settings.link_base, text.lower(), )), **options ) return [node], [] if docutils_is_available: docutils.parsers.rst.roles.register_canonical_role('cmsreference', default_reference_role) for name, urlbase in ROLES.items(): create_reference_role(name, urlbase)
bsd-3-clause
-9,213,072,462,321,522,000
28.597122
94
0.60841
false
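parse_docstring above splits a docstring into a title, a body and trailing header-style metadata. A short sketch of what it returns for a typical docstring; the import path matches the record's location and the expected shapes are described in the comments:

from django.contrib.admindocs.utils import parse_docstring

doc = """Do something useful.

Longer explanation of what "something useful" means,
possibly spanning several lines.

author: Jane Doe
status: draft
"""

title, body, metadata = parse_docstring(doc)
print(title)      # first paragraph: 'Do something useful.'
print(body)       # the middle paragraph only
print(metadata)   # {'author': 'Jane Doe', 'status': 'draft'}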
Rundll/django-mailer-2
django_mailer/lockfile.py
179
15120
""" lockfile.py - Platform-independent advisory file locks. Requires Python 2.5 unless you apply 2.4.diff Locking is done on a per-thread basis instead of a per-process basis. Usage: >>> lock = FileLock('somefile') >>> try: ... lock.acquire() ... except AlreadyLocked: ... print 'somefile', 'is locked already.' ... except LockFailed: ... print 'somefile', 'can\\'t be locked.' ... else: ... print 'got lock' got lock >>> print lock.is_locked() True >>> lock.release() >>> lock = FileLock('somefile') >>> print lock.is_locked() False >>> with lock: ... print lock.is_locked() True >>> print lock.is_locked() False >>> # It is okay to lock twice from the same thread... >>> with lock: ... lock.acquire() ... >>> # Though no counter is kept, so you can't unlock multiple times... >>> print lock.is_locked() False Exceptions: Error - base class for other exceptions LockError - base class for all locking exceptions AlreadyLocked - Another thread or process already holds the lock LockFailed - Lock failed for some other reason UnlockError - base class for all unlocking exceptions AlreadyUnlocked - File was not locked. NotMyLock - File was locked but not by the current thread/process """ from __future__ import division import sys import socket import os import thread import threading import time import errno import urllib # Work with PEP8 and non-PEP8 versions of threading module. if not hasattr(threading, "current_thread"): threading.current_thread = threading.currentThread if not hasattr(threading.Thread, "get_name"): threading.Thread.get_name = threading.Thread.getName __all__ = ['Error', 'LockError', 'LockTimeout', 'AlreadyLocked', 'LockFailed', 'UnlockError', 'NotLocked', 'NotMyLock', 'LinkFileLock', 'MkdirFileLock', 'SQLiteFileLock'] class Error(Exception): """ Base class for other exceptions. >>> try: ... raise Error ... except Exception: ... pass """ pass class LockError(Error): """ Base class for error arising from attempts to acquire the lock. >>> try: ... raise LockError ... except Error: ... pass """ pass class LockTimeout(LockError): """Raised when lock creation fails within a user-defined period of time. >>> try: ... raise LockTimeout ... except LockError: ... pass """ pass class AlreadyLocked(LockError): """Some other thread/process is locking the file. >>> try: ... raise AlreadyLocked ... except LockError: ... pass """ pass class LockFailed(LockError): """Lock file creation failed for some other reason. >>> try: ... raise LockFailed ... except LockError: ... pass """ pass class UnlockError(Error): """ Base class for errors arising from attempts to release the lock. >>> try: ... raise UnlockError ... except Error: ... pass """ pass class NotLocked(UnlockError): """Raised when an attempt is made to unlock an unlocked file. >>> try: ... raise NotLocked ... except UnlockError: ... pass """ pass class NotMyLock(UnlockError): """Raised when an attempt is made to unlock a file someone else locked. >>> try: ... raise NotMyLock ... except UnlockError: ... 
pass """ pass class LockBase: """Base class for platform-specific lock classes.""" def __init__(self, path, threaded=True): """ >>> lock = LockBase('somefile') >>> lock = LockBase('somefile', threaded=False) """ self.path = path self.lock_file = os.path.abspath(path) + ".lock" self.hostname = socket.gethostname() self.pid = os.getpid() if threaded: name = threading.current_thread().get_name() tname = "%s-" % urllib.quote(name, safe="") else: tname = "" dirname = os.path.dirname(self.lock_file) self.unique_name = os.path.join(dirname, "%s.%s%s" % (self.hostname, tname, self.pid)) def acquire(self, timeout=None): """ Acquire the lock. * If timeout is omitted (or None), wait forever trying to lock the file. * If timeout > 0, try to acquire the lock for that many seconds. If the lock period expires and the file is still locked, raise LockTimeout. * If timeout <= 0, raise AlreadyLocked immediately if the file is already locked. """ raise NotImplemented("implement in subclass") def release(self): """ Release the lock. If the file is not locked, raise NotLocked. """ raise NotImplemented("implement in subclass") def is_locked(self): """ Tell whether or not the file is locked. """ raise NotImplemented("implement in subclass") def i_am_locking(self): """ Return True if this object is locking the file. """ raise NotImplemented("implement in subclass") def break_lock(self): """ Remove a lock. Useful if a locking thread failed to unlock. """ raise NotImplemented("implement in subclass") def __enter__(self): """ Context manager support. """ self.acquire() return self def __exit__(self, *_exc): """ Context manager support. """ self.release() class LinkFileLock(LockBase): """Lock access to a file using atomic property of link(2).""" def acquire(self, timeout=None): try: open(self.unique_name, "wb").close() except IOError: raise LockFailed("failed to create %s" % self.unique_name) end_time = time.time() if timeout is not None and timeout > 0: end_time += timeout while True: # Try and create a hard link to it. try: os.link(self.unique_name, self.lock_file) except OSError: # Link creation failed. Maybe we've double-locked? nlinks = os.stat(self.unique_name).st_nlink if nlinks == 2: # The original link plus the one I created == 2. We're # good to go. return else: # Otherwise the lock creation failed. if timeout is not None and time.time() > end_time: os.unlink(self.unique_name) if timeout > 0: raise LockTimeout else: raise AlreadyLocked time.sleep(timeout is not None and timeout/10 or 0.1) else: # Link creation succeeded. We're good to go. return def release(self): if not self.is_locked(): raise NotLocked elif not os.path.exists(self.unique_name): raise NotMyLock os.unlink(self.unique_name) os.unlink(self.lock_file) def is_locked(self): return os.path.exists(self.lock_file) def i_am_locking(self): return (self.is_locked() and os.path.exists(self.unique_name) and os.stat(self.unique_name).st_nlink == 2) def break_lock(self): if os.path.exists(self.lock_file): os.unlink(self.lock_file) class MkdirFileLock(LockBase): """Lock file by creating a directory.""" def __init__(self, path, threaded=True): """ >>> lock = MkdirFileLock('somefile') >>> lock = MkdirFileLock('somefile', threaded=False) """ LockBase.__init__(self, path, threaded) if threaded: tname = "%x-" % thread.get_ident() else: tname = "" # Lock file itself is a directory. Place the unique file name into # it. 
self.unique_name = os.path.join(self.lock_file, "%s.%s%s" % (self.hostname, tname, self.pid)) def acquire(self, timeout=None): end_time = time.time() if timeout is not None and timeout > 0: end_time += timeout if timeout is None: wait = 0.1 else: wait = max(0, timeout / 10) while True: try: os.mkdir(self.lock_file) except OSError: err = sys.exc_info()[1] if err.errno == errno.EEXIST: # Already locked. if os.path.exists(self.unique_name): # Already locked by me. return if timeout is not None and time.time() > end_time: if timeout > 0: raise LockTimeout else: # Someone else has the lock. raise AlreadyLocked time.sleep(wait) else: # Couldn't create the lock for some other reason raise LockFailed("failed to create %s" % self.lock_file) else: open(self.unique_name, "wb").close() return def release(self): if not self.is_locked(): raise NotLocked elif not os.path.exists(self.unique_name): raise NotMyLock os.unlink(self.unique_name) os.rmdir(self.lock_file) def is_locked(self): return os.path.exists(self.lock_file) def i_am_locking(self): return (self.is_locked() and os.path.exists(self.unique_name)) def break_lock(self): if os.path.exists(self.lock_file): for name in os.listdir(self.lock_file): os.unlink(os.path.join(self.lock_file, name)) os.rmdir(self.lock_file) class SQLiteFileLock(LockBase): "Demonstration of using same SQL-based locking." import tempfile _fd, testdb = tempfile.mkstemp() os.close(_fd) os.unlink(testdb) del _fd, tempfile def __init__(self, path, threaded=True): LockBase.__init__(self, path, threaded) self.lock_file = unicode(self.lock_file) self.unique_name = unicode(self.unique_name) import sqlite3 self.connection = sqlite3.connect(SQLiteFileLock.testdb) c = self.connection.cursor() try: c.execute("create table locks" "(" " lock_file varchar(32)," " unique_name varchar(32)" ")") except sqlite3.OperationalError: pass else: self.connection.commit() import atexit atexit.register(os.unlink, SQLiteFileLock.testdb) def acquire(self, timeout=None): end_time = time.time() if timeout is not None and timeout > 0: end_time += timeout if timeout is None: wait = 0.1 elif timeout <= 0: wait = 0 else: wait = timeout / 10 cursor = self.connection.cursor() while True: if not self.is_locked(): # Not locked. Try to lock it. cursor.execute("insert into locks" " (lock_file, unique_name)" " values" " (?, ?)", (self.lock_file, self.unique_name)) self.connection.commit() # Check to see if we are the only lock holder. cursor.execute("select * from locks" " where unique_name = ?", (self.unique_name,)) rows = cursor.fetchall() if len(rows) > 1: # Nope. Someone else got there. Remove our lock. cursor.execute("delete from locks" " where unique_name = ?", (self.unique_name,)) self.connection.commit() else: # Yup. We're done, so go home. return else: # Check to see if we are the only lock holder. cursor.execute("select * from locks" " where unique_name = ?", (self.unique_name,)) rows = cursor.fetchall() if len(rows) == 1: # We're the locker, so go home. return # Maybe we should wait a bit longer. if timeout is not None and time.time() > end_time: if timeout > 0: # No more waiting. raise LockTimeout else: # Someone else has the lock and we are impatient.. raise AlreadyLocked # Well, okay. We'll give it a bit longer. 
time.sleep(wait) def release(self): if not self.is_locked(): raise NotLocked if not self.i_am_locking(): raise NotMyLock((self._who_is_locking(), self.unique_name)) cursor = self.connection.cursor() cursor.execute("delete from locks" " where unique_name = ?", (self.unique_name,)) self.connection.commit() def _who_is_locking(self): cursor = self.connection.cursor() cursor.execute("select unique_name from locks" " where lock_file = ?", (self.lock_file,)) return cursor.fetchone()[0] def is_locked(self): cursor = self.connection.cursor() cursor.execute("select * from locks" " where lock_file = ?", (self.lock_file,)) rows = cursor.fetchall() return not not rows def i_am_locking(self): cursor = self.connection.cursor() cursor.execute("select * from locks" " where lock_file = ?" " and unique_name = ?", (self.lock_file, self.unique_name)) return not not cursor.fetchall() def break_lock(self): cursor = self.connection.cursor() cursor.execute("delete from locks" " where lock_file = ?", (self.lock_file,)) self.connection.commit() if hasattr(os, "link"): FileLock = LinkFileLock else: FileLock = MkdirFileLock
mit
2,514,133,598,610,212,000
29.361446
77
0.513294
false
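All of the lock classes above expose the acquire/release/is_locked API sketched in the module docstring. A hedged example of the usual "grab it or bow out" pattern; the lock path is a placeholder and the import assumes the module is reachable as django_mailer.lockfile, per the record's path (note the module itself is Python 2 code):

from django_mailer.lockfile import FileLock, AlreadyLocked, LockTimeout, LockFailed

lock = FileLock("/tmp/send_mail")          # LinkFileLock or MkdirFileLock under the hood

try:
    lock.acquire(timeout=5)                # wait up to ~5 seconds for the lock
except AlreadyLocked:
    print("another process holds the lock")
except LockTimeout:
    print("gave up waiting for the lock")
except LockFailed:
    print("could not create the lock file at all")
else:
    try:
        print("lock held; doing the protected work")
    finally:
        lock.release()                     # always give it back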
beebotte/ISS-realtime-position
iss_position_pub.py
1
3041
#!/usr/bin/python # coding: utf8 ############################################################ # Author Bachar Wehbi <[email protected]> # Copyright (c) 2013-2014 Beebotte <[email protected]> # This program is published under the MIT License # Check http://opensource.org/licenses/MIT for details. # # This code uses the Beebotte API, you must have an account. # You can register here: http://beebotte.com/register # # This program computes the position of the ISS in real-time # and publishes it to Beebotte. # # Use the pip package manager to install dependencies: # $ pip install pyephem # $ pip install beebotte ############################################################ import time from beebotte import * import ephem import datetime import urllib2 from math import degrees ### URL where we will fetch TLE data url = "http://www.celestrak.com/NORAD/elements/stations.txt" ### Replace CHENNL_TOKEN with that of your channel's (this code assumes the channel name is "ISS") CHANNEL_TOKEN = None bbt = BBT(token = CHANNEL_TOKEN) ### Otherwise, use your Access and Secret keys to connect to Beebotte ### Replace ACCESS_KEY and SECRET_KEY with those of your account # ACCESS_KEY = None # SECRET_KEY = None # bbt = BBT(ACCESS_KEY, SECRET_KEY) ### Change channel name and resource name as suits you iss_position_resource = Resource(bbt, 'ISS', 'position') iss = None count = 0 def update_tle(): global iss ### This is what TLE looks like. It will be updated every hour # line1 = "ISS (ZARYA)" # line2 = "1 25544U 98067A 16070.60802946 .00010558 00000-0 16731-3 0 9999" # line3 = "2 25544 51.6423 189.6478 0001642 260.2328 233.0609 15.53995147989640" try: ### Fetch and extract ISS TLE data req = urllib2.Request(url) response = urllib2.urlopen(req) data = response.read() tle = data.split('\n')[0:3] if len(tle) >= 3: line1 = tle[0] line2 = tle[1] line3 = tle[2] iss = ephem.readtle(line1, line2, line3) except Exception as inst: print type(inst) ### the exception instance print inst.args ### arguments stored in .args print inst ### def run(): global count update_tle() while True: ### update the TLE data once per hour if count > 3600: update_tle() count = 0 count += 1 try: ### compute the ISS position now = datetime.datetime.utcnow() iss.compute(now) print('longitude: %f - latitude: %f' % (degrees(iss.sublong), degrees(iss.sublat))) ### Send temperature to Beebotte iss_position_resource.publish({ "timestamp": round(time.time()), ### transform longitude and latitude to degrees "position": { "long": degrees(iss.sublong), "lat": degrees(iss.sublat) } }) except Exception as inst: print type(inst) ### the exception instance print inst.args ### arguments stored in .args print inst ### ### sleep some time time.sleep( 1 ) run()
mit
6,291,293,306,692,910,000
27.157407
98
0.624466
false
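update_tle() above assumes the ISS entry is always the first three lines of stations.txt. A small self-contained sketch of picking the entry out by name instead, so a reordered file would not break the script; the sample text stands in for the downloaded data and reuses the TLE lines quoted in the script's own comment:

def extract_tle(text, name="ISS (ZARYA)"):
    """Return the (name, line1, line2) triple for `name`, or None if absent."""
    lines = [l.rstrip() for l in text.splitlines() if l.strip()]
    for i, line in enumerate(lines[:-2]):
        if line.startswith(name):
            return lines[i], lines[i + 1], lines[i + 2]
    return None

sample = (
    "SOME OTHER SAT\n"
    "1 00000U 00000A   16070.00000000  .00000000  00000-0  00000-0 0  0000\n"
    "2 00000  00.0000 000.0000 0000000 000.0000 000.0000 00.00000000000000\n"
    "ISS (ZARYA)\n"
    "1 25544U 98067A   16070.60802946  .00010558  00000-0  16731-3 0  9999\n"
    "2 25544  51.6423 189.6478 0001642 260.2328 233.0609 15.53995147989640\n"
)

triple = extract_tle(sample)
print(triple[0])   # 'ISS (ZARYA)' -- ready to feed straight into ephem.readtle(*triple)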
ansible/ansible-modules-core
network/netvisor/pn_vlan.py
29
8941
#!/usr/bin/python """ PN CLI vlan-create/vlan-delete """ # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # import shlex ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'version': '1.0'} DOCUMENTATION = """ --- module: pn_vlan author: "Pluribus Networks (@amitsi)" version_added: "2.2" version: 1.0 short_description: CLI command to create/delete a VLAN. description: - Execute vlan-create or vlan-delete command. - VLANs are used to isolate network traffic at Layer 2.The VLAN identifiers 0 and 4095 are reserved and cannot be used per the IEEE 802.1Q standard. The range of configurable VLAN identifiers is 2 through 4092. options: pn_cliusername: description: - Provide login username if user is not root. required: False pn_clipassword: description: - Provide login password if user is not root. required: False pn_cliswitch: description: - Target switch(es) to run the cli on. required: False state: description: - State the action to perform. Use 'present' to create vlan and 'absent' to delete vlan. required: True choices: ['present', 'absent'] pn_vlanid: description: - Specify a VLAN identifier for the VLAN. This is a value between 2 and 4092. required: True pn_scope: description: - Specify a scope for the VLAN. - Required for vlan-create. choices: ['fabric', 'local'] pn_description: description: - Specify a description for the VLAN. pn_stats: description: - Specify if you want to collect statistics for a VLAN. Statistic collection is enabled by default. pn_ports: description: - Specifies the switch network data port number, list of ports, or range of ports. Port numbers must ne in the range of 1 to 64. pn_untagged_ports: description: - Specifies the ports that should have untagged packets mapped to the VLAN. Untagged packets are packets that do not contain IEEE 802.1Q VLAN tags. """ EXAMPLES = """ - name: create a VLAN pn_vlan: state: 'present' pn_vlanid: 1854 pn_scope: fabric - name: delete VLANs pn_vlan: state: 'absent' pn_vlanid: 1854 """ RETURN = """ command: description: The CLI command run on the target node(s). stdout: description: The set of responses from the vlan command. returned: always type: list stderr: description: The set of error responses from the vlan command. returned: on error type: list changed: description: Indicates whether the CLI caused changes on the target. returned: always type: bool """ VLAN_EXISTS = None MAX_VLAN_ID = 4092 MIN_VLAN_ID = 2 def pn_cli(module): """ This method is to generate the cli portion to launch the Netvisor cli. It parses the username, password, switch parameters from module. 
:param module: The Ansible module to fetch username, password and switch :return: returns the cli string for further processing """ username = module.params['pn_cliusername'] password = module.params['pn_clipassword'] cliswitch = module.params['pn_cliswitch'] if username and password: cli = '/usr/bin/cli --quiet --user %s:%s ' % (username, password) else: cli = '/usr/bin/cli --quiet ' if cliswitch == 'local': cli += ' switch-local ' else: cli += ' switch ' + cliswitch return cli def check_cli(module, cli): """ This method checks for idempotency using the vlan-show command. If a vlan with given vlan id exists, return VLAN_EXISTS as True else False. :param module: The Ansible module to fetch input parameters :param cli: The CLI string :return Global Booleans: VLAN_EXISTS """ vlanid = module.params['pn_vlanid'] show = cli + \ ' vlan-show id %s format id,scope no-show-headers' % str(vlanid) show = shlex.split(show) out = module.run_command(show)[1] out = out.split() # Global flags global VLAN_EXISTS if str(vlanid) in out: VLAN_EXISTS = True else: VLAN_EXISTS = False def run_cli(module, cli): """ This method executes the cli command on the target node(s) and returns the output. The module then exits based on the output. :param cli: the complete cli string to be executed on the target node(s). :param module: The Ansible module to fetch command """ cliswitch = module.params['pn_cliswitch'] state= module.params['state'] command = get_command_from_state(state) cmd = shlex.split(cli) # 'out' contains the output # 'err' contains the error messages result, out, err = module.run_command(cmd) print_cli = cli.split(cliswitch)[1] # Response in JSON format if result != 0: module.exit_json( command=print_cli, stderr=err.strip(), msg="%s operation failed" % command, changed=False ) if out: module.exit_json( command=print_cli, stdout=out.strip(), msg="%s operation completed" % command, changed=True ) else: module.exit_json( command=print_cli, msg="%s operation completed" % command, changed=True ) def get_command_from_state(state): """ This method gets appropriate command name for the state specified. It returns the command name for the specified state. :param state: The state for which the respective command name is required. 
""" command = None if state == 'present': command = 'vlan-create' if state == 'absent': command = 'vlan-delete' return command def main(): """ This section is for arguments parsing """ module = AnsibleModule( argument_spec=dict( pn_cliusername=dict(required=False, type='str'), pn_clipassword=dict(required=False, type='str', no_log=True), pn_cliswitch=dict(required=False, type='str', default='local'), state =dict(required=True, type='str', choices=['present', 'absent']), pn_vlanid=dict(required=True, type='int'), pn_scope=dict(type='str', choices=['fabric', 'local']), pn_description=dict(type='str'), pn_stats=dict(type='bool'), pn_ports=dict(type='str'), pn_untagged_ports=dict(type='str') ), required_if=( ["state", "present", ["pn_vlanid", "pn_scope"]], ["state", "absent", ["pn_vlanid"]] ) ) # Accessing the arguments state = module.params['state'] vlanid = module.params['pn_vlanid'] scope = module.params['pn_scope'] description = module.params['pn_description'] stats = module.params['pn_stats'] ports = module.params['pn_ports'] untagged_ports = module.params['pn_untagged_ports'] command = get_command_from_state(state) # Building the CLI command string cli = pn_cli(module) if not MIN_VLAN_ID <= vlanid <= MAX_VLAN_ID: module.exit_json( msg="VLAN id must be between 2 and 4092", changed=False ) if command == 'vlan-create': check_cli(module, cli) if VLAN_EXISTS is True: module.exit_json( skipped=True, msg='VLAN with id %s already exists' % str(vlanid) ) cli += ' %s id %s scope %s ' % (command, str(vlanid), scope) if description: cli += ' description ' + description if stats is True: cli += ' stats ' if stats is False: cli += ' no-stats ' if ports: cli += ' ports ' + ports if untagged_ports: cli += ' untagged-ports ' + untagged_ports if command == 'vlan-delete': check_cli(module, cli) if VLAN_EXISTS is False: module.exit_json( skipped=True, msg='VLAN with id %s does not exist' % str(vlanid) ) cli += ' %s id %s ' % (command, str(vlanid)) run_cli(module, cli) # AnsibleModule boilerplate from ansible.module_utils.basic import AnsibleModule if __name__ == '__main__': main()
gpl-3.0
3,900,338,691,962,531,000
27.749196
79
0.618387
false
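Two of the building blocks in the module above are pure enough to exercise on their own: the vlan-show token check behind VLAN_EXISTS and the state-to-command mapping. A hedged sketch that mirrors them with fake CLI output; it does not touch the real Netvisor cli or AnsibleModule:

MIN_VLAN_ID = 2
MAX_VLAN_ID = 4092

def vlan_exists(show_output, vlanid):
    """Mirror of check_cli(): the id must appear as a whitespace token."""
    return str(vlanid) in show_output.split()

def command_for_state(state):
    """Same mapping as get_command_from_state() above."""
    return {"present": "vlan-create", "absent": "vlan-delete"}.get(state)

# Fake output of `vlan-show id 1854 format id,scope no-show-headers`.
print(vlan_exists("1854 fabric", 1854))    # True  -> vlan-create is skipped
print(vlan_exists("", 1854))               # False -> vlan-create goes ahead
print(command_for_state("present"))        # vlan-create
assert MIN_VLAN_ID <= 1854 <= MAX_VLAN_ID  # the same bounds check as main()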
antoan2/incubator-mxnet
python/mxnet/module/sequential_module.py
38
17331
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=too-many-arguments, too-many-locals, too-many-instance-attributes """`SequentialModule` is a container module that chains a number of modules together.""" import logging import copy from ..initializer import Uniform from .base_module import BaseModule class SequentialModule(BaseModule): """A SequentialModule is a container module that can chain multiple modules together. .. note:: Building a computation graph with this kind of imperative container is less flexible and less efficient than the symbolic graph. So, this should be only used as a handy utility. """ META_TAKE_LABELS = 'take_labels' META_AUTO_WIRING = 'auto_wiring' def __init__(self, logger=logging): super(SequentialModule, self).__init__(logger=logger) self._modules = [] self._metas = [] self._label_shapes = None self._data_shapes = None self._meta_keys = set([getattr(SequentialModule, x) for x in dir(SequentialModule) if x.startswith('META_')]) def add(self, module, **kwargs): """Adds a module to the chain. Parameters ---------- module : BaseModule The new module to add. kwargs : **keywords All the keyword arguments are saved as meta information for the added module. The currently known meta includes - `take_labels`: indicating whether the module expect to take labels when doing computation. Note any module in the chain can take labels (not necessarily only the top most one), and they all take the same labels passed from the original data batch for the `SequentialModule`. Returns ------- self This function returns `self` to allow us to easily chain a series of `add` calls. Examples -------- >>> # An example of addinging two modules to a chain. >>> seq_mod = mx.mod.SequentialModule() >>> seq_mod.add(mod1) >>> seq_mod.add(mod2) """ self._modules.append(module) # a sanity check to avoid typo for key in kwargs: assert key in self._meta_keys, ('Unknown meta "%s", a typo?' % key) self._metas.append(kwargs) # after adding new modules, we are reset back to raw states, needs # to bind, init_params, etc. self.binded = False self.params_initialized = False self.optimizer_initialized = False return self # for easier chaining @property def data_names(self): """A list of names for data required by this module.""" if len(self._modules) > 0: return self._modules[0].data_names return [] @property def output_names(self): """A list of names for the outputs of this module.""" if len(self._modules) > 0: return self._modules[-1].output_names return [] @property def data_shapes(self): """Gets data shapes. Returns ------- list A list of `(name, shape)` pairs. The data shapes of the first module is the data shape of a `SequentialModule`. """ assert self.binded return self._modules[0].data_shapes @property def label_shapes(self): """Gets label shapes. 
Returns ------- list A list of `(name, shape)` pairs. The return value could be `None` if the module does not need labels, or if the module is not bound for training (in this case, label information is not available). """ assert self.binded return self._label_shapes @property def output_shapes(self): """Gets output shapes. Returns ------- list A list of `(name, shape)` pairs. The output shapes of the last module is the output shape of a `SequentialModule`. """ assert self.binded return self._modules[-1].output_shapes def get_params(self): """Gets current parameters. Returns ------- (arg_params, aux_params) A pair of dictionaries each mapping parameter names to NDArray values. This is a merged dictionary of all the parameters in the modules. """ assert self.binded and self.params_initialized arg_params = dict() aux_params = dict() for module in self._modules: arg, aux = module.get_params() arg_params.update(arg) aux_params.update(aux) return (arg_params, aux_params) def init_params(self, initializer=Uniform(0.01), arg_params=None, aux_params=None, allow_missing=False, force_init=False, allow_extra=False): """Initializes parameters. Parameters ---------- initializer : Initializer arg_params : dict Default ``None``. Existing parameters. This has higher priority than `initializer`. aux_params : dict Default ``None``. Existing auxiliary states. This has higher priority than `initializer`. allow_missing : bool Allow missing values in `arg_params` and `aux_params` (if not ``None``). In this case, missing values will be filled with `initializer`. force_init : bool Default ``False``. allow_extra : boolean, optional Whether allow extra parameters that are not needed by symbol. If this is True, no error will be thrown when arg_params or aux_params contain extra parameters that is not needed by the executor. """ if self.params_initialized and not force_init: return assert self.binded, 'call bind before initializing the parameters' for module in self._modules: module.init_params(initializer=initializer, arg_params=arg_params, aux_params=aux_params, allow_missing=allow_missing, force_init=force_init, allow_extra=allow_extra) # make sure we do not have duplicated parameter names def _check_name(known_names, new_names, modules, i): """Internal function to help checking duplicated names.""" for name in new_names: assert not name in known_names, "Duplicated parameter names: " + \ ('name "%s" in layer %d (%s) is already ' % (name, i, type(modules[i]))) + \ ('used in layer %d (%s).' % (known_names[name], type(modules[known_names[name]]))) known_names[name] = i arg_names = dict() aux_names = dict() for i_layer, module in enumerate(self._modules): arg_params, aux_params = module.get_params() _check_name(arg_names, arg_params.keys(), self._modules, i_layer) _check_name(aux_names, aux_params.keys(), self._modules, i_layer) self.params_initialized = True def bind(self, data_shapes, label_shapes=None, for_training=True, inputs_need_grad=False, force_rebind=False, shared_module=None, grad_req='write'): """Binds the symbols to construct executors. This is necessary before one can perform computation with the module. Parameters ---------- data_shapes : list of (str, tuple) Typically is `data_iter.provide_data`. label_shapes : list of (str, tuple) Typically is `data_iter.provide_label`. for_training : bool Default is ``True``. Whether the executors should be bind for training. inputs_need_grad : bool Default is ``False``. Whether the gradients to the input data need to be computed. Typically this is not needed. 
But this might be needed when implementing composition of modules. force_rebind : bool Default is ``False``. This function does nothing if the executors are already bound. But with this ``True``, the executors will be forced to rebind. shared_module : Module Default is ``None``. Currently shared module is not supported for `SequentialModule`. grad_req : str, list of str, dict of str to str Requirement for gradient accumulation. Can be 'write', 'add', or 'null' (default to 'write'). Can be specified globally (str) or for each argument (list, dict). """ if self.binded and not force_rebind: self.logger.warning('Already bound, ignoring bind()') return if inputs_need_grad: assert for_training is True assert shared_module is None, 'Shared module is not supported' assert len(self._modules) > 0, 'Attempting to bind an empty SequentialModule' self.binded = True # the same label shapes are used for all chained modules self._label_shapes = label_shapes my_data_shapes = data_shapes anybody_ever_needs_label = False for i_layer, module in enumerate(self._modules): meta = self._metas[i_layer] if SequentialModule.META_TAKE_LABELS in meta and \ meta[SequentialModule.META_TAKE_LABELS]: my_label_shapes = label_shapes anybody_ever_needs_label = True else: my_label_shapes = None my_inputs_need_grad = bool(inputs_need_grad or (for_training and i_layer > 0)) if meta.get(SequentialModule.META_AUTO_WIRING, False): data_names = module.data_names assert len(data_names) == len(my_data_shapes) my_data_shapes = [(new_name, shape) for (new_name, (_, shape)) in zip(data_names, my_data_shapes)] module.bind(data_shapes=my_data_shapes, label_shapes=my_label_shapes, for_training=for_training, inputs_need_grad=my_inputs_need_grad, force_rebind=force_rebind, shared_module=None, grad_req=grad_req) # the output of the previous module is the data of the next module my_data_shapes = module.output_shapes if not anybody_ever_needs_label: # then I do not need label either self._label_shapes = None def init_optimizer(self, kvstore='local', optimizer='sgd', optimizer_params=(('learning_rate', 0.01),), force_init=False): """Installs and initializes optimizers. Parameters ---------- kvstore : str or KVStore Default `'local'`. optimizer : str or Optimizer Default `'sgd'` optimizer_params : dict Default ``(('learning_rate', 0.01),)``. The default value is not a dictionary, just to avoid pylint warning of dangerous default values. force_init : bool Default ``False``, indicating whether we should force re-initializing the optimizer in the case an optimizer is already installed. """ assert self.binded and self.params_initialized if self.optimizer_initialized and not force_init: self.logger.warning('optimizer already initialized, ignoring.') return for module in self._modules: module.init_optimizer(kvstore=kvstore, optimizer=optimizer, optimizer_params=optimizer_params, force_init=force_init) self.optimizer_initialized = True def forward(self, data_batch, is_train=None): """Forward computation. Parameters ---------- data_batch : DataBatch is_train : bool Default is ``None``, in which case `is_train` is take as ``self.for_training``. """ assert self.binded and self.params_initialized # make a shallow copy, just to maintain necessary properties (if any) like # bucket_key, pad, etc. 
data_batch = copy.copy(data_batch) for i_layer, module in enumerate(self._modules): module.forward(data_batch, is_train=is_train) if i_layer+1 == len(self._modules): # the last layer, do not need to do the followings break data_batch.data = module.get_outputs() if hasattr(data_batch, 'provide_data'): # need to update this, in case the internal module is using bucketing # or whatever data_names = [x[0] for x in module.output_shapes] assert len(data_names) == len(data_batch.data) data_batch.provide_data = [(name, x.shape) for name, x in zip(data_names, data_batch.data)] def backward(self, out_grads=None): """Backward computation.""" assert self.binded and self.params_initialized for i_layer, module in reversed(list(zip(range(len(self._modules)), self._modules))): module.backward(out_grads=out_grads) if i_layer == 0: break out_grads = module.get_input_grads() def update(self): """Updates parameters according to installed optimizer and the gradient computed in the previous forward-backward cycle. """ assert self.binded and self.params_initialized and self.optimizer_initialized for module in self._modules: module.update() def get_outputs(self, merge_multi_context=True): """Gets outputs from a previous forward computation. Parameters ---------- merge_multi_context : bool Default is ``True``. In the case when data-parallelism is used, the outputs will be collected from multiple devices. A ``True`` value indicate that we should merge the collected results so that they look like from a single executor. Returns ------- list of NDArray or list of list of NDArray If `merge_multi_context` is ``True``, it is like ``[out1, out2]``. Otherwise, it is like ``[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]``. All the output elements are numpy arrays. """ assert self.binded and self.params_initialized return self._modules[-1].get_outputs(merge_multi_context=merge_multi_context) def get_input_grads(self, merge_multi_context=True): """Gets the gradients with respect to the inputs of the module. Parameters ---------- merge_multi_context : bool Default is ``True``. In the case when data-parallelism is used, the outputs will be collected from multiple devices. A ``True`` value indicate that we should merge the collected results so that they look like from a single executor. Returns ------- list of NDArrays or list of list of NDArrays If `merge_multi_context` is ``True``, it is like ``[grad1, grad2]``. Otherwise, it is like ``[[grad1_dev1, grad1_dev2], [grad2_dev1, grad2_dev2]]``. All the output elements are `NDArray`. """ assert self.binded and self.params_initialized and self.inputs_need_grad return self._modules[0].get_input_grads(merge_multi_context=merge_multi_context) def update_metric(self, eval_metric, labels): """Evaluates and accumulates evaluation metric on outputs of the last forward computation. Parameters ---------- eval_metric : EvalMetric labels : list of NDArray Typically ``data_batch.label``. """ assert self.binded and self.params_initialized for meta, module in zip(self._metas, self._modules): if SequentialModule.META_TAKE_LABELS in meta and \ meta[SequentialModule.META_TAKE_LABELS]: module.update_metric(eval_metric, labels) def install_monitor(self, mon): """Installs monitor on all executors.""" assert self.binded for module in self._modules: module.install_monitor(mon)
apache-2.0
-4,246,953,449,106,535,400
38.47836
98
0.592984
false
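The add() docstring above shows the chaining call but not the meta keywords that bind() actually consumes (take_labels and auto_wiring, behind META_TAKE_LABELS and META_AUTO_WIRING). A hedged two-module sketch in the spirit of the MXNet module tutorial; the symbols and shapes are illustrative and it assumes an MXNet install where this class is exposed as mx.mod.SequentialModule:

import mxnet as mx

# Stage 1: a feature extractor that needs no labels of its own.
net1 = mx.sym.FullyConnected(mx.sym.Variable("data"), name="fc1", num_hidden=64)
mod1 = mx.mod.Module(net1, label_names=[])

# Stage 2: consumes stage 1's output as its "data" and takes the labels.
net2 = mx.sym.FullyConnected(mx.sym.Variable("data"), name="fc2", num_hidden=10)
net2 = mx.sym.SoftmaxOutput(net2, name="softmax")
mod2 = mx.mod.Module(net2, label_names=["softmax_label"])

seq = mx.mod.SequentialModule()
seq.add(mod1).add(mod2, take_labels=True, auto_wiring=True)

seq.bind(data_shapes=[("data", (32, 128))],
         label_shapes=[("softmax_label", (32,))])
seq.init_params()
print(seq.output_shapes)   # shapes produced by the last module in the chain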
yeming233/rally
tests/unit/plugins/openstack/scenarios/ceilometer/test_stats.py
2
1757
# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from rally.plugins.openstack.scenarios.ceilometer import stats from tests.unit import test class CeilometerStatsTestCase(test.ScenarioTestCase): def test_get_stats(self): scenario = stats.GetStats(self.context) scenario._get_stats = mock.MagicMock() context = {"user": {"tenant_id": "fake", "id": "fake_id"}, "tenant": {"id": "fake_id", "resources": ["fake_resource"]}} metadata_query = {"a": "test"} period = 10 groupby = "user_id" aggregates = "sum" scenario.context = context scenario.run("fake_meter", True, True, True, metadata_query, period, groupby, aggregates) scenario._get_stats.assert_called_once_with( "fake_meter", [{"field": "user_id", "value": "fake_id", "op": "eq"}, {"field": "project_id", "value": "fake_id", "op": "eq"}, {"field": "resource_id", "value": "fake_resource", "op": "eq"}, {"field": "metadata.a", "value": "test", "op": "eq"}], 10, "user_id", "sum" )
apache-2.0
8,847,066,008,096,648,000
38.044444
78
0.586796
false
stevenbaker/dotfiles
.vim/bundle/jedi-vim/jedi/test/completion/descriptors.py
14
2662
class RevealAccess(object): """ A data descriptor that sets and returns values normally and prints a message logging their access. """ def __init__(self, initval=None, name='var'): self.val = initval self.name = name def __get__(self, obj, objtype): print('Retrieving', self.name) return self.val def __set__(self, obj, val): print('Updating', self.name) self.val = val def just_a_method(self): pass class C(object): x = RevealAccess(10, 'var "x"') #? RevealAccess() x #? ['just_a_method'] x.just_a_method y = 5.0 def __init__(self): #? int() self.x #? [] self.just_a_method #? [] C.just_a_method m = C() #? int() m.x #? float() m.y #? int() C.x #? [] m.just_a_method #? [] C.just_a_method # ----------------- # properties # ----------------- class B(): @property def r(self): return 1 @r.setter def r(self, value): return '' def t(self): return '' p = property(t) #? [] B().r() #? int() B().r #? str() B().p #? [] B().p() class PropClass(): def __init__(self, a): self.a = a @property def ret(self): return self.a @ret.setter def ret(self, value): return 1.0 def ret2(self): return self.a ret2 = property(ret2) @property def nested(self): """ causes recusions in properties, should work """ return self.ret @property def nested2(self): """ causes recusions in properties, should not work """ return self.nested2 @property def join1(self): """ mutual recusion """ return self.join2 @property def join2(self): """ mutual recusion """ return self.join1 #? str() PropClass("").ret #? [] PropClass().ret. #? str() PropClass("").ret2 #? PropClass().ret2 #? int() PropClass(1).nested #? [] PropClass().nested. #? PropClass(1).nested2 #? [] PropClass().nested2. #? PropClass(1).join1 # ----------------- # staticmethod/classmethod # ----------------- class E(object): a = '' def __init__(self, a): self.a = a def f(x): return x f = staticmethod(f) @staticmethod def g(x): return x def s(cls, x): return x s = classmethod(s) @classmethod def t(cls, x): return x @classmethod def u(cls, x): return cls.a e = E(1) #? int() e.f(1) #? int() E.f(1) #? int() e.g(1) #? int() E.g(1) #? int() e.s(1) #? int() E.s(1) #? int() e.t(1) #? int() E.t(1) #? str() e.u(1) #? str() E.u(1)
mit
-5,508,422,796,701,280,000
13.626374
63
0.493614
false
Nofe92/srcdemo2
launcher/SrcDemo2Launcher.py
8
10302
import sys import os import re import time import base64 import tempfile import subprocess import threading if __name__ == '__main__': print 'Please do not launch this file directly.' sys.exit(0) def module_path(): if hasattr(sys, "frozen"): return os.path.dirname(sys.executable) return os.path.dirname(__file__) selfDir = os.path.abspath(module_path()) allowedCommands = {} def addCommand(commandName, command): global allowedCommands allowedCommands[commandName] = command stringRe = re.compile(r'"((?:[^"\\]|\\.)*)"') def parse_command(command): global stringRe, allowedCommands command = base64.b64decode(command).decode('utf8') allStrings = stringRe.findall(command) if not allStrings: return allStrings = [x.replace(u'\\"', u'"').replace(u'\\\\', u'\\') for x in allStrings] commandName = allStrings[0] arguments = allStrings[1:] if commandName in allowedCommands: debugPrint('[C] Executing command', commandName, 'with arguments', arguments) try: allowedCommands[commandName](*arguments) except: debugPrint('Error while running command', commandName, 'with arguments', arguments) class StreamRunner(threading.Thread): def __init__(self, process, streamIn, streamsOut, parseCommands=False, showCommands=False): self.process = process self.streamIn = streamIn self.streamsOut = streamsOut self.parseCommands = parseCommands self.showCommands = showCommands threading.Thread.__init__(self) def run(self): while self.process.poll() is None: l = self.streamIn.readline() if self.parseCommands and len(l) > 4 and l[:3] == '[C]': if self.showCommands: for s in self.streamsOut: s.write(l) parse_command(l[4:]) else: for s in self.streamsOut: s.write(l) def which(program): import os def is_executable(fpath): return os.path.isfile(fpath) and os.access(fpath, os.X_OK) fpath, fname = os.path.split(program) if fpath and is_executable(program): return program elif 'PATH' in os.environ: for path in os.environ['PATH'].split(os.pathsep): path = path.strip('"') executable_file = os.path.join(path, program) if is_executable(executable_file): return executable_file return None def is_windows(): return sys.platform[:3].lower() == 'win' def is_osx(): return sys.platform.lower().find('darwin') != -1 or sys.platform.lower().find('osx') != -1 def get_java(debugMode): if is_windows(): hiPriority = 'java.exe' loPriority = 'javaw.exe' debugPrint('Finding', hiPriority, '/', loPriority) def findJre(d): if not os.path.exists(d) or not os.path.isdir(d): return None found = None for i in os.listdir(d): f = d + os.sep + i if os.path.isdir(f): res = findJre(f) if res is not None: return res elif i.lower() == hiPriority: return f # Immediately return elif i.lower() == loPriority: found = f # Keep looking for the other, just in case return found lookIn=[selfDir] if 'PROGRAMFILES(X86)' in os.environ: lookIn.append(os.environ['PROGRAMFILES(X86)'] + os.sep + 'Oracle') lookIn.append(os.environ['PROGRAMFILES(X86)'] + os.sep + 'Java') if 'PROGRAMFILES' in os.environ: lookIn.append(os.environ['PROGRAMFILES'] + os.sep + 'Oracle') lookIn.append(os.environ['PROGRAMFILES'] + os.sep + 'Java') foundJre = None for p in lookIn: foundJre = findJre(p) if foundJre is not None: return foundJre elif is_osx(): return selfDir + '/jre-1.7.0/bin/java' else: return which('java') return None def add_subprocess_creationflags(kwargs): if is_windows(): import win32process kwargs['creationflags'] = win32process.CREATE_NO_WINDOW return kwargs def subprocess_call(command, *args, **kwargs): args = args[:] kwargs = add_subprocess_creationflags(kwargs.copy()) kwargs['stdin'] = 
subprocess.PIPE kwargs['stdout'] = subprocess.PIPE kwargs['stderr'] = subprocess.PIPE return subprocess.call(command, *args, **kwargs) def subprocess_getoutput(command, *args, **kwargs): args = args[:] kwargs = add_subprocess_creationflags(kwargs.copy()) kwargs['stdin'] = subprocess.PIPE kwargs['stderr'] = subprocess.PIPE return subprocess.check_output(command, *args, **kwargs) def attempt_unmount(mountpoint): global selfDir mountpoint = mountpoint.encode(sys.getfilesystemencoding()) if is_windows(): subprocess_call([selfDir + os.sep + 'tools' + os.sep + 'windows' + os.sep + 'dokanctl' + os.sep + 'dokanctl.exe', '/u', mountpoint, '/f']) else: try: subprocess_call(['fusermount', '-u', mountpoint]) except: pass try: subprocess_call(['umount', mountpoint]) except: pass addCommand('unmount', attempt_unmount) lastMountPoint = None def register_mountpoint(mountpoint): global lastMountPoint lastMountPoint = mountpoint addCommand('register_mountpoint', register_mountpoint) def unmount_registered_mountpoint(): global lastMountPoint if lastMountPoint is not None: debugPrint('Attempting unmount of', lastMountPoint) attempt_unmount(lastMountPoint) def addJvmArgument(printFlags, jvmArgs, default, prefix=None, xxArg=None): if prefix is not None: for i in jvmArgs: if len(i) > len(prefix) and i[:len(prefix)] == prefix: return jvmArgs.append(prefix + default) elif xxArg is not None and printFlags is not None and xxArg in printFlags: for i in jvmArgs: if len(i) > 4 and i[:4] == '-XX:' and xxArg in i: return jvmArgs.append('-XX:' + default) debugMode = False def debugPrint(*args): global debugMode if debugMode: try: print ' '.join(map(str, args)) except: try: print args except: try: print 'Could not print line! Something is very bad.' except: pass # Now it's really really bad def launch(inDebugMode=False): global selfDir, debugMode debugMode = inDebugMode or '--srcdemo-debug' in sys.argv[1:] debugPrint('Debug mode enabled.') foundJre = get_java(debugMode) if foundJre is None: debugPrint('JRE not found.') if is_windows(): import win32api win32api.MessageBox(0, 'A 32-bit Java runtime environment (JRE) was not found.\nPlease download it from http://java.com/.\nEven if you are on 64-bit Windows, this program needs a 32-bit Java runtime to run.\n\nIf you are sure you have installed it already, please copy the jre folder next to SrcDemo2.exe.', 'Java not found.') return else: print 'The Java runtime environment was not found.' 
sys.exit(1) if type(foundJre) is not type([]): foundJre = [foundJre] javaHome = os.path.abspath(os.path.dirname(os.path.dirname(foundJre[0]))) javaEnv = os.environ.copy() javaEnv['JAVA_HOME'] = javaHome javaVmArgs = [] if is_osx(): foundJre.append('-d64') javaVmArgs.append('-XstartOnFirstThread') for i in sys.argv[1:]: if len(i) > 11 and i[:11] == '--jvm-args=': javaVmArgs.extend(i[11:].split(' ')) jvmType = '-client' if '-server' not in javaVmArgs and '-client' not in javaVmArgs: # Probe for server JVM if subprocess_call(foundJre + ['-server', '-version']) == 0: jvmType = '-server' javaVmArgs = ['-server'] + javaVmArgs # Get available flags printFlags = None try: printFlags = subprocess_getoutput(foundJre + [jvmType, '-XX:+PrintFlagsFinal']) except: pass addJvmArgument(printFlags, javaVmArgs, '1024M', prefix='-Xmx') addJvmArgument(printFlags, javaVmArgs, '512k', prefix='-Xss') addJvmArgument(printFlags, javaVmArgs, ':none', prefix='-Xverify') addJvmArgument(printFlags, javaVmArgs, '+UseParallelGC', xxArg='GC') addJvmArgument(printFlags, javaVmArgs, '+AggressiveOpts', xxArg='AggressiveOpts') addJvmArgument(printFlags, javaVmArgs, '+UseFastAccessorMethods', xxArg='UseFastAccessorMethods') if jvmType == '-server': addJvmArgument(printFlags, javaVmArgs, '+UseStringCache', xxArg='UseStringCache') addJvmArgument(printFlags, javaVmArgs, '+UseCompressedStrings', xxArg='UseCompressedStrings') addJvmArgument(printFlags, javaVmArgs, '+OptimizeStringConcat', xxArg='OptimizeStringConcat') addJvmArgument(printFlags, javaVmArgs, 'CompileThreshold=100', xxArg='CompileThreshold') del printFlags command = foundJre + javaVmArgs + ['-jar', 'SrcDemo2.jar'] outStreams = [sys.stdout] errStreams = [] if debugMode: errStreams.append(sys.stderr) command.append('--srcdemo-debug') print 'Debug mode allows the console output to be logged to a file.' print 'You may enter the complete path of the file to log to below.' print 'Make sure it is writable (i.e. don\'t put it in the installation directory).' print 'If you don\'t want the output to be logged, leave the line blank.' print 'If you\'re not sure what to type, type "?" and SrcDemo2 will guess a filename for you.' while True: logFile = raw_input('Log file (blank to not log, "?" for auto): ').strip() if logFile: if logFile in (u'"?"', u'?'): logFile = tempfile.mkstemp(suffix='.log', prefix='srcdemo2-' + time.strftime('%Y-%m-%d-at-%H-%M-%S') + '-', text=False) os.close(logFile[0]) logFile = logFile[1] print 'Guessed log file:', logFile try: logHandle = open(logFile, 'wb') logHandle.write(u'Opened log.\n'.encode('utf8')) outStreams.append(logHandle) errStreams.append(logHandle) print 'Log file:', logFile break except: print 'Couldn\'t open this file for writing.' print 'Please make sure the file is writable.' 
else: break else: errStreams.append(sys.stdout) command.append('--srcdemo-jvm' + jvmType) command.extend(sys.argv[1:]) returnCode = 0 kwargs = add_subprocess_creationflags({ 'cwd': selfDir, 'env': javaEnv, 'stdin': subprocess.PIPE, 'stdout': subprocess.PIPE, 'stderr': subprocess.PIPE }) while True: debugPrint('Running', command) p = subprocess.Popen(command, **kwargs) p.stdin.close() StreamRunner(p, p.stdout, outStreams, parseCommands=True, showCommands=inDebugMode).start() StreamRunner(p, p.stderr, errStreams).start() try: returnCode = p.wait() except KeyboardInterrupt: debugPrint('Got keyboard interrupt.') returnCode = 0 unmount_registered_mountpoint() break debugPrint('Process finished with return code:', returnCode) unmount_registered_mountpoint() if returnCode != 57: break debugPrint('Done.') if returnCode: sys.exit(returnCode)
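A minimal sketch of the command protocol that parse_command() and StreamRunner expect from the Java child process: a stdout line of the form "[C] " followed by the base64 encoding of a double-quoted command name and arguments. The helper below is illustrative only (it is not part of the launcher) and uses the 'register_mountpoint' command registered above.

import base64

def encode_command(name, *args):
    # Quote and escape each token the way parse_command() un-escapes them.
    quoted = u' '.join(u'"%s"' % t.replace(u'\\', u'\\\\').replace(u'"', u'\\"')
                       for t in (name,) + args)
    return '[C] ' + base64.b64encode(quoted.encode('utf8')).decode('ascii')

print(encode_command(u'register_mountpoint', u'/tmp/srcdemo'))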
bsd-2-clause
6,488,521,361,306,192,000
32.339806
329
0.693749
false
stackforge/cloudbase-init
cloudbaseinit/tests/metadata/services/test_maasservice.py
1
17075
# Copyright 2014 Cloudbase Solutions Srl # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import unittest try: import unittest.mock as mock except ImportError: import mock from cloudbaseinit import conf as cloudbaseinit_conf from cloudbaseinit import exception from cloudbaseinit.metadata.services import maasservice from cloudbaseinit.models import network as network_model from cloudbaseinit.tests import testutils from cloudbaseinit.utils import x509constants CONF = cloudbaseinit_conf.CONF class MaaSHttpServiceTest(unittest.TestCase): def setUp(self): self._maasservice = maasservice.MaaSHttpService() @mock.patch("cloudbaseinit.metadata.services.maasservice.MaaSHttpService" "._get_cache_data") def _test_load(self, mock_get_cache_data, ip, cache_data_fails=False): if cache_data_fails: mock_get_cache_data.side_effect = Exception with testutils.ConfPatcher('metadata_base_url', ip, "maas"): with testutils.LogSnatcher('cloudbaseinit.metadata.services.' 'maasservice') as snatcher: response = self._maasservice.load() if ip is not None: if not cache_data_fails: mock_get_cache_data.assert_called_once_with( '%s/meta-data/' % self._maasservice._metadata_version) self.assertTrue(response) else: expected_logging = 'Metadata not found at URL \'%s\'' % ip self.assertEqual(expected_logging, snatcher.output[-1]) else: self.assertFalse(response) def test_load(self): self._test_load(ip='196.254.196.254') def test_load_no_ip(self): self._test_load(ip=None) def test_load_get_cache_data_fails(self): self._test_load(ip='196.254.196.254', cache_data_fails=True) @testutils.ConfPatcher('oauth_consumer_key', 'consumer_key', "maas") @testutils.ConfPatcher('oauth_consumer_secret', 'consumer_secret', "maas") @testutils.ConfPatcher('oauth_token_key', 'token_key', "maas") @testutils.ConfPatcher('oauth_token_secret', 'token_secret', "maas") def test_get_oauth_headers(self): response = self._maasservice._get_oauth_headers(url='196.254.196.254') self.assertIsInstance(response, dict) self.assertIn('Authorization', response) auth = response['Authorization'] self.assertTrue(auth.startswith('OAuth')) auth = auth[6:] parts = [item.strip() for item in auth.split(",")] auth_parts = dict(part.split("=") for part in parts) required_headers = { 'oauth_token', 'oauth_consumer_key', 'oauth_signature', } self.assertTrue(required_headers.issubset(set(auth_parts))) self.assertEqual('"token_key"', auth_parts['oauth_token']) self.assertEqual('"consumer_key"', auth_parts['oauth_consumer_key']) self.assertEqual('"consumer_secret%26token_secret"', auth_parts['oauth_signature']) @mock.patch('cloudbaseinit.metadata.services.base.' 
'BaseHTTPMetadataService._http_request') @mock.patch('cloudbaseinit.metadata.services.maasservice.MaaSHttpService' '._get_oauth_headers') def test_http_request(self, mock_ouath_headers, mock_http_request): mock_url = "fake.url" self._maasservice._http_request(mock_url) mock_http_request.assert_called_once_with(mock_url, None, {}, None) @mock.patch("cloudbaseinit.metadata.services.maasservice.MaaSHttpService" "._get_cache_data") def test_get_host_name(self, mock_get_cache_data): response = self._maasservice.get_host_name() mock_get_cache_data.assert_called_once_with( '%s/meta-data/local-hostname' % self._maasservice._metadata_version, decode=True) self.assertEqual(mock_get_cache_data.return_value, response) @mock.patch("cloudbaseinit.metadata.services.maasservice.MaaSHttpService" "._get_cache_data") def test_get_instance_id(self, mock_get_cache_data): response = self._maasservice.get_instance_id() mock_get_cache_data.assert_called_once_with( '%s/meta-data/instance-id' % self._maasservice._metadata_version, decode=True) self.assertEqual(mock_get_cache_data.return_value, response) @mock.patch("cloudbaseinit.metadata.services.maasservice.MaaSHttpService" "._get_cache_data") def test_get_public_keys(self, mock_get_cache_data): public_keys = [ "fake key 1", "fake key 2" ] public_key = "\n".join(public_keys) + "\n" mock_get_cache_data.return_value = public_key response = self._maasservice.get_public_keys() mock_get_cache_data.assert_called_with( '%s/meta-data/public-keys' % self._maasservice._metadata_version, decode=True) self.assertEqual(public_keys, response) @mock.patch("cloudbaseinit.metadata.services.maasservice.MaaSHttpService" "._get_cache_data") def test_get_client_auth_certs(self, mock_get_cache_data): certs = [ "{begin}\n{cert}\n{end}".format( begin=x509constants.PEM_HEADER, end=x509constants.PEM_FOOTER, cert=cert) for cert in ("first cert", "second cert") ] mock_get_cache_data.return_value = "\n".join(certs) + "\n" response = self._maasservice.get_client_auth_certs() mock_get_cache_data.assert_called_with( '%s/meta-data/x509' % self._maasservice._metadata_version, decode=True) self.assertEqual(certs, response) @mock.patch("cloudbaseinit.metadata.services.maasservice.MaaSHttpService" "._get_cache_data") def test_get_user_data(self, mock_get_cache_data): response = self._maasservice.get_user_data() mock_get_cache_data.assert_called_once_with( '%s/user-data' % self._maasservice._metadata_version) self.assertEqual(mock_get_cache_data.return_value, response) def _get_network_data(self): return { "version": mock.sentinel.network_data_version, "config": [{ "mtu": mock.sentinel.link_mtu1, "name": mock.sentinel.link_name1, "subnets": [{ "type": maasservice.MAAS_SUBNET_TYPE_MANUAL }], "type": maasservice.MAAS_CONFIG_TYPE_PHYSICAL, "mac_address": mock.sentinel.link_mac1, "id": mock.sentinel.link_id1 }, { "mtu": mock.sentinel.link_mtu2, "name": mock.sentinel.link_name2, "subnets": [{ "type": maasservice.MAAS_SUBNET_TYPE_MANUAL }], "type": maasservice.MAAS_CONFIG_TYPE_PHYSICAL, "mac_address": mock.sentinel.link_mac2, "id": mock.sentinel.link_id2 }, { "mtu": mock.sentinel.link_mtu3, "name": mock.sentinel.link_name3, "subnets": [{ "type": maasservice.MAAS_SUBNET_TYPE_MANUAL }], "type": maasservice.MAAS_CONFIG_TYPE_PHYSICAL, "mac_address": mock.sentinel.link_mac3, "id": mock.sentinel.link_id3 }, { "name": mock.sentinel.bond_name1, "id": mock.sentinel.bond_id1, "type": maasservice.MAAS_CONFIG_TYPE_BOND, "mac_address": mock.sentinel.bond_mac1, "bond_interfaces": [ mock.sentinel.link_id1, 
mock.sentinel.link_id2 ], "mtu": mock.sentinel.bond_mtu1, "subnets": [{ "address": mock.sentinel.bond_subnet_address1, "gateway": mock.sentinel.bond_subnet_gateway1, "type": maasservice.MAAS_SUBNET_TYPE_STATIC, "dns_nameservers": [ mock.sentinel.bond_subnet_dns1, mock.sentinel.bond_subnet_dns2] }, { "address": mock.sentinel.bond_subnet_address2, "type": maasservice.MAAS_SUBNET_TYPE_STATIC, "dns_nameservers": [] }], "params": { "bond-downdelay": 0, "bond-xmit-hash-policy": mock.sentinel.bond_lb_algo1, "bond-mode": mock.sentinel.bond_mode1, "bond-updelay": 0, "bond-miimon": 100, "bond-lacp-rate": maasservice.MAAS_BOND_LACP_RATE_FAST } }, { "type": maasservice.MAAS_CONFIG_TYPE_VLAN, "mtu": mock.sentinel.vlan_mtu1, "name": mock.sentinel.vlan_name1, "subnets": [{ "gateway": mock.sentinel.vlan_subnet_gateway1, "address": mock.sentinel.vlan_subnet_address1, "type": maasservice.MAAS_SUBNET_TYPE_STATIC, "dns_nameservers": [] }], "vlan_id": mock.sentinel.vlan_id1, "vlan_link": mock.sentinel.bond_id1, "id": mock.sentinel.vlan_link_id1 }, { "type": mock.sentinel.nameserver_config_type, "search": [ mock.sentinel.dns_search1 ], "address": [ mock.sentinel.bond_subnet_dns1, mock.sentinel.bond_subnet_dns2 ], }] } @mock.patch("cloudbaseinit.metadata.services.maasservice.MaaSHttpService" "._get_network_data") def _test_get_network_details_v2(self, mock_get_network_data, unsupported_version=False, invalid_bond_type=False, invalid_bond_lb_algo=False, unsupported_config_type=False): mock.sentinel.bond_subnet_address1 = "10.0.0.1/24" mock.sentinel.bond_subnet_gateway1 = "10.0.0.254" mock.sentinel.bond_subnet_address2 = "172.16.0.1/16" mock.sentinel.vlan_subnet_address1 = "2001:cdba::3257:9652/24" mock.sentinel.vlan_subnet_gateway1 = "2001:cdba::3257:1" if invalid_bond_type: mock.sentinel.bond_mode1 = "invalid bond type" else: mock.sentinel.bond_mode1 = network_model.BOND_TYPE_BALANCE_ALB if invalid_bond_lb_algo: mock.sentinel.bond_lb_algo1 = "invalid lb algorithm" else: mock.sentinel.bond_lb_algo1 = network_model.BOND_LB_ALGO_L2 if unsupported_version: mock.sentinel.network_data_version = "unsupported" else: mock.sentinel.network_data_version = 1 if unsupported_config_type: mock.sentinel.nameserver_config_type = "unsupported" else: mock.sentinel.nameserver_config_type = "nameserver" network_data = self._get_network_data() mock_get_network_data.return_value = network_data if (unsupported_version or invalid_bond_type or invalid_bond_lb_algo or unsupported_config_type): with self.assertRaises(exception.CloudbaseInitException): self._maasservice.get_network_details_v2() return network_details = self._maasservice.get_network_details_v2() self.assertEqual(1, len([ l for l in network_details.links if l.type == network_model.LINK_TYPE_PHYSICAL and l.id == mock.sentinel.link_id1 and l.name == mock.sentinel.link_name1 and l.enabled is True and l.mac_address == mock.sentinel.link_mac1 and l.mtu == mock.sentinel.link_mtu1])) self.assertEqual(1, len([ l for l in network_details.links if l.type == network_model.LINK_TYPE_PHYSICAL and l.id == mock.sentinel.link_id2 and l.name == mock.sentinel.link_name2 and l.enabled is True and l.mac_address == mock.sentinel.link_mac2 and l.mtu == mock.sentinel.link_mtu2])) # Disconnected network adapter, ensure it's not enabled self.assertEqual(1, len([ l for l in network_details.links if l.type == network_model.LINK_TYPE_PHYSICAL and l.id == mock.sentinel.link_id3 and l.name == mock.sentinel.link_name3 and l.enabled is False and l.mac_address == mock.sentinel.link_mac3 and l.mtu == 
mock.sentinel.link_mtu3])) self.assertEqual(1, len([ l for l in network_details.links if l.type == network_model.LINK_TYPE_BOND and l.id == mock.sentinel.bond_id1 and l.enabled is True and l.name == mock.sentinel.bond_name1 and l.mtu == mock.sentinel.bond_mtu1 and l.mac_address == mock.sentinel.bond_mac1 and l.vlan_link is None and l.vlan_id is None and l.bond.type == network_model.BOND_TYPE_BALANCE_ALB and l.bond.members == [ mock.sentinel.link_id1, mock.sentinel.link_id2] and l.bond.lb_algorithm == network_model.BOND_LB_ALGO_L2 and l.bond.lacp_rate == network_model.BOND_LACP_RATE_FAST])) self.assertEqual(1, len([ l for l in network_details.links if l.type == network_model.LINK_TYPE_VLAN and l.id == mock.sentinel.vlan_link_id1 and l.name == mock.sentinel.vlan_name1 and l.enabled is True and l.mac_address is None and l.mtu == mock.sentinel.vlan_mtu1 and l.vlan_link == mock.sentinel.bond_id1 and l.vlan_id == mock.sentinel.vlan_id1])) self.assertEqual(3, len(network_details.networks)) network_bond1 = [ n for n in network_details.networks if n.address_cidr == mock.sentinel.bond_subnet_address1 and n.dns_nameservers == [ mock.sentinel.bond_subnet_dns1, mock.sentinel.bond_subnet_dns2] and n.link == mock.sentinel.bond_id1 and n.routes == [network_model.Route( network_cidr=u'0.0.0.0/0', gateway=mock.sentinel.bond_subnet_gateway1 )]] self.assertEqual(1, len(network_bond1)) network_bond2 = [ n for n in network_details.networks if n.address_cidr == mock.sentinel.bond_subnet_address2 and n.dns_nameservers == [] and n.link == mock.sentinel.bond_id1 and n.routes == []] self.assertEqual(1, len(network_bond2)) network_vlan1 = [ n for n in network_details.networks if n.address_cidr == mock.sentinel.vlan_subnet_address1 and n.dns_nameservers == [] and n.link == mock.sentinel.vlan_link_id1 and n.routes == [network_model.Route( network_cidr=u'::/0', gateway=mock.sentinel.vlan_subnet_gateway1 )]] self.assertEqual(1, len(network_vlan1)) self.assertEqual( [network_model.NameServerService( addresses=[ mock.sentinel.bond_subnet_dns1, mock.sentinel.bond_subnet_dns2], search=[mock.sentinel.dns_search1])], network_details.services) def test_get_network_details_v2(self): self._test_get_network_details_v2() def test_get_network_details_v2_unsupported_version(self): self._test_get_network_details_v2(unsupported_version=True) def test_get_network_details_v2_unsupported_config_type(self): self._test_get_network_details_v2(unsupported_config_type=True) def test_get_network_details_v2_invalid_bond_type(self): self._test_get_network_details_v2(invalid_bond_type=True) def test_get_network_details_v2_invalid_bond_lb_algo(self): self._test_get_network_details_v2(invalid_bond_lb_algo=True)
apache-2.0
-6,710,028,509,444,999,000
41.264851
79
0.573353
false
sysalexis/kbengine
kbe/res/scripts/common/Lib/traceback.py
87
11167
"""Extract, format and print information about Python stack traces.""" import linecache import sys import operator __all__ = ['extract_stack', 'extract_tb', 'format_exception', 'format_exception_only', 'format_list', 'format_stack', 'format_tb', 'print_exc', 'format_exc', 'print_exception', 'print_last', 'print_stack', 'print_tb', 'clear_frames'] # # Formatting and printing lists of traceback lines. # def _format_list_iter(extracted_list): for filename, lineno, name, line in extracted_list: item = ' File "{}", line {}, in {}\n'.format(filename, lineno, name) if line: item = item + ' {}\n'.format(line.strip()) yield item def print_list(extracted_list, file=None): """Print the list of tuples as returned by extract_tb() or extract_stack() as a formatted stack trace to the given file.""" if file is None: file = sys.stderr for item in _format_list_iter(extracted_list): print(item, file=file, end="") def format_list(extracted_list): """Format a list of traceback entry tuples for printing. Given a list of tuples as returned by extract_tb() or extract_stack(), return a list of strings ready for printing. Each string in the resulting list corresponds to the item with the same index in the argument list. Each string ends in a newline; the strings may contain internal newlines as well, for those items whose source text line is not None. """ return list(_format_list_iter(extracted_list)) # # Printing and Extracting Tracebacks. # # extractor takes curr and needs to return a tuple of: # - Frame object # - Line number # - Next item (same type as curr) # In practice, curr is either a traceback or a frame. def _extract_tb_or_stack_iter(curr, limit, extractor): if limit is None: limit = getattr(sys, 'tracebacklimit', None) n = 0 while curr is not None and (limit is None or n < limit): f, lineno, next_item = extractor(curr) co = f.f_code filename = co.co_filename name = co.co_name linecache.checkcache(filename) line = linecache.getline(filename, lineno, f.f_globals) if line: line = line.strip() else: line = None yield (filename, lineno, name, line) curr = next_item n += 1 def _extract_tb_iter(tb, limit): return _extract_tb_or_stack_iter( tb, limit, operator.attrgetter("tb_frame", "tb_lineno", "tb_next")) def print_tb(tb, limit=None, file=None): """Print up to 'limit' stack trace entries from the traceback 'tb'. If 'limit' is omitted or None, all entries are printed. If 'file' is omitted or None, the output goes to sys.stderr; otherwise 'file' should be an open file or file-like object with a write() method. """ print_list(extract_tb(tb, limit=limit), file=file) def format_tb(tb, limit=None): """A shorthand for 'format_list(extract_tb(tb, limit))'.""" return format_list(extract_tb(tb, limit=limit)) def extract_tb(tb, limit=None): """Return list of up to limit pre-processed entries from traceback. This is useful for alternate formatting of stack traces. If 'limit' is omitted or None, all entries are extracted. A pre-processed stack trace entry is a quadruple (filename, line number, function name, text) representing the information that is usually printed for a stack trace. The text is a string with leading and trailing whitespace stripped; if the source is not available it is None. """ return list(_extract_tb_iter(tb, limit=limit)) # # Exception formatting and output. 
# _cause_message = ( "\nThe above exception was the direct cause " "of the following exception:\n") _context_message = ( "\nDuring handling of the above exception, " "another exception occurred:\n") def _iter_chain(exc, custom_tb=None, seen=None): if seen is None: seen = set() seen.add(exc) its = [] context = exc.__context__ cause = exc.__cause__ if cause is not None and cause not in seen: its.append(_iter_chain(cause, False, seen)) its.append([(_cause_message, None)]) elif (context is not None and not exc.__suppress_context__ and context not in seen): its.append(_iter_chain(context, None, seen)) its.append([(_context_message, None)]) its.append([(exc, custom_tb or exc.__traceback__)]) # itertools.chain is in an extension module and may be unavailable for it in its: yield from it def _format_exception_iter(etype, value, tb, limit, chain): if chain: values = _iter_chain(value, tb) else: values = [(value, tb)] for value, tb in values: if isinstance(value, str): # This is a cause/context message line yield value + '\n' continue if tb: yield 'Traceback (most recent call last):\n' yield from _format_list_iter(_extract_tb_iter(tb, limit=limit)) yield from _format_exception_only_iter(type(value), value) def print_exception(etype, value, tb, limit=None, file=None, chain=True): """Print exception up to 'limit' stack trace entries from 'tb' to 'file'. This differs from print_tb() in the following ways: (1) if traceback is not None, it prints a header "Traceback (most recent call last):"; (2) it prints the exception type and value after the stack trace; (3) if type is SyntaxError and value has the appropriate format, it prints the line where the syntax error occurred with a caret on the next line indicating the approximate position of the error. """ if file is None: file = sys.stderr for line in _format_exception_iter(etype, value, tb, limit, chain): print(line, file=file, end="") def format_exception(etype, value, tb, limit=None, chain=True): """Format a stack trace and the exception information. The arguments have the same meaning as the corresponding arguments to print_exception(). The return value is a list of strings, each ending in a newline and some containing internal newlines. When these lines are concatenated and printed, exactly the same text is printed as does print_exception(). """ return list(_format_exception_iter(etype, value, tb, limit, chain)) def format_exception_only(etype, value): """Format the exception part of a traceback. The arguments are the exception type and value such as given by sys.last_type and sys.last_value. The return value is a list of strings, each ending in a newline. Normally, the list contains a single string; however, for SyntaxError exceptions, it contains several lines that (when printed) display detailed information about where the syntax error occurred. The message indicating which exception occurred is always the last string in the list. """ return list(_format_exception_only_iter(etype, value)) def _format_exception_only_iter(etype, value): # Gracefully handle (the way Python 2.4 and earlier did) the case of # being called with (None, None). if etype is None: yield _format_final_exc_line(etype, value) return stype = etype.__name__ smod = etype.__module__ if smod not in ("__main__", "builtins"): stype = smod + '.' + stype if not issubclass(etype, SyntaxError): yield _format_final_exc_line(stype, value) return # It was a syntax error; show exactly where the problem was found. filename = value.filename or "<string>" lineno = str(value.lineno) or '?' 
yield ' File "{}", line {}\n'.format(filename, lineno) badline = value.text offset = value.offset if badline is not None: yield ' {}\n'.format(badline.strip()) if offset is not None: caretspace = badline.rstrip('\n') offset = min(len(caretspace), offset) - 1 caretspace = caretspace[:offset].lstrip() # non-space whitespace (likes tabs) must be kept for alignment caretspace = ((c.isspace() and c or ' ') for c in caretspace) yield ' {}^\n'.format(''.join(caretspace)) msg = value.msg or "<no detail available>" yield "{}: {}\n".format(stype, msg) def _format_final_exc_line(etype, value): valuestr = _some_str(value) if value is None or not valuestr: line = "%s\n" % etype else: line = "%s: %s\n" % (etype, valuestr) return line def _some_str(value): try: return str(value) except: return '<unprintable %s object>' % type(value).__name__ def print_exc(limit=None, file=None, chain=True): """Shorthand for 'print_exception(*sys.exc_info(), limit, file)'.""" print_exception(*sys.exc_info(), limit=limit, file=file, chain=chain) def format_exc(limit=None, chain=True): """Like print_exc() but return a string.""" return "".join(format_exception(*sys.exc_info(), limit=limit, chain=chain)) def print_last(limit=None, file=None, chain=True): """This is a shorthand for 'print_exception(sys.last_type, sys.last_value, sys.last_traceback, limit, file)'.""" if not hasattr(sys, "last_type"): raise ValueError("no last exception") print_exception(sys.last_type, sys.last_value, sys.last_traceback, limit, file, chain) # # Printing and Extracting Stacks. # def _extract_stack_iter(f, limit=None): return _extract_tb_or_stack_iter( f, limit, lambda f: (f, f.f_lineno, f.f_back)) def _get_stack(f): if f is None: f = sys._getframe().f_back.f_back return f def print_stack(f=None, limit=None, file=None): """Print a stack trace from its invocation point. The optional 'f' argument can be used to specify an alternate stack frame at which to start. The optional 'limit' and 'file' arguments have the same meaning as for print_exception(). """ print_list(extract_stack(_get_stack(f), limit=limit), file=file) def format_stack(f=None, limit=None): """Shorthand for 'format_list(extract_stack(f, limit))'.""" return format_list(extract_stack(_get_stack(f), limit=limit)) def extract_stack(f=None, limit=None): """Extract the raw traceback from the current stack frame. The return value has the same format as for extract_tb(). The optional 'f' and 'limit' arguments have the same meaning as for print_stack(). Each item in the list is a quadruple (filename, line number, function name, text), and the entries are in order from oldest to newest stack frame. """ stack = list(_extract_stack_iter(_get_stack(f), limit=limit)) stack.reverse() return stack def clear_frames(tb): "Clear all references to local variables in the frames of a traceback." while tb is not None: try: tb.tb_frame.clear() except RuntimeError: # Ignore the exception raised if the frame is still executing. pass tb = tb.tb_next
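A short usage sketch of the public helpers defined above. This is the standard-library traceback API (the copy above is the one bundled with kbengine), so the calls below behave the same as in stock CPython 3.x.

import traceback

try:
    1 / 0
except ZeroDivisionError:
    # format_exc() returns the text print_exc() would write to sys.stderr
    text = traceback.format_exc()
    assert text.startswith('Traceback (most recent call last):')
    print(text)

# extract_stack()/format_list() work on live frames rather than exceptions
print(''.join(traceback.format_list(traceback.extract_stack())))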
lgpl-3.0
-1,010,462,565,069,940,100
34.677316
79
0.646637
false
magnatronus/titanium-sac
lib/tiutils.py
1
2781
# # tiutils.py is a Titanium function library for use with the SpiralArm Titanium plug-in for Sublime Text 3 # # developed by Steve Rogers, SpiralArm Consulting Ltd (www.spiralarm.uk) # @sarmcon # # import sublime, subprocess,os from os.path import expanduser # read in our default Titanium settings settings = sublime.load_settings('titanium-sac.sublime-settings') LOGLEVEL = settings.get("logLevel", "info") PLATFORMS = settings.get("platforms", "ios,android") URL = settings.get("url", "http://www.mywebaddress") SDK = settings.get("sdk", "5.0.0.GA") workSpace = settings.get("workspace", "/") tiPath = settings.get("tiPath", "") rootAppId = settings.get("appId", "com.myapp") # set up some other useful vars home = expanduser("~") new_env = os.environ.copy() new_env['PATH'] = new_env['PATH']+ ":" + tiPath WORKSPACEDIR = home + workSpace # Run a Ti based shell command def runCommand(params): subprocess.Popen(params, env=new_env).wait() # Print out a console message def consolePrint(label, message): print("%s> %s" % (label,message)) # Generate our application id def getAppId(projectName): return rootAppId + "." + projectName # Generate our fully qualified project name def getProjectDirectory(projectName): return home + workSpace + "/" + projectName # Create a classic project def createClassicProject(projectName): consolePrint('info', "Creating Titanium Project....") runCommand(['ti', "create", "--force","--type", "app", "--sdk", SDK, "--id", getAppId(projectName), "--log-level", LOGLEVEL, "--name", projectName, "--workspace-dir", WORKSPACEDIR,"--platform", PLATFORMS, "--url", URL]) # Add the Alloy Files def generateAlloyProject(projectDir): consolePrint('info', "Generating Alloy Files....") subprocess.Popen(['alloy', "new", projectDir, "--force"], env=new_env).wait() # Clean the current project def cleanProject(projectDir): consolePrint('info', "Cleaning Project....") subprocess.Popen(['ti', "clean", "--project-dir", projectDir, "--log-level", LOGLEVEL, "--platforms", PLATFORMS], env=new_env).wait() # Add an Alloy widget to the project def createAlloyWidget(path, name): consolePrint('info', "Creating Widget %s...." % name) subprocess.Popen(['alloy', "generate", "widget", name, "--outputPath", path], env=new_env).wait() # Add an Alloy controller to the project def createAlloyController(path, name): consolePrint('info', "Creating Controller %s...." % name) subprocess.Popen(['alloy', "generate", "controller", name, "--outputPath", path], env=new_env).wait() # Create the Sublime Project File def createSublimeProject(projectDir): content = '{"folders":[{"path": "%s"}]}' % projectDir projectFile = open(projectDir+".sublime-project","w") projectFile.write(content); projectFile.close()
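A minimal usage sketch of the helpers above. It assumes the code runs inside Sublime Text (the module imports `sublime` and loads titanium-sac.sublime-settings at import time); the project name is illustrative.

import tiutils

project_name = 'MyTitaniumApp'                           # illustrative name
tiutils.createClassicProject(project_name)               # runs `ti create ...`
project_dir = tiutils.getProjectDirectory(project_name)
tiutils.generateAlloyProject(project_dir)                # runs `alloy new ...`
tiutils.createSublimeProject(project_dir)                # writes <project>.sublime-project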
mit
7,324,114,253,495,125,000
32.506024
221
0.701906
false
r0h4n/commons
tendrl/commons/flows/expand_cluster/gluster_help.py
2
1884
from tendrl.commons.flows.exceptions import FlowExecutionFailedError from tendrl.commons.utils import log_utils as logger def get_node_ips(parameters): node_ips = [] for node, config in parameters["Cluster.node_configuration"].iteritems(): node_ips.append(config["provisioning_ip"]) return node_ips def expand_gluster(parameters): node_ips = get_node_ips(parameters) plugin = NS.gluster_provisioner.get_plugin() cluster = NS.tendrl.objects.Cluster( integration_id=parameters['TendrlContext.integration_id'] ).load() logger.log( "info", NS.publisher_id, {"message": "Setting up gluster nodes for cluster %s" % cluster.short_name}, job_id=parameters['job_id'], flow_id=parameters['flow_id'], ) ret_val = plugin.setup_gluster_node( node_ips, repo=NS.config.data.get('glusterfs_repo', None) ) if ret_val is not True: raise FlowExecutionFailedError("Error setting up gluster node") logger.log( "info", NS.publisher_id, {"message": "Expanding gluster cluster %s" % cluster.short_name}, job_id=parameters['job_id'], flow_id=parameters['flow_id'] ) failed_nodes = [] for node in node_ips: ret_val = plugin.expand_gluster_cluster(node) if not ret_val: failed_nodes.append(node) if failed_nodes: raise FlowExecutionFailedError( "Error expanding gluster cluster. Following nodes failed: %s" % ",".join(failed_nodes) ) logger.log( "info", NS.publisher_id, {"message": "Expanded Gluster Cluster %s" " with nodes %s" % ( cluster.short_name, ",".join(node_ips))}, job_id=parameters['job_id'], flow_id=parameters['flow_id'] )
lgpl-2.1
-8,369,157,104,479,833,000
29.885246
77
0.597665
false
Tiendil/deworld
deworld/cartographer.py
1
4067
# coding: utf-8 import os try: from PIL import Image except: pass from deworld.map_colors import HeightColorMap, RGBColorMap from deworld.layers import VEGETATION_TYPE def draw_image(turn, catalog, layer, power_points, colorizer): if not os.path.exists(catalog): os.makedirs(catalog) img = Image.new('RGB', (layer.w, layer.h)) data = [] for row in layer.data: for cell in row: data.append(colorizer(cell, discret=False).rgb) for point in power_points.values(): data[point.y * layer.w + point.x] = (0, 0, 0) img.putdata(data) img.save('%s/%.3d.png' % (catalog, turn)) def wind_colorizer(wind, discret=False): r, g, b = 0.5, 0.5, 0.5 g += wind[0] * 0.5 b += wind[1] * 0.5 return RGBColorMap.get_color(r=r, g=g, b=b) def temperature_colorizer(temp, discret=False): r, g, b = 0.5, 0.5, 0.5 if temp < 0.5: b += temp else: r += (temp - 0.5) return RGBColorMap.get_color(r=r, g=g, b=b) def wetness_colorizer(wetness, discret=False): return RGBColorMap.get_color(r=1.0-wetness, g=1.0-wetness, b=1.0) def vegetation_colorizer(vegetation, discret=False): if vegetation == VEGETATION_TYPE.GRASS: return RGBColorMap.get_color(r=55.0/256, g=200.0/256, b=55.0/256) if vegetation == VEGETATION_TYPE.FOREST: return RGBColorMap.get_color(r=55.0/256, g=125.0/256, b=55.0/256) if vegetation == VEGETATION_TYPE.DESERT: return RGBColorMap.get_color(r=244.0/256, g=164.0/256, b=96.0/256) return RGBColorMap.get_color(r=0.0, g=0.0, b=0.0) def soil_colorizer(soil, discret=False): return RGBColorMap.get_color(r=0.0, g=soil, b=0.0) def atmo_wind_colorizer(point, discret=False): return wind_colorizer(point.wind, discret=discret) def atmo_temperature_colorizer(point, discret=False): return temperature_colorizer(point.temperature, discret=discret) def atmo_wetness_colorizer(point, discret=False): return wetness_colorizer(point.wetness, discret=discret) def draw_world(turn, world, catalog): draw_image(turn=turn, catalog='%s/%s' % (catalog, 'height'), layer=world.layer_height, power_points=world.power_points, colorizer=HeightColorMap.get_color) draw_image(turn=turn, catalog='%s/%s' % (catalog, 'temperature'), layer=world.layer_temperature, power_points=world.power_points, colorizer=temperature_colorizer) draw_image(turn=turn, catalog='%s/%s' % (catalog, 'wind'), layer=world.layer_wind, power_points=world.power_points, colorizer=wind_colorizer) draw_image(turn=turn, catalog='%s/%s' % (catalog, 'wetness'), layer=world.layer_wetness, power_points=world.power_points, colorizer=wetness_colorizer) draw_image(turn=turn, catalog='%s/%s' % (catalog, 'vegetation'), layer=world.layer_vegetation, power_points=world.power_points, colorizer=vegetation_colorizer) draw_image(turn=turn, catalog='%s/%s' % (catalog, 'soil'), layer=world.layer_soil, power_points=world.power_points, colorizer=soil_colorizer) draw_image(turn=turn, catalog='%s/%s' % (catalog, 'atmo_wind'), layer=world.layer_atmosphere, power_points=world.power_points, colorizer=atmo_wind_colorizer) draw_image(turn=turn, catalog='%s/%s' % (catalog, 'atmo_temperature'), layer=world.layer_atmosphere, power_points=world.power_points, colorizer=atmo_temperature_colorizer) draw_image(turn=turn, catalog='%s/%s' % (catalog, 'atmo_wetness'), layer=world.layer_atmosphere, power_points=world.power_points, colorizer=atmo_wetness_colorizer)
bsd-2-clause
7,333,891,058,448,866,000
30.527132
74
0.598967
false
pfeyz/psiTurk
psiturk/command_line.py
6
3137
''' This module supports commandline functionality ''' import argparse import sys, os from psiturk.version import version_number from psiturk.psiturk_org_services import ExperimentExchangeServices def process(): ''' Figure out how we were invoked ''' invoked_as = os.path.basename(sys.argv[0]) if invoked_as == "psiturk": launch_shell() elif invoked_as == "psiturk-server": launch_server() elif invoked_as == "psiturk-shell": launch_shell() elif invoked_as == "psiturk-setup-example": setup_example() elif invoked_as == "psiturk-install": install_from_exchange() def install_from_exchange(): ''' Install from experiment exchange. ''' parser = argparse.ArgumentParser( description='Download experiment from the psiturk.org experiment\ exchange (http://psiturk.org/ee).' ) parser.add_argument( 'exp_id', metavar='exp_id', type=str, help='the id number of the\ experiment in the exchange' ) args = parser.parse_args() exp_exch = ExperimentExchangeServices() exp_exch.download_experiment(args.exp_id) def setup_example(): ''' Add commands for testing, etc. ''' parser = argparse.ArgumentParser( description='Creates a simple default project (stroop) in the current\ directory with the necessary psiTurk files.' ) # Optional flags parser.add_argument( '-v', '--version', help='Print version number.', action="store_true" ) args = parser.parse_args() # If requested version just print and quite if args.version: print version_number else: import psiturk.setup_example as se se.setup_example() def launch_server(): ''' Add commands for testing, etc.. ''' parser = argparse.ArgumentParser( description='Launch psiTurk experiment webserver process on the\ host/port defined in config.txt.' ) # Optional flags parser.add_argument( '-v', '--version', help='Print version number.', action="store_true" ) args = parser.parse_args() # If requested version just print and quite if args.version: print version_number else: import psiturk.experiment_server as es es.launch() def launch_shell(): ''' Add commands for testing, etc.. ''' parser = argparse.ArgumentParser( description='Launch the psiTurk interactive shell.' ) # Optional flags parser.add_argument( '-v', '--version', help='Print version number.', action="store_true" ) parser.add_argument( '-c', '--cabinmode', help='Launch psiturk in cabin (offline) mode', action="store_true" ) parser.add_argument( '-s', '--script', help='Run commands from a script file' ) args = parser.parse_args() # If requested version just print and quite if args.version: print version_number else: import psiturk.psiturk_shell as ps if args.script: ps.run(cabinmode=args.cabinmode, script=args.script) else: ps.run(cabinmode=args.cabinmode)
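Because process() dispatches on the basename the program was invoked as, the package has to expose one console script per command name it checks for. The setup() excerpt below is illustrative only (not copied from psiTurk's real setup.py); it simply shows entry points whose names match the branches in process().

from setuptools import setup

setup(
    name='psiturk-example',                      # illustrative package name
    entry_points={
        'console_scripts': [
            'psiturk = psiturk.command_line:process',
            'psiturk-server = psiturk.command_line:process',
            'psiturk-shell = psiturk.command_line:process',
            'psiturk-setup-example = psiturk.command_line:process',
            'psiturk-install = psiturk.command_line:process',
        ],
    },
)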
mit
3,108,617,503,214,783,000
29.163462
78
0.63532
false
llvm/llvm-lnt
lnt/util/wsgi_restart.py
1
3202
# This code lifted from the mod_wsgi docs. from __future__ import print_function from future import standard_library standard_library.install_aliases() import os from pathlib import Path from typing import Sequence import sys import signal import threading import atexit import queue _interval = 1.0 _times = {} _files = [] # type: Sequence[Path] _running = False _queue = queue.Queue() # type: queue.Queue _lock = threading.Lock() def _restart(path): _queue.put(True) prefix = 'monitor (pid=%d):' % os.getpid() print('%s Change detected to \'%s\'.' % (prefix, path), file=sys.stderr) print('%s Triggering process restart.' % prefix, file=sys.stderr) os.kill(os.getpid(), signal.SIGINT) def _modified(path): try: # If path doesn't denote a file and were previously # tracking it, then it has been removed or the file type # has changed so force a restart. If not previously # tracking the file then we can ignore it as probably # pseudo reference such as when file extracted from a # collection of modules contained in a zip file. if not os.path.isfile(path): return path in _times # Check for when file last modified. mtime = os.stat(path).st_mtime if path not in _times: _times[path] = mtime # Force restart when modification time has changed, even # if time now older, as that could indicate older file # has been restored. if mtime != _times[path]: return True except Exception: # If any exception occured, likely that file has been # been removed just before stat(), so force a restart. return True return False def _monitor(): while True: # Check modification times on all files in sys.modules. for module in sys.modules.values(): if not hasattr(module, '__file__'): continue path = getattr(module, '__file__') if not path: continue if os.path.splitext(path)[1] in ['.pyc', '.pyo', '.pyd']: path = path[:-1] if _modified(path): return _restart(path) # Check modification times on files which have # specifically been registered for monitoring. for path in _files: if _modified(path): return _restart(path) # Go to sleep for specified interval. try: return _queue.get(timeout=_interval) except Exception: pass _thread = threading.Thread(target=_monitor) _thread.setDaemon(True) def _exiting(): try: _queue.put(True) except Exception: pass _thread.join() atexit.register(_exiting) def track(path): if path not in _files: _files.append(path) def start(interval=1.0): global _interval if interval < _interval: _interval = interval global _running _lock.acquire() if not _running: prefix = 'monitor (pid=%d):' % os.getpid() print('%s Starting change monitor.' % prefix, file=sys.stderr) _running = True _thread.start()
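A minimal sketch of how this monitor is normally wired into a WSGI script, using only the two public helpers defined above (start() and track()); the tracked path is illustrative.

from lnt.util import wsgi_restart

wsgi_restart.start(interval=1.0)                  # poll sys.modules every second
wsgi_restart.track('/srv/lnt/instance/lnt.cfg')   # also restart when this file changes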
apache-2.0
-1,150,562,447,778,956,900
24.412698
76
0.601499
false
zhangyage/Python-oldboy
day07/paramiko-1.15.2/paramiko-1.15.2/paramiko/pipe.py
44
4005
# Copyright (C) 2003-2007 Robey Pointer <[email protected]> # # This file is part of paramiko. # # Paramiko is free software; you can redistribute it and/or modify it under the # terms of the GNU Lesser General Public License as published by the Free # Software Foundation; either version 2.1 of the License, or (at your option) # any later version. # # Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR # A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more # details. # # You should have received a copy of the GNU Lesser General Public License # along with Paramiko; if not, write to the Free Software Foundation, Inc., # 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. """ Abstraction of a one-way pipe where the read end can be used in `select.select`. Normally this is trivial, but Windows makes it nearly impossible. The pipe acts like an Event, which can be set or cleared. When set, the pipe will trigger as readable in `select <select.select>`. """ import sys import os import socket from paramiko.py3compat import b def make_pipe(): if sys.platform[:3] != 'win': p = PosixPipe() else: p = WindowsPipe() return p class PosixPipe (object): def __init__(self): self._rfd, self._wfd = os.pipe() self._set = False self._forever = False self._closed = False def close(self): os.close(self._rfd) os.close(self._wfd) # used for unit tests: self._closed = True def fileno(self): return self._rfd def clear(self): if not self._set or self._forever: return os.read(self._rfd, 1) self._set = False def set(self): if self._set or self._closed: return self._set = True os.write(self._wfd, b'*') def set_forever(self): self._forever = True self.set() class WindowsPipe (object): """ On Windows, only an OS-level "WinSock" may be used in select(), but reads and writes must be to the actual socket object. """ def __init__(self): serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM) serv.bind(('127.0.0.1', 0)) serv.listen(1) # need to save sockets in _rsock/_wsock so they don't get closed self._rsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self._rsock.connect(('127.0.0.1', serv.getsockname()[1])) self._wsock, addr = serv.accept() serv.close() self._set = False self._forever = False self._closed = False def close(self): self._rsock.close() self._wsock.close() # used for unit tests: self._closed = True def fileno(self): return self._rsock.fileno() def clear (self): if not self._set or self._forever: return self._rsock.recv(1) self._set = False def set (self): if self._set or self._closed: return self._set = True self._wsock.send(b'*') def set_forever (self): self._forever = True self.set() class OrPipe (object): def __init__(self, pipe): self._set = False self._partner = None self._pipe = pipe def set(self): self._set = True if not self._partner._set: self._pipe.set() def clear(self): self._set = False if not self._partner._set: self._pipe.clear() def make_or_pipe(pipe): """ wraps a pipe into two pipe-like objects which are "or"d together to affect the real pipe. if either returned pipe is set, the wrapped pipe is set. when both are cleared, the wrapped pipe is cleared. """ p1 = OrPipe(pipe) p2 = OrPipe(pipe) p1._partner = p2 p2._partner = p1 return p1, p2
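A small usage sketch: the read end of the pipe can sit in select() while set()/clear() toggle its readability. Only functions and methods defined above are used; this is not taken from paramiko's documentation.

import select
from paramiko.pipe import make_pipe, make_or_pipe

p = make_pipe()
p.set()                                   # write one byte; becomes readable
readable, _, _ = select.select([p], [], [], 0)
assert readable                           # p.fileno() reported ready
p.clear()                                 # consume the byte; no longer readable

left, right = make_or_pipe(make_pipe())   # two "or"d views over a second pipe
left.set()                                # underlying pipe is set
right.set()
left.clear()                              # stays set until both views are cleared
right.clear()
p.close()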
apache-2.0
4,168,675,356,764,557,000
25.879195
79
0.599501
false
alanjw/GreenOpenERP-Win-X86
python/Lib/site-packages/win32/Demos/mmapfile_demo.py
4
2822
import win32api, mmapfile import winerror import tempfile, os from pywin32_testutil import str2bytes system_info=win32api.GetSystemInfo() page_size=system_info[1] alloc_size=system_info[7] fname=tempfile.mktemp() mapping_name=os.path.split(fname)[1] fsize=8*page_size print fname, fsize, mapping_name m1=mmapfile.mmapfile(File=fname, Name=mapping_name, MaximumSize=fsize) m1.seek(100) m1.write_byte(str2bytes('?')) m1.seek(-1,1) assert m1.read_byte()==str2bytes('?') ## A reopened named mapping should have exact same size as original mapping m2=mmapfile.mmapfile(Name=mapping_name, File=None, MaximumSize=fsize*2) assert m2.size()==m1.size() m1.seek(0,0) m1.write(fsize*str2bytes('s')) assert m2.read(fsize)==fsize*str2bytes('s') move_src=100 move_dest=500 move_size=150 m2.seek(move_src,0) assert m2.tell()==move_src m2.write(str2bytes('m')*move_size) m2.move(move_dest, move_src, move_size) m2.seek(move_dest, 0) assert m2.read(move_size) == str2bytes('m') * move_size ## m2.write('x'* (fsize+1)) m2.close() m1.resize(fsize*2) assert m1.size()==fsize * 2 m1.seek(fsize) m1.write(str2bytes('w') * fsize) m1.flush() m1.close() os.remove(fname) ## Test a file with size larger than 32 bits ## need 10 GB free on drive where your temp folder lives fname_large=tempfile.mktemp() mapping_name='Pywin32_large_mmap' offsetdata=str2bytes('This is start of offset') ## Deliberately use odd numbers to test rounding logic fsize = (1024*1024*1024*10) + 333 offset = (1024*1024*32) + 42 view_size = (1024*1024*16) + 111 ## round mapping size and view size up to multiple of system page size if fsize%page_size: fsize += page_size - (fsize%page_size) if view_size%page_size: view_size += page_size - (view_size%page_size) ## round offset down to multiple of allocation granularity offset -= offset%alloc_size m1=None m2=None try: try: m1=mmapfile.mmapfile(fname_large, mapping_name, fsize, 0, offset*2) except mmapfile.error, exc: # if we don't have enough disk-space, that's OK. if exc.winerror!=winerror.ERROR_DISK_FULL: raise print "skipping large file test - need", fsize, "available bytes." else: m1.seek(offset) m1.write(offsetdata) ## When reopening an existing mapping without passing a file handle, you have ## to specify a positive size even though it's ignored m2=mmapfile.mmapfile(File=None, Name=mapping_name, MaximumSize=1, FileOffset=offset, NumberOfBytesToMap=view_size) assert m2.read(len(offsetdata))==offsetdata finally: if m1 is not None: m1.close() if m2 is not None: m2.close() if os.path.exists(fname_large): os.remove(fname_large)
agpl-3.0
-6,936,967,356,234,964,000
27.705263
85
0.67399
false
pdellaert/ansible
lib/ansible/modules/cloud/azure/azure_rm_cdnendpoint_info.py
19
10046
#!/usr/bin/python # # Copyright (c) 2019 Hai Cao, <[email protected]> # # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: azure_rm_cdnendpoint_info version_added: "2.9" short_description: Get Azure CDN endpoint facts description: - Get facts for a specific Azure CDN endpoint or all Azure CDN endpoints. options: resource_group: description: - Name of resource group where this CDN profile belongs to. required: true profile_name: description: - Name of CDN profile. required: true name: description: - Limit results to a specific Azure CDN endpoint. tags: description: - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. extends_documentation_fragment: - azure author: - Hai Cao (@caohai) - Yunge zhu (@yungezz) ''' EXAMPLES = ''' - name: Get facts for all endpoints in CDN profile azure_rm_cdnendpoint_info: resource_group: myResourceGroup profile_name: myCDNProfile - name: Get facts of specific CDN endpoint azure_rm_cdnendpoint_info: resource_group: myResourceGroup profile_name: myCDNProfile name: myEndpoint1 ''' RETURN = ''' cdnendpoints: description: List of Azure CDN endpoints. returned: always type: complex contains: resource_group: description: - Name of a resource group where the Azure CDN endpoint exists. returned: always type: str sample: myResourceGroup name: description: - Name of the Azure CDN endpoint. returned: always type: str sample: myEndpoint profile_name: description: - Name of the Azure CDN profile that this endpoint is attached to. returned: always type: str sample: myProfile location: description: - Location of the Azure CDN endpoint. type: str sample: WestUS id: description: - ID of the Azure CDN endpoint. type: str sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myCDN/providers/Microsoft.Cdn/profiles/myProfile/endpoints/myEndpoint1" provisioning_state: description: - Provisioning status of the Azure CDN endpoint. type: str sample: Succeeded resource_state: description: - Resource status of the profile. type: str sample: Running is_compression_enabled: description: - Indicates whether content compression is enabled on CDN. type: bool sample: true is_http_allowed: description: - Indicates whether HTTP traffic is allowed on the endpoint. type: bool sample: true is_https_allowed: description: - Indicates whether HTTPS traffic is allowed on the endpoint. type: bool sample: true query_string_caching_behavior: description: - Defines how CDN caches requests that include query strings. type: str sample: IgnoreQueryString content_types_to_compress: description: - List of content types on which compression applies. type: list sample: [ "text/plain", "text/html", "text/css", "text/javascript", "application/x-javascript", "application/javascript", "application/json", "application/xml" ] origins: description: - The source of the content being delivered via CDN. sample: { "host_name": "xxxxxxxx.blob.core.windows.net", "http_port": null, "https_port": null, "name": "xxxxxxxx-blob-core-windows-net" } origin_host_header: description: - The host header value sent to the origin with each request. type: str sample: xxxxxxxx.blob.core.windows.net origin_path: description: - A directory path on the origin that CDN can use to retrieve content from. 
type: str sample: /pic/ tags: description: - The tags of the Azure CDN endpoint. type: list sample: foo ''' from ansible.module_utils.azure_rm_common import AzureRMModuleBase try: from azure.mgmt.cdn import CdnManagementClient from azure.mgmt.cdn.models import ErrorResponseException from azure.common import AzureHttpError except ImportError: # handled in azure_rm_common pass import re AZURE_OBJECT_CLASS = 'endpoints' class AzureRMCdnEndpointInfo(AzureRMModuleBase): """Utility class to get Azure Azure CDN endpoint facts""" def __init__(self): self.module_args = dict( name=dict(type='str'), resource_group=dict( type='str', required=True ), profile_name=dict( type='str', required=True ), tags=dict(type='list') ) self.results = dict( changed=False, cdnendpoints=[] ) self.name = None self.resource_group = None self.profile_name = None self.tags = None super(AzureRMCdnEndpointInfo, self).__init__( derived_arg_spec=self.module_args, supports_tags=False, facts_module=True ) def exec_module(self, **kwargs): is_old_facts = self.module._name == 'azure_rm_cdnendpoint_facts' if is_old_facts: self.module.deprecate("The 'azure_rm_cdnendpoint_facts' module has been renamed to 'azure_rm_cdnendpoint_info'", version='2.13') for key in self.module_args: setattr(self, key, kwargs[key]) self.cdn_client = self.get_mgmt_svc_client(CdnManagementClient, base_url=self._cloud_environment.endpoints.resource_manager, api_version='2017-04-02') if self.name: self.results['cdnendpoints'] = self.get_item() else: self.results['cdnendpoints'] = self.list_by_profile() return self.results def get_item(self): """Get a single Azure Azure CDN endpoint""" self.log('Get properties for {0}'.format(self.name)) item = None result = [] try: item = self.cdn_client.endpoints.get( self.resource_group, self.profile_name, self.name) except ErrorResponseException: pass if item and self.has_tags(item.tags, self.tags): result = [self.serialize_cdnendpoint(item)] return result def list_by_profile(self): """Get all Azure Azure CDN endpoints within an Azure CDN profile""" self.log('List all Azure CDN endpoints within an Azure CDN profile') try: response = self.cdn_client.endpoints.list_by_profile( self.resource_group, self.profile_name) except ErrorResponseException as exc: self.fail('Failed to list all items - {0}'.format(str(exc))) results = [] for item in response: if self.has_tags(item.tags, self.tags): results.append(self.serialize_cdnendpoint(item)) return results def serialize_cdnendpoint(self, cdnendpoint): ''' Convert a Azure CDN endpoint object to dict. 
:param cdn: Azure CDN endpoint object :return: dict ''' result = self.serialize_obj(cdnendpoint, AZURE_OBJECT_CLASS) new_result = {} new_result['id'] = cdnendpoint.id new_result['resource_group'] = re.sub('\\/.*', '', re.sub('.*resourcegroups\\/', '', result['id'])) new_result['profile_name'] = re.sub('\\/.*', '', re.sub('.*profiles\\/', '', result['id'])) new_result['name'] = cdnendpoint.name new_result['type'] = cdnendpoint.type new_result['location'] = cdnendpoint.location new_result['resource_state'] = cdnendpoint.resource_state new_result['provisioning_state'] = cdnendpoint.provisioning_state new_result['query_string_caching_behavior'] = cdnendpoint.query_string_caching_behavior new_result['is_compression_enabled'] = cdnendpoint.is_compression_enabled new_result['is_http_allowed'] = cdnendpoint.is_http_allowed new_result['is_https_allowed'] = cdnendpoint.is_https_allowed new_result['content_types_to_compress'] = cdnendpoint.content_types_to_compress new_result['origin_host_header'] = cdnendpoint.origin_host_header new_result['origin_path'] = cdnendpoint.origin_path new_result['origin'] = dict( name=cdnendpoint.origins[0].name, host_name=cdnendpoint.origins[0].host_name, http_port=cdnendpoint.origins[0].http_port, https_port=cdnendpoint.origins[0].https_port ) new_result['tags'] = cdnendpoint.tags return new_result def main(): """Main module execution code path""" AzureRMCdnEndpointInfo() if __name__ == '__main__': main()
gpl-3.0
714,812,297,882,869,600
30.892063
155
0.570078
false
galaxy001/libtorrent
python_BTL_BitTorrent-5.3-GPL/BTL/twisted_brpc.py
5
24332
# This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. """A generic resource for publishing objects via BRPC. Requires BRPC API Stability: semi-stable """ from __future__ import nested_scopes __version__ = "$Revision: 1.32 $"[11:-2] # System Imports import brpc import urlparse from cStringIO import StringIO from gzip import GzipFile pipeline_debug = False version = "1.0" from BTL.platform import app_name from BTL.reactor_magic import reactor from BTL.exceptions import str_exc from BTL.protocol import SmartReconnectingClientFactory from BTL.brpclib import ServerProxy import twisted.web if twisted.web.__version__ < '0.6.0': raise ImportError("BTL.twisted_brpc requires twisted.web 0.6.0 or greater," " from Twisted 2.4.0.\nYou appear to have twisted.web " "version %s installed at:\n%s" % (twisted.web.__version__, twisted.web.__file__)) from twisted.web import resource, server from twisted.internet import protocol from twisted.python import log, reflect, failure from twisted.web import http from twisted.internet import defer # Useful so people don't need to import brpc directly Fault = brpc.Fault class NoSuchFunction(Fault): """There is no function by the given name.""" pass class Handler: """Handle a BRPC request and store the state for a request in progress. Override the run() method and return result using self.result, a Deferred. We require this class since we're not using threads, so we can't encapsulate state in a running function if we're going to have to wait for results. For example, lets say we want to authenticate against twisted.cred, run a LDAP query and then pass its result to a database query, all as a result of a single BRPC command. We'd use a Handler instance to store the state of the running command. """ def __init__(self, resource, *args): self.resource = resource # the BRPC resource we are connected to self.result = defer.Deferred() self.run(*args) def run(self, *args): # event driven equivalent of 'raise UnimplementedError' try: raise NotImplementedError("Implement run() in subclasses") except: self.result.errback(failure.Failure()) def parse_accept_encoding(header): a = header.split(',') l = [] for i in a: i = i.strip() if ';' not in i: type = i # hmmm l.append(('1', type)) else: type, q = i.split(';') type = type.strip() q = q.strip() junk, q = q.split('=') q = q.strip() if q != '0': l.append((q, type)) l.sort() l.reverse() l = [ t for q, t in l ] return l class BRPC(resource.Resource): """A resource that implements BRPC. You probably want to connect this to '/RPC2'. Methods published can return BRPC serializable results, Faults, Binary, Boolean, DateTime, Deferreds, or Handler instances. By default methods beginning with 'brpc_' are published. Sub-handlers for prefixed methods (e.g., system.listMethods) can be added with putSubHandler. By default, prefixes are separated with a '.'. Override self.separator to change this. 
""" # Error codes for Twisted, if they conflict with yours then # modify them at runtime. NOT_FOUND = 8001 FAILURE = 8002 isLeaf = 1 separator = '.' def __init__(self): resource.Resource.__init__(self) self.subHandlers = {} def putSubHandler(self, prefix, handler): self.subHandlers[prefix] = handler def getSubHandler(self, prefix): return self.subHandlers.get(prefix, None) def getSubHandlerPrefixes(self): return self.subHandlers.keys() def _err(self, *a, **kw): log.err(*a, **kw) def render(self, request): request.setHeader('server', "%s/%s" % (app_name, version)) request.content.seek(0, 0) args, functionPath = brpc.loads(request.content.read()) args, kwargs = args request.functionPath = functionPath try: function = self._getFunction(functionPath) except Fault, f: self._cbRender(f, request) else: request.setHeader("content-type", "application/octet-stream") defer.maybeDeferred(function, *args, **kwargs).addErrback( self._ebRender ).addCallback( self._cbRender, request ) return server.NOT_DONE_YET def _cbRender(self, result, request): if isinstance(result, Handler): result = result.result if not isinstance(result, Fault): result = (result,) try: s = brpc.dumps(result, methodresponse=1) except Exception, e: f = Fault(self.FAILURE, "function:%s can't serialize output: %s" % (request.functionPath, str_exc(e))) self._err(f) s = brpc.dumps(f, methodresponse=1) encoding = request.getHeader("accept-encoding") if encoding: encodings = parse_accept_encoding(encoding) if 'gzip' in encodings or '*' in encodings: sio = StringIO() g = GzipFile(fileobj=sio, mode='wb', compresslevel=9) g.write(s) g.close() s = sio.getvalue() request.setHeader("Content-Encoding", "gzip") request.setHeader("content-length", str(len(s))) request.write(s) request.finish() def _ebRender(self, failure): self._err(failure) if isinstance(failure.value, Fault): return failure.value return Fault(self.FAILURE, "An unhandled exception occurred: %s" % failure.getErrorMessage()) def _getFunction(self, functionPath): """Given a string, return a function, or raise NoSuchFunction. This returned function will be called, and should return the result of the call, a Deferred, or a Fault instance. Override in subclasses if you want your own policy. The default policy is that given functionPath 'foo', return the method at self.brpc_foo, i.e. getattr(self, "brpc_" + functionPath). If functionPath contains self.separator, the sub-handler for the initial prefix is used to search for the remaining path. """ if functionPath.find(self.separator) != -1: prefix, functionPath = functionPath.split(self.separator, 1) handler = self.getSubHandler(prefix) if handler is None: raise NoSuchFunction(self.NOT_FOUND, "no such subHandler %s" % prefix) return handler._getFunction(functionPath) f = getattr(self, "brpc_%s" % functionPath, None) if not f: raise NoSuchFunction(self.NOT_FOUND, "function %s not found" % functionPath) elif not callable(f): raise NoSuchFunction(self.NOT_FOUND, "function %s not callable" % functionPath) else: return f def _listFunctions(self): """Return a list of the names of all brpc methods.""" return reflect.prefixedMethodNames(self.__class__, 'brpc_') class BRPCIntrospection(BRPC): """Implement the BRPC Introspection API. By default, the methodHelp method returns the 'help' method attribute, if it exists, otherwise the __doc__ method attribute, if it exists, otherwise the empty string. To enable the methodSignature method, add a 'signature' method attribute containing a list of lists. See methodSignature's documentation for the format. 
Note the type strings should be BRPC types, not Python types. """ def __init__(self, parent): """Implement Introspection support for an BRPC server. @param parent: the BRPC server to add Introspection support to. """ BRPC.__init__(self) self._brpc_parent = parent def brpc_listMethods(self): """Return a list of the method names implemented by this server.""" functions = [] todo = [(self._brpc_parent, '')] while todo: obj, prefix = todo.pop(0) functions.extend([ prefix + name for name in obj._listFunctions() ]) todo.extend([ (obj.getSubHandler(name), prefix + name + obj.separator) for name in obj.getSubHandlerPrefixes() ]) return functions brpc_listMethods.signature = [['array']] def brpc_methodHelp(self, method): """Return a documentation string describing the use of the given method. """ method = self._brpc_parent._getFunction(method) return (getattr(method, 'help', None) or getattr(method, '__doc__', None) or '') brpc_methodHelp.signature = [['string', 'string']] def brpc_methodSignature(self, method): """Return a list of type signatures. Each type signature is a list of the form [rtype, type1, type2, ...] where rtype is the return type and typeN is the type of the Nth argument. If no signature information is available, the empty string is returned. """ method = self._brpc_parent._getFunction(method) return getattr(method, 'signature', None) or '' brpc_methodSignature.signature = [['array', 'string'], ['string', 'string']] def addIntrospection(brpc): """Add Introspection support to an BRPC server. @param brpc: The brpc server to add Introspection support to. """ brpc.putSubHandler('system', BRPCIntrospection(brpc)) class Query(object): def __init__(self, path, host, method, user=None, password=None, *args): self.path = path self.host = host self.user = user self.password = password self.method = method self.payload = brpc.dumps(args, method) self.deferred = defer.Deferred() self.decode = False class QueryProtocol(http.HTTPClient): # All current queries are pipelined over the connection at # once. When the connection is made, or as queries are made # while a connection exists, queries are all sent to the # server. Pipelining limits can be controlled by the caller. # When a query completes (see parseResponse), if there are no # more queries then an idle timeout gets sets. # The QueryFactory reopens the connection if another query occurs. # # twisted_brpc does currently provide a mechanism for # per-query timeouts. This could be added with another # timeout_call mechanism that calls loseConnection and pops the # current query with an errback. timeout = 300 # idle timeout. 
def log(self, msg, *a): print "%s: %s: %r" % (self.peer, msg, a) def connectionMade(self): http.HTTPClient.connectionMade(self) self.current_queries = [] self.timeout_call = None if pipeline_debug: p = self.transport.getPeer() p = "%s:%d" % (p.host, p.port) self.peer = (id(self.transport), p) self.factory.connectionMade(self) def _cancelTimeout(self): if self.timeout_call and self.timeout_call.active(): self.timeout_call.cancel() self.timeout_call = None def connectionLost(self, reason): http.HTTPClient.connectionLost(self, reason) if pipeline_debug: self.log('connectionLost', reason.getErrorMessage()) self._cancelTimeout() if self.current_queries: # queries failed, put them back if pipeline_debug: self.log('putting back', [q.method for q in self.current_queries]) self.factory.prependQueries(self.current_queries) self.factory.connectionLost(self) def sendCommand(self, command, path): self.transport.write('%s %s HTTP/1.1\r\n' % (command, path)) def setLineMode(self, rest): # twisted is stupid. self.firstLine = 1 return http.HTTPClient.setLineMode(self, rest) def sendQuery(self): self._cancelTimeout() query = self.factory.popQuery() if pipeline_debug: self.log('sending', query.method) self.current_queries.append(query) self.sendCommand('POST', query.path) self.sendHeader('User-Agent', 'BTL/BRPC 1.0') self.sendHeader('Host', query.host) self.sendHeader('Accept-encoding', 'gzip') self.sendHeader('Connection', 'Keep-Alive') self.sendHeader('Content-type', 'application/octet-stream') self.sendHeader('Content-length', str(len(query.payload))) #if query.user: # auth = '%s:%s' % (query.user, query.password) # auth = auth.encode('base64').strip() # self.sendHeader('Authorization', 'Basic %s' % (auth,)) self.endHeaders() self.transport.write(query.payload) def parseResponse(self, contents): query = self.current_queries.pop(0) if pipeline_debug: self.log('responded', query.method) if not self.current_queries: assert not self.factory.anyQueries() assert not self.timeout_call self.timeout_call = reactor.callLater(self.timeout, self.transport.loseConnection) try: response = brpc.loads(contents) except Exception, e: query.deferred.errback(failure.Failure()) del query.deferred else: query.deferred.callback(response[0][0]) del query.deferred def badStatus(self, status, message): query = self.current_queries.pop(0) if pipeline_debug: self.log('failed', query.method) try: raise ValueError(status, message) except: query.deferred.errback(failure.Failure()) del query.deferred self.transport.loseConnection() def handleStatus(self, version, status, message): if status != '200': self.badStatus(status, message) def handleHeader(self, key, val): if not self.current_queries[0].decode: if key.lower() == 'content-encoding' and val.lower() == 'gzip': self.current_queries[0].decode = True def handleResponse(self, contents): if self.current_queries[0].decode: s = StringIO() s.write(contents) s.seek(-1) g = GzipFile(fileobj=s, mode='rb') contents = g.read() g.close() self.parseResponse(contents) class QueryFactory(object): def __init__(self): self.queries = [] self.instance = None def connectionMade(self, instance): self.instance = instance if pipeline_debug: print 'connection made %s' % str(instance.peer) while self.anyQueries(): self.instance.sendQuery() def connectionLost(self, instance): assert self.instance == instance if pipeline_debug: print 'connection lost %s' % str(instance.peer) self.instance = None def prependQueries(self, queries): self.queries = queries + self.queries def popQuery(self): return 
self.queries.pop(0) def anyQueries(self): return bool(self.queries) def addQuery(self, query): self.queries.append(query) if pipeline_debug: print 'addQuery: %s %s' % (self.instance, self.queries) if self.instance: self.instance.sendQuery() def disconnect(self): if not self.instance: return if not hasattr(self.instance, 'transport'): return self.instance.transport.loseConnection() class PersistantSingletonFactory(QueryFactory, SmartReconnectingClientFactory): def clientConnectionFailed(self, connector, reason): if pipeline_debug: print 'clientConnectionFailed %s' % str(connector) return SmartReconnectingClientFactory.clientConnectionFailed(self, connector, reason) def clientConnectionLost(self, connector, unused_reason): self.started = False if not self.anyQueries(): self.continueTrying = False return SmartReconnectingClientFactory.clientConnectionLost(self, connector, unused_reason) class SingletonFactory(QueryFactory, protocol.ClientFactory): def clientConnectionFailed(self, connector, reason): if pipeline_debug: print 'clientConnectionFailed %s' % str(connector) queries = list(self.queries) del self.queries[:] for query in queries: query.deferred.errback(reason) self.started = False class Proxy: """A Proxy for making remote BRPC calls. Pass the URL of the remote BRPC server to the constructor. Use proxy.callRemote('foobar', *args) to call remote method 'foobar' with *args. """ def __init__(self, url, user=None, password=None, retry_forever = True): """ @type url: C{str} @param url: The URL to which to post method calls. Calls will be made over SSL if the scheme is HTTPS. If netloc contains username or password information, these will be used to authenticate, as long as the C{user} and C{password} arguments are not specified. @type user: C{str} or None @param user: The username with which to authenticate with the server when making calls. If specified, overrides any username information embedded in C{url}. If not specified, a value may be taken from C{url} if present. @type password: C{str} or None @param password: The password with which to authenticate with the server when making calls. If specified, overrides any password information embedded in C{url}. If not specified, a value may be taken from C{url} if present. 
""" scheme, netloc, path, params, query, fragment = urlparse.urlparse(url) netlocParts = netloc.split('@') if len(netlocParts) == 2: userpass = netlocParts.pop(0).split(':') self.user = userpass.pop(0) try: self.password = userpass.pop(0) except: self.password = None else: self.user = self.password = None hostport = netlocParts[0].split(':') self.host = hostport.pop(0) try: self.port = int(hostport.pop(0)) except: self.port = None self.path = path if self.path in ['', None]: self.path = '/' self.secure = (scheme == 'https') if user is not None: self.user = user if password is not None: self.password = password if not retry_forever: _Factory = SingletonFactory else: _Factory = PersistantSingletonFactory self.factory = _Factory() self.factory.started = False self.factory.protocol = QueryProtocol def callRemote(self, method, *args, **kwargs): if pipeline_debug: print 'callRemote to %s : %s' % (self.host, method) args = (args, kwargs) query = Query(self.path, self.host, method, self.user, self.password, *args) self.factory.addQuery(query) if pipeline_debug: print 'factory started: %s' % self.factory.started if not self.factory.started: self.factory.started = True def connect(host): if self.secure: if pipeline_debug: print 'connecting to %s' % str((host, self.port or 443)) from twisted.internet import ssl reactor.connectSSL(host, self.port or 443, self.factory, ssl.ClientContextFactory(), timeout=60) else: if pipeline_debug: print 'connecting to %s' % str((host, self.port or 80)) reactor.connectTCP(host, self.port or 80, self.factory, timeout=60) df = reactor.resolve(self.host) df.addCallback(connect) df.addErrback(query.deferred.errback) return query.deferred class AsyncServerProxy(object): def __init__(self, base_url, username=None, password=None, debug=False, retry_forever = True): self.base_url = base_url self.username = username self.password = password self.proxy = Proxy(self.base_url, self.username, self.password, retry_forever) self.debug = debug def __getattr__(self, attr): return self._make_call(attr) def _make_call(self, methodname): return lambda *a, **kw : self._method(methodname, *a, **kw) def _method(self, methodname, *a, **kw): # in case they have changed self.proxy.user = self.username self.proxy.password = self.password if self.debug: print ('callRemote:', self.__class__.__name__, self.base_url, methodname, a, kw) df = self.proxy.callRemote(methodname, *a, **kw) return df class EitherServerProxy(object): SYNC = 0 ASYNC = 1 SYNC_DEFERRED = 2 # BE CAREFUL to call getResult() on the returned Deferred! """Server Proxy that supports both asynchronous and synchronous calls.""" def __init__(self, base_url, username = None, password = None, debug = False, async = ASYNC, retry_forever = True ): """ The EitherServerProxy can make either synchronous or asynchronous calls. The default is specified by the async parameter to __init__, but each individual call can override the default behavior by passing 'async' as a boolean keyword argument to any method call. The async keyword argument can also be set to None. However, passing async as None means simply 'use default behavior'. When calling with async=SYNC, you should not be in the same thread as the reactor or you risk blocking the reactor. @param async: determines whether the default is asynchronous or blocking calls.""" assert async in [SYNC, ASYNC, SYNC_DEFERRED] self.async = async self.async_proxy = AsyncServerProxy( base_url, username, password, debug, retry_forever = retry_forever ) # HERE HACK. 
retry_forever is not supported by ServerProxy. self.sync_proxy = ServerProxy( base_url ) def __getattr__(self, attr): return self._make_call(attr) def _make_call(self, methodname): return lambda *a, **kw : self._method(methodname, *a, **kw) def _method(self, methodname, *a, **kw ): async = kw.pop('async', self.async) if async is None: async = self.async if async == ASYNC: df = self.async_proxy._method(methodname, *a, **kw) elif async == SYNC_DEFERRED: df = defer.execute(getattr(self.sync_proxy, methodname), *a, **kw) else: return self.sync_proxy.__getattr__(methodname)(*a, **kw) return df SYNC = EitherServerProxy.SYNC ASYNC = EitherServerProxy.ASYNC SYNC_DEFERRED = EitherServerProxy.SYNC_DEFERRED __all__ = ["BRPC", "Handler", "NoSuchFunction", "Fault", "Proxy", "AsyncServerProxy", "EitherServerProxy"]
mit
3,917,561,130,355,708,400
35.370703
106
0.612239
false
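The BRPC proxy classes in the file above (Proxy, AsyncServerProxy, EitherServerProxy) all hinge on one dispatch trick: __getattr__ turns any attribute access into a callRemote(methodname, *args, **kwargs) call on the underlying transport. A minimal, self-contained sketch of that pattern follows; it assumes nothing from BTL or Twisted, and _record_call is a hypothetical stand-in for the real pipelined HTTP transport.

class RemoteCallSketch(object):
    """Minimal illustration of the __getattr__ -> callRemote dispatch used above."""

    def __init__(self, call_remote):
        # call_remote is any callable(methodname, *args, **kwargs)
        self._call_remote = call_remote

    def __getattr__(self, methodname):
        # Every attribute not found on the instance becomes a bound remote call.
        return lambda *a, **kw: self._call_remote(methodname, *a, **kw)


def _record_call(methodname, *args, **kwargs):
    # Hypothetical transport: just record what would have gone over the wire.
    return ("called", methodname, args, kwargs)


proxy = RemoteCallSketch(_record_call)
assert proxy.list_torrents(limit=10) == ("called", "list_torrents", (), {"limit": 10})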
jhayworth/config
.emacs.d/elpy/rpc-venv/lib/python2.7/distutils/__init__.py
5
4374
import os import sys import warnings # opcode is not a virtualenv module, so we can use it to find the stdlib # Important! To work on pypy, this must be a module that resides in the # lib-python/modified-x.y.z directory import opcode dirname = os.path.dirname distutils_path = os.path.join(os.path.dirname(opcode.__file__), "distutils") if os.path.normpath(distutils_path) == os.path.dirname(os.path.normpath(__file__)): warnings.warn("The virtualenv distutils package at %s appears to be in the same location as the system distutils?") else: __path__.insert(0, distutils_path) # noqa: F821 if sys.version_info < (3, 4): import imp real_distutils = imp.load_module("_virtualenv_distutils", None, distutils_path, ("", "", imp.PKG_DIRECTORY)) else: import importlib.machinery distutils_path = os.path.join(distutils_path, "__init__.py") loader = importlib.machinery.SourceFileLoader("_virtualenv_distutils", distutils_path) if sys.version_info < (3, 5): import types real_distutils = types.ModuleType(loader.name) else: import importlib.util spec = importlib.util.spec_from_loader(loader.name, loader) real_distutils = importlib.util.module_from_spec(spec) loader.exec_module(real_distutils) # Copy the relevant attributes try: __revision__ = real_distutils.__revision__ except AttributeError: pass __version__ = real_distutils.__version__ from distutils import dist, sysconfig # isort:skip try: basestring except NameError: basestring = str # patch build_ext (distutils doesn't know how to get the libs directory # path on windows - it hardcodes the paths around the patched sys.prefix) if sys.platform == "win32": from distutils.command.build_ext import build_ext as old_build_ext class build_ext(old_build_ext): def finalize_options(self): if self.library_dirs is None: self.library_dirs = [] elif isinstance(self.library_dirs, basestring): self.library_dirs = self.library_dirs.split(os.pathsep) self.library_dirs.insert(0, os.path.join(sys.real_prefix, "Libs")) old_build_ext.finalize_options(self) from distutils.command import build_ext as build_ext_module build_ext_module.build_ext = build_ext # distutils.dist patches: old_find_config_files = dist.Distribution.find_config_files def find_config_files(self): found = old_find_config_files(self) if os.name == "posix": user_filename = ".pydistutils.cfg" else: user_filename = "pydistutils.cfg" user_filename = os.path.join(sys.prefix, user_filename) if os.path.isfile(user_filename): for item in list(found): if item.endswith("pydistutils.cfg"): found.remove(item) found.append(user_filename) return found dist.Distribution.find_config_files = find_config_files # distutils.sysconfig patches: old_get_python_inc = sysconfig.get_python_inc def sysconfig_get_python_inc(plat_specific=0, prefix=None): if prefix is None: prefix = sys.real_prefix return old_get_python_inc(plat_specific, prefix) sysconfig_get_python_inc.__doc__ = old_get_python_inc.__doc__ sysconfig.get_python_inc = sysconfig_get_python_inc old_get_python_lib = sysconfig.get_python_lib def sysconfig_get_python_lib(plat_specific=0, standard_lib=0, prefix=None): if standard_lib and prefix is None: prefix = sys.real_prefix return old_get_python_lib(plat_specific, standard_lib, prefix) sysconfig_get_python_lib.__doc__ = old_get_python_lib.__doc__ sysconfig.get_python_lib = sysconfig_get_python_lib old_get_config_vars = sysconfig.get_config_vars def sysconfig_get_config_vars(*args): real_vars = old_get_config_vars(*args) if sys.platform == "win32": lib_dir = os.path.join(sys.real_prefix, "libs") if isinstance(real_vars, dict) 
and "LIBDIR" not in real_vars: real_vars["LIBDIR"] = lib_dir # asked for all elif isinstance(real_vars, list) and "LIBDIR" in args: real_vars = real_vars + [lib_dir] # asked for list return real_vars sysconfig_get_config_vars.__doc__ = old_get_config_vars.__doc__ sysconfig.get_config_vars = sysconfig_get_config_vars
gpl-3.0
6,473,410,251,523,373,000
31.641791
119
0.671925
false
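The virtualenv distutils shim above applies the same wrap-and-reassign pattern several times: keep a reference to the original function, define a wrapper that substitutes sys.real_prefix, copy the docstring across, then reassign the module attribute. A standalone sketch of that pattern is below; fake_sysconfig and the '/opt/real-python' prefix are made-up stand-ins so the example runs without a virtualenv.

import types

# Hypothetical module standing in for distutils.sysconfig.
fake_sysconfig = types.ModuleType("fake_sysconfig")

def get_python_inc(prefix=None):
    """Return the include directory for the given prefix."""
    return (prefix or "/usr") + "/include/python"

fake_sysconfig.get_python_inc = get_python_inc

# Wrap-and-reassign, as done for get_python_inc / get_python_lib above.
_old_get_python_inc = fake_sysconfig.get_python_inc

def patched_get_python_inc(prefix=None):
    if prefix is None:
        prefix = "/opt/real-python"   # stands in for sys.real_prefix
    return _old_get_python_inc(prefix)

patched_get_python_inc.__doc__ = _old_get_python_inc.__doc__
fake_sysconfig.get_python_inc = patched_get_python_inc

assert fake_sysconfig.get_python_inc() == "/opt/real-python/include/python"
assert fake_sysconfig.get_python_inc("/custom") == "/custom/include/python"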
hoeck/webkitwindow
webkitwindow.py
1
27686
import sys import os import Queue import StringIO import urlparse import mimetypes import pkgutil import itertools try: from PyQt4 import QtCore, QtGui, QtWebKit, QtNetwork except ImportError: from PySide import QtCore, QtGui, QtWebKit, QtNetwork HTTP_STATUS = { 200: 'OK', 301: 'Moved Permanently', 302: 'Found', 400: 'Bad Request', 404: 'Not Found', 406: 'Not Acceptable', 500: 'Internal Server Error', 503: 'Service Unavailable', } class Message(): """An HTTP message. headers must be a dict of {str: str/unicode}. (unicode gets converted to an utf8 string) body must be either None, str. When unicode, convert it to an utf8 string, else convert it to a str. """ def __init__(self, headers={}, body=None): self.headers = {} for k,v in headers.items(): assert isinstance(k, basestring), "header keys must be strings, not: %r" % (k, ) if isinstance(v, unicode): v = v.decode('utf-8') elif isinstance(v, str): pass else: assert False, "header values must be strings or unicode, not: %r" % (v, ) self.headers[k] = v if isinstance(body, unicode): self.body = body.encode('utf-8') elif isinstance(body, str): self.body = body elif body is None: self.body = "" else: self.body = str(body) self._write_fn = None self._close_fn = None # streaming response data def _set_streaming(self, write_fn, close_fn): self._write_fn = write_fn self._close_fn = close_fn def write(self, data): """Write data for a streaming response. Return True on success, False otherwise. """ if not self._write_fn: raise Exception("not a streaming response") if data: return self._write_fn(data) return False def close(self): """Close the streaming response. Return True on success, False otherwise. """ if not self._write_fn: raise Exception("not a streaming response") return self._close_fn() def _parse_url(obj, url): """Parse url and add the resulting parts as url_* attrs to obj.""" r = urlparse.urlparse(url) obj.url_scheme = r.scheme obj.url_netloc = r.netloc obj.url_path = r.path obj.url_params = r.params obj.url_query = r.query obj.url_query_dict = urlparse.parse_qs(r.query) obj.url_fragment = r.fragment def guess_type(name, default="application/octet-stream"): """Given a path to a file, guess its mimetype.""" guessed_type, encoding = mimetypes.guess_type(name, strict=False) return guessed_type or default class Request(): def __init__(self, method, url, message, fake_reply): self.message = message self.method = method self.url = url self.fake_reply = fake_reply self._streaming = False _parse_url(self, url) def respond(self, status=None, message=None, streaming=False): """Respond to this request with a Message. If streaming is True, initiate a streaming response. Stream data using the passed messages .write(data) method and end the request with .close(). Returns True when the reply was initiated successfully, False if it failed (e.g. when the client has already closed the connection). 
""" assert isinstance(message, Message) status = status or 200 if isinstance(status, (int, long)): status_text = HTTP_STATUS.get(status, '') elif isinstance(status, (tuple, list)): status, status_text = status status = int(status or 200) status_text = str(status_text or '') elif isinstance(status, basestring): status, status_text = status.split(' ', 1) status = int(status) else: raise TypeError("status must be a number or tuple of (status, text), not: %r" % (status, )) if streaming: def _write_fn(data): if self.fake_reply.aborted: return False self.fake_reply.fake_response_write.emit(str(data)) return True def _close_fn(): if self.fake_reply.aborted: return False self.fake_reply.fake_response_close.emit() return True message._set_streaming(write_fn=_write_fn, close_fn=_close_fn) if self.fake_reply.aborted: return False else: self.fake_reply.fake_response.emit(status, status_text, message, True) if message.body is not None: message.write(message.body) return True else: if self.fake_reply.aborted: return False else: self.fake_reply.fake_response.emit(status, status_text, message, False) return True # response shortcuts def notfound(self, msg=""): """Respond with '404 Not Found' and an optional message.""" return self.respond((404, 'Not Found'), Message({'Content-Type': 'text/plain'}, msg)) def gone(self, msg=""): """Respond with a '410 Gone' and an optional message.""" return self.respond((404, 'Not Found'), Message({'Content-Type': 'text/plain'}, msg)) def redirect(self, url): """Respond with a 302 Found to url.""" return self.respond((302, 'Found'), Message({'Location': url})) def found(self, body, content_type="text/plain"): """Respond with a 200, data and content_type.""" return self.respond((200, 'Found'), Message({"Content-Type": content_type}, body)) def found_resource(self, path, module_name, content_type=None, modify_fn=None): """Respond with a 200 and a resource file loaded using pkgutil.get_data. module_name and path are passed to pkgutil.get_data. Optionally run modify_fn on the returned string (e.g. to fill a template). Example to deliver a file from the webkitwindow.resources directory: req.found_resource(path='/styles.css', module_name='webkitwindow.resources', modify_fn=lambda s: s.replace('TODAY', datetime.datetime.now())) """ res_string = pkgutil.get_data(module_name, path) if modify_fn: res_string = modify_fn(res_string) return self.found(body=res_string, content_type=content_type or guess_type(path)) def found_file(self, path, content_type=None): """Respond with a 200 and the file at path, optionally using content_type.""" with open(path) as f: return self.found(body=f.read(), content_type=content_type or guess_type(path)) class WebSocket(): # create and pass this to NetworkHandler in the WebSocketBackend class def __init__(self, url, backend, id): self.url = url self._backend = backend self._id = id _parse_url(self, url) def connected(self): """Confirm a connection.""" self._backend.onopen.emit(self._id) def send(self, data): """Send data over an opened connection.""" self._backend.send_to_client(self._id, data) def close(self): """Close the connection.""" self._backend.server_close(self._id) class NetworkHandler(): """A Class dealing with requests from the embedded webkit. Subclass or ducktype it to implement your own request/websocket handlers. """ def startup(self, window): """Called after application startup. window is the created WebkitWindow instance. """ pass # HTTP def request(self, request): """Incoming Request. Use request.respond(message) to respond. 
""" pass # WebSocket def connect(self, websocket): """Incoming WebSocket conncetion. Call .connected() on the provided websocket object to confirm the connection Call .close() to close or abort the connection. """ pass def receive(self, websocket, data): """Incoming WebSocket data. Call .send() on the provided websocket object to send data back. """ pass def close(self, websocket): """Client has closed the websocket connection.""" pass class AnyValue(QtCore.QObject): def __init__(self, value): self.value = value class AsyncNetworkHandler(QtCore.QObject): _request = QtCore.pyqtSignal(object) _connect = QtCore.pyqtSignal(object) _receive = QtCore.pyqtSignal(object, str) _close = QtCore.pyqtSignal(object) def __init__(self, network_handler): super(AsyncNetworkHandler, self).__init__() self._nh = network_handler self._request.connect(self.request) self._connect.connect(self.connect) self._receive.connect(self.receive) self._close.connect(self.close) # HTTP @QtCore.pyqtSlot(object) def request(self, request): self._nh.request(request) # object @QtCore.pyqtSlot(object) def connect(self, websocket): self._nh.connect(websocket) @QtCore.pyqtSlot(object, str) def receive(self, websocket, data): self._nh.receive(websocket, unicode(data)) @QtCore.pyqtSlot(object) def close(self, websocket): self._nh.close(websocket) class LocalDispatchNetworkAccessManager(QtNetwork.QNetworkAccessManager): """ Custom NetworkAccessManager to intercept requests and dispatch them locally. """ operation_strings = { QtNetwork.QNetworkAccessManager.HeadOperation: 'HEAD', QtNetwork.QNetworkAccessManager.GetOperation: 'GET', QtNetwork.QNetworkAccessManager.PutOperation: 'PUT', QtNetwork.QNetworkAccessManager.PostOperation: 'POST', QtNetwork.QNetworkAccessManager.DeleteOperation: 'DELETE', QtNetwork.QNetworkAccessManager.CustomOperation: None, } def set_network_handler(self, network_handler): # overwriting the ctor with new arguments is not allowed -> use a setter instead self.network_handler = network_handler def createRequest(self, operation, request, data): reply = None # decode operation (== request method) op_str = self.operation_strings[operation] if op_str: method = op_str else: # custom method = str(request.attribute(QNetwork.QNetworkRequest.CustomVerbAttribute).toString()) url = str(request.url().toString()) headers = dict((str(h),str(request.rawHeader(h))) for h in request.rawHeaderList()) # data is a QIODevice or None msg = Message(headers=headers, body=data and str(data.readAll())) reply = FakeReply(self, request, operation) self.network_handler._request.emit(Request(method=method, url=url, message=msg, fake_reply=reply)) # will .set_response the FakeReply to reply QtCore.QTimer.singleShot(0, lambda:self.finished.emit(reply)) return reply class FakeReply(QtNetwork.QNetworkReply): """ QNetworkReply implementation that returns a given response. 
""" fake_response = QtCore.pyqtSignal(int, str, object, object) fake_response_write = QtCore.pyqtSignal(object) fake_response_close = QtCore.pyqtSignal() def __init__(self, parent, request, operation): QtNetwork.QNetworkReply.__init__(self, parent) self.fake_response.connect(self._fake_response) self.fake_response_write.connect(self._fake_response_write) self.fake_response_close.connect(self._fake_response_close) self._streaming = False self._content = None self._offset = 0 # know when to stop writing into the reply self.aborted = False self.setRequest(request) self.setUrl(request.url()) self.setOperation(operation) self.open(self.ReadOnly | self.Unbuffered) @QtCore.pyqtSlot(int, str, object, object) def _fake_response(self, status, status_text, response, streaming): assert isinstance(response, Message) # status self.setAttribute(QtNetwork.QNetworkRequest.HttpStatusCodeAttribute, status) self.setAttribute(QtNetwork.QNetworkRequest.HttpReasonPhraseAttribute, status_text) # headers for k,v in response.headers.items(): self.setRawHeader(QtCore.QByteArray(k), QtCore.QByteArray(v)) if streaming: # streaming response, call fake_response_write and fake_response_close self._streaming = True self._content = StringIO.StringIO() else: self._content = response.body self._offset = 0 # respond immediately if self._content and not 'Content-Length' in response.headers: self.setHeader(QtNetwork.QNetworkRequest.ContentLengthHeader, QtCore.QVariant(len(self._content))) QtCore.QTimer.singleShot(0, lambda : self.readyRead.emit()) QtCore.QTimer.singleShot(0, lambda : self.finished.emit()) @QtCore.pyqtSlot(object) def _fake_response_write(self, response): assert isinstance(response, basestring) assert self._streaming, "not a streaming response" self._content.write(response) self.readyRead.emit() @QtCore.pyqtSlot() def _fake_response_close(self): assert self._streaming, "not a streaming response" self.finished.emit() def abort(self): self.aborted = True self.finished.emit() def bytesAvailable(self): if isinstance(self._content, StringIO.StringIO): c = self._content.getvalue() else: c = self._content avail = long(len(c) - self._offset + super(FakeReply, self).bytesAvailable()) return avail def isSequential(self): return True def readData(self, max_size): if isinstance(self._content, StringIO.StringIO): c = self._content.getvalue() else: c = self._content if self._offset < len(c): size = min(max_size, len(c)-self._offset) data = c[self._offset:self._offset+size] self._offset += size return data else: return None class WebSocketBackend(QtCore.QObject): # javascript websocket events fo the given connection_id onmessage = QtCore.pyqtSignal(int, str) onopen = QtCore.pyqtSignal(int) onclose = QtCore.pyqtSignal(int) def __init__(self, network_handler): super(WebSocketBackend, self).__init__() self._connections = {} self._ids = itertools.count() self._network_handler = network_handler @QtCore.pyqtSlot(str, result=int) def connect(self, url): """Create a websocket connection.""" id = self._ids.next() ws = WebSocket(str(url), self, id) self._connections[id] = ws QtCore.QTimer.singleShot(0, lambda: self._network_handler._connect.emit(ws)) #?????? 
return id @QtCore.pyqtSlot(int) def client_close(self, id): """Close the given websocket connection, initiated from the client.""" self._network_handler._close.emit(self._connections[id]) del self._connections[id] def server_close(self, id): """Close the given websocket connection, initiated from the server.""" del self._connections[id] self.onclose.emit(id) @QtCore.pyqtSlot(int, str) def send_to_server(self, id, data): """Send data on the given websocket connection to the network_handler.""" self._network_handler._receive.emit(self._connections[id], data) def send_to_client(self, id, data): """Send data from the backend to the given websocket in the browser.""" assert self._connections[id] self.onmessage.emit(id, data) class CustomQWebPage(QtWebKit.QWebPage): """QWebPage subclass to be able to implement shouldInterruptJavaScript. See http://doc.qt.io/qt-4.8/qwebpage.html#shouldInterruptJavaScript Additionally provides a configurable javascript console message handler, possible values: 'print' .. print the console message to stdout (the default) function .. call function on each message with a dict of message, line_number and source_id keys None .. do nothing The underlying javaScriptConsoleMessage method will be called for console.log() calls, ignoring everything but the first args and for javascript errors. TODO: - allow for customization of shouldInterruptJavaScript - custom settings for each created iframe - implement the other javascript* handlers (alert, prompt, confirm """ def __init__(self, console_message='print'): self._console_message = console_message QtWebKit.QWebPage.__init__(self) @QtCore.pyqtSlot(result=bool) def shouldInterruptJavaScript(self): return False def javaScriptConsoleMessage(self, message, lineNumber, sourceID): if self._console_message == 'print': print u'js-console: {} ({}:{})'.format(unicode(message), unicode(sourceID), unicode(lineNumber)).encode('utf-8', 'ignore') elif self._console_message: self._console_message({'message': unicode(message), 'line_number': unicode(lineNumber), 'source_id': unicode(sourceID)}) else: pass class _WebkitWindow(QtGui.QMainWindow): _close_window = QtCore.pyqtSignal() _set_zoom_factor = QtCore.pyqtSignal(float) def __init__(self, network_handler, url=None, console_message='print', no_focus_classname=None): self._console_message = console_message self.url = url or "http://localhost" self.network_handler = AsyncNetworkHandler(network_handler) self.no_focus_classname = no_focus_classname QtGui.QMainWindow.__init__(self) self.setup() self._set_zoom_factor.connect(self.zoom_factor) def setup(self): centralwidget = QtGui.QWidget() centralwidget.setObjectName("centralwidget") horizontalLayout = QtGui.QHBoxLayout(centralwidget) horizontalLayout.setObjectName("horizontalLayout") self.webview = QtWebKit.QWebView(centralwidget) webpage = CustomQWebPage(console_message=self._console_message) # set the custom NAM nam = LocalDispatchNetworkAccessManager() nam.set_network_handler(self.network_handler) webpage.setNetworkAccessManager(nam) # websocket requests do not go through the custom NAM # -> catch them in the javascript directly self.websocket_backend = WebSocketBackend(self.network_handler) self.setup_local_websockets(webpage) self.webview.setPage(webpage) # implement the custom focus rule for iframes self.setup_micro_focus_handler(webpage) horizontalLayout.addWidget(self.webview) horizontalLayout.setContentsMargins(0, 0, 0, 0) self.setCentralWidget(centralwidget) self.webview.setUrl(QtCore.QUrl(self.url)) # setup webkit gs = 
QtWebKit.QWebSettings.globalSettings() gs.setAttribute(QtWebKit.QWebSettings.PluginsEnabled, True) gs.setAttribute(QtWebKit.QWebSettings.JavascriptEnabled, True) gs.setAttribute(QtWebKit.QWebSettings.AutoLoadImages, True) gs.setAttribute(QtWebKit.QWebSettings.JavascriptCanOpenWindows, True) gs.setAttribute(QtWebKit.QWebSettings.DeveloperExtrasEnabled, True) gs.setAttribute(QtWebKit.QWebSettings.LocalContentCanAccessRemoteUrls, True) # setup app details QtGui.QApplication.setApplicationName("Panel") QtGui.QApplication.setOrganizationName("Panel") # close slot def _close_handler(): # without resetting the QtWebView widget, I get segfaults # when closing this window self.setCentralWidget(QtGui.QWidget()) self.close() self._close_window.connect(_close_handler) ### Capturing Websocket Connections # For WebSockets, QtWebKit does not use the # QNetworkAccessManager. Thus we 'intercept' WebSocket connection # attempts by adding our own implementation of the WebSocket # interface to the javascript window context of each new frame. websocket_js = """ /** * Provide a Websocket interface that uses a QT object (_wsExt) * instead of the network to be able to proxy the websocket * communication. */ (function() { // pass the local interfacing object via window globals var wsExt = window._wsExt; window._wsExt = undefined; window.WebSocket = function(url) { var self = this, connId; self.CONNECTING = 0; // The connection has not yet been established. self.OPEN = 1; // The WebSocket connection is established and communication is possible. self.CLOSING = 2; // The connection is going through the closing handshake. self.CLOSED = 4; // The connection has been closed or could not be opened. self.url = url; self.readyState = self.CONNECTING; self.extensions = ""; self.protocol = ""; self.onopen = undefined; self.onmessage = undefined; self.onerror = undefined; self.onclose = undefined; self.send = function(data) { wsExt.send_to_server(connId, data); }; self.close = function(code, reason) { if (self.readyState === self.CLOSING || self.readyState === self.CLOSED) { // nothing } else if (self.readyState === self.OPEN) { self.readyState = self.CLOSING; wsExt.close(connId); if (self.onclose) { self.onclose(); } } else { self.readyState == CLOSED; } }; // register callbacks on the Qt side wsExt.onopen.connect(function(id) { if (id === connId) { self.readyState = self.OPEN; if (self.onopen) { self.onopen(); } } }); wsExt.onmessage.connect(function(id, data) { if (id === connId) { if (self.onmessage) { self.onmessage({data:data}); } } }); wsExt.onclose.connect(function(id) { if (id === connId) { self.readyState = self.CLOSED; if (self.onclose) { self.onclose(); } } }); // init connId = wsExt.connect(url); }; })(); """ def setup_local_websockets_on_frame(self, qwebframe): def _load_js(f=qwebframe, js=self.websocket_js, websocket_backend=self.websocket_backend): # without passing arguments as default keyword arguments, I get strange errors: # "NameError: free variable 'self' referenced before assignment in enclosing scope" # which looks like sombody is trying to null all local # arguments at the end of my function f.addToJavaScriptWindowObject("_wsExt", websocket_backend) f.evaluateJavaScript(js) # TODO: 'dispose' the websocket object when the frame is gone (e.g. 
after reload) qwebframe.javaScriptWindowObjectCleared.connect(_load_js) def setup_local_websockets(self, qwebpage): qwebpage.frameCreated.connect(lambda frame: self.setup_local_websockets_on_frame(frame)) def setup_micro_focus_handler(self, qwebpage): """Allow defining IFRAMEs that can't be focused. All iframes that have a css class of `.no_focus_classname` set will pass their (keyboard) focus back to their parent. """ def _steal_focus_from_frame(): p = qwebpage.currentFrame().parentFrame() if p: # blindly assume that .findAllElements and childFrames # return things in the *same* order for e,f in zip(p.findAllElements('iframe'), p.childFrames()): if f.hasFocus() and self.no_focus_classname in list(e.classes()): # TODO: break circles in case `p` is trying to # assign the focus back to `f` p.setFocus() if self.no_focus_classname: qwebpage.microFocusChanged.connect(_steal_focus_from_frame) @QtCore.pyqtSlot(float) def zoom_factor(self, zf=None): """Get or set the zoom factor for the embedded webview.""" if zf == None: return self.webview.zoomFactor() else: assert isinstance(zf, float) self.webview.setZoomFactor(zf) class WebkitWindow(object): @classmethod def run(self, handler, url="http://localhost", exit=True, console_message='print', no_focus_classname=None): """Open a window displaying a single webkit instance. handler must be an object implementing the NetworkHandler interface (or deriving from it). Navigate the webkit to url after opening it. console_message ('print', function that receives a dict or None) controls how to deal with javascript console messages, see CustomQWebPage. no_focus_classname should be a css classname that, when set on an iframe element, will prevent this element from being focused permanently - it will pass the focus back to its parent iframe. Use None (the default) to turn this feature off. If exit is true, sys.exit after closing the window. """ win = self(handler, url, exit, console_message, no_focus_classname) return win._run() @staticmethod def run_later(f, timeout=None): """Enqueue and run function f on the main thread.""" QtCore.QTimer.singleShot(timeout or 0, f) def __init__(self, handler, url, exit, console_message, no_focus_classname): self._handler = handler self._url = url self._exit = exit self._console_message = console_message self._no_focus_classname = no_focus_classname def _run(self): app = QtGui.QApplication(sys.argv) self._window = _WebkitWindow(self._handler, self._url, self._console_message, self._no_focus_classname) self._window.show() if getattr(self._handler, 'startup', None): self.run_later(lambda:self._handler.startup(self)) if self._exit: sys.exit(app.exec_()) else: return app.exec_() def close(self): """Close this WebkitWindow and exit.""" self._window._close_window.emit() def zoom_factor(self, zoom_factor=None): """Get or set the zoom factor.""" if zoom_factor == None: return self._window.zoom_factor() else: assert isinstance(zoom_factor, (int, long, float)) self._window._set_zoom_factor.emit(float(zoom_factor))
bsd-3-clause
7,293,582,756,853,621,000
33.096059
150
0.612728
false
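In webkitwindow.py above, application logic lives in a NetworkHandler: each request arrives with its URL already parsed onto attributes such as url_path, and the handler answers through helpers like found() and notfound(). The sketch below shows that routing shape with a hypothetical _FakeRequest in place of the Qt-backed Request, so it runs without PyQt4/PySide installed.

class _FakeRequest(object):
    # Hypothetical stand-in exposing only the parts of Request a handler touches.
    def __init__(self, method, url_path):
        self.method = method
        self.url_path = url_path
        self.responses = []

    def found(self, body, content_type="text/plain"):
        self.responses.append((200, content_type, body))
        return True

    def notfound(self, msg=""):
        self.responses.append((404, "text/plain", msg))
        return True


class ExampleHandler(object):
    """Routes requests the way a NetworkHandler.request() typically would."""

    def request(self, req):
        if req.url_path == "/":
            return req.found("<h1>hello</h1>", "text/html")
        if req.url_path == "/ping":
            return req.found("pong")
        return req.notfound("no such path: %s" % req.url_path)


handler = ExampleHandler()
req = _FakeRequest("GET", "/ping")
handler.request(req)
assert req.responses == [(200, "text/plain", "pong")]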
sivas2811/mocha_739
hotdot_env/lib/python2.7/site-packages/setuptools/_backport/hashlib/_sha512.py
77
14505
""" This code was Ported from CPython's sha512module.c """ import struct SHA_BLOCKSIZE = 128 SHA_DIGESTSIZE = 64 def new_shaobject(): return { 'digest': [0]*8, 'count_lo': 0, 'count_hi': 0, 'data': [0]* SHA_BLOCKSIZE, 'local': 0, 'digestsize': 0 } ROR64 = lambda x, y: (((x & 0xffffffffffffffff) >> (y & 63)) | (x << (64 - (y & 63)))) & 0xffffffffffffffff Ch = lambda x, y, z: (z ^ (x & (y ^ z))) Maj = lambda x, y, z: (((x | y) & z) | (x & y)) S = lambda x, n: ROR64(x, n) R = lambda x, n: (x & 0xffffffffffffffff) >> n Sigma0 = lambda x: (S(x, 28) ^ S(x, 34) ^ S(x, 39)) Sigma1 = lambda x: (S(x, 14) ^ S(x, 18) ^ S(x, 41)) Gamma0 = lambda x: (S(x, 1) ^ S(x, 8) ^ R(x, 7)) Gamma1 = lambda x: (S(x, 19) ^ S(x, 61) ^ R(x, 6)) def sha_transform(sha_info): W = [] d = sha_info['data'] for i in xrange(0,16): W.append( (d[8*i]<<56) + (d[8*i+1]<<48) + (d[8*i+2]<<40) + (d[8*i+3]<<32) + (d[8*i+4]<<24) + (d[8*i+5]<<16) + (d[8*i+6]<<8) + d[8*i+7]) for i in xrange(16,80): W.append( (Gamma1(W[i - 2]) + W[i - 7] + Gamma0(W[i - 15]) + W[i - 16]) & 0xffffffffffffffff ) ss = sha_info['digest'][:] def RND(a,b,c,d,e,f,g,h,i,ki): t0 = (h + Sigma1(e) + Ch(e, f, g) + ki + W[i]) & 0xffffffffffffffff t1 = (Sigma0(a) + Maj(a, b, c)) & 0xffffffffffffffff d = (d + t0) & 0xffffffffffffffff h = (t0 + t1) & 0xffffffffffffffff return d & 0xffffffffffffffff, h & 0xffffffffffffffff ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],0,0x428a2f98d728ae22) ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],1,0x7137449123ef65cd) ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],2,0xb5c0fbcfec4d3b2f) ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],3,0xe9b5dba58189dbbc) ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],4,0x3956c25bf348b538) ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],5,0x59f111f1b605d019) ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],6,0x923f82a4af194f9b) ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],7,0xab1c5ed5da6d8118) ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],8,0xd807aa98a3030242) ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],9,0x12835b0145706fbe) ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],10,0x243185be4ee4b28c) ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],11,0x550c7dc3d5ffb4e2) ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],12,0x72be5d74f27b896f) ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],13,0x80deb1fe3b1696b1) ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],14,0x9bdc06a725c71235) ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],15,0xc19bf174cf692694) ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],16,0xe49b69c19ef14ad2) ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],17,0xefbe4786384f25e3) ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],18,0x0fc19dc68b8cd5b5) ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],19,0x240ca1cc77ac9c65) ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],20,0x2de92c6f592b0275) ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],21,0x4a7484aa6ea6e483) ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],22,0x5cb0a9dcbd41fbd4) ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],23,0x76f988da831153b5) ss[3], ss[7] = 
RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],24,0x983e5152ee66dfab) ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],25,0xa831c66d2db43210) ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],26,0xb00327c898fb213f) ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],27,0xbf597fc7beef0ee4) ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],28,0xc6e00bf33da88fc2) ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],29,0xd5a79147930aa725) ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],30,0x06ca6351e003826f) ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],31,0x142929670a0e6e70) ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],32,0x27b70a8546d22ffc) ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],33,0x2e1b21385c26c926) ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],34,0x4d2c6dfc5ac42aed) ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],35,0x53380d139d95b3df) ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],36,0x650a73548baf63de) ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],37,0x766a0abb3c77b2a8) ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],38,0x81c2c92e47edaee6) ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],39,0x92722c851482353b) ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],40,0xa2bfe8a14cf10364) ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],41,0xa81a664bbc423001) ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],42,0xc24b8b70d0f89791) ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],43,0xc76c51a30654be30) ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],44,0xd192e819d6ef5218) ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],45,0xd69906245565a910) ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],46,0xf40e35855771202a) ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],47,0x106aa07032bbd1b8) ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],48,0x19a4c116b8d2d0c8) ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],49,0x1e376c085141ab53) ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],50,0x2748774cdf8eeb99) ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],51,0x34b0bcb5e19b48a8) ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],52,0x391c0cb3c5c95a63) ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],53,0x4ed8aa4ae3418acb) ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],54,0x5b9cca4f7763e373) ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],55,0x682e6ff3d6b2b8a3) ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],56,0x748f82ee5defb2fc) ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],57,0x78a5636f43172f60) ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],58,0x84c87814a1f0ab72) ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],59,0x8cc702081a6439ec) ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],60,0x90befffa23631e28) ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],61,0xa4506cebde82bde9) ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],62,0xbef9a3f7b2c67915) ss[4], ss[0] = 
RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],63,0xc67178f2e372532b) ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],64,0xca273eceea26619c) ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],65,0xd186b8c721c0c207) ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],66,0xeada7dd6cde0eb1e) ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],67,0xf57d4f7fee6ed178) ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],68,0x06f067aa72176fba) ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],69,0x0a637dc5a2c898a6) ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],70,0x113f9804bef90dae) ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],71,0x1b710b35131c471b) ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],72,0x28db77f523047d84) ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],73,0x32caab7b40c72493) ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],74,0x3c9ebe0a15c9bebc) ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],75,0x431d67c49c100d4c) ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],76,0x4cc5d4becb3e42b6) ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],77,0x597f299cfc657e2a) ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],78,0x5fcb6fab3ad6faec) ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],79,0x6c44198c4a475817) dig = [] for i, x in enumerate(sha_info['digest']): dig.append( (x + ss[i]) & 0xffffffffffffffff ) sha_info['digest'] = dig def sha_init(): sha_info = new_shaobject() sha_info['digest'] = [ 0x6a09e667f3bcc908, 0xbb67ae8584caa73b, 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1, 0x510e527fade682d1, 0x9b05688c2b3e6c1f, 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179] sha_info['count_lo'] = 0 sha_info['count_hi'] = 0 sha_info['local'] = 0 sha_info['digestsize'] = 64 return sha_info def sha384_init(): sha_info = new_shaobject() sha_info['digest'] = [ 0xcbbb9d5dc1059ed8, 0x629a292a367cd507, 0x9159015a3070dd17, 0x152fecd8f70e5939, 0x67332667ffc00b31, 0x8eb44a8768581511, 0xdb0c2e0d64f98fa7, 0x47b5481dbefa4fa4] sha_info['count_lo'] = 0 sha_info['count_hi'] = 0 sha_info['local'] = 0 sha_info['digestsize'] = 48 return sha_info def getbuf(s): if isinstance(s, str): return s elif isinstance(s, unicode): return str(s) else: return buffer(s) def sha_update(sha_info, buffer): count = len(buffer) buffer_idx = 0 clo = (sha_info['count_lo'] + (count << 3)) & 0xffffffff if clo < sha_info['count_lo']: sha_info['count_hi'] += 1 sha_info['count_lo'] = clo sha_info['count_hi'] += (count >> 29) if sha_info['local']: i = SHA_BLOCKSIZE - sha_info['local'] if i > count: i = count # copy buffer for x in enumerate(buffer[buffer_idx:buffer_idx+i]): sha_info['data'][sha_info['local']+x[0]] = struct.unpack('B', x[1])[0] count -= i buffer_idx += i sha_info['local'] += i if sha_info['local'] == SHA_BLOCKSIZE: sha_transform(sha_info) sha_info['local'] = 0 else: return while count >= SHA_BLOCKSIZE: # copy buffer sha_info['data'] = [struct.unpack('B',c)[0] for c in buffer[buffer_idx:buffer_idx + SHA_BLOCKSIZE]] count -= SHA_BLOCKSIZE buffer_idx += SHA_BLOCKSIZE sha_transform(sha_info) # copy buffer pos = sha_info['local'] sha_info['data'][pos:pos+count] = [struct.unpack('B',c)[0] for c in buffer[buffer_idx:buffer_idx + count]] sha_info['local'] = count def sha_final(sha_info): lo_bit_count = sha_info['count_lo'] hi_bit_count = sha_info['count_hi'] count = 
(lo_bit_count >> 3) & 0x7f sha_info['data'][count] = 0x80; count += 1 if count > SHA_BLOCKSIZE - 16: # zero the bytes in data after the count sha_info['data'] = sha_info['data'][:count] + ([0] * (SHA_BLOCKSIZE - count)) sha_transform(sha_info) # zero bytes in data sha_info['data'] = [0] * SHA_BLOCKSIZE else: sha_info['data'] = sha_info['data'][:count] + ([0] * (SHA_BLOCKSIZE - count)) sha_info['data'][112] = 0; sha_info['data'][113] = 0; sha_info['data'][114] = 0; sha_info['data'][115] = 0; sha_info['data'][116] = 0; sha_info['data'][117] = 0; sha_info['data'][118] = 0; sha_info['data'][119] = 0; sha_info['data'][120] = (hi_bit_count >> 24) & 0xff sha_info['data'][121] = (hi_bit_count >> 16) & 0xff sha_info['data'][122] = (hi_bit_count >> 8) & 0xff sha_info['data'][123] = (hi_bit_count >> 0) & 0xff sha_info['data'][124] = (lo_bit_count >> 24) & 0xff sha_info['data'][125] = (lo_bit_count >> 16) & 0xff sha_info['data'][126] = (lo_bit_count >> 8) & 0xff sha_info['data'][127] = (lo_bit_count >> 0) & 0xff sha_transform(sha_info) dig = [] for i in sha_info['digest']: dig.extend([ ((i>>56) & 0xff), ((i>>48) & 0xff), ((i>>40) & 0xff), ((i>>32) & 0xff), ((i>>24) & 0xff), ((i>>16) & 0xff), ((i>>8) & 0xff), (i & 0xff) ]) return ''.join([chr(i) for i in dig]) class sha512(object): digest_size = digestsize = SHA_DIGESTSIZE block_size = SHA_BLOCKSIZE def __init__(self, s=None): self._sha = sha_init() if s: sha_update(self._sha, getbuf(s)) def update(self, s): sha_update(self._sha, getbuf(s)) def digest(self): return sha_final(self._sha.copy())[:self._sha['digestsize']] def hexdigest(self): return ''.join(['%.2x' % ord(i) for i in self.digest()]) def copy(self): new = sha512.__new__(sha512) new._sha = self._sha.copy() return new class sha384(sha512): digest_size = digestsize = 48 def __init__(self, s=None): self._sha = sha384_init() if s: sha_update(self._sha, getbuf(s)) def copy(self): new = sha384.__new__(sha384) new._sha = self._sha.copy() return new if __name__ == "__main__": a_str = "just a test string" assert sha512().hexdigest() == "cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e" assert sha512(a_str).hexdigest() == "68be4c6664af867dd1d01c8d77e963d87d77b702400c8fabae355a41b8927a5a5533a7f1c28509bbd65c5f3ac716f33be271fbda0ca018b71a84708c9fae8a53" assert sha512(a_str*7).hexdigest() == "3233acdbfcfff9bff9fc72401d31dbffa62bd24e9ec846f0578d647da73258d9f0879f7fde01fe2cc6516af3f343807fdef79e23d696c923d79931db46bf1819" s = sha512(a_str) s.update(a_str) assert s.hexdigest() == "341aeb668730bbb48127d5531115f3c39d12cb9586a6ca770898398aff2411087cfe0b570689adf328cddeb1f00803acce6737a19f310b53bbdb0320828f75bb"
unlicense
8,364,937,748,310,473,000
49.364583
186
0.569459
false
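The _sha512 backport above ends with a self-test against hard-coded vectors. Those same vectors can be reproduced with the standard library's hashlib, which is a quick way to cross-check the pure-Python implementation; the snippet reuses the two hex digests asserted in the file itself (byte strings are needed on Python 3).

import hashlib

# Expected digests taken from the module's own __main__ self-test.
EMPTY_SHA512 = ("cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce"
                "47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e")
TEST_SHA512 = ("68be4c6664af867dd1d01c8d77e963d87d77b702400c8fabae355a41b8927a5a"
               "5533a7f1c28509bbd65c5f3ac716f33be271fbda0ca018b71a84708c9fae8a53")

assert hashlib.sha512(b"").hexdigest() == EMPTY_SHA512
assert hashlib.sha512(b"just a test string").hexdigest() == TEST_SHA512
print("hashlib agrees with the backport's test vectors")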
moreati/ppeg
setup.py
1
1037
#!/usr/bin/env python
import io
import os

from setuptools import setup, Extension


def read(fname, encoding='utf-8'):
    here = os.path.dirname(__file__)
    with io.open(os.path.join(here, fname), encoding=encoding) as f:
        return f.read()

setup (
    name='PPeg',
    version='0.9.4',
    description="A Python port of Lua's LPeg pattern matching library",
    long_description=read('README.rst'),
    url='https://github.com/moreati/ppeg',
    author='Alex Willmer',
    author_email='[email protected]',
    license='MIT',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2 :: Only',
        'Topic :: Text Processing :: General',
    ],
    keywords='parsing peg grammar regex',
    ext_modules = [Extension('_ppeg', ['_ppeg.c', 'lpeg.c']),
                   Extension('_cpeg', ['_cpeg.c'])],
    py_modules=[
        'PythonImpl',
        'pegmatcher',
    ],
)
mit
8,715,464,515,694,957,000
22.568182
71
0.588235
false
tomSny/XStarLogViewer
LogAnalyzer/py2exe/LogAnalyzer.py
3
12537
#!/usr/bin/env python # # A module to analyze and identify any common problems which can be determined from log files # # Initial code by Andrew Chapman ([email protected]), 16th Jan 2014 # # some logging oddities noticed while doing this, to be followed up on: # - tradheli MOT labels Mot1,Mot2,Mot3,Mot4,GGain # - Pixhawk doesn't output one of the FMT labels... forget which one # - MAG offsets seem to be constant (only seen data on Pixhawk) # - MAG offsets seem to be cast to int before being output? (param is -84.67, logged as -84) # - copter+plane use 'V' in their vehicle type/version/build line, rover uses lower case 'v'. Copter+Rover give a build number, plane does not # - CTUN.ThrOut on copter is 0-1000, on plane+rover it is 0-100 # TODO: add test for noisy baro values # TODO: support loading binary log files (use Tridge's mavlogdump?) import DataflashLog import pprint # temp import imp import glob import inspect import os, sys import argparse import datetime import time from xml.sax.saxutils import escape from VehicleType import VehicleType class TestResult(object): '''all tests return a standardized result type''' class StatusType: # NA means not applicable for this log (e.g. copter tests against a plane log), UNKNOWN means it is missing data required for the test GOOD, FAIL, WARN, UNKNOWN, NA = range(5) status = None statusMessage = "" # can be multi-line class Test(object): '''base class to be inherited by log tests. Each test should be quite granular so we have lots of small tests with clear results''' def __init__(self): self.name = "" self.result = None # will be an instance of TestResult after being run self.execTime = None self.enable = True def run(self, logdata, verbose=False): pass class TestSuite(object): '''registers test classes, loading using a basic plugin architecture, and can run them all in one run() operation''' def __init__(self): self.tests = [] self.logfile = None self.logdata = None # dynamically load in Test subclasses from the 'tests' folder # to prevent one being loaded, move it out of that folder, or set that test's .enable attribute to False dirName = os.path.dirname(os.path.abspath(__file__)) dirName = dirName.replace('library.zip','') testScripts = glob.glob(dirName + '/tests/*.py') testClasses = [] for script in testScripts: m = imp.load_source("m",script) for name, obj in inspect.getmembers(m, inspect.isclass): if name not in testClasses and inspect.getsourcefile(obj) == script: testClasses.append(name) self.tests.append(obj()) # and here's an example of explicitly loading a Test class if you wanted to do that # m = imp.load_source("m", dirName + '/tests/TestBadParams.py') # self.tests.append(m.TestBadParams()) def run(self, logdata, verbose): '''run all registered tests in a single call, gathering execution timing info''' self.logdata = logdata if 'GPS' not in self.logdata.channels and 'GPS2' in self.logdata.channels: # *cough* self.logdata.channels['GPS'] = self.logdata.channels['GPS2'] self.logfile = logdata.filename for test in self.tests: # run each test in turn, gathering timing info if test.enable: startTime = time.time() test.run(self.logdata, verbose) # RUN THE TEST endTime = time.time() test.execTime = 1000 * (endTime-startTime) def outputPlainText(self, outputStats): '''output test results in plain text''' print 'Dataflash log analysis report for file: ' + self.logfile print 'Log size: %.2fmb (%d lines)' % (self.logdata.filesizeKB / 1024.0, self.logdata.lineCount) print 'Log duration: %s' % 
str(datetime.timedelta(seconds=self.logdata.durationSecs)) + '\n' if self.logdata.vehicleType == VehicleType.Copter and self.logdata.getCopterType(): print 'Vehicle Type: %s (%s)' % (self.logdata.vehicleTypeString, self.logdata.getCopterType()) else: print 'Vehicle Type: %s' % self.logdata.vehicleTypeString print 'Firmware Version: %s (%s)' % (self.logdata.firmwareVersion, self.logdata.firmwareHash) print 'Hardware: %s' % self.logdata.hardwareType print 'Free RAM: %s' % self.logdata.freeRAM if self.logdata.skippedLines: print "\nWARNING: %d malformed log lines skipped during read" % self.logdata.skippedLines print '\n' print "Test Results:" for test in self.tests: if not test.enable: continue statusMessageFirstLine = test.result.statusMessage.strip('\n\r').split('\n')[0] statusMessageExtra = test.result.statusMessage.strip('\n\r').split('\n')[1:] execTime = "" if outputStats: execTime = " (%6.2fms)" % (test.execTime) if test.result.status == TestResult.StatusType.GOOD: print " %20s: GOOD %-55s%s" % (test.name, statusMessageFirstLine, execTime) elif test.result.status == TestResult.StatusType.FAIL: print " %20s: FAIL %-55s%s [GRAPH]" % (test.name, statusMessageFirstLine, execTime) elif test.result.status == TestResult.StatusType.WARN: print " %20s: WARN %-55s%s [GRAPH]" % (test.name, statusMessageFirstLine, execTime) elif test.result.status == TestResult.StatusType.NA: # skip any that aren't relevant for this vehicle/hardware/etc continue else: print " %20s: UNKNOWN %-55s%s" % (test.name, statusMessageFirstLine, execTime) #if statusMessageExtra: for line in statusMessageExtra: print " %29s %s" % ("",line) print '\n' print 'The Log Analyzer is currently BETA code.\nFor any support or feedback on the log analyzer please email Andrew Chapman ([email protected])' print '\n' def outputXML(self, xmlFile): '''output test results to an XML file''' # open the file for writing xml = None try: if xmlFile == '-': xml = sys.stdout else: xml = open(xmlFile, 'w') except: sys.stderr.write("Error opening output xml file: %s" % xmlFile) sys.exit(1) # output header info print >>xml, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>" print >>xml, "<loganalysis>" print >>xml, "<header>" print >>xml, " <logfile>" + escape(self.logfile) + "</logfile>" print >>xml, " <sizekb>" + escape(`self.logdata.filesizeKB`) + "</sizekb>" print >>xml, " <sizelines>" + escape(`self.logdata.lineCount`) + "</sizelines>" print >>xml, " <duration>" + escape(str(datetime.timedelta(seconds=self.logdata.durationSecs))) + "</duration>" print >>xml, " <vehicletype>" + escape(self.logdata.vehicleTypeString) + "</vehicletype>" if self.logdata.vehicleType == VehicleType.Copter and self.logdata.getCopterType(): print >>xml, " <coptertype>" + escape(self.logdata.getCopterType()) + "</coptertype>" print >>xml, " <firmwareversion>" + escape(self.logdata.firmwareVersion) + "</firmwareversion>" print >>xml, " <firmwarehash>" + escape(self.logdata.firmwareHash) + "</firmwarehash>" print >>xml, " <hardwaretype>" + escape(self.logdata.hardwareType) + "</hardwaretype>" print >>xml, " <freemem>" + escape(`self.logdata.freeRAM`) + "</freemem>" print >>xml, " <skippedlines>" + escape(`self.logdata.skippedLines`) + "</skippedlines>" print >>xml, "</header>" # output parameters print >>xml, "<params>" for param, value in self.logdata.parameters.items(): print >>xml, " <param name=\"%s\" value=\"%s\" />" % (param,escape(`value`)) print >>xml, "</params>" # output test results print >>xml, "<results>" for test in self.tests: if not test.enable: continue print 
>>xml, " <result>" if test.result.status == TestResult.StatusType.GOOD: print >>xml, " <name>" + escape(test.name) + "</name>" print >>xml, " <status>GOOD</status>" print >>xml, " <message>" + escape(test.result.statusMessage) + "</message>" elif test.result.status == TestResult.StatusType.FAIL: print >>xml, " <name>" + escape(test.name) + "</name>" print >>xml, " <status>FAIL</status>" print >>xml, " <message>" + escape(test.result.statusMessage) + "</message>" print >>xml, " <data>(test data will be embeded here at some point)</data>" elif test.result.status == TestResult.StatusType.WARN: print >>xml, " <name>" + escape(test.name) + "</name>" print >>xml, " <status>WARN</status>" print >>xml, " <message>" + escape(test.result.statusMessage) + "</message>" print >>xml, " <data>(test data will be embeded here at some point)</data>" elif test.result.status == TestResult.StatusType.NA: print >>xml, " <name>" + escape(test.name) + "</name>" print >>xml, " <status>NA</status>" else: print >>xml, " <name>" + escape(test.name) + "</name>" print >>xml, " <status>UNKNOWN</status>" print >>xml, " <message>" + escape(test.result.statusMessage) + "</message>" print >>xml, " </result>" print >>xml, "</results>" print >>xml, "</loganalysis>" xml.close() def main(): dirName = os.path.dirname(os.path.abspath(__file__)) # deal with command line arguments parser = argparse.ArgumentParser(description='Analyze an APM Dataflash log for known issues') parser.add_argument('logfile', type=argparse.FileType('r'), help='path to Dataflash log file (or - for stdin)') parser.add_argument('-f', '--format', metavar='', type=str, action='store', choices=['bin','log','auto'], default='auto', help='log file format: \'bin\',\'log\' or \'auto\'') parser.add_argument('-q', '--quiet', metavar='', action='store_const', const=True, help='quiet mode, do not print results') parser.add_argument('-p', '--profile', metavar='', action='store_const', const=True, help='output performance profiling data') parser.add_argument('-s', '--skip_bad', metavar='', action='store_const', const=True, help='skip over corrupt dataflash lines') parser.add_argument('-e', '--empty', metavar='', action='store_const', const=True, help='run an initial check for an empty log') parser.add_argument('-x', '--xml', type=str, metavar='XML file', nargs='?', const='', default='', help='write output to specified XML file (or - for stdout)') parser.add_argument('-v', '--verbose', metavar='', action='store_const', const=True, help='verbose output') args = parser.parse_args() # load the log startTime = time.time() logdata = DataflashLog.DataflashLog(args.logfile.name, format=args.format, ignoreBadlines=args.skip_bad) # read log endTime = time.time() if args.profile: print "Log file read time: %.2f seconds" % (endTime-startTime) # check for empty log if requested if args.empty: emptyErr = DataflashLog.DataflashLogHelper.isLogEmpty(logdata) if emptyErr: sys.stderr.write("Empty log file: %s, %s" % (logdata.filename, emptyErr)) sys.exit(1) #run the tests, and gather timings testSuite = TestSuite() startTime = time.time() testSuite.run(logdata, args.verbose) # run tests endTime = time.time() if args.profile: print "Test suite run time: %.2f seconds" % (endTime-startTime) # deal with output if not args.quiet: testSuite.outputPlainText(args.profile) if args.xml: testSuite.outputXML(args.xml) if not args.quiet: print "XML output written to file: %s\n" % args.xml if __name__ == "__main__": main()
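

# Illustrative sketch of a custom check: TestSuite above auto-discovers Test
# subclasses placed in the tests/ folder, so a new check only needs to set
# self.name and fill in self.result inside run(). This class and the 'CTUN'
# channel it looks at are hypothetical examples, not part of the original file.
class TestExampleCheck(Test):
    '''minimal example of a granular, self-contained log test'''
    def __init__(self):
        Test.__init__(self)
        self.name = "Example check"

    def run(self, logdata, verbose=False):
        self.result = TestResult()
        if 'CTUN' not in logdata.channels:
            self.result.status = TestResult.StatusType.UNKNOWN
            self.result.statusMessage = "No CTUN data in this log"
        else:
            self.result.status = TestResult.StatusType.GOOD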
gpl-3.0
1,564,750,051,588,259,800
46.851145
179
0.602457
false
ubic135/odoo-design
addons/crm_partner_assign/__openerp__.py
114
2453
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'Partner Assignation & Geolocation', 'version': '1.0', 'category': 'Customer Relationship Management', 'description': """ This is the module used by OpenERP SA to redirect customers to its partners, based on geolocation. ====================================================================================================== This modules lets you geolocate Leads, Opportunities and Partners based on their address. Once the coordinates of the Lead/Opportunity is known, they can be automatically assigned to an appropriate local partner, based on the distance and the weight that was assigned to the partner. """, 'author': 'OpenERP SA', 'depends': ['base_geolocalize', 'crm', 'account', 'portal'], 'data': [ 'security/ir.model.access.csv', 'res_partner_view.xml', 'wizard/crm_forward_to_partner_view.xml', 'wizard/crm_channel_interested_view.xml', 'crm_lead_view.xml', 'crm_partner_assign_data.xml', 'crm_portal_view.xml', 'portal_data.xml', 'report/crm_lead_report_view.xml', 'report/crm_partner_report_view.xml', ], 'demo': [ 'res_partner_demo.xml', 'crm_lead_demo.xml' ], 'test': ['test/partner_assign.yml'], 'installable': True, 'auto_install': False, 'images': ['images/partner_geo_localization.jpeg','images/partner_grade.jpeg'], } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
-5,485,563,293,609,356,000
39.883333
103
0.602528
false
jviada/QuantEcon.py
quantecon/tests/test_lqcontrol.py
7
2299
""" Author: Chase Coleman Filename: test_lqcontrol Tests for lqcontrol.py file """ import sys import os import unittest import numpy as np from scipy.linalg import LinAlgError from numpy.testing import assert_allclose from quantecon.lqcontrol import LQ class TestLQControl(unittest.TestCase): def setUp(self): # Initial Values q = 1. r = 1. rf = 1. a = .95 b = -1. c = .05 beta = .95 T = 1 self.lq_scalar = LQ(q, r, a, b, C=c, beta=beta, T=T, Rf=rf) Q = np.array([[0., 0.], [0., 1]]) R = np.array([[1., 0.], [0., 0]]) RF = np.eye(2) * 100 A = np.ones((2, 2)) * .95 B = np.ones((2, 2)) * -1 self.lq_mat = LQ(Q, R, A, B, beta=beta, T=T, Rf=RF) def tearDown(self): del self.lq_scalar del self.lq_mat def test_scalar_sequences(self): lq_scalar = self.lq_scalar x0 = 2 x_seq, u_seq, w_seq = lq_scalar.compute_sequence(x0) # Solution found by hand u_0 = (-2*lq_scalar.A*lq_scalar.B*lq_scalar.beta*lq_scalar.Rf) / \ (2*lq_scalar.Q+lq_scalar.beta*lq_scalar.Rf*2*lq_scalar.B**2) \ * x0 x_1 = lq_scalar.A * x0 + lq_scalar.B * u_0 + w_seq[0, -1] assert_allclose(u_0, u_seq, rtol=1e-4) assert_allclose(x_1, x_seq[0, -1], rtol=1e-4) def test_mat_sequences(self): lq_mat = self.lq_mat x0 = np.random.randn(2) * 25 x_seq, u_seq, w_seq = lq_mat.compute_sequence(x0) assert_allclose(np.sum(u_seq), .95 * np.sum(x0), atol=1e-3) assert_allclose(x_seq[:, -1], np.zeros_like(x0), atol=1e-3) def test_stationary_mat(self): x0 = np.random.randn(2) * 25 lq_mat = self.lq_mat P, F, d = lq_mat.stationary_values() f_answer = np.array([[-.95, -.95], [0., 0.]]) p_answer = np.array([[1., 0], [0., 0.]]) val_func_lq = np.dot(x0, P).dot(x0) val_func_answer = x0[0]**2 assert_allclose(f_answer, F, atol=1e-3) assert_allclose(val_func_lq, val_func_answer, atol=1e-3) if __name__ == '__main__': suite = unittest.TestLoader().loadTestsFromTestCase(TestLQControl) unittest.TextTestRunner(verbosity=2, stream=sys.stderr).run(suite)
bsd-3-clause
5,212,364,272,007,663,000
23.457447
74
0.541975
false
4dn-dcic/fourfront
src/encoded/upgrade/file.py
2
2394
from snovault import ( upgrade_step, ) @upgrade_step('file_fastq', '1', '2') @upgrade_step('file_calibration', '1', '2') @upgrade_step('file_microscopy', '1', '2') @upgrade_step('file_processed', '1', '2') @upgrade_step('file_reference', '1', '2') def file_1_2(value, system): file_format = value.get('file_format') formats = system['registry']['collections']['FileFormat'] format_item = formats.get(file_format) fuuid = None try: fuuid = str(format_item.uuid) except AttributeError: pass if not fuuid: other_format = formats.get('other') fuuid = str(other_format.uuid) note = value.get('notes', '') note = note + ' FILE FORMAT: ' + file_format value['notes'] = note value['file_format'] = fuuid # need to also check for extra files to upgrade_step extras = value.get('extra_files') if extras: for i, extra in enumerate(extras): eformat = extra.get('file_format') eformat_item = formats.get(eformat) efuuid = None try: efuuid = str(eformat_item.uuid) except AttributeError: pass if not efuuid: other_format = formats.get('other') efuuid = str(other_format.uuid) note = value.get('notes', '') note = note + ' EXTRA FILE FORMAT: ' + str(i) + '-' + eformat value['notes'] = note value['extra_files'][i]['file_format'] = efuuid @upgrade_step('file_processed', '2', '3') @upgrade_step('file_vistrack', '1', '2') def file_track_data_upgrade(value, system): field_map = { "dataset_type": "override_experiment_type", "assay_info": "override_assay_info", "replicate_identifiers": "override_replicate_info", "biosource_name": "override_biosource_name", "experiment_bucket": "override_experiment_bucket", "project_lab": "override_lab_name" } for oldprop, newprop in field_map.items(): oldpropval = value.get(oldprop) if oldpropval: if oldprop == 'replicate_identifiers': if len(oldpropval) > 1: oldpropval = 'merged replicates' else: oldpropval = oldpropval[0] value[newprop] = oldpropval del value[oldprop]
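

# Illustrative sketch (hypothetical helper, not part of the original module):
# file_track_data_upgrade above only renames the old track metadata fields,
# collapsing multiple replicate identifiers into 'merged replicates'. This
# assumes the upgrade_step decorator returns the original function unchanged,
# as registration decorators typically do.
def _example_track_upgrade():
    value = {
        'dataset_type': 'ATAC-seq',
        'replicate_identifiers': ['rep1', 'rep2'],
    }
    file_track_data_upgrade(value, system=None)  # system is not used by this step
    # value is now:
    # {'override_experiment_type': 'ATAC-seq',
    #  'override_replicate_info': 'merged replicates'}
    return value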
mit
-838,971,727,424,048,300
34.205882
77
0.559315
false
ContextLogic/luigi
test/contrib/bigquery_test.py
5
6811
# -*- coding: utf-8 -*- # # Copyright 2015 Twitter Inc # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ These are the unit tests for the BigQuery-luigi binding. """ import luigi from luigi.contrib import bigquery from helpers import unittest from mock import MagicMock PROJECT_ID = 'projectid' DATASET_ID = 'dataset' class TestRunQueryTask(bigquery.BigQueryRunQueryTask): client = MagicMock() query = ''' SELECT 'hello' as field1, 2 as field2 ''' table = luigi.Parameter() def output(self): return bigquery.BigQueryTarget(PROJECT_ID, DATASET_ID, self.table, client=self.client) class TestRunQueryTaskDontFlattenResults(TestRunQueryTask): @property def flatten_results(self): return False class TestRunQueryTaskWithRequires(bigquery.BigQueryRunQueryTask): client = MagicMock() table = luigi.Parameter() def requires(self): return TestRunQueryTask(table='table1') @property def query(self): requires = self.requires().output().table dataset = requires.dataset_id table = requires.table_id return 'SELECT * FROM [{dataset}.{table}]'.format(dataset=dataset, table=table) def output(self): return bigquery.BigQueryTarget(PROJECT_ID, DATASET_ID, self.table, client=self.client) class TestRunQueryTaskWithUdf(bigquery.BigqueryRunQueryTask): client = MagicMock() table = luigi.Parameter() @property def udf_resource_uris(self): return ["gs://test/file1.js", "gs://test/file2.js"] @property def query(self): return 'SELECT 1' def output(self): return bigquery.BigqueryTarget(PROJECT_ID, DATASET_ID, self.table, client=self.client) class TestRunQueryTaskWithoutLegacySql(bigquery.BigqueryRunQueryTask): client = MagicMock() table = luigi.Parameter() @property def use_legacy_sql(self): return False @property def query(self): return 'SELECT 1' def output(self): return bigquery.BigqueryTarget(PROJECT_ID, DATASET_ID, self.table, client=self.client) class TestExternalBigQueryTask(bigquery.ExternalBigQueryTask): client = MagicMock() def output(self): return bigquery.BigQueryTarget(PROJECT_ID, DATASET_ID, 'table1', client=self.client) class TestCreateViewTask(bigquery.BigQueryCreateViewTask): client = MagicMock() view = '''SELECT * FROM table LIMIT 10''' def output(self): return bigquery.BigQueryTarget(PROJECT_ID, DATASET_ID, 'view1', client=self.client) class BigQueryTest(unittest.TestCase): def test_bulk_complete(self): parameters = ['table1', 'table2'] client = MagicMock() client.dataset_exists.return_value = True client.list_tables.return_value = ['table2', 'table3'] TestRunQueryTask.client = client complete = list(TestRunQueryTask.bulk_complete(parameters)) self.assertEqual(complete, ['table2']) def test_dataset_doesnt_exist(self): client = MagicMock() client.dataset_exists.return_value = False TestRunQueryTask.client = client complete = list(TestRunQueryTask.bulk_complete(['table1'])) self.assertEqual(complete, []) def test_query_property(self): task = TestRunQueryTask(table='table2') task.client = MagicMock() task.run() (_, job), _ = task.client.run_job.call_args query = job['configuration']['query']['query'] self.assertEqual(query, 
TestRunQueryTask.query) def test_override_query_property(self): task = TestRunQueryTaskWithRequires(table='table2') task.client = MagicMock() task.run() (_, job), _ = task.client.run_job.call_args query = job['configuration']['query']['query'] expected_table = '[' + DATASET_ID + '.' + task.requires().output().table.table_id + ']' self.assertIn(expected_table, query) self.assertEqual(query, task.query) def test_query_udf(self): task = TestRunQueryTaskWithUdf(table='table2') task.client = MagicMock() task.run() (_, job), _ = task.client.run_job.call_args udfs = [ {'resourceUri': 'gs://test/file1.js'}, {'resourceUri': 'gs://test/file2.js'}, ] self.assertEqual(job['configuration']['query']['userDefinedFunctionResources'], udfs) def test_query_with_legacy_sql(self): task = TestRunQueryTask(table='table2') task.client = MagicMock() task.run() (_, job), _ = task.client.run_job.call_args self.assertEqual(job['configuration']['query']['useLegacySql'], True) def test_query_without_legacy_sql(self): task = TestRunQueryTaskWithoutLegacySql(table='table2') task.client = MagicMock() task.run() (_, job), _ = task.client.run_job.call_args self.assertEqual(job['configuration']['query']['useLegacySql'], False) def test_external_task(self): task = TestExternalBigQueryTask() self.assertIsInstance(task, luigi.ExternalTask) self.assertIsInstance(task, bigquery.MixinBigQueryBulkComplete) def test_create_view(self): task = TestCreateViewTask() task.client.get_view.return_value = None self.assertFalse(task.complete()) task.run() (table, view), _ = task.client.update_view.call_args self.assertEqual(task.output().table, table) self.assertEqual(task.view, view) def test_update_view(self): task = TestCreateViewTask() task.client.get_view.return_value = 'some other query' self.assertFalse(task.complete()) task.run() (table, view), _ = task.client.update_view.call_args self.assertEqual(task.output().table, table) self.assertEqual(task.view, view) def test_view_completed(self): task = TestCreateViewTask() task.client.get_view.return_value = task.view self.assertTrue(task.complete()) def test_flatten_results(self): task = TestRunQueryTask(table='table3') self.assertTrue(task.flatten_results) def test_dont_flatten_results(self): task = TestRunQueryTaskDontFlattenResults(table='table3') self.assertFalse(task.flatten_results)
apache-2.0
3,045,274,923,417,055,000
29.004405
95
0.661283
false
aricchen/openHR
openerp/addons/portal_project/tests/__init__.py
170
1124
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Business Applications # Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from . import test_access_rights checks = [ test_access_rights, ] # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
4,672,943,897,118,079,000
39.142857
78
0.618327
false
public-ink/public-ink
server/appengine/lib/numpy/core/tests/test_function_base.py
16
11429
from __future__ import division, absolute_import, print_function from numpy import (logspace, linspace, geomspace, dtype, array, sctypes, arange, isnan, ndarray, sqrt, nextafter) from numpy.testing import ( TestCase, run_module_suite, assert_, assert_equal, assert_raises, assert_array_equal, assert_allclose, suppress_warnings ) class PhysicalQuantity(float): def __new__(cls, value): return float.__new__(cls, value) def __add__(self, x): assert_(isinstance(x, PhysicalQuantity)) return PhysicalQuantity(float(x) + float(self)) __radd__ = __add__ def __sub__(self, x): assert_(isinstance(x, PhysicalQuantity)) return PhysicalQuantity(float(self) - float(x)) def __rsub__(self, x): assert_(isinstance(x, PhysicalQuantity)) return PhysicalQuantity(float(x) - float(self)) def __mul__(self, x): return PhysicalQuantity(float(x) * float(self)) __rmul__ = __mul__ def __div__(self, x): return PhysicalQuantity(float(self) / float(x)) def __rdiv__(self, x): return PhysicalQuantity(float(x) / float(self)) class PhysicalQuantity2(ndarray): __array_priority__ = 10 class TestLogspace(TestCase): def test_basic(self): y = logspace(0, 6) assert_(len(y) == 50) y = logspace(0, 6, num=100) assert_(y[-1] == 10 ** 6) y = logspace(0, 6, endpoint=0) assert_(y[-1] < 10 ** 6) y = logspace(0, 6, num=7) assert_array_equal(y, [1, 10, 100, 1e3, 1e4, 1e5, 1e6]) def test_dtype(self): y = logspace(0, 6, dtype='float32') assert_equal(y.dtype, dtype('float32')) y = logspace(0, 6, dtype='float64') assert_equal(y.dtype, dtype('float64')) y = logspace(0, 6, dtype='int32') assert_equal(y.dtype, dtype('int32')) def test_physical_quantities(self): a = PhysicalQuantity(1.0) b = PhysicalQuantity(5.0) assert_equal(logspace(a, b), logspace(1.0, 5.0)) def test_subclass(self): a = array(1).view(PhysicalQuantity2) b = array(7).view(PhysicalQuantity2) ls = logspace(a, b) assert type(ls) is PhysicalQuantity2 assert_equal(ls, logspace(1.0, 7.0)) ls = logspace(a, b, 1) assert type(ls) is PhysicalQuantity2 assert_equal(ls, logspace(1.0, 7.0, 1)) class TestGeomspace(TestCase): def test_basic(self): y = geomspace(1, 1e6) assert_(len(y) == 50) y = geomspace(1, 1e6, num=100) assert_(y[-1] == 10 ** 6) y = geomspace(1, 1e6, endpoint=False) assert_(y[-1] < 10 ** 6) y = geomspace(1, 1e6, num=7) assert_array_equal(y, [1, 10, 100, 1e3, 1e4, 1e5, 1e6]) y = geomspace(8, 2, num=3) assert_allclose(y, [8, 4, 2]) assert_array_equal(y.imag, 0) y = geomspace(-1, -100, num=3) assert_array_equal(y, [-1, -10, -100]) assert_array_equal(y.imag, 0) y = geomspace(-100, -1, num=3) assert_array_equal(y, [-100, -10, -1]) assert_array_equal(y.imag, 0) def test_complex(self): # Purely imaginary y = geomspace(1j, 16j, num=5) assert_allclose(y, [1j, 2j, 4j, 8j, 16j]) assert_array_equal(y.real, 0) y = geomspace(-4j, -324j, num=5) assert_allclose(y, [-4j, -12j, -36j, -108j, -324j]) assert_array_equal(y.real, 0) y = geomspace(1+1j, 1000+1000j, num=4) assert_allclose(y, [1+1j, 10+10j, 100+100j, 1000+1000j]) y = geomspace(-1+1j, -1000+1000j, num=4) assert_allclose(y, [-1+1j, -10+10j, -100+100j, -1000+1000j]) # Logarithmic spirals y = geomspace(-1, 1, num=3, dtype=complex) assert_allclose(y, [-1, 1j, +1]) y = geomspace(0+3j, -3+0j, 3) assert_allclose(y, [0+3j, -3/sqrt(2)+3j/sqrt(2), -3+0j]) y = geomspace(0+3j, 3+0j, 3) assert_allclose(y, [0+3j, 3/sqrt(2)+3j/sqrt(2), 3+0j]) y = geomspace(-3+0j, 0-3j, 3) assert_allclose(y, [-3+0j, -3/sqrt(2)-3j/sqrt(2), 0-3j]) y = geomspace(0+3j, -3+0j, 3) assert_allclose(y, [0+3j, -3/sqrt(2)+3j/sqrt(2), -3+0j]) y = geomspace(-2-3j, 5+7j, 7) 
assert_allclose(y, [-2-3j, -0.29058977-4.15771027j, 2.08885354-4.34146838j, 4.58345529-3.16355218j, 6.41401745-0.55233457j, 6.75707386+3.11795092j, 5+7j]) # Type promotion should prevent the -5 from becoming a NaN y = geomspace(3j, -5, 2) assert_allclose(y, [3j, -5]) y = geomspace(-5, 3j, 2) assert_allclose(y, [-5, 3j]) def test_dtype(self): y = geomspace(1, 1e6, dtype='float32') assert_equal(y.dtype, dtype('float32')) y = geomspace(1, 1e6, dtype='float64') assert_equal(y.dtype, dtype('float64')) y = geomspace(1, 1e6, dtype='int32') assert_equal(y.dtype, dtype('int32')) # Native types y = geomspace(1, 1e6, dtype=float) assert_equal(y.dtype, dtype('float_')) y = geomspace(1, 1e6, dtype=complex) assert_equal(y.dtype, dtype('complex')) def test_array_scalar(self): lim1 = array([120, 100], dtype="int8") lim2 = array([-120, -100], dtype="int8") lim3 = array([1200, 1000], dtype="uint16") t1 = geomspace(lim1[0], lim1[1], 5) t2 = geomspace(lim2[0], lim2[1], 5) t3 = geomspace(lim3[0], lim3[1], 5) t4 = geomspace(120.0, 100.0, 5) t5 = geomspace(-120.0, -100.0, 5) t6 = geomspace(1200.0, 1000.0, 5) # t3 uses float32, t6 uses float64 assert_allclose(t1, t4, rtol=1e-2) assert_allclose(t2, t5, rtol=1e-2) assert_allclose(t3, t6, rtol=1e-5) def test_physical_quantities(self): a = PhysicalQuantity(1.0) b = PhysicalQuantity(5.0) assert_equal(geomspace(a, b), geomspace(1.0, 5.0)) def test_subclass(self): a = array(1).view(PhysicalQuantity2) b = array(7).view(PhysicalQuantity2) gs = geomspace(a, b) assert type(gs) is PhysicalQuantity2 assert_equal(gs, geomspace(1.0, 7.0)) gs = geomspace(a, b, 1) assert type(gs) is PhysicalQuantity2 assert_equal(gs, geomspace(1.0, 7.0, 1)) def test_bounds(self): assert_raises(ValueError, geomspace, 0, 10) assert_raises(ValueError, geomspace, 10, 0) assert_raises(ValueError, geomspace, 0, 0) class TestLinspace(TestCase): def test_basic(self): y = linspace(0, 10) assert_(len(y) == 50) y = linspace(2, 10, num=100) assert_(y[-1] == 10) y = linspace(2, 10, endpoint=0) assert_(y[-1] < 10) assert_raises(ValueError, linspace, 0, 10, num=-1) def test_corner(self): y = list(linspace(0, 1, 1)) assert_(y == [0.0], y) with suppress_warnings() as sup: sup.filter(DeprecationWarning, ".*safely interpreted as an integer") y = list(linspace(0, 1, 2.5)) assert_(y == [0.0, 1.0]) def test_type(self): t1 = linspace(0, 1, 0).dtype t2 = linspace(0, 1, 1).dtype t3 = linspace(0, 1, 2).dtype assert_equal(t1, t2) assert_equal(t2, t3) def test_dtype(self): y = linspace(0, 6, dtype='float32') assert_equal(y.dtype, dtype('float32')) y = linspace(0, 6, dtype='float64') assert_equal(y.dtype, dtype('float64')) y = linspace(0, 6, dtype='int32') assert_equal(y.dtype, dtype('int32')) def test_array_scalar(self): lim1 = array([-120, 100], dtype="int8") lim2 = array([120, -100], dtype="int8") lim3 = array([1200, 1000], dtype="uint16") t1 = linspace(lim1[0], lim1[1], 5) t2 = linspace(lim2[0], lim2[1], 5) t3 = linspace(lim3[0], lim3[1], 5) t4 = linspace(-120.0, 100.0, 5) t5 = linspace(120.0, -100.0, 5) t6 = linspace(1200.0, 1000.0, 5) assert_equal(t1, t4) assert_equal(t2, t5) assert_equal(t3, t6) def test_complex(self): lim1 = linspace(1 + 2j, 3 + 4j, 5) t1 = array([1.0+2.j, 1.5+2.5j, 2.0+3j, 2.5+3.5j, 3.0+4j]) lim2 = linspace(1j, 10, 5) t2 = array([0.0+1.j, 2.5+0.75j, 5.0+0.5j, 7.5+0.25j, 10.0+0j]) assert_equal(lim1, t1) assert_equal(lim2, t2) def test_physical_quantities(self): a = PhysicalQuantity(0.0) b = PhysicalQuantity(1.0) assert_equal(linspace(a, b), linspace(0.0, 1.0)) def test_subclass(self): a = 
array(0).view(PhysicalQuantity2) b = array(1).view(PhysicalQuantity2) ls = linspace(a, b) assert type(ls) is PhysicalQuantity2 assert_equal(ls, linspace(0.0, 1.0)) ls = linspace(a, b, 1) assert type(ls) is PhysicalQuantity2 assert_equal(ls, linspace(0.0, 1.0, 1)) def test_array_interface(self): # Regression test for https://github.com/numpy/numpy/pull/6659 # Ensure that start/stop can be objects that implement # __array_interface__ and are convertible to numeric scalars class Arrayish(object): """ A generic object that supports the __array_interface__ and hence can in principle be converted to a numeric scalar, but is not otherwise recognized as numeric, but also happens to support multiplication by floats. Data should be an object that implements the buffer interface, and contains at least 4 bytes. """ def __init__(self, data): self._data = data @property def __array_interface__(self): # Ideally should be `'shape': ()` but the current interface # does not allow that return {'shape': (1,), 'typestr': '<i4', 'data': self._data, 'version': 3} def __mul__(self, other): # For the purposes of this test any multiplication is an # identity operation :) return self one = Arrayish(array(1, dtype='<i4')) five = Arrayish(array(5, dtype='<i4')) assert_equal(linspace(one, five), linspace(1, 5)) def test_denormal_numbers(self): # Regression test for gh-5437. Will probably fail when compiled # with ICC, which flushes denormals to zero for ftype in sctypes['float']: stop = nextafter(ftype(0), ftype(1)) * 5 # A denormal number assert_(any(linspace(0, stop, 10, endpoint=False, dtype=ftype))) def test_equivalent_to_arange(self): for j in range(1000): assert_equal(linspace(0, j, j+1, dtype=int), arange(j+1, dtype=int)) def test_retstep(self): y = linspace(0, 1, 2, retstep=True) assert_(isinstance(y, tuple) and len(y) == 2) for num in (0, 1): for ept in (False, True): y = linspace(0, 1, num, endpoint=ept, retstep=True) assert_(isinstance(y, tuple) and len(y) == 2 and len(y[0]) == num and isnan(y[1]), 'num={0}, endpoint={1}'.format(num, ept)) if __name__ == "__main__": run_module_suite()
gpl-3.0
8,972,132,414,561,593,000
34.166154
80
0.549392
false
ismael-liceras/php-invaders
gameengine.py
1
12699
import pygame from pygame.locals import * from player import Player from stagegenerator import StageGenerator from gamestatus import GameStatus from gameconfig import GameConfig class GameEngine(): def __init__(self, modes=None): pygame.init() self.screen = pygame.display.set_mode((800, 600)) #Modes self.cheater_mode = False if modes is not None and 'cheater' in modes and modes['cheater'] is True: self.cheater_mode = True # Stage Generator self.stage_generator = StageGenerator() # Game Status self.game_status = GameStatus() pygame.display.set_caption('PHP Invaders') pygame.mouse.set_visible(0) self.stage_quantity = GameConfig.get_stage_quantity() self.clock = pygame.time.Clock() self.enemy_box = None #Screens self.screen_type_data = {"mainmenu": 1, "playing": 2, "pause": 3, "waiting4stage": 4, "gameover": 5} self.screen_type = None #Sprites self.sprites = { 'friendly_fire': pygame.sprite.RenderPlain(), 'enemy_fire': pygame.sprite.RenderPlain(), 'enemies': pygame.sprite.RenderPlain(), 'friends': pygame.sprite.RenderPlain(), 'menu_options': pygame.sprite.RenderPlain(), 'special_items': pygame.sprite.RenderPlain(), 'prisoners': pygame.sprite.RenderPlain(), 'others': pygame.sprite.RenderPlain(), } self.player = Player() self.player.add(self.sprites['friends']) #Others self.show_main_stage() self.screen.blit(self.stage_generator.get_background(), (0, 0)) pygame.display.flip() def is_current_screen(self, key): return self.screen_type_data[key] == self.screen_type def set_current_screen(self, key): self.screen_type = self.screen_type_data[key] def clock_tick(self): self.clock.tick(60) def reset_from_gameover(self): self.sprites['enemies'].empty() self.sprites['enemy_fire'].empty() self.sprites['prisoners'].empty() self.player.reset() self.game_status.reset() self.stage_generator.reset() def show_main_stage(self): self.screen_type = self.screen_type_data["mainmenu"] self.sprites['menu_options'] = self.stage_generator.show_main_stage() #Game Status Bar def refresh_status_bar(self): screen = pygame.display.get_surface() offset_y = screen.get_height()-20 pygame.draw.rect(self.stage_generator.get_background(), (0, 0, 0), pygame.Rect(0, screen.get_height() - 25, screen.get_width(), 25), 0); if pygame.font: font = pygame.font.Font(None, 20) fontcolor = (250, 250, 250) status_data = [ ("ver. 
" + GameConfig.get_version() + " by Ismael Liceras", 600), ("STAGE " + str(self.game_status.get_stage()), 10), ("LIVES " + str(self.game_status.get_lives()), 110), ("TIME " + str(self.game_status.get_time()), 210), ("SCORE " + str(self.game_status.get_score()), 310) ] for data_piece in status_data: text = font.render(data_piece[0], 1, fontcolor) self.stage_generator.get_background().blit(text, (data_piece[1], offset_y)) def handle_lifecycle_events(self, event): if event.type == QUIT: return -1 elif event.type == KEYDOWN and event.key == K_ESCAPE: return -1 elif event.type == KEYDOWN and event.key == K_HOME: self.reset_from_gameover() self.show_main_stage() elif event.type == KEYDOWN and event.key == K_PAUSE: if self.is_current_screen("pause"): self.set_current_screen("playing") self.stage_generator.refresh_background() elif self.is_current_screen("playing"): self.set_current_screen("pause") self.stage_generator.show_pause_banner() def handle_playerops_events(self, event): if event.type == KEYDOWN and not self.is_current_screen('pause'): if event.key == K_LEFT: self.player.go_left() elif event.key == K_RIGHT: self.player.go_right() elif event.key == K_SPACE and not self.is_current_screen('gameover'): shoot = self.player.do_shoot() shoot.add(self.sprites['friendly_fire']) elif event.type == KEYUP and \ ((event.key == K_LEFT and self.player.get_direction() == 'left') or (event.key == K_RIGHT and self.player.get_direction() == 'right')): self.player.stop_flying() def handle_timer_events(self, event): #Game's time if event.type == USEREVENT + 1: if self.is_current_screen("playing"): time = self.game_status.run_1_sec() if time == 0: self.go_to_gameover() #Ready's screen elif event.type == USEREVENT + 2: self.sprites['enemies'], self.sprites['prisoners'],\ self.sprites['enemy_fire'], self.enemy_box = self.stage_generator.start_next_stage() pygame.time.set_timer(USEREVENT+1, 1000) self.set_current_screen('playing') self.stage_generator.refresh_background() def handle_cheat_mode_events(self, event): if event.type == KEYDOWN and self.is_current_screen('playing'): if event.key == K_n: print "Next Stage!" 
                for dead_enemy in self.sprites['enemies']:
                    dead_enemy.add(self.sprites['others'])
                    dead_enemy.remove(self.sprites['enemies'])
                    dead_enemy.kill_enemy()

    def handle_events(self):
        for event in pygame.event.get():
            # Exit and pause
            if self.handle_lifecycle_events(event) == -1:
                return -1
            # Player's ops
            self.handle_playerops_events(event)
            # Timer's events
            self.handle_timer_events(event)
            # Cheat mode events
            if self.cheater_mode is True:
                self.handle_cheat_mode_events(event)

    def check_player2enemies_collision(self):
        collision = pygame.sprite.groupcollide(self.sprites['friendly_fire'], self.sprites['enemies'], True, False)
        if len(collision) > 0:
            key, value = collision.popitem()
            dead_enemy = value[0]
            dead_enemy.add(self.sprites['others'])
            dead_enemy.remove(self.sprites['enemies'])
            dead_enemy.kill_enemy()
            self.game_status.add_score(dead_enemy.get_score())
            special_item = dead_enemy.drop_special_item()
            if special_item is not None:
                special_item.add(self.sprites['special_items'])

    def check_enemies2player_collision(self):
        collision = pygame.sprite.groupcollide(self.sprites['enemy_fire'], self.sprites['friends'], True, False)
        if len(collision) > 0:
            if self.player.is_invincible() is not True:
                self.hit_player()

    def hit_player(self):
        if self.game_status.remove_life() == 0:
            self.go_to_gameover()
        else:
            self.player.shocked()
            self.game_status.set_stage_invictus(False)

    def check_menu_collision(self):
        collision = \
            pygame.sprite.groupcollide(self.sprites['friendly_fire'], self.sprites['menu_options'], True, True)
        if len(collision) > 0:
            key, value = collision.popitem()
            option_choosen = value[0]
            self.sprites['menu_options'].empty()
            if option_choosen.get_type_id() == "play":
                self.set_current_screen('waiting4stage')
                self.go_to_next_stage()
            elif option_choosen.get_type_id() == "about":
                self.stage_generator.refresh_background()
                self.sprites['menu_options'] = self.stage_generator.show_about_stage()
            elif option_choosen.get_type_id() == "rules":
                self.sprites['menu_options'] = self.stage_generator.show_rules_stage()
            elif option_choosen.get_type_id() == "back":
                self.stage_generator.refresh_background()
                self.sprites['menu_options'] = self.stage_generator.show_main_stage()
            elif option_choosen.get_type_id() == "exit":
                exit()

    def check_player2specialitem(self):
        collision = pygame.sprite.groupcollide(self.sprites['friends'], self.sprites['special_items'], False, True)
        if len(collision) > 0:
            #Player picks up a special item!
key, value = collision.popitem() item = value[0] item.do_action(self) def check_player2prisoners_collision(self): collision = pygame.sprite.groupcollide(self.sprites['friendly_fire'], self.sprites['prisoners'], True, True) if len(collision) > 0: self.hit_player() def check_collisions(self): if self.is_current_screen('playing'): # Player shoots enemies self.check_player2enemies_collision() # Player shoots prisoners self.check_player2prisoners_collision() # Enemies shoot player self.check_enemies2player_collision() elif self.is_current_screen('mainmenu'): self.check_menu_collision() if self.is_current_screen('playing') or self.is_current_screen('waiting4stage'): self.check_player2specialitem() def go_to_next_stage(self, bonus=None): self.set_current_screen("waiting4stage") self.stage_generator.get_ready_to_next_stage(self.game_status.get_score(), self.game_status.get_stage_score(), bonus) self.game_status.reset_to_next_stage(self.stage_generator.get_current_stage()) def check_stage_clear(self): if self.is_current_screen('playing') and len(self.sprites['enemies']) == 0: bonus = self.add_stage_bonus() if self.stage_generator.get_current_stage() >= self.stage_quantity: self.goto_to_victory() else: self.go_to_next_stage(bonus) def add_stage_bonus(self): bonus = {} bonus['time'] = self.game_status.add_bonus_time() if self.game_status.get_stage_invictus(): bonus['invictus'] = self.game_status.add_bonus_invictus() if len(self.sprites['prisoners']) > 0: bonus['prisoners'] = self.game_status.add_bonus_prisoners(len(self.sprites['prisoners'])) return bonus # GameEngine's main method def do_play(self): self.clock_tick() self.check_collisions() if self.is_current_screen('playing'): self.check_stage_clear() self.update_sprites() self.draw_everything() def draw_everything(self): if not self.is_current_screen('mainmenu'): self.refresh_status_bar() self.screen.blit(self.stage_generator.get_background(), (0, 0)) self.sprites['friendly_fire'].draw(self.screen) self.sprites['enemy_fire'].draw(self.screen) self.sprites['enemies'].draw(self.screen) self.sprites['friends'].draw(self.screen) self.sprites['menu_options'].draw(self.screen) self.sprites['special_items'].draw(self.screen) self.sprites['prisoners'].draw(self.screen) self.sprites['others'].draw(self.screen) pygame.display.flip() def update_sprites(self): if not self.is_current_screen('pause'): self.update_enemies() self.sprites['friendly_fire'].update() self.sprites['enemy_fire'].update() self.sprites['enemies'].update() self.sprites['friends'].update() self.sprites['special_items'].update() self.sprites['prisoners'].update() self.sprites['others'].update() def update_enemies(self): if self.enemy_box is not None: self.enemy_box.update() def go_to_gameover(self): self.player.kill_player() self.set_current_screen("gameover") self.stage_generator.refresh_background() self.stage_generator.show_gameover_banner(self.game_status.score, self.game_status.stage) def goto_to_victory(self): self.player.make_winner() self.set_current_screen("gameover") self.stage_generator.refresh_background() self.stage_generator.show_victory_banner(self.game_status.score)
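

# Illustrative sketch (hypothetical, not part of the original repository): a
# minimal main loop driving GameEngine, assuming pygame and the sibling modules
# (player, stagegenerator, gamestatus, gameconfig) are importable.
# handle_events() returns -1 when the player quits, and do_play() is the
# per-frame method that checks collisions, updates sprites and redraws.
def _example_main():
    engine = GameEngine(modes={'cheater': False})
    while engine.handle_events() != -1:
        engine.do_play()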
mit
628,601,575,994,094,200
40.368078
144
0.586739
false
AutorestCI/azure-sdk-for-python
azure-mgmt-sql/azure/mgmt/sql/models/location_capabilities.py
2
1723
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from msrest.serialization import Model class LocationCapabilities(Model): """The capabilities for a location. Variables are only populated by the server, and will be ignored when sending a request. :ivar name: The location name. :vartype name: str :ivar status: Azure SQL Database's status for the location. Possible values include: 'Visible', 'Available', 'Default', 'Disabled' :vartype status: str or ~azure.mgmt.sql.models.CapabilityStatus :ivar supported_server_versions: The list of supported server versions. :vartype supported_server_versions: list[~azure.mgmt.sql.models.ServerVersionCapability] """ _validation = { 'name': {'readonly': True}, 'status': {'readonly': True}, 'supported_server_versions': {'readonly': True}, } _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'status': {'key': 'status', 'type': 'CapabilityStatus'}, 'supported_server_versions': {'key': 'supportedServerVersions', 'type': '[ServerVersionCapability]'}, } def __init__(self): super(LocationCapabilities, self).__init__() self.name = None self.status = None self.supported_server_versions = None
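

# Illustrative sketch (hypothetical helper, not part of the generated SDK code):
# every attribute above is declared readonly, so a locally constructed instance
# stays empty until a service response is deserialized into the model.
def _example_empty_capabilities():
    caps = LocationCapabilities()
    assert caps.name is None
    assert caps.status is None
    assert caps.supported_server_versions is None
    return caps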
mit
-1,544,947,971,819,445,500
35.659574
109
0.609983
false
Forage/Gramps
gramps/plugins/docgen/rtfdoc.py
1
21285
# # Gramps - a GTK+/GNOME based genealogy program # # Copyright (C) 2000-2006 Donald N. Allingham # Copyright (C) 2007-2009 Brian G. Matherly # Copyright (C) 2009 Gary Burton # Copyright (C) 2010 Peter Landgren # Copyright (C) 2011 Adam Stein <[email protected]> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # # $Id$ #------------------------------------------------------------------------ # # python modules # #------------------------------------------------------------------------ from __future__ import print_function from gramps.gen.const import GRAMPS_LOCALE as glocale _ = glocale.get_translation().gettext import logging LOG = logging.getLogger(".rtfdoc") #------------------------------------------------------------------------ # # Load the base BaseDoc class # #------------------------------------------------------------------------ from gramps.gen.plug.docgen import (BaseDoc, TextDoc, FONT_SERIF, PARA_ALIGN_RIGHT, PARA_ALIGN_CENTER, PARA_ALIGN_JUSTIFY, URL_PATTERN) from gramps.gen.utils.image import image_size, image_actual_size, resize_to_jpeg_buffer from gramps.gen.errors import ReportError from gramps.gen.constfunc import cuni #------------------------------------------------------------------------ # # Set up to make links clickable # #------------------------------------------------------------------------ _CLICKABLE = r'''{\\field{\\*\\fldinst HYPERLINK "\1"}{\\fldrslt \1}}''' #------------------------------------------------------------------------ # # RTF uses a unit called "twips" for its measurements. According to the # RTF specification, 1 point is 20 twips. This routines converts # centimeters to twips # # 2.54 cm/inch 72pts/inch, 20twips/pt # #------------------------------------------------------------------------ def twips(cm): return int(((cm/2.54)*72)+0.5)*20 #------------------------------------------------------------------------ # # Rich Text Format Document interface. The current interface does not # use style sheets. Instead it writes raw formatting. # #------------------------------------------------------------------------ class RTFDoc(BaseDoc,TextDoc): #-------------------------------------------------------------------- # # Opens the file, and writes the header. Builds the color and font # tables. Fonts are chosen using the MS TrueType fonts, since it # is assumed that if you are generating RTF, you are probably # targeting Word. This generator assumes a Western Europe character # set. 
# #-------------------------------------------------------------------- def open(self,filename): if filename[-4:] != ".rtf": self.filename = filename + ".rtf" else: self.filename = filename try: self.f = open(self.filename,"w") except IOError as msg: errmsg = "%s\n%s" % (_("Could not create %s") % self.filename, msg) raise ReportError(errmsg) except: raise ReportError(_("Could not create %s") % self.filename) style_sheet = self.get_style_sheet() self.f.write( '{\\rtf1\\ansi\\ansicpg1252\\deff0\n' '{\\fonttbl\n' '{\\f0\\froman\\fcharset0\\fprq0 Times New Roman;}\n' '{\\f1\\fswiss\\fcharset0\\fprq0 Arial;}}\n' '{\colortbl\n' ) self.color_map = {} index = 1 self.color_map[(0,0,0)] = 0 self.f.write('\\red0\\green0\\blue0;') for style_name in style_sheet.get_paragraph_style_names(): style = style_sheet.get_paragraph_style(style_name) fgcolor = style.get_font().get_color() bgcolor = style.get_background_color() if fgcolor not in self.color_map: self.color_map[fgcolor] = index self.f.write('\\red%d\\green%d\\blue%d;' % fgcolor) index += 1 if bgcolor not in self.color_map: self.f.write('\\red%d\\green%d\\blue%d;' % bgcolor) self.color_map[bgcolor] = index index += 1 self.f.write('}\n') self.f.write( '\\kerning0\\cf0\\viewkind1' + '\\paperw%d' % twips(self.paper.get_size().get_width()) + '\\paperh%d' % twips(self.paper.get_size().get_height()) + '\\margl%d' % twips(self.paper.get_left_margin()) + '\\margr%d' % twips(self.paper.get_right_margin()) + '\\margt%d' % twips(self.paper.get_top_margin()) + '\\margb%d' % twips(self.paper.get_bottom_margin()) + '\\widowctl\n' ) self.in_table = 0 self.text = "" #-------------------------------------------------------------------- # # Write the closing brace, and close the file. # #-------------------------------------------------------------------- def close(self): self.f.write('}\n') self.f.close() #-------------------------------------------------------------------- # # Force a section page break # #-------------------------------------------------------------------- def end_page(self): self.f.write('\\sbkpage\n') #-------------------------------------------------------------------- # # Starts a paragraph. Instead of using a style sheet, generate the # the style for each paragraph on the fly. Not the ideal, but it # does work. 
# #-------------------------------------------------------------------- def start_paragraph(self,style_name,leader=None): self.opened = 0 style_sheet = self.get_style_sheet() p = style_sheet.get_paragraph_style(style_name) # build font information f = p.get_font() size = f.get_size()*2 bgindex = self.color_map[p.get_background_color()] fgindex = self.color_map[f.get_color()] if f.get_type_face() == FONT_SERIF: self.font_type = '\\f0' else: self.font_type = '\\f1' self.font_type += '\\fs%d\\cf%d\\cb%d' % (size,fgindex,bgindex) if f.get_bold(): self.font_type += "\\b" if f.get_underline(): self.font_type += "\\ul" if f.get_italic(): self.font_type += "\\i" # build paragraph information if not self.in_table: self.f.write('\\pard') if p.get_alignment() == PARA_ALIGN_RIGHT: self.f.write('\\qr') elif p.get_alignment() == PARA_ALIGN_CENTER: self.f.write('\\qc') self.f.write( '\\ri%d' % twips(p.get_right_margin()) + '\\li%d' % twips(p.get_left_margin()) + '\\fi%d' % twips(p.get_first_indent()) ) if p.get_alignment() == PARA_ALIGN_JUSTIFY: self.f.write('\\qj') if p.get_padding(): self.f.write('\\sa%d' % twips(p.get_padding()/2.0)) if p.get_top_border(): self.f.write('\\brdrt\\brdrs') if p.get_bottom_border(): self.f.write('\\brdrb\\brdrs') if p.get_left_border(): self.f.write('\\brdrl\\brdrs') if p.get_right_border(): self.f.write('\\brdrr\\brdrs') if p.get_first_indent(): self.f.write('\\fi%d' % twips(p.get_first_indent())) if p.get_left_margin(): self.f.write('\\li%d' % twips(p.get_left_margin())) if p.get_right_margin(): self.f.write('\\ri%d' % twips(p.get_right_margin())) if leader: self.opened = 1 self.f.write('\\tx%d' % twips(p.get_left_margin())) self.f.write('{%s ' % self.font_type) self.write_text(leader) self.f.write(self.text) self.text = "" self.f.write('\\tab}') self.opened = 0 #-------------------------------------------------------------------- # # Ends a paragraph. Care has to be taken to make sure that the # braces are closed properly. The self.opened flag is used to indicate # if braces are currently open. If the last write was the end of # a bold-faced phrase, braces may already be closed. # #-------------------------------------------------------------------- def end_paragraph(self): # FIXME: I don't understand why no end paragraph marker is output when # we are inside a table. Since at least version 3.2.2, this seems to mean that # there is no new paragraph after the first line of a table entry. # For example in the birth cell, the first paragraph should be the # description (21 Jan 1900 in London); if there is a note following this, # there is no newline between the description and the note. 
if not self.in_table: self.f.write(self.text) LOG.debug("end_paragraph: opened: %d write: %s" % (self.opened, self.text + '}' if self.opened else "" + "newline")) if self.opened: self.f.write('}') self.opened = 0 self.f.write('\n\\par') self.text = "" else: if self.text == "": self.write_text(" ") self.text += '}' #-------------------------------------------------------------------- # # Inserts a manual page break # #-------------------------------------------------------------------- def page_break(self): self.f.write('\\page\n') #-------------------------------------------------------------------- # # Starts boldfaced text, enclosed the braces # #-------------------------------------------------------------------- def start_bold(self): LOG.debug("start_bold: opened: %d saved text: %s" % (self.opened, '}' if self.opened else "" + '{%s\\b ' % self.font_type)) if self.opened: self.text += '}' self.text += '{%s\\b ' % self.font_type self.opened = 1 #-------------------------------------------------------------------- # # Ends boldfaced text, closing the braces # #-------------------------------------------------------------------- def end_bold(self): LOG.debug("end_bold: opened: %d saved text: %s" % (self.opened, self.text + '}')) if not self.opened == 1: print(self.opened) raise RuntimeError self.opened = 0 self.text += '}' def start_superscript(self): self.text += '{{\\*\\updnprop5801}\\up10 ' def end_superscript(self): self.text += '}' #-------------------------------------------------------------------- # # Start a table. Grab the table style, and store it. Keep a flag to # indicate that we are in a table. This helps us deal with paragraphs # internal to a table. RTF does not require anything to start a # table, since a table is treated as a bunch of rows. # #-------------------------------------------------------------------- def start_table(self, name,style_name): self.in_table = 1 styles = self.get_style_sheet() self.tbl_style = styles.get_table_style(style_name) #-------------------------------------------------------------------- # # End a table. Turn off the table flag # #-------------------------------------------------------------------- def end_table(self): self.in_table = 0 #-------------------------------------------------------------------- # # Start a row. RTF uses the \trowd to start a row. RTF also specifies # all the cell data after it has specified the cell definitions for # the row. Therefore it is necessary to keep a list of cell contents # that is to be written after all the cells are defined. # #-------------------------------------------------------------------- def start_row(self): self.contents = [] self.cell = 0 self.prev = 0 self.cell_percent = 0.0 self.f.write('\\trowd\n') #-------------------------------------------------------------------- # # End a row. Write the cell contents, separated by the \cell marker, # then terminate the row # #-------------------------------------------------------------------- def end_row(self): self.f.write('{') for line in self.contents: self.f.write(line) self.f.write('\\cell ') self.f.write('}\\pard\\intbl\\row\n') #-------------------------------------------------------------------- # # Start a cell. Dump out the cell specifics, such as borders. Cell # widths are kind of interesting. RTF doesn't specify how wide a cell # is, but rather where it's right edge is in relationship to the # left margin. This means that each cell is the cumlative of the # previous cells plus its own width. 
    #
    #--------------------------------------------------------------------
    def start_cell(self, style_name, span=1):
        styles = self.get_style_sheet()
        s = styles.get_cell_style(style_name)
        self.remain = span - 1
        if s.get_top_border():
            self.f.write('\\clbrdrt\\brdrs\\brdrw10\n')
        if s.get_bottom_border():
            self.f.write('\\clbrdrb\\brdrs\\brdrw10\n')
        if s.get_left_border():
            self.f.write('\\clbrdrl\\brdrs\\brdrw10\n')
        if s.get_right_border():
            self.f.write('\\clbrdrr\\brdrs\\brdrw10\n')
        table_width = float(self.paper.get_usable_width())
        for cell in range(self.cell, self.cell + span):
            self.cell_percent += float(self.tbl_style.get_column_width(cell))
        cell_width = twips((table_width * self.cell_percent) / 100.0)
        self.f.write('\\cellx%d\\pard\\intbl\n' % cell_width)
        self.cell += 1

    #--------------------------------------------------------------------
    #
    # End a cell. Save the current text in the content lists, since data
    # must be saved until all cells are defined.
    #
    #--------------------------------------------------------------------
    def end_cell(self):
        self.contents.append(self.text)
        self.text = ""

    #--------------------------------------------------------------------
    #
    # Add a photo. Embed the photo in the document. Use the Python
    # imaging library to load and scale the photo. The image is converted
    # to JPEG, since it is smaller, and supported by RTF. The data is
    # dumped as a string of HEX numbers.
    #
    #--------------------------------------------------------------------
    def add_media_object(self, name, pos, x_cm, y_cm, alt='', style_name=None, crop=None):
        nx, ny = image_size(name)
        if (nx, ny) == (0, 0):
            return

        (act_width, act_height) = image_actual_size(x_cm, y_cm, nx, ny)
        act_width = twips(act_width)
        act_height = twips(act_height)

        size = [act_width, act_height]
        buf = resize_to_jpeg_buffer(name, size, crop=crop)
        act_width = size[0]   # In case it changed because of cropping or keeping the ratio
        act_height = size[1]

        self.f.write('{\\*\\shppict{\\pict\\jpegblip')
        self.f.write('\\picwgoal%d\\pichgoal%d\n' % (act_width, act_height))
        index = 1
        for i in buf:
            self.f.write('%02x' % ord(i))
            if index % 32 == 0:
                self.f.write('\n')
            index = index + 1
        self.f.write('}}\\par\n')
        if len(alt):
            self.f.write('%s\n\\par\n' % '\\par'.join(alt))

    def write_styled_note(self, styledtext, format, style_name,
                          contains_html=False, links=False):
        """
        Convenience function to write a styledtext to the RTF doc.

        styledtext : assumed a StyledText object to write
        format : = 0 : Flowed, = 1 : Preformatted
        style_name : name of the style to use for default presentation
        contains_html: bool, the backend should not check if html is present.
            If contains_html=True, then the textdoc is free to handle that in
            some way. Eg, a textdoc could remove all tags, or could make sure
            a link is clickable. RTFDoc prints the html without handling it.
        links: bool, make URLs clickable if True
        """
        text = str(styledtext)
        self.start_paragraph(style_name)
        linenb = 1
        for line in text.split('\n'):
            [line, sigcount] = process_spaces(line, format)
            if sigcount == 0:
                if self.in_table:
                    # Add LF when in table as in indiv_complete report
                    self.write_text('\n')
                self.end_paragraph()
                self.start_paragraph(style_name)
                linenb = 1
            else:
                if linenb > 1:
                    self.write_text('\\line ')
                self.write_text(line, links=links)
                linenb += 1
        # FIXME: I don't understand why these newlines are necessary.
        # It may be related to the behaviour of end_paragraph inside tables, and
        # write_text converting \n to end paragraph.
        # This code prevents the whole document going wrong, but seems to
        # produce an extra paragraph mark at the end of each table cell.
        if self.in_table:
            # Add LF when in table as in indiv_complete report
            self.write_text('\n')
        self.end_paragraph()

    #--------------------------------------------------------------------
    #
    # Writes text. If braces are not currently open, open them. Loop
    # character by character (terribly inefficient, but it works). If a
    # character is 8 bit (>127), convert it to a hex representation in
    # the form of \'XX. Make sure to escape braces.
    #
    #--------------------------------------------------------------------
    def write_text(self, text, mark=None, links=False):
        # Convert to unicode, just in case it's not. Fix of bug 2449.
        text = cuni(text)
        text = text.replace('\n', '\n\\par ')
        LOG.debug("write_text: opened: %d input text: %s" %
                 (self.opened, text))
        if self.opened == 0:
            self.opened = 1
            self.text += '{%s ' % self.font_type

        for i in text:
            if ord(i) > 127:
                if ord(i) < 256:
                    self.text += '\\\'%2x' % ord(i)
                else:
                    # If (uni)code with more than 8 bits:
                    # RTF requires values in decimal, not hex.
                    self.text += '\\uc1\\u%d\\uc0' % ord(i)
            elif i == '{' or i == '}':
                self.text += '\\%s' % i
            else:
                self.text += i

        if links:
            import re
            self.text = re.sub(URL_PATTERN, _CLICKABLE, self.text)

        LOG.debug("write_text, exit: opened: %d saved text: %s" %
                 (self.opened, self.text))

def process_spaces(line, format):
    """
    Function to process spaces in text lines for flowed and pre-formatted notes.

    line : text to process
    format : = 0 : Flowed, = 1 : Preformatted

    If the text is flowed (format==0), then leading spaces are removed,
    and multiple spaces are reduced to one.
    If the text is pre-formatted (format==1), then all spaces are preserved.

    Note that xml is just treated like any other text, because it will be
    from the original note, and it is just printed, not interpreted.

    Returns the processed text, and the number of significant
    (i.e. non-white-space) chars.
    """
    txt = ""
    xml = False
    space = False
    sigcount = 0
    # We loop through every character, which is very inefficient, but an
    # attempt to use a regex replace didn't always work.
    for char in line:
        if char == " " or char == "\t":
            if format == 1:
                txt += char
            elif format == 0 and sigcount == 0:
                pass
            elif format == 0 and not space:
                space = True
                txt += char
            elif format == 0 and space:
                pass
        else:
            sigcount += 1
            space = False
            txt += char
    return [txt, sigcount]
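#--------------------------------------------------------------------
#
# Illustrative sketch only, not part of the original module: a quick,
# self-contained demonstration of how process_spaces() treats flowed
# (format=0) versus pre-formatted (format=1) text. The sample strings
# below are assumptions chosen purely for illustration.
#
#--------------------------------------------------------------------
if __name__ == "__main__":
    # Flowed: leading spaces are dropped and runs of spaces collapse to one.
    print(process_spaces("   a  b   c", 0))   # -> ['a b c', 3]
    # Pre-formatted: every space is preserved exactly as entered.
    print(process_spaces("   a  b   c", 1))   # -> ['   a  b   c', 3]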
gpl-2.0
2,147,923,212,365,953,000
38.198895
91
0.477097
false
taedla01/MissionPlanner
Lib/dumbdbm.py
63
9070
"""A dumb and slow but simple dbm clone. For database spam, spam.dir contains the index (a text file), spam.bak *may* contain a backup of the index (also a text file), while spam.dat contains the data (a binary file). XXX TO DO: - seems to contain a bug when updating... - reclaim free space (currently, space once occupied by deleted or expanded items is never reused) - support concurrent access (currently, if two processes take turns making updates, they can mess up the index) - support efficient access to large databases (currently, the whole index is read when the database is opened, and some updates rewrite the whole index) - support opening for read-only (flag = 'm') """ import os as _os import __builtin__ import UserDict _open = __builtin__.open _BLOCKSIZE = 512 error = IOError # For anydbm class _Database(UserDict.DictMixin): # The on-disk directory and data files can remain in mutually # inconsistent states for an arbitrarily long time (see comments # at the end of __setitem__). This is only repaired when _commit() # gets called. One place _commit() gets called is from __del__(), # and if that occurs at program shutdown time, module globals may # already have gotten rebound to None. Since it's crucial that # _commit() finish successfully, we can't ignore shutdown races # here, and _commit() must not reference any globals. _os = _os # for _commit() _open = _open # for _commit() def __init__(self, filebasename, mode): self._mode = mode # The directory file is a text file. Each line looks like # "%r, (%d, %d)\n" % (key, pos, siz) # where key is the string key, pos is the offset into the dat # file of the associated value's first byte, and siz is the number # of bytes in the associated value. self._dirfile = filebasename + _os.extsep + 'dir' # The data file is a binary file pointed into by the directory # file, and holds the values associated with keys. Each value # begins at a _BLOCKSIZE-aligned byte offset, and is a raw # binary 8-bit string value. self._datfile = filebasename + _os.extsep + 'dat' self._bakfile = filebasename + _os.extsep + 'bak' # The index is an in-memory dict, mirroring the directory file. self._index = None # maps keys to (pos, siz) pairs # Mod by Jack: create data file if needed try: f = _open(self._datfile, 'r') except IOError: f = _open(self._datfile, 'w') self._chmod(self._datfile) f.close() self._update() # Read directory file into the in-memory index dict. def _update(self): self._index = {} try: f = _open(self._dirfile) except IOError: pass else: for line in f: line = line.rstrip() key, pos_and_siz_pair = eval(line) self._index[key] = pos_and_siz_pair f.close() # Write the index dict to the directory file. The original directory # file (if any) is renamed with a .bak extension first. If a .bak # file currently exists, it's deleted. def _commit(self): # CAUTION: It's vital that _commit() succeed, and _commit() can # be called from __del__(). Therefore we must never reference a # global in this routine. 
if self._index is None: return # nothing to do try: self._os.unlink(self._bakfile) except self._os.error: pass try: self._os.rename(self._dirfile, self._bakfile) except self._os.error: pass f = self._open(self._dirfile, 'w') self._chmod(self._dirfile) for key, pos_and_siz_pair in self._index.iteritems(): f.write("%r, %r\n" % (key, pos_and_siz_pair)) f.close() sync = _commit def __getitem__(self, key): pos, siz = self._index[key] # may raise KeyError f = _open(self._datfile, 'rb') f.seek(pos) dat = f.read(siz) f.close() return dat # Append val to the data file, starting at a _BLOCKSIZE-aligned # offset. The data file is first padded with NUL bytes (if needed) # to get to an aligned offset. Return pair # (starting offset of val, len(val)) def _addval(self, val): f = _open(self._datfile, 'rb+') f.seek(0, 2) pos = int(f.tell()) npos = ((pos + _BLOCKSIZE - 1) // _BLOCKSIZE) * _BLOCKSIZE f.write('\0'*(npos-pos)) pos = npos f.write(val) f.close() return (pos, len(val)) # Write val to the data file, starting at offset pos. The caller # is responsible for ensuring that there's enough room starting at # pos to hold val, without overwriting some other value. Return # pair (pos, len(val)). def _setval(self, pos, val): f = _open(self._datfile, 'rb+') f.seek(pos) f.write(val) f.close() return (pos, len(val)) # key is a new key whose associated value starts in the data file # at offset pos and with length siz. Add an index record to # the in-memory index dict, and append one to the directory file. def _addkey(self, key, pos_and_siz_pair): self._index[key] = pos_and_siz_pair f = _open(self._dirfile, 'a') self._chmod(self._dirfile) f.write("%r, %r\n" % (key, pos_and_siz_pair)) f.close() def __setitem__(self, key, val): if not type(key) == type('') == type(val): raise TypeError, "keys and values must be strings" if key not in self._index: self._addkey(key, self._addval(val)) else: # See whether the new value is small enough to fit in the # (padded) space currently occupied by the old value. pos, siz = self._index[key] oldblocks = (siz + _BLOCKSIZE - 1) // _BLOCKSIZE newblocks = (len(val) + _BLOCKSIZE - 1) // _BLOCKSIZE if newblocks <= oldblocks: self._index[key] = self._setval(pos, val) else: # The new value doesn't fit in the (padded) space used # by the old value. The blocks used by the old value are # forever lost. self._index[key] = self._addval(val) # Note that _index may be out of synch with the directory # file now: _setval() and _addval() don't update the directory # file. This also means that the on-disk directory and data # files are in a mutually inconsistent state, and they'll # remain that way until _commit() is called. Note that this # is a disaster (for the database) if the program crashes # (so that _commit() never gets called). def __delitem__(self, key): # The blocks used by the associated value are lost. del self._index[key] # XXX It's unclear why we do a _commit() here (the code always # XXX has, so I'm not changing it). _setitem__ doesn't try to # XXX keep the directory file in synch. Why should we? Or # XXX why shouldn't __setitem__? 
self._commit() def keys(self): return self._index.keys() def has_key(self, key): return key in self._index def __contains__(self, key): return key in self._index def iterkeys(self): return self._index.iterkeys() __iter__ = iterkeys def __len__(self): return len(self._index) def close(self): self._commit() self._index = self._datfile = self._dirfile = self._bakfile = None __del__ = close def _chmod (self, file): if hasattr(self._os, 'chmod'): self._os.chmod(file, self._mode) def open(file, flag=None, mode=0666): """Open the database file, filename, and return corresponding object. The flag argument, used to control how the database is opened in the other DBM implementations, is ignored in the dumbdbm module; the database is always opened for update, and will be created if it does not exist. The optional mode argument is the UNIX mode of the file, used only when the database has to be created. It defaults to octal code 0666 (and will be modified by the prevailing umask). """ # flag argument is currently ignored # Modify mode depending on the umask try: um = _os.umask(0) _os.umask(um) except AttributeError: pass else: # Turn off any bits that are set in the umask mode = mode & (~um) return _Database(file, mode)
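# Illustrative sketch only, not part of the original module: minimal usage
# of this dumb dbm clone. The base name 'spam' and the key/value strings
# are assumptions for demonstration; as the module docstring explains, this
# creates spam.dir (a text index) and spam.dat (block-aligned binary data).
if __name__ == '__main__':
    db = open('spam')              # flag is ignored; always opened for update
    db['key1'] = 'first value'     # appended to spam.dat at a 512-byte boundary
    db['key2'] = 'second value'    # spam.dir gains a "%r, (pos, siz)" line per key
    print(db['key1'])              # -> 'first value'
    db.close()                     # commits the in-memory index to spam.dir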
gpl-3.0
8,398,417,208,798,621,000
34.28
78
0.574642
false
cmcdowell/weatherpy
weatherpy/wind.py
1
1807
class Wind(object):
    """
    Current forecast information about the wind.

    Attributes:
        chill: Wind chill in degrees (integer). If a value for wind chill
            is not found, chill will be None.
        direction: Wind direction in degrees (integer). If a value for
            wind direction is not found, direction will be None.
        speed: Wind speed in units specified in the speed attribute of the
            Units class (float). If a value for wind speed is not found,
            speed will be None.
    """

    def __init__(self, wind):
        try:
            self.chill = int(wind['chill'])
        except ValueError:
            self.chill = None

        try:
            self.direction = int(wind['direction'])
        except ValueError:
            self.direction = None

        try:
            self.speed = float(wind['speed'])
        except ValueError:
            self.speed = None

    def cardinal_direction(self):
        """
        Returns the cardinal direction of the wind as a string.
        Possible returned values are N, E, S, W, and None.

        315 degrees to 45 degrees exclusive -> N
        45 degrees to 135 degrees exclusive -> E
        135 degrees to 225 degrees exclusive -> S
        225 degrees to 315 degrees exclusive -> W
        None if no direction found.
        """
        if self.direction is None:
            return None
        if self.direction > 360 or self.direction < 0:
            raise Exception('Direction out of range')

        if 315 <= self.direction <= 360 or 0 <= self.direction < 45:
            return 'N'
        elif 45 <= self.direction < 135:
            return 'E'
        elif 135 <= self.direction < 225:
            return 'S'
        elif 225 <= self.direction < 315:
            return 'W'
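# Illustrative sketch only, not part of the original module: how the Wind
# class above might be used. The input dict mirrors the keys the constructor
# reads ('chill', 'direction', 'speed'); the sample values are assumptions.
if __name__ == '__main__':
    wind = Wind({'chill': '10', 'direction': '90', 'speed': '15'})
    print(wind.chill)                 # 10
    print(wind.speed)                 # 15.0 (speed is parsed as a float)
    print(wind.cardinal_direction())  # 'E' (90 falls in the 45-135 range)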
mit
-1,106,267,896,262,292,900
31.267857
80
0.563918
false
WURFL/wurfl-cloud-client-python
setup.py
1
1722
from setuptools import setup import os.path __license__ = """ Copyright (c) 2015 ScientiaMobile Inc. The WURFL Cloud Client is intended to be used in both open-source and commercial environments. To allow its use in as many situations as possible, the WURFL Cloud Client is dual-licensed. You may choose to use the WURFL Cloud Client under either the GNU GENERAL PUBLIC LICENSE, Version 2.0, or the MIT License. Refer to the COPYING.txt file distributed with this package. """ __copyright__ = "2015 ScientiaMobile Incorporated, All Rights Reserved" __version__ = "1.1.1" def read(fname): return open(os.path.join(os.path.dirname(__file__), fname)).read() #doc = __doc__.strip() setup (name="wurfl-cloud", version=__version__, author="ScientiaMobile", author_email="[email protected]", license=__license__, packages=['wurfl_cloud', 'wurfl_cloud.cache'], #description=doc, #long_description=read('doc/README'), platforms="All", classifiers=['Development Status :: 5 - Production/Stable', 'Environment :: Console', 'Environment :: Web Environment', 'Intended Audience :: Developers', 'Intended Audience :: Telecommunications Industry', 'Natural Language :: English', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Topic :: Database :: Front-Ends', 'Topic :: Internet :: WAP', 'Topic :: Software Development :: Libraries :: Python Modules', 'Topic :: Utilities' ])
gpl-2.0
-1,736,531,335,121,346,800
34.875
83
0.5964
false
richardnpaul/FWL-Website
lib/python2.7/site-packages/django/contrib/sitemaps/tests/urls/http.py
97
1691
from datetime import datetime from django.conf.urls import patterns, url from django.contrib.sitemaps import Sitemap, GenericSitemap, FlatPageSitemap, views from django.contrib.auth.models import User from django.views.decorators.cache import cache_page from django.contrib.sitemaps.tests.base import TestModel class SimpleSitemap(Sitemap): changefreq = "never" priority = 0.5 location = '/location/' lastmod = datetime.now() def items(self): return [object()] simple_sitemaps = { 'simple': SimpleSitemap, } generic_sitemaps = { 'generic': GenericSitemap({'queryset': TestModel.objects.all()}), } flatpage_sitemaps = { 'flatpages': FlatPageSitemap, } urlpatterns = patterns('django.contrib.sitemaps.views', (r'^simple/index\.xml$', 'index', {'sitemaps': simple_sitemaps}), (r'^simple/custom-index\.xml$', 'index', {'sitemaps': simple_sitemaps, 'template_name': 'custom_sitemap_index.xml'}), (r'^simple/sitemap-(?P<section>.+)\.xml$', 'sitemap', {'sitemaps': simple_sitemaps}), (r'^simple/sitemap\.xml$', 'sitemap', {'sitemaps': simple_sitemaps}), (r'^simple/custom-sitemap\.xml$', 'sitemap', {'sitemaps': simple_sitemaps, 'template_name': 'custom_sitemap.xml'}), (r'^generic/sitemap\.xml$', 'sitemap', {'sitemaps': generic_sitemaps}), (r'^flatpages/sitemap\.xml$', 'sitemap', {'sitemaps': flatpage_sitemaps}), url(r'^cached/index\.xml$', cache_page(1)(views.index), {'sitemaps': simple_sitemaps, 'sitemap_url_name': 'cached_sitemap'}), url(r'^cached/sitemap-(?P<section>.+)\.xml', cache_page(1)(views.sitemap), {'sitemaps': simple_sitemaps}, name='cached_sitemap') )
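# Illustrative sketch only, not part of the original test URL module: with
# SimpleSitemap registered at /simple/sitemap.xml above, the rendered XML is
# expected to contain a single <url> entry holding <loc>.../location/</loc>,
# <changefreq>never</changefreq> and <priority>0.5</priority>. The helper
# below is a hypothetical smoke check using Django's test client; it assumes
# a settings module configured to route requests through these URL patterns.
def _sitemap_smoke_check():
    from django.test import Client
    response = Client().get('/simple/sitemap.xml')
    assert response.status_code == 200
    assert '<changefreq>never</changefreq>' in response.content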
gpl-3.0
100,383,664,389,942,800
35.76087
84
0.668835
false