repo_name (stringlengths 5-100) | path (stringlengths 4-299) | copies (stringclasses, 990 values) | size (stringlengths 4-7) | content (stringlengths 666-1.03M) | license (stringclasses, 15 values) | hash (int64, -9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean (float64, 3.17-100) | line_max (int64, 7-1k) | alpha_frac (float64, 0.25-0.98) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
matthiasdiener/spack | lib/spack/spack/test/cmd/list.py | 3 | 2522 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import pytest
from spack.main import SpackCommand
list = SpackCommand('list')
def test_list():
output = list()
assert 'cloverleaf3d' in output
assert 'hdf5' in output
def test_list_filter():
output = list('py-*')
assert 'py-numpy' in output
assert 'perl-file-copy-recursive' not in output
output = list('py-')
assert 'py-numpy' in output
assert 'perl-file-copy-recursive' in output
@pytest.mark.maybeslow
def test_list_search_description():
output = list('--search-description', 'xml')
assert 'expat' in output
def test_list_tags():
output = list('--tags', 'proxy-app')
assert 'cloverleaf3d' in output
assert 'hdf5' not in output
def test_list_format_name_only():
output = list('--format', 'name_only')
assert 'cloverleaf3d' in output
assert 'hdf5' in output
@pytest.mark.maybeslow
def test_list_format_rst():
output = list('--format', 'rst')
assert '.. _cloverleaf3d:' in output
assert '.. _hdf5:' in output
@pytest.mark.maybeslow
def test_list_format_html():
output = list('--format', 'html')
assert '<div class="section" id="cloverleaf3d">' in output
assert '<h1>cloverleaf3d' in output
assert '<div class="section" id="hdf5">' in output
assert '<h1>hdf5' in output
| lgpl-2.1 | -6,654,112,589,072,553,000 | 30.525 | 78 | 0.664155 | false |
dmlc/tvm | python/tvm/topi/arm_cpu/depthwise_conv2d.py | 1 | 24416 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable
"""Depthwise convolution schedule for ARM CPU"""
import tvm
from tvm import te
from tvm import autotvm
from tvm.autotvm.task.space import SplitEntity, OtherOptionEntity
from .. import nn
from ..utils import traverse_inline, get_const_tuple, get_const_int
from ..nn.utils import get_pad_tuple
from .tensor_intrin import smlal_int16_int32
from .arm_utils import is_aarch64_arm
@autotvm.register_topi_compute("depthwise_conv2d_nchw.arm_cpu")
def depthwise_conv2d_nchw(_, data, kernel, strides, padding, dilation, out_dtype):
"""Compute depthwise_conv2d with NCHW layout"""
return nn.depthwise_conv2d_nchw(data, kernel, strides, padding, dilation, out_dtype)
@autotvm.register_topi_schedule("depthwise_conv2d_nchw.arm_cpu")
def schedule_depthwise_conv2d_nchw(cfg, outs):
"""Schedule depthwise conv2d
Parameters
----------
cfg: ConfigEntity
The configuration of this template
outs: Array of Tensor
The computation graph description of depthwise convolution2d
in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for depthwise_conv2d nchw.
"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
def _schedule(cfg, s, data, data_pad, kernel, output):
A, B, C = data, kernel, output
s[data_pad].compute_inline()
##### space definition begin #####
n, c, h, w = s[output].op.axis
_, vc = cfg.define_split("tile_c", c, num_outputs=2)
_, vh = cfg.define_split("tile_h", h, num_outputs=2)
_, vw = cfg.define_split("tile_w", w, num_outputs=2)
cfg.define_annotate("ann", [vh, vw, vc], policy="try_unroll_vec")
# fallback support
if cfg.is_fallback:
ref_log = autotvm.tophub.load_reference_log(
"arm_cpu", "rk3399", "depthwise_conv2d_nchw.arm_cpu"
)
cfg.fallback_with_reference_log(ref_log)
##### space definition end #####
# park data to vector form [n, c, h, w] -> [n, C, h, w, VC]
A0 = s.cache_read(data_pad, "global", C)
n, c, h, w = s[A0].op.axis
c, vc = cfg["tile_c"].apply(s, A0, c)
s[A0].reorder(n, c, h, w, vc)
A1 = s.cache_write(A0, "global")
s[A0].compute_inline()
# park kernel to vector form [co, ci, kh, kw] -> [CO, ci, kh, kw, VC]
B0 = s.cache_read(B, "global", C)
c, m, h, w = s[B0].op.axis
        c, vc = cfg["tile_c"].apply(s, B0, c)
s[B0].reorder(c, m, h, w, vc)
B1 = s.cache_write(B0, "global")
s[B0].compute_inline()
n, c, h, w = s[C].op.axis
        c, vc = cfg["tile_c"].apply(s, C, c)
s[C].reorder(n, c, h, w, vc)
# depthwise conv
C0 = s.cache_write(C, "global")
_, c, h, w, vc = s[C0].op.axis
dh, dw = s[C0].op.reduce_axis
oh, ih = cfg["tile_h"].apply(s, C0, h)
ow, iw = cfg["tile_w"].apply(s, C0, w)
s[C0].reorder(c, oh, ow, dh, dw, ih, iw, vc)
s[A1].compute_at(s[C0], oh)
# try unroll and vectorization
cfg["ann"].apply(
s,
C0,
[ih, iw, vc],
axis_lens=[cfg["tile_h"].size[-1], cfg["tile_w"].size[-1], cfg["tile_c"].size[-1]],
max_unroll=16,
cfg=cfg,
)
# fusion
if C.op not in s.outputs:
s[C].compute_inline()
# mark parallel
last = outs[0]
n, c, h, w = s[last].op.axis
s[last].parallel(c)
n, c, h, w, vc = s[C0].op.axis
s[C0].parallel(c)
c, m, h, w, vc = s[B1].op.axis
s[B1].parallel(c)
return s
def _callback(op):
if op.tag == "depthwise_conv2d_nchw":
output = op.output(0)
kernel = op.input_tensors[1]
data = op.input_tensors[0]
data_pad = None
if isinstance(data.op, tvm.te.ComputeOp) and "pad" in data.op.tag:
data_pad = data
data = data_pad.op.input_tensors[0]
_schedule(cfg, s, data, data_pad, kernel, output)
traverse_inline(s, outs[0].op, _callback)
return s
# TODO:
# This schedule has incorrect result on some hardware platforms (like NV Jetson TX2)
# Let us comment it out but not remove.
# see discussion:
# https://discuss.tvm.apache.org/t/autotuner-incorrect-result-after-tuning-mobilenetv2-on-arm-cpu/6088
@autotvm.register_topi_compute("depthwise_conv2d_nchw_spatial_pack.arm_cpu")
def depthwise_conv2d_nchw_spatial_pack(cfg, data, kernel, strides, padding, dilation, out_dtype):
"""TOPI compute callback for depthwise_conv2d nchw
Parameters
----------
cfg: ConfigEntity
The config for this template
data : tvm.te.Tensor
4-D with shape [batch, in_channel, in_height, in_width]
kernel : tvm.te.Tensor
4-D with shape [num_filter, multiplier, filter_height, filter_width] or
pre-packed 5-D with shape [num_filter_chunk, multiplier, filter_height,
filter_width, num_filter_block]
strides : list of two ints
[stride_height, stride_width]
padding : list of two ints
[pad_height, pad_width]
dilation : list of two ints
[dilation_height, dilation_width]
out_dtype: str
The output type. This is used for mixed precision.
Returns
-------
output : tvm.te.Tensor
4-D with shape [batch, out_channel, out_height, out_width]
"""
return _decl_spatial_pack(cfg, data, kernel, strides, padding, dilation, out_dtype, num_tile=2)
@autotvm.register_topi_compute("depthwise_conv2d_nhwc.arm_cpu")
def compute_depthwise_conv2d_nhwc(_, data, kernel, strides, padding, dilation, out_dtype):
"""TOPI compute callback for depthwise_conv2d nhwc
Parameters
----------
cfg: ConfigEntity
The config for this template
data : tvm.te.Tensor
4-D with shape [batch, in_height, in_width, in_channel]
kernel : tvm.te.Tensor
4-D with shape [filter_height, filter_width, in_channel, channel_multiplier]
strides : list of two ints
[stride_height, stride_width]
padding : list of two ints
[pad_height, pad_width]
dilation : list of two ints
[dilation_height, dilation_width]
out_dtype: str
The output type. This is used for mixed precision.
Returns
-------
output : tvm.te.Tensor
4-D with shape [batch, out_height, out_width, out_channel]
"""
out_dtype = out_dtype or data.dtype
N, IH, IW, IC = get_const_tuple(data.shape)
if isinstance(dilation, int):
dilation_h = dilation_w = dilation
else:
dilation_h, dilation_w = dilation
KH, KW, IC, channel_multiplier = get_const_tuple(kernel.shape)
dilated_kernel_h = (KH - 1) * dilation_h + 1
dilated_kernel_w = (KW - 1) * dilation_w + 1
pad_top, pad_left, pad_down, pad_right = get_pad_tuple(
padding, (dilated_kernel_h, dilated_kernel_w)
)
HSTR, WSTR = strides if isinstance(strides, (tuple, list)) else (strides, strides)
OH = (IH + pad_top + pad_down - dilated_kernel_h) // HSTR + 1
OW = (IW + pad_left + pad_right - dilated_kernel_w) // WSTR + 1
if pad_top or pad_left or pad_down or pad_right:
data_pad = nn.pad(
data, [0, pad_top, pad_left, 0], [0, pad_down, pad_right, 0], name="data_pad"
)
else:
data_pad = data
output_shape = (N, OH, OW, IC * channel_multiplier)
idxdiv = tvm.tir.indexdiv
idxmod = tvm.tir.indexmod
reduce_h = te.reduce_axis((0, KH), name="reduce_h")
reduce_w = te.reduce_axis((0, KW), name="reduce_w")
out = te.compute(
output_shape,
lambda n, h, w, c: te.sum(
data_pad[
n,
HSTR * h + dilation_h * reduce_h,
w * WSTR + reduce_w * dilation_w,
idxdiv(c, channel_multiplier),
].astype(out_dtype)
* kernel[
reduce_h, reduce_w, idxdiv(c, channel_multiplier), idxmod(c, channel_multiplier)
].astype(out_dtype),
axis=[reduce_h, reduce_w],
),
name="depthwise_conv2d_nhwc_output",
)
return out
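# Illustrative note (added for clarity; not part of the original TOPI source).
# The NHWC compute above pairs data of shape [batch, in_height, in_width, in_channel]
# with a kernel of shape [filter_height, filter_width, in_channel, channel_multiplier].
# With hypothetical shapes data=(1, 56, 56, 32), kernel=(3, 3, 32, 1), strides=(1, 1),
# padding=(1, 1, 1, 1) and dilation=(1, 1), the output height follows the formula used
# above: OH = (56 + 1 + 1 - 3) // 1 + 1 = 56, giving an output of shape (1, 56, 56, 32).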
@autotvm.register_topi_schedule("depthwise_conv2d_nhwc.arm_cpu")
def schedule_depthwise_conv2d_nhwc(cfg, outs):
"""Create the schedule for depthwise_conv2d_nchw_spatial_pack"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
out = outs[0]
##### space definition begin #####
n, h, w, c = s[out].op.axis
# Split the number of input/output channels
cfg.define_split("tile_c", c, num_outputs=2)
# Split the height of the convolution
_, hi = cfg.define_split("tile_h", h, num_outputs=2)
# Split the width of the convolution
_, wi = cfg.define_split("tile_w", w, num_outputs=2)
# Additional out (e.g., requantization, bias addition, etc..)
# 0: locate the output on the second last axis of the main compuation
# 1: locate the output closest to the main computation
cfg.define_knob("locate_output", [0, 1])
# Determine if we should unroll the computation of the inner tile
cfg.define_knob("unroll_tile", [True, False])
# fallback support
if cfg.is_fallback:
cfg["tile_c"] = SplitEntity([-1, 8])
cfg["tile_h"] = SplitEntity([-1, 2])
cfg["tile_w"] = SplitEntity([-1, 2])
cfg["locate_output"] = OtherOptionEntity(1)
cfg["unroll_tile"] = OtherOptionEntity(True)
##### space definition end #####
def schedule_conv(conv):
conv_data = conv.op.input_tensors[0]
kernel_data = conv.op.input_tensors[1]
in_type = conv_data.dtype
_, _, IC, channel_multiplier = get_const_tuple(kernel_data.shape)
n, w, h, c = conv.op.axis
r_h, r_w = conv.op.reduce_axis
ho, hi = cfg["tile_h"].apply(s, conv, h)
wo, wi = cfg["tile_w"].apply(s, conv, w)
co, ci = cfg["tile_c"].apply(s, conv, c)
split_val = cfg["tile_c"].size[-1]
use_tensorization = (
(in_type == "int16")
and (split_val == 8)
and (IC % split_val == 0)
and (channel_multiplier == 1)
and is_aarch64_arm()
)
data_pad_value = -1
if conv_data.name == "data_pad":
assert isinstance(conv_data.op, tvm.te.ComputeOp)
# Define a strategy for padding computation
cfg.define_knob("data_pad_strategy", [1, 2, 3])
if cfg.is_fallback:
# We cannot inline padding when tensorizing.
# So, if we can tensorize, let's compute_at the closest axis
cfg["data_pad_strategy"] = (
OtherOptionEntity(2) if use_tensorization else OtherOptionEntity(3)
)
# Compute padding on the third to last axis of the computation
if cfg["data_pad_strategy"].val == 1:
s[conv_data].vectorize(list(s[conv_data].op.axis)[-1])
s[conv_data].compute_at(s[conv], ho)
# Compute padding on the second to last axis of the computation
if cfg["data_pad_strategy"].val == 2:
s[conv_data].vectorize(list(s[conv_data].op.axis)[-1])
s[conv_data].compute_at(s[conv], wo)
# Inline padding during computation
if cfg["data_pad_strategy"].val == 3:
s[conv_data].compute_inline()
data_pad_value = cfg["data_pad_strategy"].val
if use_tensorization and data_pad_value != 3:
smlal = smlal_int16_int32()
s[conv].tensorize(ci, smlal)
else:
s[conv].vectorize(ci)
if cfg["unroll_tile"].val:
s[conv].unroll(r_h)
s[conv].unroll(r_w)
s[conv].unroll(wi)
s[conv].unroll(hi)
s[conv].reorder(n, ho, wo, co, hi, wi, r_h, r_w, ci)
fused_n_ho = s[conv].fuse(n, ho)
return fused_n_ho
def schedule_conv_out(out):
n, h, w, c = out.op.axis
co, ci = cfg["tile_c"].apply(s, out, c)
wo, wi = cfg["tile_w"].apply(s, out, w)
ho, hi = cfg["tile_h"].apply(s, out, h)
s[out].reorder(n, ho, wo, co, hi, wi, ci)
if cfg["unroll_tile"]:
s[out].unroll(wi)
s[out].unroll(hi)
if out.dtype in ["int8", "uint8"]:
# In case of quantized convolution further split the channel in batches of 4 elements
# so that we can use arm intrinsics to run fixed_point_multiplication
ci_outer, ci_inner = s[out].split(ci, 4)
s[out].vectorize(ci_inner)
s[out].unroll(ci_outer)
fused_n_ho = s[out].fuse(n, ho)
return hi, wi, fused_n_ho
def _callback(op):
if op.name == "depthwise_conv2d_nhwc_output":
conv = op.output(0)
if conv != out:
hi, wi, p_axis = schedule_conv_out(out)
schedule_conv(conv)
if cfg["locate_output"].val == 0:
s[conv].compute_at(s[out], hi)
if cfg["locate_output"].val == 1:
s[conv].compute_at(s[out], wi)
else:
p_axis = schedule_conv(out)
s[out].parallel(p_axis)
traverse_inline(s, outs[0].op, _callback)
return s
@autotvm.register_topi_schedule("depthwise_conv2d_nchw_spatial_pack.arm_cpu")
def schedule_depthwise_conv2d_nchw_spatial_pack(cfg, outs):
"""Create the schedule for depthwise_conv2d_nchw_spatial_pack"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if op.tag == "spatial_depthwise_conv2d_nchw_output":
output = op.output(0)
conv = op.input_tensors[0]
data_vec = conv.op.input_tensors[0]
kernel_vec = conv.op.input_tensors[1]
if kernel_vec.op.name == "kernel_vec":
kernel = kernel_vec.op.input_tensors[0]
else:
kernel = kernel_vec
if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
s[kernel].compute_inline()
_schedule_spatial_pack(cfg, s, data_vec, kernel_vec, conv, output, outs[0])
traverse_inline(s, outs[0].op, _callback)
return s
def _decl_spatial_pack(cfg, data, kernel, strides, padding, dilation, out_dtype, num_tile):
out_dtype = out_dtype or data.dtype
N, C, IH, IW = get_const_tuple(data.shape)
if isinstance(dilation, int):
dilation_h = dilation_w = dilation
else:
dilation_h, dilation_w = dilation
if len(kernel.shape) == 4:
pre_packed = False
C, M, KH, KW = get_const_tuple(kernel.shape)
else: # kernel tensor is pre packed
pre_packed = True
C, M, KH, KW, VC = get_const_tuple(kernel.shape)
C = C * VC
dilated_kernel_h = (KH - 1) * dilation_h + 1
dilated_kernel_w = (KW - 1) * dilation_w + 1
pad_top, pad_left, pad_down, pad_right = get_pad_tuple(
padding, (dilated_kernel_h, dilated_kernel_w)
)
HSTR, WSTR = strides if isinstance(strides, (tuple, list)) else (strides, strides)
OH = (IH + pad_top + pad_down - dilated_kernel_h) // HSTR + 1
OW = (IW + pad_left + pad_right - dilated_kernel_w) // WSTR + 1
# pack data
HPAD = pad_top + pad_down
WPAD = pad_left + pad_right
DOPAD = HPAD != 0 or WPAD != 0
if DOPAD:
data_pad = nn.pad(
data, (0, 0, pad_top, pad_left), (0, 0, pad_down, pad_right), name="data_pad"
)
else:
data_pad = data
# fallback support
# Currently, Mali schedule doesn't use it like conv2d.
if cfg.is_fallback:
ref_log = autotvm.tophub.load_reference_log(
"arm_cpu", "rk3399", "depthwise_conv2d_nchw_spatial_pack.arm_cpu"
)
cfg.fallback_with_reference_log(ref_log)
# ==================== define configuration space ====================
n, c, oh, ow = cfg.axis(N), cfg.axis(C), cfg.axis(OH), cfg.axis(OW)
kh, kw = cfg.reduce_axis(KH), cfg.reduce_axis(KW)
# Currently, Mali schedule doesn't use it like conv2d.
# Leave num_tile for possible future use of Mali schedule
if num_tile == 2: # for arm cpu
co, vc = cfg.define_split("tile_co", c, num_outputs=2)
oh, vh = cfg.define_split("tile_oh", oh, num_outputs=2)
ow, vw = cfg.define_split("tile_ow", ow, num_outputs=2)
else:
raise RuntimeError("Invalid num_tile")
cfg.define_reorder(
"reorder_0",
[n, co, oh, ow, kh, kw, vh, vw, vc],
policy="candidate",
candidate=[[n, co, oh, ow, kh, kw, vh, vw, vc], [n, co, oh, ow, kh, kw, vc, vh, vw]],
)
cfg.define_reorder(
"reorder_1",
[n, co, oh, ow, vh, vw, vc],
policy="candidate",
candidate=[
[n, co, oh, ow, vh, vw, vc],
[n, co, oh, ow, vc, vh, vw],
[n, co, oh, ow, vh, vc, vw],
],
)
cfg.define_annotate("ann_reduce", [kh, kw], policy="try_unroll")
cfg.define_annotate("ann_spatial", [vh, vw, vc], policy="try_unroll_vec")
# ====================================================================
VC = cfg["tile_co"].size[-1]
VH = cfg["tile_oh"].size[-1]
VW = cfg["tile_ow"].size[-1]
kvshape = (C // VC, M, KH, KW, VC)
ovshape = (N, C * M // VC, OH // VH, OW // VW, VH, VW, VC)
oshape = (N, C * M, OH, OW)
if dilation_h != 1 or dilation_w != 1:
# undilate input data
dvshape = (N, OH // VH, OW // VW, C, KH, KW, VH, VW)
data_vec = te.compute(
dvshape,
lambda n, h, w, c, kh, kw, vh, vw: data_pad[n][c][
(h * VH + vh) * HSTR + kh * dilation_h
][(w * VW + vw) * WSTR + kw * dilation_w],
name="data_vec_undilated",
)
else:
dvshape = (N, OH // VH, OW // VW, C, VH * HSTR + KH - 1, VW * WSTR + KW - 1)
data_vec = te.compute(
dvshape,
lambda n, h, w, c, vh, vw: data_pad[n][c][h * VH * HSTR + vh][w * VW * WSTR + vw],
name="data_vec",
)
if pre_packed:
kernel_vec = kernel
else:
kernel_vec = te.compute(
kvshape, lambda co, m, kh, kw, vc: kernel[co * VC + vc][m][kh][kw], name="kernel_vec"
)
kh = te.reduce_axis((0, KH), name="kh")
kw = te.reduce_axis((0, KW), name="kw")
idxdiv = tvm.tir.indexdiv
idxmod = tvm.tir.indexmod
if dilation_h != 1 or dilation_w != 1:
conv = te.compute(
ovshape,
lambda n, co, h, w, vh, vw, vc: te.sum(
data_vec[n, h, w, idxdiv(co * VC + vc, M), kh, kw, vh, vw].astype(out_dtype)
* kernel_vec[idxdiv(co, M), idxmod(co, M), kh, kw, vc].astype(out_dtype),
axis=[kh, kw],
),
name="depthwise_conv",
)
else:
conv = te.compute(
ovshape,
lambda n, co, h, w, vh, vw, vc: te.sum(
data_vec[n, h, w, idxdiv((co * VC + vc), M), vh * HSTR + kh, vw * WSTR + kw].astype(
out_dtype
)
* kernel_vec[idxdiv(co, M), idxmod(co, M), kh, kw, vc].astype(out_dtype),
axis=[kh, kw],
),
name="depthwise_conv",
)
output = te.compute(
oshape,
lambda n, co, h, w: conv[
n,
idxdiv(co, VC),
idxdiv(h, VH),
idxdiv(w, VW),
idxmod(h, VH),
idxmod(w, VW),
idxmod(co, VC),
],
name="output_unpack",
tag="spatial_depthwise_conv2d_nchw_output",
)
return output
def _schedule_spatial_pack(cfg, s, data_vec, kernel_vec, conv, output, last):
"""schedule implementation"""
n, co, oh, ow, vh, vw, vc = s[conv].op.axis
kh, kw = s[conv].op.reduce_axis
if data_vec.op.name == "data_vec_undilated":
_, dv_oh, dv_ow, dv_c, _, _, dv_vh, dv_vw = s[data_vec].op.axis
else:
_, dv_oh, dv_ow, dv_c, dv_vh, dv_vw = s[data_vec].op.axis
data_pad = data_vec.op.input_tensors[0]
if data_pad.op.name == "data_pad":
assert isinstance(data_pad.op, tvm.te.ComputeOp)
has_padding = True
else:
assert isinstance(data_pad.op, tvm.te.PlaceholderOp)
has_padding = False
cfg.define_knob("data_pad_inline", [0, 1, 2, 3, 4])
if cfg["data_pad_inline"].val == 1 and has_padding:
s[data_pad].compute_inline()
if cfg["data_pad_inline"].val == 2 and has_padding:
s[data_pad].vectorize(list(s[data_pad].op.axis)[-1])
if cfg["data_pad_inline"].val == 3 and has_padding:
s[data_pad].vectorize(list(s[data_pad].op.axis)[-1])
s[data_pad].compute_at(s[data_vec], dv_oh)
if cfg["data_pad_inline"].val == 4 and has_padding:
s[data_pad].vectorize(list(s[data_pad].op.axis)[-1])
s[data_pad].compute_at(s[data_vec], dv_ow)
cfg.define_knob("data_vec_inline", [0, 1, 2, 3])
if cfg["data_vec_inline"].val == 1:
s[data_vec].compute_at(s[conv], oh)
if cfg["data_vec_inline"].val == 2:
s[data_vec].compute_at(s[conv], ow)
if cfg["data_vec_inline"].val == 3:
s[data_vec].compute_at(s[conv], co)
# schedule conv
cfg["reorder_0"].apply(s, conv, [n, co, oh, ow, kh, kw, vh, vw, vc])
cfg["ann_reduce"].apply(
s,
conv,
[kh, kw],
axis_lens=[get_const_int(kh.dom.extent), get_const_int(kw.dom.extent)],
max_unroll=16,
cfg=cfg,
)
cfg["ann_spatial"].apply(
s,
conv,
[vh, vw, vc],
axis_lens=[cfg["tile_oh"].size[-1], cfg["tile_ow"].size[-1], cfg["tile_co"].size[-1]],
max_unroll=16,
cfg=cfg,
)
# schedule fusion
n, co, h, w = s[last].op.axis
co, vc = cfg["tile_co"].apply(s, last, co)
oh, vh = cfg["tile_oh"].apply(s, last, h)
ow, vw = cfg["tile_ow"].apply(s, last, w)
cfg["reorder_1"].apply(s, last, [n, co, oh, ow, vh, vw, vc])
if last != output:
s[output].compute_inline()
cfg["ann_spatial"].apply(
s,
last,
[vh, vw, vc],
axis_lens=[cfg["tile_oh"].size[-1], cfg["tile_ow"].size[-1], cfg["tile_co"].size[-1]],
max_unroll=16,
cfg=cfg,
)
else:
s[last].vectorize(vw)
cfg.define_knob("conv_inline", [0, 1, 2, 3])
if cfg["conv_inline"].val == 1:
s[conv].compute_at(s[last], ow)
if cfg["conv_inline"].val == 2:
s[conv].compute_at(s[last], oh)
if cfg["conv_inline"].val == 3:
s[conv].compute_at(s[last], co)
# mark parallel
s[last].parallel(co)
if data_vec.op.name == "data_vec_undilated":
_, h, _, _, _, _, _, _ = s[data_vec].op.axis
else:
_, h, _, _, _, _ = s[data_vec].op.axis
s[data_vec].parallel(h)
if kernel_vec.op.name == "kernel_vec":
co, _, _, _, _ = s[kernel_vec].op.axis
if autotvm.GLOBAL_SCOPE.in_tuning:
# kernel packing will be pre-computed during compliation, so we skip
# this part to make tuning records correct
s[kernel_vec].pragma(co, "debug_skip_region")
else:
s[kernel_vec].parallel(co)
return s
| apache-2.0 | -3,259,450,551,458,108,000 | 33.830243 | 102 | 0.553981 | false |
PaulKinlan/cli-caniuse | site/app/scripts/bower_components/jsrepl-build/extern/python/unclosured/lib/python2.7/msilib/__init__.py | 43 | 17579 | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2005 Martin v. Löwis
# Licensed to PSF under a Contributor Agreement.
from _msi import *
import glob, os, string, re, sys
AMD64 = "AMD64" in sys.version
Itanium = "Itanium" in sys.version
Win64 = AMD64 or Itanium
# Partially taken from Wine
datasizemask= 0x00ff
type_valid= 0x0100
type_localizable= 0x0200
typemask= 0x0c00
type_long= 0x0000
type_short= 0x0400
type_string= 0x0c00
type_binary= 0x0800
type_nullable= 0x1000
type_key= 0x2000
# XXX temporary, localizable?
knownbits = datasizemask | type_valid | type_localizable | \
typemask | type_nullable | type_key
class Table:
def __init__(self, name):
self.name = name
self.fields = []
def add_field(self, index, name, type):
self.fields.append((index,name,type))
def sql(self):
fields = []
keys = []
self.fields.sort()
fields = [None]*len(self.fields)
for index, name, type in self.fields:
index -= 1
unk = type & ~knownbits
if unk:
print "%s.%s unknown bits %x" % (self.name, name, unk)
size = type & datasizemask
dtype = type & typemask
if dtype == type_string:
if size:
tname="CHAR(%d)" % size
else:
tname="CHAR"
elif dtype == type_short:
assert size==2
tname = "SHORT"
elif dtype == type_long:
assert size==4
tname="LONG"
elif dtype == type_binary:
assert size==0
tname="OBJECT"
else:
tname="unknown"
print "%s.%sunknown integer type %d" % (self.name, name, size)
if type & type_nullable:
flags = ""
else:
flags = " NOT NULL"
if type & type_localizable:
flags += " LOCALIZABLE"
fields[index] = "`%s` %s%s" % (name, tname, flags)
if type & type_key:
keys.append("`%s`" % name)
fields = ", ".join(fields)
keys = ", ".join(keys)
return "CREATE TABLE %s (%s PRIMARY KEY %s)" % (self.name, fields, keys)
def create(self, db):
v = db.OpenView(self.sql())
v.Execute(None)
v.Close()
class _Unspecified:pass
def change_sequence(seq, action, seqno=_Unspecified, cond = _Unspecified):
"Change the sequence number of an action in a sequence list"
for i in range(len(seq)):
if seq[i][0] == action:
if cond is _Unspecified:
cond = seq[i][1]
if seqno is _Unspecified:
seqno = seq[i][2]
seq[i] = (action, cond, seqno)
return
raise ValueError, "Action not found in sequence"
def add_data(db, table, values):
v = db.OpenView("SELECT * FROM `%s`" % table)
count = v.GetColumnInfo(MSICOLINFO_NAMES).GetFieldCount()
r = CreateRecord(count)
for value in values:
assert len(value) == count, value
for i in range(count):
field = value[i]
if isinstance(field, (int, long)):
r.SetInteger(i+1,field)
elif isinstance(field, basestring):
r.SetString(i+1,field)
elif field is None:
pass
elif isinstance(field, Binary):
r.SetStream(i+1, field.name)
else:
raise TypeError, "Unsupported type %s" % field.__class__.__name__
try:
v.Modify(MSIMODIFY_INSERT, r)
except Exception, e:
raise MSIError("Could not insert "+repr(values)+" into "+table)
r.ClearData()
v.Close()
def add_stream(db, name, path):
v = db.OpenView("INSERT INTO _Streams (Name, Data) VALUES ('%s', ?)" % name)
r = CreateRecord(1)
r.SetStream(1, path)
v.Execute(r)
v.Close()
def init_database(name, schema,
ProductName, ProductCode, ProductVersion,
Manufacturer):
try:
os.unlink(name)
except OSError:
pass
ProductCode = ProductCode.upper()
# Create the database
db = OpenDatabase(name, MSIDBOPEN_CREATE)
# Create the tables
for t in schema.tables:
t.create(db)
# Fill the validation table
add_data(db, "_Validation", schema._Validation_records)
# Initialize the summary information, allowing atmost 20 properties
si = db.GetSummaryInformation(20)
si.SetProperty(PID_TITLE, "Installation Database")
si.SetProperty(PID_SUBJECT, ProductName)
si.SetProperty(PID_AUTHOR, Manufacturer)
if Itanium:
si.SetProperty(PID_TEMPLATE, "Intel64;1033")
elif AMD64:
si.SetProperty(PID_TEMPLATE, "x64;1033")
else:
si.SetProperty(PID_TEMPLATE, "Intel;1033")
si.SetProperty(PID_REVNUMBER, gen_uuid())
si.SetProperty(PID_WORDCOUNT, 2) # long file names, compressed, original media
si.SetProperty(PID_PAGECOUNT, 200)
si.SetProperty(PID_APPNAME, "Python MSI Library")
# XXX more properties
si.Persist()
add_data(db, "Property", [
("ProductName", ProductName),
("ProductCode", ProductCode),
("ProductVersion", ProductVersion),
("Manufacturer", Manufacturer),
("ProductLanguage", "1033")])
db.Commit()
return db
def add_tables(db, module):
for table in module.tables:
add_data(db, table, getattr(module, table))
def make_id(str):
identifier_chars = string.ascii_letters + string.digits + "._"
str = "".join([c if c in identifier_chars else "_" for c in str])
if str[0] in (string.digits + "."):
str = "_" + str
assert re.match("^[A-Za-z_][A-Za-z0-9_.]*$", str), "FILE"+str
return str
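# Illustrative examples (added for clarity; not part of the original module):
#   make_id("foo bar.py") -> "foo_bar.py"  (space is not a valid identifier char)
#   make_id("3rd-party")  -> "_3rd_party"  (a leading digit gets a "_" prefix)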
def gen_uuid():
return "{"+UuidCreate().upper()+"}"
class CAB:
def __init__(self, name):
self.name = name
self.files = []
self.filenames = set()
self.index = 0
def gen_id(self, file):
logical = _logical = make_id(file)
pos = 1
while logical in self.filenames:
logical = "%s.%d" % (_logical, pos)
pos += 1
self.filenames.add(logical)
return logical
def append(self, full, file, logical):
if os.path.isdir(full):
return
if not logical:
logical = self.gen_id(file)
self.index += 1
self.files.append((full, logical))
return self.index, logical
def commit(self, db):
from tempfile import mktemp
filename = mktemp()
FCICreate(filename, self.files)
add_data(db, "Media",
[(1, self.index, None, "#"+self.name, None, None)])
add_stream(db, self.name, filename)
os.unlink(filename)
db.Commit()
_directories = set()
class Directory:
def __init__(self, db, cab, basedir, physical, _logical, default, componentflags=None):
"""Create a new directory in the Directory table. There is a current component
at each point in time for the directory, which is either explicitly created
through start_component, or implicitly when files are added for the first
time. Files are added into the current component, and into the cab file.
To create a directory, a base directory object needs to be specified (can be
None), the path to the physical directory, and a logical directory name.
Default specifies the DefaultDir slot in the directory table. componentflags
specifies the default flags that new components get."""
index = 1
_logical = make_id(_logical)
logical = _logical
while logical in _directories:
logical = "%s%d" % (_logical, index)
index += 1
_directories.add(logical)
self.db = db
self.cab = cab
self.basedir = basedir
self.physical = physical
self.logical = logical
self.component = None
self.short_names = set()
self.ids = set()
self.keyfiles = {}
self.componentflags = componentflags
if basedir:
self.absolute = os.path.join(basedir.absolute, physical)
blogical = basedir.logical
else:
self.absolute = physical
blogical = None
add_data(db, "Directory", [(logical, blogical, default)])
def start_component(self, component = None, feature = None, flags = None, keyfile = None, uuid=None):
"""Add an entry to the Component table, and make this component the current for this
directory. If no component name is given, the directory name is used. If no feature
is given, the current feature is used. If no flags are given, the directory's default
flags are used. If no keyfile is given, the KeyPath is left null in the Component
table."""
if flags is None:
flags = self.componentflags
if uuid is None:
uuid = gen_uuid()
else:
uuid = uuid.upper()
if component is None:
component = self.logical
self.component = component
if Win64:
flags |= 256
if keyfile:
            keyid = self.cab.gen_id(keyfile)
self.keyfiles[keyfile] = keyid
else:
keyid = None
add_data(self.db, "Component",
[(component, uuid, self.logical, flags, None, keyid)])
if feature is None:
feature = current_feature
add_data(self.db, "FeatureComponents",
[(feature.id, component)])
def make_short(self, file):
oldfile = file
file = file.replace('+', '_')
file = ''.join(c for c in file if not c in ' "/\[]:;=,')
parts = file.split(".")
if len(parts) > 1:
prefix = "".join(parts[:-1]).upper()
suffix = parts[-1].upper()
if not prefix:
prefix = suffix
suffix = None
else:
prefix = file.upper()
suffix = None
if len(parts) < 3 and len(prefix) <= 8 and file == oldfile and (
not suffix or len(suffix) <= 3):
if suffix:
file = prefix+"."+suffix
else:
file = prefix
else:
file = None
if file is None or file in self.short_names:
prefix = prefix[:6]
if suffix:
suffix = suffix[:3]
pos = 1
while 1:
if suffix:
file = "%s~%d.%s" % (prefix, pos, suffix)
else:
file = "%s~%d" % (prefix, pos)
if file not in self.short_names: break
pos += 1
assert pos < 10000
if pos in (10, 100, 1000):
prefix = prefix[:-1]
self.short_names.add(file)
assert not re.search(r'[\?|><:/*"+,;=\[\]]', file) # restrictions on short names
return file
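    # Illustrative examples (added for clarity; not part of the original module),
    # assuming a fresh Directory with no short names allocated yet:
    #   make_short("setup.py")          -> "SETUP.PY"      (already fits 8.3)
    #   make_short("longfilename.html") -> "LONGFI~1.HTM"  (truncated with a ~N suffix)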
def add_file(self, file, src=None, version=None, language=None):
"""Add a file to the current component of the directory, starting a new one
one if there is no current component. By default, the file name in the source
and the file table will be identical. If the src file is specified, it is
interpreted relative to the current directory. Optionally, a version and a
language can be specified for the entry in the File table."""
if not self.component:
self.start_component(self.logical, current_feature, 0)
if not src:
# Allow relative paths for file if src is not specified
src = file
file = os.path.basename(file)
absolute = os.path.join(self.absolute, src)
assert not re.search(r'[\?|><:/*]"', file) # restrictions on long names
if file in self.keyfiles:
logical = self.keyfiles[file]
else:
logical = None
sequence, logical = self.cab.append(absolute, file, logical)
assert logical not in self.ids
self.ids.add(logical)
short = self.make_short(file)
full = "%s|%s" % (short, file)
filesize = os.stat(absolute).st_size
# constants.msidbFileAttributesVital
# Compressed omitted, since it is the database default
# could add r/o, system, hidden
attributes = 512
add_data(self.db, "File",
[(logical, self.component, full, filesize, version,
language, attributes, sequence)])
#if not version:
# # Add hash if the file is not versioned
# filehash = FileHash(absolute, 0)
# add_data(self.db, "MsiFileHash",
# [(logical, 0, filehash.IntegerData(1),
# filehash.IntegerData(2), filehash.IntegerData(3),
# filehash.IntegerData(4))])
# Automatically remove .pyc/.pyo files on uninstall (2)
# XXX: adding so many RemoveFile entries makes installer unbelievably
# slow. So instead, we have to use wildcard remove entries
if file.endswith(".py"):
add_data(self.db, "RemoveFile",
[(logical+"c", self.component, "%sC|%sc" % (short, file),
self.logical, 2),
(logical+"o", self.component, "%sO|%so" % (short, file),
self.logical, 2)])
return logical
def glob(self, pattern, exclude = None):
"""Add a list of files to the current component as specified in the
glob pattern. Individual files can be excluded in the exclude list."""
files = glob.glob1(self.absolute, pattern)
for f in files:
if exclude and f in exclude: continue
self.add_file(f)
return files
def remove_pyc(self):
"Remove .pyc/.pyo files on uninstall"
add_data(self.db, "RemoveFile",
[(self.component+"c", self.component, "*.pyc", self.logical, 2),
(self.component+"o", self.component, "*.pyo", self.logical, 2)])
class Binary:
def __init__(self, fname):
self.name = fname
def __repr__(self):
return 'msilib.Binary(os.path.join(dirname,"%s"))' % self.name
class Feature:
def __init__(self, db, id, title, desc, display, level = 1,
parent=None, directory = None, attributes=0):
self.id = id
if parent:
parent = parent.id
add_data(db, "Feature",
[(id, parent, title, desc, display,
level, directory, attributes)])
def set_current(self):
global current_feature
current_feature = self
class Control:
def __init__(self, dlg, name):
self.dlg = dlg
self.name = name
def event(self, event, argument, condition = "1", ordering = None):
add_data(self.dlg.db, "ControlEvent",
[(self.dlg.name, self.name, event, argument,
condition, ordering)])
def mapping(self, event, attribute):
add_data(self.dlg.db, "EventMapping",
[(self.dlg.name, self.name, event, attribute)])
def condition(self, action, condition):
add_data(self.dlg.db, "ControlCondition",
[(self.dlg.name, self.name, action, condition)])
class RadioButtonGroup(Control):
def __init__(self, dlg, name, property):
self.dlg = dlg
self.name = name
self.property = property
self.index = 1
def add(self, name, x, y, w, h, text, value = None):
if value is None:
value = name
add_data(self.dlg.db, "RadioButton",
[(self.property, self.index, value,
x, y, w, h, text, None)])
self.index += 1
class Dialog:
def __init__(self, db, name, x, y, w, h, attr, title, first, default, cancel):
self.db = db
self.name = name
self.x, self.y, self.w, self.h = x,y,w,h
add_data(db, "Dialog", [(name, x,y,w,h,attr,title,first,default,cancel)])
def control(self, name, type, x, y, w, h, attr, prop, text, next, help):
add_data(self.db, "Control",
[(self.name, name, type, x, y, w, h, attr, prop, text, next, help)])
return Control(self, name)
def text(self, name, x, y, w, h, attr, text):
return self.control(name, "Text", x, y, w, h, attr, None,
text, None, None)
def bitmap(self, name, x, y, w, h, text):
return self.control(name, "Bitmap", x, y, w, h, 1, None, text, None, None)
def line(self, name, x, y, w, h):
return self.control(name, "Line", x, y, w, h, 1, None, None, None, None)
def pushbutton(self, name, x, y, w, h, attr, text, next):
return self.control(name, "PushButton", x, y, w, h, attr, None, text, next, None)
def radiogroup(self, name, x, y, w, h, attr, prop, text, next):
add_data(self.db, "Control",
[(self.name, name, "RadioButtonGroup",
x, y, w, h, attr, prop, text, next, None)])
return RadioButtonGroup(self, name, prop)
def checkbox(self, name, x, y, w, h, attr, prop, text, next):
return self.control(name, "CheckBox", x, y, w, h, attr, prop, text, next, None)
| apache-2.0 | 8,855,166,320,949,250,000 | 36.008421 | 105 | 0.550771 | false |
wuga214/Django-Wuga | env/lib/python2.7/site-packages/django/template/__init__.py | 165 | 1889 | """
Django's support for templates.
The django.template namespace contains two independent subsystems:
1. Multiple Template Engines: support for pluggable template backends,
built-in backends and backend-independent APIs
2. Django Template Language: Django's own template engine, including its
built-in loaders, context processors, tags and filters.
Ideally these subsystems would be implemented in distinct packages. However
keeping them together made the implementation of Multiple Template Engines
less disruptive .
Here's a breakdown of which modules belong to which subsystem.
Multiple Template Engines:
- django.template.backends.*
- django.template.loader
- django.template.response
Django Template Language:
- django.template.base
- django.template.context
- django.template.context_processors
- django.template.loaders.*
- django.template.debug
- django.template.defaultfilters
- django.template.defaulttags
- django.template.engine
- django.template.loader_tags
- django.template.smartif
Shared:
- django.template.utils
"""
# Multiple Template Engines
from .engine import Engine
from .utils import EngineHandler
engines = EngineHandler()
__all__ = ('Engine', 'engines')
# Django Template Language
# Public exceptions
from .base import VariableDoesNotExist # NOQA isort:skip
from .context import ContextPopException # NOQA isort:skip
from .exceptions import TemplateDoesNotExist, TemplateSyntaxError # NOQA isort:skip
# Template parts
from .base import ( # NOQA isort:skip
Context, Node, NodeList, Origin, RequestContext, StringOrigin, Template,
Variable,
)
# Library management
from .library import Library # NOQA isort:skip
__all__ += ('Template', 'Context', 'RequestContext')
| apache-2.0 | 4,751,144,151,942,030,000 | 26.376812 | 89 | 0.715723 | false |
binhqnguyen/lena | src/lte/test/examples-to-run.py | 151 | 3664 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
## -*- Mode: python; py-indent-offset: 4; indent-tabs-mode: nil; coding: utf-8; -*-
# A list of C++ examples to run in order to ensure that they remain
# buildable and runnable over time. Each tuple in the list contains
#
# (example_name, do_run, do_valgrind_run).
#
# See test.py for more information.
cpp_examples = [
("lena-cqi-threshold", "True", "True"),
("lena-dual-stripe", "True", "True"),
("lena-dual-stripe --simTime=0.0 --nApartmentsX=1 --homeEnbDeploymentRatio=0.5 --nMacroEnbSites=0 --macroUeDensity=0 --nBlocks=1", "True", "True"),
("lena-dual-stripe --epc=1 --simTime=0.0 --nApartmentsX=1 --homeEnbDeploymentRatio=0.5 --nMacroEnbSites=0 --macroUeDensity=0 --nBlocks=1", "True", "True"),
("lena-dual-stripe --simTime=0.01", "True", "True"),
("lena-dual-stripe --epc=1 --simTime=0.01", "True", "True"),
("lena-dual-stripe --epc=1 --useUdp=0 --simTime=0.01", "True", "True"),
("lena-dual-stripe --epc=1 --fadingTrace=../../src/lte/model/fading-traces/fading_trace_EPA_3kmph.fad --simTime=0.01", "True", "True"),
("lena-dual-stripe --nBlocks=1 --nMacroEnbSites=0 --macroUeDensity=0 --homeEnbDeploymentRatio=1 --homeEnbActivationRatio=1 --homeUesHomeEnbRatio=2 --macroEnbTxPowerDbm=0 --simTime=0.01", "True", "True"),
("lena-dual-stripe --nMacroEnbSites=0 --macroUeDensity=0 --nBlocks=1 --nApartmentsX=4 --nMacroEnbSitesX=0 --homeEnbDeploymentRatio=1 --homeEnbActivationRatio=1 --macroEnbTxPowerDbm=0 --epcDl=1 --epcUl=0 --epc=1 --numBearersPerUe=4 --homeUesHomeEnbRatio=15 --simTime=0.01", "True", "True"),
("lena-fading", "True", "True"),
("lena-gtpu-tunnel", "True", "True"),
("lena-intercell-interference --simTime=0.1", "True", "True"),
("lena-pathloss-traces", "True", "True"),
("lena-profiling", "True", "True"),
("lena-profiling --simTime=0.1 --nUe=2 --nEnb=5 --nFloors=0", "True", "True"),
("lena-profiling --simTime=0.1 --nUe=3 --nEnb=6 --nFloors=1", "True", "True"),
("lena-rlc-traces", "True", "True"),
("lena-rem", "True", "True"),
("lena-rem-sector-antenna", "True", "True"),
("lena-simple", "True", "True"),
("lena-simple-epc", "True", "True"),
("lena-x2-handover", "True", "True"),
("lena-simple-epc --simTime=1.1 --ns3::LteHelper::Scheduler=ns3::TtaFfMacScheduler", "options.valgrind", "True"),
("lena-simple-epc --simTime=1.1 --ns3::LteHelper::Scheduler=ns3::TdTbfqFfMacScheduler", "options.valgrind", "True"),
("lena-simple-epc --simTime=1.1 --ns3::LteHelper::Scheduler=ns3::TdMtFfMacScheduler", "options.valgrind", "True"),
("lena-simple-epc --simTime=1.1 --ns3::LteHelper::Scheduler=ns3::TdBetFfMacScheduler", "options.valgrind", "True"),
("lena-simple-epc --simTime=1.1 --ns3::LteHelper::Scheduler=ns3::RrFfMacScheduler", "options.valgrind", "True"),
("lena-simple-epc --simTime=1.1 --ns3::LteHelper::Scheduler=ns3::PssFfMacScheduler", "options.valgrind", "True"),
("lena-simple-epc --simTime=1.1 --ns3::LteHelper::Scheduler=ns3::PfFfMacScheduler", "options.valgrind", "True"),
("lena-simple-epc --simTime=1.1 --ns3::LteHelper::Scheduler=ns3::FdTbfqFfMacScheduler", "options.valgrind", "True"),
("lena-simple-epc --simTime=1.1 --ns3::LteHelper::Scheduler=ns3::FdMtFfMacScheduler", "options.valgrind", "True"),
("lena-simple-epc --simTime=1.1 --ns3::LteHelper::Scheduler=ns3::FdBetFfMacScheduler", "options.valgrind", "True"),
]
# A list of Python examples to run in order to ensure that they remain
# runnable over time. Each tuple in the list contains
#
# (example_name, do_run).
#
# See test.py for more information.
python_examples = []
| gpl-2.0 | -218,724,546,961,001,060 | 68.132075 | 293 | 0.659662 | false |
andela-earinde/bellatrix-py | app/js/lib/lib/modules/pyrepl/pygame_console.py | 13 | 11980 | # Copyright 2000-2004 Michael Hudson-Doyle <[email protected]>
#
# All Rights Reserved
#
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose is hereby granted without fee,
# provided that the above copyright notice appear in all copies and
# that both that copyright notice and this permission notice appear in
# supporting documentation.
#
# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO
# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL,
# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF
# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# the pygame console is currently thoroughly broken.
# there's a fundamental difference from the UnixConsole: here we're
# the terminal emulator too, in effect. This means, e.g., for pythoni
# we really need a separate process (or thread) to monitor for ^C
# during command execution and zap the executor process. Making this
# work on non-Unix is expected to be even more entertaining.
from pygame.locals import *
from pyrepl.console import Console, Event
from pyrepl import pygame_keymap
import pygame
import types
lmargin = 5
rmargin = 5
tmargin = 5
bmargin = 5
try:
bool
except NameError:
def bool(x):
return not not x
modcolors = {K_LCTRL:1,
K_RCTRL:1,
K_LMETA:1,
K_RMETA:1,
K_LALT:1,
K_RALT:1,
K_LSHIFT:1,
K_RSHIFT:1}
class colors:
fg = 250,240,230
bg = 5, 5, 5
cursor = 230, 0, 230
margin = 5, 5, 15
class FakeStdout:
def __init__(self, con):
self.con = con
def write(self, text):
self.con.write(text)
def flush(self):
pass
class FakeStdin:
def __init__(self, con):
self.con = con
def read(self, n=None):
# argh!
raise NotImplementedError
def readline(self, n=None):
from reader import Reader
try:
# this isn't quite right: it will clobber any prompt that's
# been printed. Not sure how to get around this...
return Reader(self.con).readline()
except EOFError:
return ''
class PyGameConsole(Console):
"""Attributes:
(keymap),
(fd),
screen,
height,
width,
"""
def __init__(self):
self.pygame_screen = pygame.display.set_mode((800, 600))
pygame.font.init()
pygame.key.set_repeat(500, 30)
self.font = pygame.font.Font(
"/usr/X11R6/lib/X11/fonts/TTF/luximr.ttf", 15)
self.fw, self.fh = self.fontsize = self.font.size("X")
self.cursor = pygame.Surface(self.fontsize)
self.cursor.fill(colors.cursor)
self.clear()
self.curs_vis = 1
self.height, self.width = self.getheightwidth()
pygame.display.update()
pygame.event.set_allowed(None)
pygame.event.set_allowed(KEYDOWN)
def install_keymap(self, keymap):
"""Install a given keymap.
keymap is a tuple of 2-element tuples; each small tuple is a
pair (keyspec, event-name). The format for keyspec is
modelled on that used by readline (so read that manual for
now!)."""
self.k = self.keymap = pygame_keymap.compile_keymap(keymap)
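    # Illustrative sketch (added for clarity; not part of the original module).
    # Keyspecs follow the readline-style syntax used by pyrepl; the hypothetical
    # keymap below would bind Ctrl-A and Meta-D:
    #   con.install_keymap(((r'\C-a', 'beginning-of-line'),
    #                       (r'\M-d', 'kill-word')))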
def char_rect(self, x, y):
return self.char_pos(x, y), self.fontsize
def char_pos(self, x, y):
return (lmargin + x*self.fw,
tmargin + y*self.fh + self.cur_top + self.scroll)
def paint_margin(self):
s = self.pygame_screen
c = colors.margin
s.fill(c, [0, 0, 800, tmargin])
s.fill(c, [0, 0, lmargin, 600])
s.fill(c, [0, 600 - bmargin, 800, bmargin])
s.fill(c, [800 - rmargin, 0, lmargin, 600])
def refresh(self, screen, cxy):
self.screen = screen
self.pygame_screen.fill(colors.bg,
[0, tmargin + self.cur_top + self.scroll,
800, 600])
self.paint_margin()
line_top = self.cur_top
width, height = self.fontsize
self.cxy = cxy
cp = self.char_pos(*cxy)
if cp[1] < tmargin:
            self.scroll = - (cxy[1]*self.fh + self.cur_top)
self.repaint()
elif cp[1] + self.fh > 600 - bmargin:
self.scroll += (600 - bmargin) - (cp[1] + self.fh)
self.repaint()
if self.curs_vis:
self.pygame_screen.blit(self.cursor, self.char_pos(*cxy))
for line in screen:
if 0 <= line_top + self.scroll <= (600 - bmargin - tmargin - self.fh):
if line:
ren = self.font.render(line, 1, colors.fg)
self.pygame_screen.blit(ren, (lmargin,
tmargin + line_top + self.scroll))
line_top += self.fh
pygame.display.update()
def prepare(self):
self.cmd_buf = ''
self.k = self.keymap
self.height, self.width = self.getheightwidth()
self.curs_vis = 1
self.cur_top = self.pos[0]
self.event_queue = []
def restore(self):
pass
def blit_a_char(self, linen, charn):
line = self.screen[linen]
if charn < len(line):
text = self.font.render(line[charn], 1, colors.fg)
self.pygame_screen.blit(text, self.char_pos(charn, linen))
def move_cursor(self, x, y):
cp = self.char_pos(x, y)
if cp[1] < tmargin or cp[1] + self.fh > 600 - bmargin:
self.event_queue.append(Event('refresh', '', ''))
else:
if self.curs_vis:
cx, cy = self.cxy
self.pygame_screen.fill(colors.bg, self.char_rect(cx, cy))
self.blit_a_char(cy, cx)
self.pygame_screen.blit(self.cursor, cp)
self.blit_a_char(y, x)
pygame.display.update()
self.cxy = (x, y)
def set_cursor_vis(self, vis):
self.curs_vis = vis
if vis:
self.move_cursor(*self.cxy)
else:
cx, cy = self.cxy
self.pygame_screen.fill(colors.bg, self.char_rect(cx, cy))
self.blit_a_char(cy, cx)
pygame.display.update()
def getheightwidth(self):
"""Return (height, width) where height and width are the height
and width of the terminal window in characters."""
return ((600 - tmargin - bmargin)/self.fh,
(800 - lmargin - rmargin)/self.fw)
def tr_event(self, pyg_event):
shift = bool(pyg_event.mod & KMOD_SHIFT)
ctrl = bool(pyg_event.mod & KMOD_CTRL)
meta = bool(pyg_event.mod & (KMOD_ALT|KMOD_META))
try:
return self.k[(pyg_event.unicode, meta, ctrl)], pyg_event.unicode
except KeyError:
try:
return self.k[(pyg_event.key, meta, ctrl)], pyg_event.unicode
except KeyError:
return "invalid-key", pyg_event.unicode
def get_event(self, block=1):
"""Return an Event instance. Returns None if |block| is false
and there is no event pending, otherwise waits for the
completion of an event."""
while 1:
if self.event_queue:
return self.event_queue.pop(0)
elif block:
pyg_event = pygame.event.wait()
else:
pyg_event = pygame.event.poll()
if pyg_event.type == NOEVENT:
return
if pyg_event.key in modcolors:
continue
k, c = self.tr_event(pyg_event)
self.cmd_buf += c.encode('ascii', 'replace')
self.k = k
if not isinstance(k, types.DictType):
e = Event(k, self.cmd_buf, [])
self.k = self.keymap
self.cmd_buf = ''
return e
def beep(self):
# uhh, can't be bothered now.
# pygame.sound.something, I guess.
pass
def clear(self):
"""Wipe the screen"""
self.pygame_screen.fill(colors.bg)
#self.screen = []
self.pos = [0, 0]
self.grobs = []
self.cur_top = 0
self.scroll = 0
def finish(self):
"""Move the cursor to the end of the display and otherwise get
ready for end. XXX could be merged with restore? Hmm."""
if self.curs_vis:
cx, cy = self.cxy
self.pygame_screen.fill(colors.bg, self.char_rect(cx, cy))
self.blit_a_char(cy, cx)
for line in self.screen:
self.write_line(line, 1)
if self.curs_vis:
self.pygame_screen.blit(self.cursor,
(lmargin + self.pos[1],
tmargin + self.pos[0] + self.scroll))
pygame.display.update()
def flushoutput(self):
"""Flush all output to the screen (assuming there's some
buffering going on somewhere)"""
# no buffering here, ma'am (though perhaps there should be!)
pass
def forgetinput(self):
"""Forget all pending, but not yet processed input."""
while pygame.event.poll().type <> NOEVENT:
pass
def getpending(self):
"""Return the characters that have been typed but not yet
processed."""
events = []
while 1:
event = pygame.event.poll()
if event.type == NOEVENT:
break
events.append(event)
return events
def wait(self):
"""Wait for an event."""
raise Exception, "erp!"
def repaint(self):
# perhaps we should consolidate grobs?
self.pygame_screen.fill(colors.bg)
self.paint_margin()
for (y, x), surf, text in self.grobs:
if surf and 0 < y + self.scroll:
self.pygame_screen.blit(surf, (lmargin + x,
tmargin + y + self.scroll))
pygame.display.update()
def write_line(self, line, ret):
charsleft = (self.width*self.fw - self.pos[1])/self.fw
while len(line) > charsleft:
self.write_line(line[:charsleft], 1)
line = line[charsleft:]
if line:
ren = self.font.render(line, 1, colors.fg, colors.bg)
self.grobs.append((self.pos[:], ren, line))
self.pygame_screen.blit(ren,
(lmargin + self.pos[1],
tmargin + self.pos[0] + self.scroll))
else:
self.grobs.append((self.pos[:], None, line))
if ret:
self.pos[0] += self.fh
if tmargin + self.pos[0] + self.scroll + self.fh > 600 - bmargin:
self.scroll = 600 - bmargin - self.pos[0] - self.fh - tmargin
self.repaint()
self.pos[1] = 0
else:
self.pos[1] += self.fw*len(line)
def write(self, text):
lines = text.split("\n")
if self.curs_vis:
self.pygame_screen.fill(colors.bg,
(lmargin + self.pos[1],
tmargin + self.pos[0] + self.scroll,
self.fw, self.fh))
for line in lines[:-1]:
self.write_line(line, 1)
self.write_line(lines[-1], 0)
if self.curs_vis:
self.pygame_screen.blit(self.cursor,
(lmargin + self.pos[1],
tmargin + self.pos[0] + self.scroll))
pygame.display.update()
def flush(self):
pass
| mit | 2,514,440,179,459,600,000 | 32.937677 | 84 | 0.541653 | false |
ROMFactory/android_external_chromium_org | build/android/symbolize.py | 97 | 2639 | #!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Symbolizes stack traces generated by Chromium for Android.
Sample usage:
adb logcat chromium:V | symbolize.py
"""
import os
import re
import sys
from pylib import constants
# Uses symbol.py from third_party/android_platform, not python's.
sys.path.insert(0,
os.path.join(constants.DIR_SOURCE_ROOT,
'third_party/android_platform/development/scripts'))
import symbol
# Sample output from base/debug/stack_trace_android.cc
#00 0x693cd34f /path/to/some/libfoo.so+0x0007434f
TRACE_LINE = re.compile('(?P<frame>\#[0-9]+ 0x[0-9a-f]{8,8}) '
'(?P<lib>[^+]+)\+0x(?P<addr>[0-9a-f]{8,8})')
class Symbolizer(object):
def __init__(self, output):
self._output = output
def write(self, data):
while True:
match = re.search(TRACE_LINE, data)
if not match:
self._output.write(data)
break
frame = match.group('frame')
lib = match.group('lib')
addr = match.group('addr')
# TODO(scherkus): Doing a single lookup per line is pretty slow,
# especially with larger libraries. Consider caching strategies such as:
# 1) Have Python load the libraries and do symbol lookups instead of
# calling out to addr2line each time.
# 2) Have Python keep multiple addr2line instances open as subprocesses,
# piping addresses and reading back symbols as we find them
# 3) Read ahead the entire stack trace until we find no more, then batch
# the symbol lookups.
#
# TODO(scherkus): These results are memoized, which could result in
# incorrect lookups when running this script on long-lived instances
# (e.g., adb logcat) when doing incremental development. Consider clearing
# the cache when modification timestamp of libraries change.
sym = symbol.SymbolInformation(lib, addr, False)[0][0]
if not sym:
post = match.end('addr')
self._output.write(data[:post])
data = data[post:]
continue
pre = match.start('frame')
post = match.end('addr')
self._output.write(data[:pre])
self._output.write(frame)
self._output.write(' ')
self._output.write(sym)
data = data[post:]
def flush(self):
self._output.flush()
def main():
symbolizer = Symbolizer(sys.stdout)
for line in sys.stdin:
symbolizer.write(line)
symbolizer.flush()
if __name__ == '__main__':
main()
| bsd-3-clause | -1,194,287,657,845,490,700 | 28.988636 | 80 | 0.64532 | false |
dietrichc/streamline-ppc-reports | examples/dfp/v201411/custom_targeting_service/update_custom_targeting_keys.py | 4 | 2407 | #!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example updates the display name of a single custom targeting key.
To determine which custom targeting keys exist, run
get_all_custom_targeting_keys_and_values.py."""
__author__ = ('Nicholas Chen',
'Joseph DiLallo')
# Import appropriate modules from the client library.
from googleads import dfp
CUSTOM_TARGETING_KEY_ID = 'INSERT_CUSTOM_TARGETING_KEY_ID_HERE'
def main(client, key_id):
# Initialize appropriate service.
custom_targeting_service = client.GetService(
'CustomTargetingService', version='v201411')
values = [{
'key': 'keyId',
'value': {
'xsi_type': 'NumberValue',
'value': key_id
}
}]
query = 'WHERE id = :keyId'
statement = dfp.FilterStatement(query, values, 1)
# Get custom targeting keys by statement.
response = custom_targeting_service.getCustomTargetingKeysByStatement(
statement.ToStatement())
# Update each local custom targeting key object by changing its display name.
if 'results' in response:
updated_keys = []
for key in response['results']:
if not key['displayName']:
key['displayName'] = key['name']
key['displayName'] += ' (Deprecated)'
updated_keys.append(key)
keys = custom_targeting_service.updateCustomTargetingKeys(updated_keys)
# Display results.
if keys:
for key in keys:
print ('Custom targeting key with id \'%s\', name \'%s\', display name '
'\'%s\', and type \'%s\' was updated.'
% (key['id'], key['name'], key['displayName'], key['type']))
else:
print 'No custom targeting keys were found to update.'
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client, CUSTOM_TARGETING_KEY_ID)
| apache-2.0 | -7,608,391,753,555,470,000 | 32.430556 | 80 | 0.677607 | false |
ioam/holoviews | holoviews/tests/core/testcollation.py | 2 | 3718 | """
Test cases for Collator
"""
import itertools
import numpy as np
from holoviews.core import Collator, HoloMap, NdOverlay, Overlay, GridSpace
from holoviews.element import Curve
from holoviews.element.comparison import ComparisonTestCase
class TestCollation(ComparisonTestCase):
def setUp(self):
alphas, betas, deltas = 2, 2, 2
Bs = list(range(100))
coords = itertools.product(*(range(n) for n in [alphas, betas, deltas]))
mus=np.random.rand(alphas, betas, 100, 10)
self.phase_boundaries = {(a, b, d): Curve(zip(Bs, mus[a, b, :, i]*a+b))
for i in range(10) for a, b, d in coords}
self.dimensions = ['alpha', 'beta', 'delta']
self.nesting_hmap = HoloMap(self.phase_boundaries, kdims=self.dimensions)
self.nested_hmap = self.nesting_hmap.groupby(['alpha'])
self.nested_overlay = self.nesting_hmap.overlay(['delta'])
self.nested_grid = self.nested_overlay.grid(['alpha', 'beta'])
self.nested_layout = self.nested_overlay.layout(['alpha', 'beta'])
def test_collate_hmap(self):
collated = self.nested_hmap.collate()
self.assertEqual(collated.kdims, self.nesting_hmap.kdims)
self.assertEqual(collated.keys(), self.nesting_hmap.keys())
self.assertEqual(collated.type, self.nesting_hmap.type)
self.assertEqual(repr(collated), repr(self.nesting_hmap))
def test_collate_ndoverlay(self):
collated = self.nested_overlay.collate(NdOverlay)
ndoverlay = NdOverlay(self.phase_boundaries, kdims=self.dimensions)
self.assertEqual(collated.kdims, ndoverlay.kdims)
self.assertEqual(collated.keys(), ndoverlay.keys())
self.assertEqual(repr(collated), repr(ndoverlay))
def test_collate_gridspace_ndoverlay(self):
grid = self.nesting_hmap.groupby(['delta']).collate(NdOverlay).grid(['alpha', 'beta'])
self.assertEqual(grid.dimensions(), self.nested_grid.dimensions())
self.assertEqual(grid.keys(), self.nested_grid.keys())
self.assertEqual(repr(grid), repr(self.nested_grid))
def test_collate_ndlayout_ndoverlay(self):
layout = self.nesting_hmap.groupby(['delta']).collate(NdOverlay).layout(['alpha', 'beta'])
self.assertEqual(layout.dimensions(), self.nested_layout.dimensions())
self.assertEqual(layout.keys(), self.nested_layout.keys())
self.assertEqual(repr(layout), repr(self.nested_layout))
def test_collate_layout_overlay(self):
layout = self.nested_overlay + self.nested_overlay
collated = Collator(kdims=['alpha', 'beta'])
for k, v in self.nested_overlay.items():
collated[k] = v + v
collated = collated()
self.assertEqual(collated.dimensions(), layout.dimensions())
def test_collate_layout_hmap(self):
layout = self.nested_overlay + self.nested_overlay
collated = Collator(kdims=['delta'], merge_type=NdOverlay)
for k, v in self.nesting_hmap.groupby(['delta']).items():
collated[k] = v + v
collated = collated()
self.assertEqual(repr(collated), repr(layout))
self.assertEqual(collated.dimensions(), layout.dimensions())
def test_overlay_hmap_collate(self):
hmap = HoloMap({i: Curve(np.arange(10)*i) for i in range(3)})
overlaid = Overlay([hmap, hmap, hmap]).collate()
self.assertEqual(overlaid, hmap*hmap*hmap)
def test_overlay_gridspace_collate(self):
grid = GridSpace({(i,j): Curve(np.arange(10)*i) for i in range(3)
for j in range(3)})
overlaid = Overlay([grid, grid, grid]).collate()
self.assertEqual(overlaid, grid*grid*grid)
| bsd-3-clause | -3,597,647,010,628,444,000 | 46.063291 | 98 | 0.650888 | false |
abreen/socrates.py | blessed/tests/test_core.py | 1 | 14455 | # -*- coding: utf-8 -*-
"Core blessed Terminal() tests."
# std
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import collections
import warnings
import platform
import locale
import sys
import imp
import os
# local
from .accessories import (
as_subprocess,
TestTerminal,
unicode_cap,
all_terms
)
# 3rd party
import mock
import pytest
def test_export_only_Terminal():
"Ensure only Terminal instance is exported for import * statements."
import blessed
assert blessed.__all__ == ('Terminal',)
def test_null_location(all_terms):
"Make sure ``location()`` with no args just does position restoration."
@as_subprocess
def child(kind):
t = TestTerminal(stream=StringIO(), force_styling=True)
with t.location():
pass
expected_output = u''.join(
(unicode_cap('sc'), unicode_cap('rc')))
assert (t.stream.getvalue() == expected_output)
child(all_terms)
def test_flipped_location_move(all_terms):
"``location()`` and ``move()`` receive counter-example arguments."
@as_subprocess
def child(kind):
buf = StringIO()
t = TestTerminal(stream=buf, force_styling=True)
y, x = 10, 20
with t.location(y, x):
xy_val = t.move(x, y)
yx_val = buf.getvalue()[len(t.sc):]
assert xy_val == yx_val
child(all_terms)
def test_yield_keypad():
"Ensure ``keypad()`` writes keyboard_xmit and keyboard_local."
@as_subprocess
def child(kind):
# given,
t = TestTerminal(stream=StringIO(), force_styling=True)
expected_output = u''.join((t.smkx, t.rmkx))
# exercise,
with t.keypad():
pass
# verify.
assert (t.stream.getvalue() == expected_output)
child(kind='xterm')
def test_null_fileno():
"Make sure ``Terminal`` works when ``fileno`` is ``None``."
@as_subprocess
def child():
# This simulates piping output to another program.
out = StringIO()
out.fileno = None
t = TestTerminal(stream=out)
assert (t.save == u'')
child()
def test_number_of_colors_without_tty():
"``number_of_colors`` should return 0 when there's no tty."
@as_subprocess
def child_256_nostyle():
t = TestTerminal(stream=StringIO())
assert (t.number_of_colors == 0)
@as_subprocess
def child_256_forcestyle():
t = TestTerminal(stream=StringIO(), force_styling=True)
assert (t.number_of_colors == 256)
@as_subprocess
def child_8_forcestyle():
kind = 'ansi'
if platform.system().lower() == 'freebsd':
# 'ansi' on freebsd returns 0 colors, we use the 'cons25' driver,
# compatible with its kernel tty.c
kind = 'cons25'
t = TestTerminal(kind=kind, stream=StringIO(),
force_styling=True)
assert (t.number_of_colors == 8)
@as_subprocess
def child_0_forcestyle():
t = TestTerminal(kind='vt220', stream=StringIO(),
force_styling=True)
assert (t.number_of_colors == 0)
child_0_forcestyle()
child_8_forcestyle()
child_256_forcestyle()
child_256_nostyle()
def test_number_of_colors_with_tty():
"test ``number_of_colors`` 0, 8, and 256."
@as_subprocess
def child_256():
t = TestTerminal()
assert (t.number_of_colors == 256)
@as_subprocess
def child_8():
kind = 'ansi'
if platform.system().lower() == 'freebsd':
# 'ansi' on freebsd returns 0 colors, we use the 'cons25' driver,
# compatible with its kernel tty.c
kind = 'cons25'
t = TestTerminal(kind=kind)
assert (t.number_of_colors == 8)
@as_subprocess
def child_0():
t = TestTerminal(kind='vt220')
assert (t.number_of_colors == 0)
child_0()
child_8()
child_256()
def test_init_descriptor_always_initted(all_terms):
"Test height and width with non-tty Terminals."
@as_subprocess
def child(kind):
t = TestTerminal(kind=kind, stream=StringIO())
assert t._init_descriptor == sys.__stdout__.fileno()
assert (isinstance(t.height, int))
assert (isinstance(t.width, int))
assert t.height == t._height_and_width()[0]
assert t.width == t._height_and_width()[1]
child(all_terms)
def test_force_styling_none(all_terms):
"If ``force_styling=None`` is used, don't ever do styling."
@as_subprocess
def child(kind):
t = TestTerminal(kind=kind, force_styling=None)
assert (t.save == '')
assert (t.color(9) == '')
assert (t.bold('oi') == 'oi')
child(all_terms)
def test_setupterm_singleton_issue33():
"A warning is emitted if a new terminal ``kind`` is used per process."
@as_subprocess
def child():
warnings.filterwarnings("error", category=UserWarning)
# instantiate first terminal, of type xterm-256color
term = TestTerminal(force_styling=True)
try:
# a second instantiation raises UserWarning
term = TestTerminal(kind="vt220", force_styling=True)
except UserWarning:
err = sys.exc_info()[1]
assert (err.args[0].startswith(
'A terminal of kind "vt220" has been requested')
), err.args[0]
assert ('a terminal of kind "xterm-256color" will '
'continue to be returned' in err.args[0]), err.args[0]
else:
# unless term is not a tty and setupterm() is not called
assert not term.is_a_tty or False, 'Should have thrown exception'
warnings.resetwarnings()
child()
def test_setupterm_invalid_issue39():
"A warning is emitted if TERM is invalid."
# https://bugzilla.mozilla.org/show_bug.cgi?id=878089
#
# if TERM is unset, defaults to 'unknown', which should
# fail to lookup and emit a warning on *some* systems.
# freebsd actually has a termcap entry for 'unknown'
@as_subprocess
def child():
warnings.filterwarnings("error", category=UserWarning)
try:
term = TestTerminal(kind='unknown', force_styling=True)
except UserWarning:
err = sys.exc_info()[1]
assert err.args[0] == (
"Failed to setupterm(kind='unknown'): "
"setupterm: could not find terminal")
else:
if platform.system().lower() != 'freebsd':
assert not term.is_a_tty and not term.does_styling, (
'Should have thrown exception')
warnings.resetwarnings()
child()
def test_setupterm_invalid_has_no_styling():
"An unknown TERM type does not perform styling."
# https://bugzilla.mozilla.org/show_bug.cgi?id=878089
# if TERM is unset, defaults to 'unknown', which should
# fail to lookup and emit a warning, only.
@as_subprocess
def child():
warnings.filterwarnings("ignore", category=UserWarning)
term = TestTerminal(kind='xxXunknownXxx', force_styling=True)
assert term.kind is None
assert term.does_styling is False
assert term.number_of_colors == 0
warnings.resetwarnings()
child()
@pytest.mark.skipif(platform.python_implementation() == 'PyPy',
reason='PyPy freezes')
def test_missing_ordereddict_uses_module(monkeypatch):
"ordereddict module is imported when without collections.OrderedDict."
import blessed.keyboard
if hasattr(collections, 'OrderedDict'):
monkeypatch.delattr('collections.OrderedDict')
try:
imp.reload(blessed.keyboard)
except ImportError as err:
assert err.args[0] in ("No module named ordereddict", # py2
"No module named 'ordereddict'") # py3
sys.modules['ordereddict'] = mock.Mock()
sys.modules['ordereddict'].OrderedDict = -1
imp.reload(blessed.keyboard)
assert blessed.keyboard.OrderedDict == -1
del sys.modules['ordereddict']
monkeypatch.undo()
imp.reload(blessed.keyboard)
else:
assert platform.python_version_tuple() < ('2', '7') # reached by py2.6
@pytest.mark.skipif(platform.python_implementation() == 'PyPy',
reason='PyPy freezes')
def test_python3_2_raises_exception(monkeypatch):
"Test python version 3.0 through 3.2 raises an exception."
import blessed
monkeypatch.setattr('platform.python_version_tuple',
lambda: ('3', '2', '2'))
try:
imp.reload(blessed)
except ImportError as err:
assert err.args[0] == (
'Blessed needs Python 3.2.3 or greater for Python 3 '
'support due to http://bugs.python.org/issue10570.')
monkeypatch.undo()
imp.reload(blessed)
else:
assert False, 'Exception should have been raised'
def test_IOUnsupportedOperation_dummy(monkeypatch):
"Ensure dummy exception is used when io is without UnsupportedOperation."
import blessed.terminal
import io
if hasattr(io, 'UnsupportedOperation'):
monkeypatch.delattr('io.UnsupportedOperation')
imp.reload(blessed.terminal)
assert blessed.terminal.IOUnsupportedOperation.__doc__.startswith(
"A dummy exception to take the place of")
monkeypatch.undo()
imp.reload(blessed.terminal)
def test_without_dunder():
"Ensure dunder does not remain in module (py2x InterruptedError test."
import blessed.terminal
assert '_' not in dir(blessed.terminal)
def test_IOUnsupportedOperation():
"Ensure stream that throws IOUnsupportedOperation results in non-tty."
@as_subprocess
def child():
import blessed.terminal
def side_effect():
raise blessed.terminal.IOUnsupportedOperation
mock_stream = mock.Mock()
mock_stream.fileno = side_effect
term = TestTerminal(stream=mock_stream)
assert term.stream == mock_stream
assert term.does_styling is False
assert term.is_a_tty is False
        assert term.number_of_colors == 0
child()
def test_winsize_IOError_returns_environ():
"""When _winsize raises IOError, defaults from os.environ given."""
@as_subprocess
def child():
def side_effect(fd):
raise IOError
term = TestTerminal()
term._winsize = side_effect
os.environ['COLUMNS'] = '1984'
os.environ['LINES'] = '1888'
assert term._height_and_width() == (1888, 1984, None, None)
child()
def test_yield_fullscreen(all_terms):
"Ensure ``fullscreen()`` writes enter_fullscreen and exit_fullscreen."
@as_subprocess
def child(kind):
t = TestTerminal(stream=StringIO(), force_styling=True)
t.enter_fullscreen = u'BEGIN'
t.exit_fullscreen = u'END'
with t.fullscreen():
pass
expected_output = u''.join((t.enter_fullscreen, t.exit_fullscreen))
assert (t.stream.getvalue() == expected_output)
child(all_terms)
def test_yield_hidden_cursor(all_terms):
"Ensure ``hidden_cursor()`` writes hide_cursor and normal_cursor."
@as_subprocess
def child(kind):
t = TestTerminal(stream=StringIO(), force_styling=True)
t.hide_cursor = u'BEGIN'
t.normal_cursor = u'END'
with t.hidden_cursor():
pass
expected_output = u''.join((t.hide_cursor, t.normal_cursor))
assert (t.stream.getvalue() == expected_output)
child(all_terms)
def test_no_preferredencoding_fallback_ascii():
"Ensure empty preferredencoding value defaults to ascii."
@as_subprocess
def child():
with mock.patch('locale.getpreferredencoding') as get_enc:
get_enc.return_value = u''
t = TestTerminal()
assert t._encoding == 'ascii'
child()
def test_unknown_preferredencoding_warned_and_fallback_ascii():
"Ensure a locale without a codecs incrementaldecoder emits a warning."
@as_subprocess
def child():
with mock.patch('locale.getpreferredencoding') as get_enc:
with warnings.catch_warnings(record=True) as warned:
get_enc.return_value = '---unknown--encoding---'
t = TestTerminal()
assert t._encoding == 'ascii'
assert len(warned) == 1
assert issubclass(warned[-1].category, UserWarning)
assert "fallback to ASCII" in str(warned[-1].message)
child()
def test_win32_missing_tty_modules(monkeypatch):
"Ensure dummy exception is used when io is without UnsupportedOperation."
@as_subprocess
def child():
OLD_STYLE = False
try:
original_import = getattr(__builtins__, '__import__')
OLD_STYLE = True
except AttributeError:
original_import = __builtins__['__import__']
tty_modules = ('termios', 'fcntl', 'tty')
def __import__(name, *args, **kwargs):
if name in tty_modules:
raise ImportError
return original_import(name, *args, **kwargs)
for module in tty_modules:
sys.modules.pop(module, None)
warnings.filterwarnings("error", category=UserWarning)
try:
if OLD_STYLE:
__builtins__.__import__ = __import__
else:
__builtins__['__import__'] = __import__
try:
import blessed.terminal
imp.reload(blessed.terminal)
except UserWarning:
err = sys.exc_info()[1]
assert err.args[0] == blessed.terminal.msg_nosupport
warnings.filterwarnings("ignore", category=UserWarning)
import blessed.terminal
imp.reload(blessed.terminal)
assert blessed.terminal.HAS_TTY is False
term = blessed.terminal.Terminal('ansi')
assert term.height == 24
assert term.width == 80
finally:
if OLD_STYLE:
setattr(__builtins__, '__import__', original_import)
else:
__builtins__['__import__'] = original_import
warnings.resetwarnings()
import blessed.terminal
imp.reload(blessed.terminal)
child()
| gpl-2.0 | -4,714,773,379,512,285,000 | 29.820896 | 79 | 0.60166 | false |
IRI-Research/django | tests/managers_regress/tests.py | 62 | 8228 | from __future__ import unicode_literals
from django.apps import apps
from django.db import models
from django.template import Context, Template
from django.test import TestCase, override_settings
from django.utils.encoding import force_text
from .models import (
Child1,
Child2,
Child3,
Child4,
Child5,
Child6,
Child7,
AbstractBase1,
AbstractBase2,
AbstractBase3,
RelatedModel,
RelationModel,
)
class ManagersRegressionTests(TestCase):
def test_managers(self):
Child1.objects.create(name='fred', data='a1')
Child1.objects.create(name='barney', data='a2')
Child2.objects.create(name='fred', data='b1', value=1)
Child2.objects.create(name='barney', data='b2', value=42)
Child3.objects.create(name='fred', data='c1', comment='yes')
Child3.objects.create(name='barney', data='c2', comment='no')
Child4.objects.create(name='fred', data='d1')
Child4.objects.create(name='barney', data='d2')
Child5.objects.create(name='fred', comment='yes')
Child5.objects.create(name='barney', comment='no')
Child6.objects.create(name='fred', data='f1', value=42)
Child6.objects.create(name='barney', data='f2', value=42)
Child7.objects.create(name='fred')
Child7.objects.create(name='barney')
self.assertQuerysetEqual(Child1.manager1.all(), ["<Child1: a1>"])
self.assertQuerysetEqual(Child1.manager2.all(), ["<Child1: a2>"])
self.assertQuerysetEqual(Child1._default_manager.all(), ["<Child1: a1>"])
self.assertQuerysetEqual(Child2._default_manager.all(), ["<Child2: b1>"])
self.assertQuerysetEqual(Child2.restricted.all(), ["<Child2: b2>"])
self.assertQuerysetEqual(Child3._default_manager.all(), ["<Child3: c1>"])
self.assertQuerysetEqual(Child3.manager1.all(), ["<Child3: c1>"])
self.assertQuerysetEqual(Child3.manager2.all(), ["<Child3: c2>"])
# Since Child6 inherits from Child4, the corresponding rows from f1 and
# f2 also appear here. This is the expected result.
self.assertQuerysetEqual(Child4._default_manager.order_by('data'), [
"<Child4: d1>",
"<Child4: d2>",
"<Child4: f1>",
"<Child4: f2>"
]
)
self.assertQuerysetEqual(Child4.manager1.all(), [
"<Child4: d1>",
"<Child4: f1>"
],
ordered=False
)
self.assertQuerysetEqual(Child5._default_manager.all(), ["<Child5: fred>"])
self.assertQuerysetEqual(Child6._default_manager.all(), ["<Child6: f1>"])
self.assertQuerysetEqual(Child7._default_manager.order_by('name'), [
"<Child7: barney>",
"<Child7: fred>"
]
)
def test_abstract_manager(self):
# Accessing the manager on an abstract model should
# raise an attribute error with an appropriate message.
try:
AbstractBase3.objects.all()
self.fail('Should raise an AttributeError')
except AttributeError as e:
# This error message isn't ideal, but if the model is abstract and
# a lot of the class instantiation logic isn't invoked; if the
# manager is implied, then we don't get a hook to install the
# error-raising manager.
self.assertEqual(str(e), "type object 'AbstractBase3' has no attribute 'objects'")
def test_custom_abstract_manager(self):
        # Accessing the manager on an abstract model with a custom
# manager should raise an attribute error with an appropriate
# message.
try:
AbstractBase2.restricted.all()
self.fail('Should raise an AttributeError')
except AttributeError as e:
self.assertEqual(str(e), "Manager isn't available; AbstractBase2 is abstract")
def test_explicit_abstract_manager(self):
# Accessing the manager on an abstract model with an explicit
# manager should raise an attribute error with an appropriate
# message.
try:
AbstractBase1.objects.all()
self.fail('Should raise an AttributeError')
except AttributeError as e:
self.assertEqual(str(e), "Manager isn't available; AbstractBase1 is abstract")
@override_settings(TEST_SWAPPABLE_MODEL='managers_regress.Parent')
def test_swappable_manager(self):
# The models need to be removed after the test in order to prevent bad
# interactions with the flush operation in other tests.
_old_models = apps.app_configs['managers_regress'].models.copy()
try:
class SwappableModel(models.Model):
class Meta:
swappable = 'TEST_SWAPPABLE_MODEL'
# Accessing the manager on a swappable model should
# raise an attribute error with a helpful message
try:
SwappableModel.objects.all()
self.fail('Should raise an AttributeError')
except AttributeError as e:
self.assertEqual(str(e), "Manager isn't available; SwappableModel has been swapped for 'managers_regress.Parent'")
finally:
apps.app_configs['managers_regress'].models = _old_models
apps.all_models['managers_regress'] = _old_models
apps.clear_cache()
@override_settings(TEST_SWAPPABLE_MODEL='managers_regress.Parent')
def test_custom_swappable_manager(self):
# The models need to be removed after the test in order to prevent bad
# interactions with the flush operation in other tests.
_old_models = apps.app_configs['managers_regress'].models.copy()
try:
class SwappableModel(models.Model):
stuff = models.Manager()
class Meta:
swappable = 'TEST_SWAPPABLE_MODEL'
# Accessing the manager on a swappable model with an
# explicit manager should raise an attribute error with a
# helpful message
try:
SwappableModel.stuff.all()
self.fail('Should raise an AttributeError')
except AttributeError as e:
self.assertEqual(str(e), "Manager isn't available; SwappableModel has been swapped for 'managers_regress.Parent'")
finally:
apps.app_configs['managers_regress'].models = _old_models
apps.all_models['managers_regress'] = _old_models
apps.clear_cache()
@override_settings(TEST_SWAPPABLE_MODEL='managers_regress.Parent')
def test_explicit_swappable_manager(self):
# The models need to be removed after the test in order to prevent bad
# interactions with the flush operation in other tests.
_old_models = apps.app_configs['managers_regress'].models.copy()
try:
class SwappableModel(models.Model):
objects = models.Manager()
class Meta:
swappable = 'TEST_SWAPPABLE_MODEL'
# Accessing the manager on a swappable model with an
# explicit manager should raise an attribute error with a
# helpful message
try:
SwappableModel.objects.all()
self.fail('Should raise an AttributeError')
except AttributeError as e:
self.assertEqual(str(e), "Manager isn't available; SwappableModel has been swapped for 'managers_regress.Parent'")
finally:
apps.app_configs['managers_regress'].models = _old_models
apps.all_models['managers_regress'] = _old_models
apps.clear_cache()
def test_regress_3871(self):
related = RelatedModel.objects.create()
relation = RelationModel()
relation.fk = related
relation.gfk = related
relation.save()
relation.m2m.add(related)
t = Template('{{ related.test_fk.all.0 }}{{ related.test_gfk.all.0 }}{{ related.test_m2m.all.0 }}')
self.assertEqual(
t.render(Context({'related': related})),
''.join([force_text(relation.pk)] * 3),
)
| bsd-3-clause | 3,979,251,184,667,779,000 | 39.53202 | 130 | 0.616796 | false |
zhoulingjun/zipline | zipline/utils/security_list.py | 18 | 4472 | from datetime import datetime
from os import listdir
import os.path
import pandas as pd
import pytz
import zipline
from zipline.finance.trading import with_environment
DATE_FORMAT = "%Y%m%d"
zipline_dir = os.path.dirname(zipline.__file__)
SECURITY_LISTS_DIR = os.path.join(zipline_dir, 'resources', 'security_lists')
class SecurityList(object):
def __init__(self, data, current_date_func):
"""
data: a nested dictionary:
knowledge_date -> lookup_date ->
{add: [symbol list], 'delete': []}, delete: [symbol list]}
current_date_func: function taking no parameters, returning
current datetime
"""
self.data = data
self._cache = {}
self._knowledge_dates = self.make_knowledge_dates(self.data)
self.current_date = current_date_func
self.count = 0
self._current_set = set()
def make_knowledge_dates(self, data):
knowledge_dates = sorted(
[pd.Timestamp(k) for k in data.keys()])
return knowledge_dates
def __iter__(self):
return iter(self.restricted_list)
def __contains__(self, item):
return item in self.restricted_list
@property
def restricted_list(self):
cd = self.current_date()
for kd in self._knowledge_dates:
if cd < kd:
break
if kd in self._cache:
self._current_set = self._cache[kd]
continue
for effective_date, changes in iter(self.data[kd].items()):
self.update_current(
effective_date,
changes['add'],
self._current_set.add
)
self.update_current(
effective_date,
changes['delete'],
self._current_set.remove
)
self._cache[kd] = self._current_set
return self._current_set
@with_environment()
def update_current(self, effective_date, symbols, change_func, env=None):
for symbol in symbols:
asset = env.asset_finder.lookup_symbol(
symbol,
as_of_date=effective_date
)
# Pass if no Asset exists for the symbol
if asset is None:
continue
change_func(asset.sid)
class SecurityListSet(object):
# provide a cut point to substitute other security
# list implementations.
security_list_type = SecurityList
def __init__(self, current_date_func):
self.current_date_func = current_date_func
self._leveraged_etf = None
@property
def leveraged_etf_list(self):
if self._leveraged_etf is None:
self._leveraged_etf = self.security_list_type(
load_from_directory('leveraged_etf_list'),
self.current_date_func
)
return self._leveraged_etf
def load_from_directory(list_name):
"""
To resolve the symbol in the LEVERAGED_ETF list,
the date on which the symbol was in effect is needed.
Furthermore, to maintain a point in time record of our own maintenance
of the restricted list, we need a knowledge date. Thus, restricted lists
are dictionaries of datetime->symbol lists.
new symbols should be entered as a new knowledge date entry.
This method assumes a directory structure of:
SECURITY_LISTS_DIR/listname/knowledge_date/lookup_date/add.txt
SECURITY_LISTS_DIR/listname/knowledge_date/lookup_date/delete.txt
The return value is a dictionary with:
knowledge_date -> lookup_date ->
{add: [symbol list], 'delete': [symbol list]}
"""
data = {}
dir_path = os.path.join(SECURITY_LISTS_DIR, list_name)
for kd_name in listdir(dir_path):
kd = datetime.strptime(kd_name, DATE_FORMAT).replace(
tzinfo=pytz.utc)
data[kd] = {}
kd_path = os.path.join(dir_path, kd_name)
for ld_name in listdir(kd_path):
ld = datetime.strptime(ld_name, DATE_FORMAT).replace(
tzinfo=pytz.utc)
data[kd][ld] = {}
ld_path = os.path.join(kd_path, ld_name)
for fname in listdir(ld_path):
fpath = os.path.join(ld_path, fname)
with open(fpath) as f:
symbols = f.read().splitlines()
data[kd][ld][fname] = symbols
return data
| apache-2.0 | -428,562,606,813,553,800 | 31.172662 | 77 | 0.581172 | false |
AOSPU/external_chromium_org | build/android/gyp/finalize_apk.py | 9 | 1864 | #!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Signs and zipaligns APK.
"""
import optparse
import shutil
import sys
import tempfile
from util import build_utils
def SignApk(key_path, key_name, key_passwd, unsigned_path, signed_path):
shutil.copy(unsigned_path, signed_path)
sign_cmd = [
'jarsigner',
'-sigalg', 'MD5withRSA',
'-digestalg', 'SHA1',
'-keystore', key_path,
'-storepass', key_passwd,
signed_path,
key_name,
]
build_utils.CheckOutput(sign_cmd)
def AlignApk(zipalign_path, unaligned_path, final_path):
align_cmd = [
zipalign_path,
'-f', '4', # 4 bytes
unaligned_path,
final_path,
]
build_utils.CheckOutput(align_cmd)
def main():
parser = optparse.OptionParser()
parser.add_option('--zipalign-path', help='Path to the zipalign tool.')
parser.add_option('--unsigned-apk-path', help='Path to input unsigned APK.')
parser.add_option('--final-apk-path',
help='Path to output signed and aligned APK.')
parser.add_option('--key-path', help='Path to keystore for signing.')
parser.add_option('--key-passwd', help='Keystore password')
parser.add_option('--key-name', help='Keystore name')
parser.add_option('--stamp', help='Path to touch on success.')
options, _ = parser.parse_args()
with tempfile.NamedTemporaryFile() as intermediate_file:
signed_apk_path = intermediate_file.name
SignApk(options.key_path, options.key_name, options.key_passwd,
options.unsigned_apk_path, signed_apk_path)
AlignApk(options.zipalign_path, signed_apk_path, options.final_apk_path)
if options.stamp:
build_utils.Touch(options.stamp)
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause | 6,539,404,240,537,068,000 | 26.411765 | 78 | 0.67382 | false |
lamby/django-cache-toolbox | cache_toolbox/templatetags/cache_toolbox.py | 1 | 2014 | from django import template
from django.core.cache import cache
from django.template import Node, TemplateSyntaxError, Variable
register = template.Library()
class CacheNode(Node):
def __init__(self, nodelist, expire_time, key):
self.nodelist = nodelist
self.expire_time = Variable(expire_time)
self.key = Variable(key)
def render(self, context):
key = self.key.resolve(context)
expire_time = int(self.expire_time.resolve(context))
value = cache.get(key)
if value is None:
value = self.nodelist.render(context)
cache.set(key, value, expire_time)
return value
@register.tag
def cachedeterministic(parser, token):
"""
This will cache the contents of a template fragment for a given amount of
time, just like {% cache .. %} except that the key is deterministic and not
mangled or run through MD5.
Usage::
{% cachedeterministic [expire_time] [key] %}
.. some expensive processing ..
{% endcachedeterministic %}
"""
nodelist = parser.parse(("endcachedeterministic",))
parser.delete_first_token()
tokens = token.contents.split()
if len(tokens) != 3:
raise TemplateSyntaxError(u"'%r' tag requires 2 arguments." % tokens[0])
return CacheNode(nodelist, tokens[1], tokens[2])
class ShowIfCachedNode(Node):
def __init__(self, key):
self.key = Variable(key)
def render(self, context):
key = self.key.resolve(context)
return cache.get(key) or ""
@register.tag
def showifcached(parser, token):
"""
Show content if it exists in the cache, otherwise display nothing.
The key is entirely deterministic and not mangled or run through MD5 (cf.
{% cache %})
Usage::
{% showifcached [key] %}
"""
tokens = token.contents.split()
if len(tokens) != 2:
raise TemplateSyntaxError(u"'%r' tag requires 1 argument." % tokens[0])
return ShowIfCachedNode(tokens[1])
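# Illustrative sketch (not part of the original module): because the key passed
# to {% cachedeterministic %} is stored verbatim, a fragment cached on one page
# can be surfaced elsewhere with {% showifcached %} using the same key.  The
# key and timeout below are made-up example values.
#
#   {% cachedeterministic 300 "sidebar-stats" %}
#       .. some expensive processing ..
#   {% endcachedeterministic %}
#
#   {% showifcached "sidebar-stats" %}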
| bsd-3-clause | -3,891,380,380,396,165,600 | 26.972222 | 80 | 0.642502 | false |
anant-dev/django | tests/forms_tests/models.py | 261 | 3805 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
import itertools
import tempfile
from django.core.files.storage import FileSystemStorage
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
callable_default_counter = itertools.count()
def callable_default():
return next(callable_default_counter)
temp_storage = FileSystemStorage(location=tempfile.mkdtemp())
class BoundaryModel(models.Model):
positive_integer = models.PositiveIntegerField(null=True, blank=True)
class Defaults(models.Model):
name = models.CharField(max_length=255, default='class default value')
def_date = models.DateField(default=datetime.date(1980, 1, 1))
value = models.IntegerField(default=42)
callable_default = models.IntegerField(default=callable_default)
class ChoiceModel(models.Model):
"""For ModelChoiceField and ModelMultipleChoiceField tests."""
CHOICES = [
('', 'No Preference'),
('f', 'Foo'),
('b', 'Bar'),
]
INTEGER_CHOICES = [
(None, 'No Preference'),
(1, 'Foo'),
(2, 'Bar'),
]
STRING_CHOICES_WITH_NONE = [
(None, 'No Preference'),
('f', 'Foo'),
('b', 'Bar'),
]
name = models.CharField(max_length=10)
choice = models.CharField(max_length=2, blank=True, choices=CHOICES)
choice_string_w_none = models.CharField(
max_length=2, blank=True, null=True, choices=STRING_CHOICES_WITH_NONE)
choice_integer = models.IntegerField(choices=INTEGER_CHOICES, blank=True,
null=True)
@python_2_unicode_compatible
class ChoiceOptionModel(models.Model):
"""Destination for ChoiceFieldModel's ForeignKey.
Can't reuse ChoiceModel because error_message tests require that it have no instances."""
name = models.CharField(max_length=10)
class Meta:
ordering = ('name',)
def __str__(self):
return 'ChoiceOption %d' % self.pk
def choice_default():
return ChoiceOptionModel.objects.get_or_create(name='default')[0].pk
def choice_default_list():
return [choice_default()]
def int_default():
return 1
def int_list_default():
return [1]
class ChoiceFieldModel(models.Model):
"""Model with ForeignKey to another model, for testing ModelForm
generation with ModelChoiceField."""
choice = models.ForeignKey(
ChoiceOptionModel,
models.CASCADE,
blank=False,
default=choice_default,
)
choice_int = models.ForeignKey(
ChoiceOptionModel,
models.CASCADE,
blank=False,
related_name='choice_int',
default=int_default,
)
multi_choice = models.ManyToManyField(
ChoiceOptionModel,
blank=False,
related_name='multi_choice',
default=choice_default_list,
)
multi_choice_int = models.ManyToManyField(
ChoiceOptionModel,
blank=False,
related_name='multi_choice_int',
default=int_list_default,
)
class OptionalMultiChoiceModel(models.Model):
multi_choice = models.ManyToManyField(
ChoiceOptionModel,
blank=False,
related_name='not_relevant',
default=choice_default,
)
multi_choice_optional = models.ManyToManyField(
ChoiceOptionModel,
blank=True,
related_name='not_relevant2',
)
class FileModel(models.Model):
file = models.FileField(storage=temp_storage, upload_to='tests')
@python_2_unicode_compatible
class Group(models.Model):
name = models.CharField(max_length=10)
def __str__(self):
return '%s' % self.name
class Cheese(models.Model):
name = models.CharField(max_length=100)
class Article(models.Model):
content = models.TextField()
| bsd-3-clause | 3,677,169,001,037,952,500 | 24.198675 | 93 | 0.656767 | false |
joedeller/pymine | helloworld.py | 1 | 3234 | #! /usr/bin/python
# Joe Deller 2014
# Our first Minecraft program written in the Python language
# Level : Beginner
# Uses : Libraries
# When learning any programming language there is a tradition of writing
# your first program to simply say "Hello World!"
# The very first line of this program tells the Raspberry Pi we are
# running a Python program.
# The first thing we need to do is tell our program about the
# Minecraft Library and to find the Minecraft Manual in that library
# Without this, our program won't know anything about Minecraft
# Most languages can use libraries as a way of extending the things
# they can do. They let us reuse other peoples hard work
# so that we don't have to redo it ourselves.
# Some libraries contain lots of information, some only a little
# On the Raspberry Pi, the Minecraft library is fairly small
# but has enough that we can do lots of things
# The people that write Minecraft wrote a special version for the Raspberry Pi
# and a library that lets us do things with it.
# import is a Python language keyword. It tells Python to do a very
# specific job. In this case to find the Minecraft library
import mcpi.minecraft as minecraft
# Now we have found our Minecraft instruction manual
# we are going to look for the part that tells us how to control minecraft
# Then we make something called an object, in this case our object
# is a bit like a Smart TV remote control
# We also give it a nickname as any easy way of remembering which remote
# we mean. In this case, we've called the remote "mc"
mc = minecraft.Minecraft.create()
# Just as a remote control has lots of buttons that do things
# our Minecraft remote control is very similar, except we call the buttons
# "methods"
# When we want to do something, we press the right button and Minecraft will do something
# Much like a smart TV remote searching for a YouTube video, we sometimes type something before
# pressing another button on the remote.
# The button (method) we are going to press is the postToChat button
# This will show us a message in Minecraft
# but before we press it, we need to decide what to say
# Just like writing a story, we use speech marks to enclose our message
# That way the program knows exactly where our message starts and stops
# You might have noticed, but most of the program so far has a #
# at the start of the line
# This tells the computer that the line is a comment for a human to read
# and it will ignore it, except in very special cases
# Good comments can help other people understand your program
# They can help remind you what your program does
# Bad comments can help confuse them
# Enough already, lets do something!
mc.postToChat("Hello World!")
# Notice the round brackets ()
# This tells the program that everything inside them is meant for the postToChat
# button
# It is a bit like an envelope when you write a letter.
# You put your letter inside the envelope and then post it.
# In this first program, we only send one piece of information
# but as we start to do more complex things,
# some buttons need lots of information before they will work
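# As an aside (purely illustrative, not part of this program), here is what a
# button that needs more information might look like. setBlock asks Minecraft
# to place a block at an x, y and z position in the world; the position and
# block number below are made-up example values.
# mc.setBlock(10, 10, 10, 1)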
# This program only has three lines that actually do anything.
# The other 71 are comments like this. | mit | -442,942,174,478,550,900 | 41.012987 | 95 | 0.767471 | false |
devendermishrajio/nova_test_latest | nova/tests/unit/volume/test_cinder.py | 43 | 16898 | # Copyright 2013 Mirantis, Inc.
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinderclient import exceptions as cinder_exception
import mock
from nova import context
from nova import exception
from nova import test
from nova.volume import cinder
class FakeCinderClient(object):
class Volumes(object):
def get(self, volume_id):
return {'id': volume_id}
def list(self, detailed, search_opts=None):
if search_opts is not None and 'id' in search_opts:
return [{'id': search_opts['id']}]
else:
return [{'id': 'id1'}, {'id': 'id2'}]
def create(self, *args, **kwargs):
return {'id': 'created_id'}
def __getattr__(self, item):
return None
def __init__(self):
self.volumes = self.Volumes()
self.volume_snapshots = self.volumes
class FakeVolume(object):
def __init__(self, dict=dict()):
self.id = dict.get('id') or '1234'
self.status = dict.get('status') or 'available'
self.size = dict.get('size') or 1
self.availability_zone = dict.get('availability_zone') or 'cinder'
self.created_at = dict.get('created_at')
self.attach_time = dict.get('attach_time')
self.mountpoint = dict.get('mountpoint')
self.display_name = dict.get('display_name') or 'volume-' + self.id
self.display_description = dict.get('display_description') or 'fake'
self.volume_type_id = dict.get('volume_type_id')
self.snapshot_id = dict.get('snapshot_id')
self.metadata = dict.get('volume_metadata') or {}
class CinderApiTestCase(test.NoDBTestCase):
def setUp(self):
super(CinderApiTestCase, self).setUp()
self.api = cinder.API()
self.cinderclient = FakeCinderClient()
self.ctx = context.get_admin_context()
self.mox.StubOutWithMock(cinder, 'cinderclient')
self.mox.StubOutWithMock(cinder, '_untranslate_volume_summary_view')
self.mox.StubOutWithMock(cinder, '_untranslate_snapshot_summary_view')
self.mox.StubOutWithMock(cinder, 'get_cinder_client_version')
def test_get(self):
volume_id = 'volume_id1'
cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
cinder._untranslate_volume_summary_view(self.ctx, {'id': 'volume_id1'})
self.mox.ReplayAll()
self.api.get(self.ctx, volume_id)
def test_get_failed(self):
volume_id = 'volume_id'
cinder.cinderclient(self.ctx).AndRaise(cinder_exception.NotFound(''))
cinder.cinderclient(self.ctx).AndRaise(cinder_exception.BadRequest(''))
cinder.cinderclient(self.ctx).AndRaise(
cinder_exception.ConnectionError(''))
self.mox.ReplayAll()
self.assertRaises(exception.VolumeNotFound,
self.api.get, self.ctx, volume_id)
self.assertRaises(exception.InvalidInput,
self.api.get, self.ctx, volume_id)
self.assertRaises(exception.CinderConnectionFailed,
self.api.get, self.ctx, volume_id)
def test_create(self):
cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
cinder._untranslate_volume_summary_view(self.ctx, {'id': 'created_id'})
self.mox.ReplayAll()
self.api.create(self.ctx, 1, '', '')
@mock.patch('nova.volume.cinder.cinderclient')
def test_create_failed(self, mock_cinderclient):
mock_cinderclient.return_value.volumes.create.side_effect = (
cinder_exception.BadRequest(''))
self.assertRaises(exception.InvalidInput,
self.api.create, self.ctx, 1, '', '')
@mock.patch('nova.volume.cinder.cinderclient')
def test_create_over_quota_failed(self, mock_cinderclient):
mock_cinderclient.return_value.volumes.create.side_effect = (
cinder_exception.OverLimit(413))
self.assertRaises(exception.OverQuota, self.api.create, self.ctx,
1, '', '')
mock_cinderclient.return_value.volumes.create.assert_called_once_with(
1, user_id=None, imageRef=None, availability_zone=None,
volume_type=None, description='', snapshot_id=None, name='',
project_id=None, metadata=None)
def test_get_all(self):
cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
cinder._untranslate_volume_summary_view(self.ctx,
{'id': 'id1'}).AndReturn('id1')
cinder._untranslate_volume_summary_view(self.ctx,
{'id': 'id2'}).AndReturn('id2')
self.mox.ReplayAll()
self.assertEqual(['id1', 'id2'], self.api.get_all(self.ctx))
def test_get_all_with_search(self):
cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
cinder._untranslate_volume_summary_view(self.ctx,
{'id': 'id1'}).AndReturn('id1')
self.mox.ReplayAll()
self.assertEqual(['id1'], self.api.get_all(self.ctx,
search_opts={'id': 'id1'}))
def test_check_attach_volume_status_error(self):
volume = {'id': 'fake', 'status': 'error'}
self.assertRaises(exception.InvalidVolume,
self.api.check_attach, self.ctx, volume)
def test_check_attach_volume_already_attached(self):
volume = {'id': 'fake', 'status': 'available'}
volume['attach_status'] = "attached"
self.assertRaises(exception.InvalidVolume,
self.api.check_attach, self.ctx, volume)
def test_check_attach_availability_zone_differs(self):
volume = {'id': 'fake', 'status': 'available'}
volume['attach_status'] = "detached"
instance = {'id': 'fake',
'availability_zone': 'zone1', 'host': 'fakehost'}
with mock.patch.object(cinder.az, 'get_instance_availability_zone',
side_effect=lambda context,
instance: 'zone1') as mock_get_instance_az:
cinder.CONF.set_override('cross_az_attach', False, group='cinder')
volume['availability_zone'] = 'zone1'
self.assertIsNone(self.api.check_attach(self.ctx,
volume, instance))
mock_get_instance_az.assert_called_once_with(self.ctx, instance)
mock_get_instance_az.reset_mock()
volume['availability_zone'] = 'zone2'
self.assertRaises(exception.InvalidVolume,
self.api.check_attach, self.ctx, volume, instance)
mock_get_instance_az.assert_called_once_with(self.ctx, instance)
mock_get_instance_az.reset_mock()
del instance['host']
volume['availability_zone'] = 'zone1'
self.assertIsNone(self.api.check_attach(
self.ctx, volume, instance))
self.assertFalse(mock_get_instance_az.called)
volume['availability_zone'] = 'zone2'
self.assertRaises(exception.InvalidVolume,
self.api.check_attach, self.ctx, volume, instance)
self.assertFalse(mock_get_instance_az.called)
cinder.CONF.reset()
def test_check_attach(self):
volume = {'status': 'available'}
volume['attach_status'] = "detached"
volume['availability_zone'] = 'zone1'
instance = {'availability_zone': 'zone1', 'host': 'fakehost'}
cinder.CONF.set_override('cross_az_attach', False, group='cinder')
with mock.patch.object(cinder.az, 'get_instance_availability_zone',
side_effect=lambda context, instance: 'zone1'):
self.assertIsNone(self.api.check_attach(
self.ctx, volume, instance))
cinder.CONF.reset()
def test_check_detach(self):
volume = {'id': 'fake', 'status': 'available'}
self.assertRaises(exception.InvalidVolume,
self.api.check_detach, self.ctx, volume)
volume['status'] = 'non-available'
self.assertIsNone(self.api.check_detach(self.ctx, volume))
def test_reserve_volume(self):
cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
self.mox.StubOutWithMock(self.cinderclient.volumes,
'reserve',
use_mock_anything=True)
self.cinderclient.volumes.reserve('id1')
self.mox.ReplayAll()
self.api.reserve_volume(self.ctx, 'id1')
def test_unreserve_volume(self):
cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
self.mox.StubOutWithMock(self.cinderclient.volumes,
'unreserve',
use_mock_anything=True)
self.cinderclient.volumes.unreserve('id1')
self.mox.ReplayAll()
self.api.unreserve_volume(self.ctx, 'id1')
def test_begin_detaching(self):
cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
self.mox.StubOutWithMock(self.cinderclient.volumes,
'begin_detaching',
use_mock_anything=True)
self.cinderclient.volumes.begin_detaching('id1')
self.mox.ReplayAll()
self.api.begin_detaching(self.ctx, 'id1')
def test_roll_detaching(self):
cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
self.mox.StubOutWithMock(self.cinderclient.volumes,
'roll_detaching',
use_mock_anything=True)
self.cinderclient.volumes.roll_detaching('id1')
self.mox.ReplayAll()
self.api.roll_detaching(self.ctx, 'id1')
@mock.patch('nova.volume.cinder.cinderclient')
def test_attach(self, mock_cinderclient):
mock_volumes = mock.MagicMock()
mock_cinderclient.return_value = mock.MagicMock(volumes=mock_volumes)
self.api.attach(self.ctx, 'id1', 'uuid', 'point')
mock_cinderclient.assert_called_once_with(self.ctx)
mock_volumes.attach.assert_called_once_with('id1', 'uuid', 'point',
mode='rw')
@mock.patch('nova.volume.cinder.cinderclient')
def test_attach_with_mode(self, mock_cinderclient):
mock_volumes = mock.MagicMock()
mock_cinderclient.return_value = mock.MagicMock(volumes=mock_volumes)
self.api.attach(self.ctx, 'id1', 'uuid', 'point', mode='ro')
mock_cinderclient.assert_called_once_with(self.ctx)
mock_volumes.attach.assert_called_once_with('id1', 'uuid', 'point',
mode='ro')
def test_detach(self):
cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
self.mox.StubOutWithMock(self.cinderclient.volumes,
'detach',
use_mock_anything=True)
self.cinderclient.volumes.detach('id1')
self.mox.ReplayAll()
self.api.detach(self.ctx, 'id1')
def test_initialize_connection(self):
cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
self.mox.StubOutWithMock(self.cinderclient.volumes,
'initialize_connection',
use_mock_anything=True)
self.cinderclient.volumes.initialize_connection('id1', 'connector')
self.mox.ReplayAll()
self.api.initialize_connection(self.ctx, 'id1', 'connector')
def test_terminate_connection(self):
cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
self.mox.StubOutWithMock(self.cinderclient.volumes,
'terminate_connection',
use_mock_anything=True)
self.cinderclient.volumes.terminate_connection('id1', 'connector')
self.mox.ReplayAll()
self.api.terminate_connection(self.ctx, 'id1', 'connector')
def test_delete(self):
cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
self.mox.StubOutWithMock(self.cinderclient.volumes,
'delete',
use_mock_anything=True)
self.cinderclient.volumes.delete('id1')
self.mox.ReplayAll()
self.api.delete(self.ctx, 'id1')
def test_update(self):
self.assertRaises(NotImplementedError,
self.api.update, self.ctx, '', '')
def test_get_snapshot(self):
snapshot_id = 'snapshot_id'
cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
cinder._untranslate_snapshot_summary_view(self.ctx,
{'id': snapshot_id})
self.mox.ReplayAll()
self.api.get_snapshot(self.ctx, snapshot_id)
def test_get_snapshot_failed(self):
snapshot_id = 'snapshot_id'
cinder.cinderclient(self.ctx).AndRaise(cinder_exception.NotFound(''))
cinder.cinderclient(self.ctx).AndRaise(
cinder_exception.ConnectionError(''))
self.mox.ReplayAll()
self.assertRaises(exception.SnapshotNotFound,
self.api.get_snapshot, self.ctx, snapshot_id)
self.assertRaises(exception.CinderConnectionFailed,
self.api.get_snapshot, self.ctx, snapshot_id)
def test_get_all_snapshots(self):
cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
cinder._untranslate_snapshot_summary_view(self.ctx,
{'id': 'id1'}).AndReturn('id1')
cinder._untranslate_snapshot_summary_view(self.ctx,
{'id': 'id2'}).AndReturn('id2')
self.mox.ReplayAll()
self.assertEqual(['id1', 'id2'], self.api.get_all_snapshots(self.ctx))
def test_create_snapshot(self):
cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
cinder._untranslate_snapshot_summary_view(self.ctx,
{'id': 'created_id'})
self.mox.ReplayAll()
self.api.create_snapshot(self.ctx, {'id': 'id1'}, '', '')
def test_create_force(self):
cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
cinder._untranslate_snapshot_summary_view(self.ctx,
{'id': 'created_id'})
self.mox.ReplayAll()
self.api.create_snapshot_force(self.ctx, {'id': 'id1'}, '', '')
def test_delete_snapshot(self):
cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
self.mox.StubOutWithMock(self.cinderclient.volume_snapshots,
'delete',
use_mock_anything=True)
self.cinderclient.volume_snapshots.delete('id1')
self.mox.ReplayAll()
self.api.delete_snapshot(self.ctx, 'id1')
def test_update_snapshot_status(self):
cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
self.mox.StubOutWithMock(self.cinderclient.volume_snapshots,
'update_snapshot_status',
use_mock_anything=True)
self.cinderclient.volume_snapshots.update_snapshot_status(
'id1', {'status': 'error', 'progress': '90%'})
self.mox.ReplayAll()
self.api.update_snapshot_status(self.ctx, 'id1', 'error')
def test_get_volume_encryption_metadata(self):
cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
self.mox.StubOutWithMock(self.cinderclient.volumes,
'get_encryption_metadata',
use_mock_anything=True)
self.cinderclient.volumes.\
get_encryption_metadata({'encryption_key_id': 'fake_key'})
self.mox.ReplayAll()
self.api.get_volume_encryption_metadata(self.ctx,
{'encryption_key_id':
'fake_key'})
| apache-2.0 | -7,361,225,462,233,326,000 | 42.217391 | 79 | 0.586519 | false |
ktsamis/repose | repose/command/install.py | 2 | 1838 | import concurrent.futures
from . import Command
from itertools import chain
import logging
from ..utils import blue
logger = logging.getLogger("repose.command.install")
class Install(Command):
command = True
def _run(self, repoq, target):
repositories = {}
for repa in self.repa:
try:
repositories.update(
repoq.solve_repa(repa, self.targets[target].products.get_base())
)
except ValueError as error:
logger.error(error)
for repo in chain.from_iterable(x for x in (y for y in repositories.values())):
addcmd = self.addcmd.format(
name=repo.name, url=repo.url, params="-cfkn" if repo.refresh else "-ckn"
)
if self.dryrun:
print(blue("{}".format(target)) + " - {}".format(addcmd))
else:
self.targets[target].run(addcmd)
self._report_target(target)
self.targets[target].run(self.refcmd)
if repositories.keys():
inscmd = self.ipdcmd.format(products=" ".join(repositories.keys()))
if self.dryrun:
print(blue(str(target)) + " - {}".format(inscmd))
else:
self.targets[target].run(inscmd)
self._report_target(target)
else:
logger.error("No products to install")
def run(self):
repoq = self._init_repoq()
self.targets.read_products()
self.targets.read_repos()
with concurrent.futures.ThreadPoolExecutor() as executor:
targets = [
executor.submit(self._run, repoq, target)
for target in self.targets.keys()
]
concurrent.futures.wait(targets)
self.targets.close()
| gpl-3.0 | 3,607,189,841,088,285,000 | 31.821429 | 88 | 0.550598 | false |
Kazade/NeHe-Website | google_appengine/lib/protorpc/protorpc/webapp/service_handlers.py | 2 | 28703 | #!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Handlers for remote services.
This module contains classes that may be used to build a service
on top of the App Engine Webapp framework.
The services request handler can be configured to handle requests in a number
of different request formats. All different request formats must have a way
to map the request to the service handlers defined request message.Message
class. The handler can also send a response in any format that can be mapped
from the response message.Message class.
Participants in an RPC:
There are four classes involved with the life cycle of an RPC.
Service factory: A user-defined service factory that is responsible for
instantiating an RPC service. The methods intended for use as RPC
methods must be decorated by the 'remote' decorator.
RPCMapper: Responsible for determining whether or not a specific request
matches a particular RPC format and translating between the actual
request/response and the underlying message types. A single instance of
an RPCMapper sub-class is required per service configuration. Each
mapper must be usable across multiple requests.
ServiceHandler: A webapp.RequestHandler sub-class that responds to the
webapp framework. It mediates between the RPCMapper and service
implementation class during a request. As determined by the Webapp
framework, a new ServiceHandler instance is created to handle each
user request. A handler is never used to handle more than one request.
ServiceHandlerFactory: A class that is responsible for creating new,
properly configured ServiceHandler instance for each request. The
factory is configured by providing it with a set of RPCMapper instances.
When the Webapp framework invokes the service handler, the handler
creates a new service class instance. The service class instance is
provided with a reference to the handler. A single instance of an
RPCMapper sub-class is required to configure each service. Each mapper
instance must be usable across multiple requests.
RPC mappers:
RPC mappers translate between a single HTTP based RPC protocol and the
    underlying service implementation.  Each RPC mapper must be configured
with the following information to determine if it is an appropriate
mapper for a given request:
http_methods: Set of HTTP methods supported by handler.
content_types: Set of supported content types.
default_content_type: Default content type for handler responses.
Built-in mapper implementations:
URLEncodedRPCMapper: Matches requests that are compatible with post
forms with the 'application/x-www-form-urlencoded' content-type
      (this content type is the default if none is specified).  It
translates post parameters into request parameters.
ProtobufRPCMapper: Matches requests that are compatible with post
forms with the 'application/x-google-protobuf' content-type. It
reads the contents of a binary post request.
Public Exceptions:
Error: Base class for service handler errors.
ServiceConfigurationError: Raised when a service not correctly configured.
RequestError: Raised by RPC mappers when there is an error in its request
or request format.
ResponseError: Raised by RPC mappers when there is an error in its response.
"""
__author__ = '[email protected] (Rafe Kaplan)'
import array
import cgi
import itertools
import logging
import re
import sys
import traceback
import urllib
import weakref
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util as webapp_util
from protorpc import messages
from protorpc import protobuf
from protorpc import protojson
from protorpc import protourlencode
from protorpc import registry
from protorpc import remote
from protorpc import util
from protorpc.webapp import forms
__all__ = [
'Error',
'RequestError',
'ResponseError',
'ServiceConfigurationError',
'DEFAULT_REGISTRY_PATH',
'ProtobufRPCMapper',
'RPCMapper',
'ServiceHandler',
'ServiceHandlerFactory',
'URLEncodedRPCMapper',
'JSONRPCMapper',
'service_mapping',
'run_services',
]
class Error(Exception):
"""Base class for all errors in service handlers module."""
class ServiceConfigurationError(Error):
"""When service configuration is incorrect."""
class RequestError(Error):
"""Error occurred when building request."""
class ResponseError(Error):
"""Error occurred when building response."""
_URLENCODED_CONTENT_TYPE = protourlencode.CONTENT_TYPE
_PROTOBUF_CONTENT_TYPE = protobuf.CONTENT_TYPE
_JSON_CONTENT_TYPE = protojson.CONTENT_TYPE
_EXTRA_JSON_CONTENT_TYPES = ['application/x-javascript',
'text/javascript',
'text/x-javascript',
'text/x-json',
'text/json',
]
# The whole method pattern is an optional regex. It contains a single
# group used for mapping to the query parameter. This is passed to the
# parameters of 'get' and 'post' on the ServiceHandler.
_METHOD_PATTERN = r'(?:\.([^?]*))?'
DEFAULT_REGISTRY_PATH = forms.DEFAULT_REGISTRY_PATH
class RPCMapper(object):
"""Interface to mediate between request and service object.
Request mappers are implemented to support various types of
RPC protocols. It is responsible for identifying whether a
  given request matches a particular protocol, resolving the remote
  method to invoke and mediating between the request and appropriate
protocol messages for the remote method.
"""
@util.positional(4)
def __init__(self,
http_methods,
default_content_type,
protocol,
content_types=None):
"""Constructor.
Args:
http_methods: Set of HTTP methods supported by mapper.
default_content_type: Default content type supported by mapper.
protocol: The protocol implementation. Must implement encode_message and
decode_message.
content_types: Set of additionally supported content types.
"""
self.__http_methods = frozenset(http_methods)
self.__default_content_type = default_content_type
self.__protocol = protocol
if content_types is None:
content_types = []
self.__content_types = frozenset([self.__default_content_type] +
content_types)
@property
def http_methods(self):
return self.__http_methods
@property
def default_content_type(self):
return self.__default_content_type
@property
def content_types(self):
return self.__content_types
def build_request(self, handler, request_type):
"""Build request message based on request.
Each request mapper implementation is responsible for converting a
request to an appropriate message instance.
Args:
handler: RequestHandler instance that is servicing request.
        Must be initialized with request object and have been previously
        determined to match the protocol of the RPCMapper.
request_type: Message type to build.
Returns:
Instance of request_type populated by protocol buffer in request body.
Raises:
RequestError if the mapper implementation is not able to correctly
convert the request to the appropriate message.
"""
try:
return self.__protocol.decode_message(request_type, handler.request.body)
except (messages.ValidationError, messages.DecodeError), err:
raise RequestError('Unable to parse request content: %s' % err)
def build_response(self, handler, response):
"""Build response based on service object response message.
Each request mapper implementation is responsible for converting a
response message to an appropriate handler response.
Args:
handler: RequestHandler instance that is servicing request.
        Must be initialized with request object and have been previously
        determined to match the protocol of the RPCMapper.
response: Response message as returned from the service object.
Raises:
ResponseError if the mapper implementation is not able to correctly
convert the message to an appropriate response.
"""
try:
encoded_message = self.__protocol.encode_message(response)
except messages.ValidationError, err:
raise ResponseError('Unable to encode message: %s' % err)
else:
handler.response.headers['Content-Type'] = self.default_content_type
handler.response.out.write(encoded_message)
class ServiceHandlerFactory(object):
"""Factory class used for instantiating new service handlers.
Normally a handler class is passed directly to the webapp framework
so that it can be simply instantiated to handle a single request.
The service handler, however, must be configured with additional
information so that it knows how to instantiate a service object.
This class acts the same as a normal RequestHandler class by
  overriding the __call__ method to correctly configure a ServiceHandler
instance with a new service object.
The factory must also provide a set of RPCMapper instances which
examine a request to determine what protocol is being used and mediates
between the request and the service object.
The mapping of a service handler must have a single group indicating the
part of the URL path that maps to the request method. This group must
exist but can be optional for the request (the group may be followed by
'?' in the regular expression matching the request).
Usage:
stock_factory = ServiceHandlerFactory(StockService)
... configure stock_factory by adding RPCMapper instances ...
application = webapp.WSGIApplication(
[stock_factory.mapping('/stocks')])
Default usage:
application = webapp.WSGIApplication(
[ServiceHandlerFactory.default(StockService).mapping('/stocks')])
"""
def __init__(self, service_factory):
"""Constructor.
Args:
service_factory: Service factory to instantiate and provide to
service handler.
"""
self.__service_factory = service_factory
self.__request_mappers = []
def all_request_mappers(self):
"""Get all request mappers.
Returns:
Iterator of all request mappers used by this service factory.
"""
return iter(self.__request_mappers)
def add_request_mapper(self, mapper):
"""Add request mapper to end of request mapper list."""
self.__request_mappers.append(mapper)
def __call__(self):
"""Construct a new service handler instance."""
return ServiceHandler(self, self.__service_factory())
@property
def service_factory(self):
"""Service factory associated with this factory."""
return self.__service_factory
@staticmethod
def __check_path(path):
"""Check a path parameter.
Make sure a provided path parameter is compatible with the
webapp URL mapping.
Args:
path: Path to check. This is a plain path, not a regular expression.
Raises:
      ValueError if path ends with /.
"""
if path.endswith('/'):
raise ValueError('Path %s must not end with /.' % path)
def mapping(self, path):
"""Convenience method to map service to application.
Args:
path: Path to map service to. It must be a simple path
with a leading / and no trailing /.
Returns:
Mapping from service URL to service handler factory.
"""
self.__check_path(path)
service_url_pattern = r'(%s)%s' % (path, _METHOD_PATTERN)
return service_url_pattern, self
@classmethod
def default(cls, service_factory, parameter_prefix=''):
"""Convenience method to map default factory configuration to application.
Creates a standardized default service factory configuration that pre-maps
    the protobuf and JSON protocol handlers to the factory.
Args:
service_factory: Service factory to instantiate and provide to
service handler.
      parameter_prefix: If provided, all the parameters in the form are
        expected to begin with that prefix.
Returns:
Mapping from service URL to service handler factory.
"""
factory = cls(service_factory)
factory.add_request_mapper(ProtobufRPCMapper())
factory.add_request_mapper(JSONRPCMapper())
return factory
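# Illustrative sketch (not part of the original module): configuring a factory
# by hand when the URL encoded mapper is wanted in addition to the mappers
# registered by ServiceHandlerFactory.default().  'StockService' stands in for
# any remote.Service subclass; the mapper classes are defined later in this
# module.
#
#   stock_factory = ServiceHandlerFactory(StockService)
#   stock_factory.add_request_mapper(URLEncodedRPCMapper())
#   stock_factory.add_request_mapper(ProtobufRPCMapper())
#   stock_factory.add_request_mapper(JSONRPCMapper())
#   application = webapp.WSGIApplication([stock_factory.mapping('/stocks')])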
class ServiceHandler(webapp.RequestHandler):
"""Web handler for RPC service.
Overridden methods:
get: All requests handled by 'handle' method. HTTP method stored in
attribute. Takes remote_method parameter as derived from the URL mapping.
post: All requests handled by 'handle' method. HTTP method stored in
attribute. Takes remote_method parameter as derived from the URL mapping.
redirect: Not implemented for this service handler.
New methods:
handle: Handle request for both GET and POST.
Attributes (in addition to attributes in RequestHandler):
service: Service instance associated with request being handled.
method: Method of request. Used by RPCMapper to determine match.
remote_method: Sub-path as provided to the 'get' and 'post' methods.
"""
def __init__(self, factory, service):
"""Constructor.
Args:
factory: Instance of ServiceFactory used for constructing new service
instances used for handling requests.
service: Service instance used for handling RPC.
"""
self.__factory = factory
self.__service = service
@property
def service(self):
return self.__service
def __show_info(self, service_path, remote_method):
self.response.headers['content-type'] = 'text/plain; charset=utf-8'
if remote_method:
self.response.out.write('%s.%s is a ProtoRPC method.\n\n' %(
service_path, remote_method))
else:
self.response.out.write('%s is a ProtoRPC service.\n\n' % service_path)
definition_name_function = getattr(self.__service, 'definition_name', None)
if definition_name_function:
definition_name = definition_name_function()
else:
definition_name = '%s.%s' % (self.__service.__module__,
self.__service.__class__.__name__)
self.response.out.write('Service %s\n\n' % definition_name)
self.response.out.write('More about ProtoRPC: '
'http://code.google.com/p/google-protorpc\n')
def get(self, service_path, remote_method):
"""Handler method for GET requests.
Args:
service_path: Service path derived from request URL.
remote_method: Sub-path after service path has been matched.
"""
if remote_method:
self.handle('GET', service_path, remote_method)
else:
self.response.headers['x-content-type-options'] = 'nosniff'
self.error(405)
if self.response.status in (405, 415) or not self.__get_content_type():
self.__show_info(service_path, remote_method)
def post(self, service_path, remote_method):
"""Handler method for POST requests.
Args:
service_path: Service path derived from request URL.
remote_method: Sub-path after service path has been matched.
"""
self.handle('POST', service_path, remote_method)
def redirect(self, uri, permanent=False):
"""Not supported for services."""
raise NotImplementedError('Services do not currently support redirection.')
def __send_error(self,
http_code,
status_state,
error_message,
mapper,
error_name=None):
status = remote.RpcStatus(state=status_state,
error_message=error_message,
error_name=error_name)
encoded_status = mapper.build_response(self, status)
self.response.headers['content-type'] = mapper.default_content_type
logging.error(error_message)
self.response.set_status(http_code, error_message)
def __send_simple_error(self, code, message):
"""Send error to caller without embedded message."""
self.response.headers['content-type'] = 'text/plain; charset=utf-8'
logging.error(message)
self.response.set_status(code, message)
def __get_content_type(self):
content_type = self.request.headers.get('content-type', None)
if not content_type:
content_type = self.request.environ.get('HTTP_CONTENT_TYPE', None)
if not content_type:
return None
# Lop off parameters from the end (for example content-encoding)
return content_type.split(';', 1)[0].lower()
def __headers(self, content_type):
for name in self.request.headers:
name = name.lower()
if name == 'content-type':
value = content_type
elif name == 'content-length':
value = str(len(self.request.body))
else:
value = self.request.headers.get(name, '')
yield name, value
def handle(self, http_method, service_path, remote_method):
"""Handle a service request.
The handle method will handle either a GET or POST response.
It is up to the individual mappers from the handler factory to determine
which request methods they can service.
If the protocol is not recognized, the request does not provide a correct
    request for that protocol, or the service object does not support the
    requested RPC method, an error code 400 is returned in the response.
Args:
http_method: HTTP method of request.
service_path: Service path derived from request URL.
remote_method: Sub-path after service path has been matched.
"""
self.response.headers['x-content-type-options'] = 'nosniff'
content_type = self.__get_content_type()
    # Provide server state to the service.  If the service object does not have
    # an "initialize_request_state" method, no attempt is made to assign state.
try:
state_initializer = self.service.initialize_request_state
except AttributeError:
pass
else:
server_port = self.request.environ.get('SERVER_PORT', None)
if server_port:
server_port = int(server_port)
request_state = remote.HttpRequestState(
remote_host=self.request.environ.get('REMOTE_HOST', None),
remote_address=self.request.environ.get('REMOTE_ADDR', None),
server_host=self.request.environ.get('SERVER_HOST', None),
server_port=server_port,
http_method=http_method,
service_path=service_path,
headers=list(self.__headers(content_type)))
state_initializer(request_state)
if not content_type:
self.__send_simple_error(400, 'Invalid RPC request: missing content-type')
return
# Search for mapper to mediate request.
for mapper in self.__factory.all_request_mappers():
if content_type in mapper.content_types:
break
else:
self.__send_simple_error(415,
'Unsupported content-type: %s' % content_type)
return
try:
if http_method not in mapper.http_methods:
self.__send_simple_error(405,
'Unsupported HTTP method: %s' % http_method)
return
try:
try:
method = getattr(self.service, remote_method)
method_info = method.remote
except AttributeError, err:
self.__send_error(
400, remote.RpcState.METHOD_NOT_FOUND_ERROR,
'Unrecognized RPC method: %s' % remote_method,
mapper)
return
request = mapper.build_request(self, method_info.request_type)
except (RequestError, messages.DecodeError), err:
self.__send_error(400,
remote.RpcState.REQUEST_ERROR,
'Error parsing ProtoRPC request (%s)' % err,
mapper)
return
try:
response = method(request)
except remote.ApplicationError, err:
self.__send_error(400,
remote.RpcState.APPLICATION_ERROR,
err.message,
mapper,
err.error_name)
return
mapper.build_response(self, response)
except Exception, err:
      logging.error('An unexpected error occurred when handling RPC: %s',
err, exc_info=1)
self.__send_error(500,
remote.RpcState.SERVER_ERROR,
'Internal Server Error',
mapper)
return
# TODO(rafek): Support tag-id only forms.
class URLEncodedRPCMapper(RPCMapper):
"""Request mapper for application/x-www-form-urlencoded forms.
This mapper is useful for building forms that can invoke RPC. Many services
are also configured to work using URL encoded request information because
of its perceived ease of programming and debugging.
  The remote method to invoke is determined from the request path by the
  service handler, so this mapper only needs to translate posted form
  parameters into the request message.
"""
def __init__(self, parameter_prefix=''):
"""Constructor.
Args:
parameter_prefix: If provided, all the parameters in the form are
expected to begin with that prefix.
"""
# Private attributes:
# __parameter_prefix: parameter prefix as provided by constructor
# parameter.
super(URLEncodedRPCMapper, self).__init__(['POST'],
_URLENCODED_CONTENT_TYPE,
self)
self.__parameter_prefix = parameter_prefix
def encode_message(self, message):
"""Encode a message using parameter prefix.
Args:
message: Message to URL Encode.
Returns:
URL encoded message.
"""
return protourlencode.encode_message(message,
prefix=self.__parameter_prefix)
@property
def parameter_prefix(self):
"""Prefix all form parameters are expected to begin with."""
return self.__parameter_prefix
def build_request(self, handler, request_type):
"""Build request from URL encoded HTTP request.
Constructs message from names of URL encoded parameters. If this service
handler has a parameter prefix, parameters must begin with it or are
ignored.
Args:
handler: RequestHandler instance that is servicing request.
request_type: Message type to build.
Returns:
Instance of request_type populated by protocol buffer in request
parameters.
Raises:
RequestError if message type contains nested message field or repeated
message field. Will raise RequestError if there are any repeated
parameters.
"""
request = request_type()
builder = protourlencode.URLEncodedRequestBuilder(
request, prefix=self.__parameter_prefix)
for argument in sorted(handler.request.arguments()):
values = handler.request.get_all(argument)
try:
builder.add_parameter(argument, values)
except messages.DecodeError, err:
raise RequestError(str(err))
return request
class ProtobufRPCMapper(RPCMapper):
"""Request mapper for application/x-protobuf service requests.
This mapper will parse protocol buffer from a POST body and return the request
as a protocol buffer.
"""
def __init__(self):
super(ProtobufRPCMapper, self).__init__(['POST'],
_PROTOBUF_CONTENT_TYPE,
protobuf)
class JSONRPCMapper(RPCMapper):
"""Request mapper for application/x-protobuf service requests.
This mapper will parse protocol buffer from a POST body and return the request
as a protocol buffer.
"""
def __init__(self):
super(JSONRPCMapper, self).__init__(
['POST'],
_JSON_CONTENT_TYPE,
protojson,
content_types=_EXTRA_JSON_CONTENT_TYPES)
def service_mapping(services,
registry_path=DEFAULT_REGISTRY_PATH):
"""Create a services mapping for use with webapp.
Creates basic default configuration and registration for ProtoRPC services.
Each service listed in the service mapping has a standard service handler
factory created for it.
  The list of mappings can contain either explicit (path, service) tuples or
  just services.  If a mapping is just a service, it will automatically
  be mapped to its default name.  For example:
package = 'my_package'
class MyService(remote.Service):
...
server_mapping([('/my_path', MyService), # Maps to /my_path
MyService, # Maps to /my_package/MyService
])
Specifying a service mapping:
Normally services are mapped to URL paths by specifying a tuple
(path, service):
path: The path the service resides on.
service: The service class or service factory for creating new instances
of the service. For more information about service factories, please
see remote.Service.new_factory.
If no tuple is provided, and therefore no path specified, a default path
  is calculated from the fully qualified service name, using a URL path
  separator for each of its components instead of a '.'.
Args:
services: Can be service type, service factory or string definition name of
service being mapped or list of tuples (path, service):
path: Path on server to map service to.
service: Service type, service factory or string definition name of
service being mapped.
Can also be a dict. If so, the keys are treated as the path and values as
the service.
registry_path: Path to give to registry service. Use None to disable
registry service.
Returns:
List of tuples defining a mapping of request handlers compatible with a
webapp application.
Raises:
ServiceConfigurationError when duplicate paths are provided.
"""
if isinstance(services, dict):
services = services.iteritems()
mapping = []
registry_map = {}
if registry_path is not None:
registry_service = registry.RegistryService.new_factory(registry_map)
services = list(services) + [(registry_path, registry_service)]
mapping.append((registry_path + r'/form(?:/)?',
forms.FormsHandler.new_factory(registry_path)))
mapping.append((registry_path + r'/form/(.+)', forms.ResourceHandler))
paths = set()
for service_item in services:
infer_path = not isinstance(service_item, (list, tuple))
if infer_path:
service = service_item
else:
service = service_item[1]
service_class = getattr(service, 'service_class', service)
if infer_path:
path = '/' + service_class.definition_name().replace('.', '/')
else:
path = service_item[0]
if path in paths:
raise ServiceConfigurationError(
'Path %r is already defined in service mapping' % path.encode('utf-8'))
else:
paths.add(path)
# Create service mapping for webapp.
new_mapping = ServiceHandlerFactory.default(service).mapping(path)
mapping.append(new_mapping)
# Update registry with service class.
registry_map[path] = service_class
return mapping
def run_services(services,
registry_path=DEFAULT_REGISTRY_PATH):
"""Handle CGI request using service mapping.
Args:
Same as service_mapping.
"""
mappings = service_mapping(services, registry_path=registry_path)
application = webapp.WSGIApplication(mappings)
webapp_util.run_wsgi_app(application)
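# Illustrative usage sketch (not part of the original module): serving a
# hypothetical service from a CGI script.  MyRequest, MyResponse and MyService
# are stand-ins for application-defined message and service classes.
#
#   from protorpc import messages
#   from protorpc import remote
#   from protorpc.webapp import service_handlers
#
#   class MyRequest(messages.Message):
#     text = messages.StringField(1)
#
#   class MyResponse(messages.Message):
#     text = messages.StringField(1)
#
#   class MyService(remote.Service):
#     @remote.method(MyRequest, MyResponse)
#     def echo(self, request):
#       return MyResponse(text=request.text)
#
#   if __name__ == '__main__':
#     service_handlers.run_services([('/my_service', MyService)])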
| bsd-3-clause | 5,336,288,615,576,117,000 | 33.876063 | 80 | 0.682716 | false |
dvliman/jaikuengine | .google_appengine/lib/django-1.4/tests/regressiontests/localflavor/se/tests.py | 33 | 6441 | # -*- coding: utf-8 -*-
from django.contrib.localflavor.se.forms import (SECountySelect,
SEOrganisationNumberField, SEPersonalIdentityNumberField,
SEPostalCodeField)
import datetime
from django.test import SimpleTestCase
class SELocalFlavorTests(SimpleTestCase):
def setUp(self):
# Mocking datetime.date to make sure
# localflavor.se.utils.validate_id_birthday works
class MockDate(datetime.date):
def today(cls):
return datetime.date(2008, 5, 14)
today = classmethod(today)
self._olddate = datetime.date
datetime.date = MockDate
def tearDown(self):
datetime.date = self._olddate
def test_SECountySelect(self):
f = SECountySelect()
out = u'''<select name="swedish_county">
<option value="AB">Stockholm</option>
<option value="AC">V\xe4sterbotten</option>
<option value="BD">Norrbotten</option>
<option value="C">Uppsala</option>
<option value="D">S\xf6dermanland</option>
<option value="E" selected="selected">\xd6sterg\xf6tland</option>
<option value="F">J\xf6nk\xf6ping</option>
<option value="G">Kronoberg</option>
<option value="H">Kalmar</option>
<option value="I">Gotland</option>
<option value="K">Blekinge</option>
<option value="M">Sk\xe5ne</option>
<option value="N">Halland</option>
<option value="O">V\xe4stra G\xf6taland</option>
<option value="S">V\xe4rmland</option>
<option value="T">\xd6rebro</option>
<option value="U">V\xe4stmanland</option>
<option value="W">Dalarna</option>
<option value="X">G\xe4vleborg</option>
<option value="Y">V\xe4sternorrland</option>
<option value="Z">J\xe4mtland</option>
</select>'''
self.assertHTMLEqual(f.render('swedish_county', 'E'), out)
def test_SEOrganizationNumberField(self):
error_invalid = [u'Enter a valid Swedish organisation number.']
valid = {
'870512-1989': '198705121989',
'19870512-1989': '198705121989',
'870512-2128': '198705122128',
'081015-6315': '190810156315',
'081015+6315': '180810156315',
'0810156315': '190810156315',
# Test some different organisation numbers
# IKEA Linköping
'556074-7569': '5560747569',
# Volvo Personvagnar
'556074-3089': '5560743089',
# LJS (organisation)
'822001-5476': '8220015476',
# LJS (organisation)
'8220015476': '8220015476',
# Katedralskolan Linköping (school)
'2120000449': '2120000449',
# Faux organisation number, which tests that the checksum can be 0
'232518-5060': '2325185060',
}
invalid = {
# Ordinary personal identity numbers for sole proprietors
# The same rules as for SEPersonalIdentityField applies here
'081015 6315': error_invalid,
'950231-4496': error_invalid,
'6914104499': error_invalid,
'950d314496': error_invalid,
'invalid!!!': error_invalid,
'870514-1111': error_invalid,
# Co-ordination number checking
# Co-ordination numbers are not valid organisation numbers
'870574-1315': error_invalid,
'870573-1311': error_invalid,
# Volvo Personvagnar, bad format
'556074+3089': error_invalid,
# Invalid checksum
'2120000441': error_invalid,
# Valid checksum but invalid organisation type
'1120000441': error_invalid,
}
self.assertFieldOutput(SEOrganisationNumberField, valid, invalid)
def test_SEPersonalIdentityNumberField(self):
error_invalid = [u'Enter a valid Swedish personal identity number.']
error_coord = [u'Co-ordination numbers are not allowed.']
valid = {
'870512-1989': '198705121989',
'870512-2128': '198705122128',
'19870512-1989': '198705121989',
'198705121989': '198705121989',
'081015-6315': '190810156315',
'0810156315': '190810156315',
# This is a "special-case" in the checksum calculation,
# where the sum is divisible by 10 (the checksum digit == 0)
'8705141060': '198705141060',
# + means that the person is older than 100 years
'081015+6315': '180810156315',
# Co-ordination number checking
'870574-1315': '198705741315',
'870574+1315': '188705741315',
'198705741315': '198705741315',
}
invalid = {
'081015 6315': error_invalid,
'950d314496': error_invalid,
'invalid!!!': error_invalid,
# Invalid dates
# February 31st does not exist
'950231-4496': error_invalid,
# Month 14 does not exist
'6914104499': error_invalid,
# There are no Swedish personal id numbers where year < 1800
'17430309-7135': error_invalid,
# Invalid checksum
'870514-1111': error_invalid,
# Co-ordination number with bad checksum
'870573-1311': error_invalid,
}
self.assertFieldOutput(SEPersonalIdentityNumberField, valid, invalid)
valid = {}
invalid = {
# Check valid co-ordination numbers that should not be accepted
# because of coordination_number=False
'870574-1315': error_coord,
'870574+1315': error_coord,
'8705741315': error_coord,
# Invalid co-ordination numbers should be treated as invalid, and not
# as co-ordination numbers
'870573-1311': error_invalid,
}
kwargs = {'coordination_number': False,}
self.assertFieldOutput(SEPersonalIdentityNumberField, valid, invalid,
field_kwargs=kwargs)
def test_SEPostalCodeField(self):
error_format = [u'Enter a Swedish postal code in the format XXXXX.']
valid = {
'589 37': '58937',
'58937': '58937',
}
invalid = {
'abcasfassadf': error_format,
# Only one space is allowed for separation
'589 37': error_format,
# The postal code must not start with 0
'01234': error_format,
}
self.assertFieldOutput(SEPostalCodeField, valid, invalid)
| apache-2.0 | -89,901,335,741,456,060 | 38.262195 | 81 | 0.59854 | false |
scripni/rethinkdb | test/rql_test/connections/http_support/flask/debughelpers.py | 777 | 3508 | # -*- coding: utf-8 -*-
"""
flask.debughelpers
~~~~~~~~~~~~~~~~~~
Various helpers to make the development experience better.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from ._compat import implements_to_string
class UnexpectedUnicodeError(AssertionError, UnicodeError):
"""Raised in places where we want some better error reporting for
unexpected unicode or binary data.
"""
@implements_to_string
class DebugFilesKeyError(KeyError, AssertionError):
"""Raised from request.files during debugging. The idea is that it can
provide a better error message than just a generic KeyError/BadRequest.
"""
def __init__(self, request, key):
form_matches = request.form.getlist(key)
buf = ['You tried to access the file "%s" in the request.files '
'dictionary but it does not exist. The mimetype for the request '
'is "%s" instead of "multipart/form-data" which means that no '
'file contents were transmitted. To fix this error you should '
'provide enctype="multipart/form-data" in your form.' %
(key, request.mimetype)]
if form_matches:
buf.append('\n\nThe browser instead transmitted some file names. '
'This was submitted: %s' % ', '.join('"%s"' % x
for x in form_matches))
self.msg = ''.join(buf)
def __str__(self):
return self.msg
class FormDataRoutingRedirect(AssertionError):
"""This exception is raised by Flask in debug mode if it detects a
redirect caused by the routing system when the request method is not
GET, HEAD or OPTIONS. Reasoning: form data will be dropped.
"""
def __init__(self, request):
exc = request.routing_exception
buf = ['A request was sent to this URL (%s) but a redirect was '
'issued automatically by the routing system to "%s".'
% (request.url, exc.new_url)]
# In case just a slash was appended we can be extra helpful
if request.base_url + '/' == exc.new_url.split('?')[0]:
buf.append(' The URL was defined with a trailing slash so '
'Flask will automatically redirect to the URL '
'with the trailing slash if it was accessed '
'without one.')
buf.append(' Make sure to directly send your %s-request to this URL '
'since we can\'t make browsers or HTTP clients redirect '
'with form data reliably or without user interaction.' %
request.method)
buf.append('\n\nNote: this exception is only raised in debug mode')
AssertionError.__init__(self, ''.join(buf).encode('utf-8'))
def attach_enctype_error_multidict(request):
"""Since Flask 0.8 we're monkeypatching the files object in case a
request is detected that does not use multipart form data but the files
object is accessed.
"""
oldcls = request.files.__class__
class newcls(oldcls):
def __getitem__(self, key):
try:
return oldcls.__getitem__(self, key)
except KeyError as e:
if key not in request.form:
raise
raise DebugFilesKeyError(request, key)
newcls.__name__ = oldcls.__name__
newcls.__module__ = oldcls.__module__
request.files.__class__ = newcls
| agpl-3.0 | 5,927,279,497,085,097,000 | 39.321839 | 81 | 0.602052 | false |
odicraig/kodi2odi | addons/plugin.program.super.favourites/selector.py | 2 | 2421 | #
# Copyright (C) 2014-2015
# Sean Poyser ([email protected])
#
# This Program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This Program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with XBMC; see the file COPYING. If not, write to
# the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# http://www.gnu.org/copyleft/gpl.html
#
import sys
import inspect
FILENAME = inspect.getfile(inspect.currentframe())
def _select(index):
import utils
#utils.DialogOK(str(index))
if index < 0:
return
import xbmc
import utils
view = 0
count = 10
while view < 1 and count > 0:
count -= 1
view = utils.getViewType()
xbmc.sleep(50)
if view < 1:
return
import xbmcgui
win = None
count = 10
while not win and count > 0:
count -= 1
try: win = xbmcgui.Window(utils.getCurrentWindowId())
except: xbmc.sleep(50)
if not win:
return
list = None
count = 10
    while not list and count > 0:
        count -= 1
        try: list = win.getControl(view)
        except: xbmc.sleep(50)
if not list:
return
xbmc.sleep(50)
try:
nItem = int(xbmcgui.Window(10000).getProperty('SF_NMR_ITEMS'))
if index >= nItem:
index = nItem-1
except:
pass
list.selectItem(index)
def select(index):
import utils
import xbmc
import os
HOME = utils.HOME
name = 'select'
script = FILENAME
args = '%d' % index
cmd = 'AlarmClock(%s,RunScript(%s,%s),%d,True)' % (name, script, args, 0)
xbmc.executebuiltin('CancelAlarm(%s,True)' % name)
xbmc.executebuiltin(cmd)
utils.log(cmd, True)
if __name__ == '__main__':
if FILENAME.endswith(sys.argv[0]):
try: _select(int(sys.argv[1]))
except: pass | gpl-3.0 | 759,642,096,171,010,400 | 22.72449 | 83 | 0.585295 | false |
kumajaya/android_kernel_samsung_universal5422 | tools/perf/tests/attr.py | 3174 | 9441 | #! /usr/bin/python
import os
import sys
import glob
import optparse
import tempfile
import logging
import shutil
import ConfigParser
class Fail(Exception):
def __init__(self, test, msg):
self.msg = msg
self.test = test
def getMsg(self):
return '\'%s\' - %s' % (self.test.path, self.msg)
class Unsup(Exception):
def __init__(self, test):
self.test = test
def getMsg(self):
return '\'%s\'' % self.test.path
class Event(dict):
terms = [
'cpu',
'flags',
'type',
'size',
'config',
'sample_period',
'sample_type',
'read_format',
'disabled',
'inherit',
'pinned',
'exclusive',
'exclude_user',
'exclude_kernel',
'exclude_hv',
'exclude_idle',
'mmap',
'comm',
'freq',
'inherit_stat',
'enable_on_exec',
'task',
'watermark',
'precise_ip',
'mmap_data',
'sample_id_all',
'exclude_host',
'exclude_guest',
'exclude_callchain_kernel',
'exclude_callchain_user',
'wakeup_events',
'bp_type',
'config1',
'config2',
'branch_sample_type',
'sample_regs_user',
'sample_stack_user',
]
def add(self, data):
for key, val in data:
log.debug(" %s = %s" % (key, val))
self[key] = val
def __init__(self, name, data, base):
log.debug(" Event %s" % name);
self.name = name;
self.group = ''
self.add(base)
self.add(data)
def compare_data(self, a, b):
# Allow multiple values in assignment separated by '|'
a_list = a.split('|')
b_list = b.split('|')
for a_item in a_list:
for b_item in b_list:
if (a_item == b_item):
return True
elif (a_item == '*') or (b_item == '*'):
return True
return False
def equal(self, other):
for t in Event.terms:
log.debug(" [%s] %s %s" % (t, self[t], other[t]));
if not self.has_key(t) or not other.has_key(t):
return False
if not self.compare_data(self[t], other[t]):
return False
return True
def diff(self, other):
for t in Event.terms:
if not self.has_key(t) or not other.has_key(t):
continue
if not self.compare_data(self[t], other[t]):
log.warning("expected %s=%s, got %s" % (t, self[t], other[t]))
# Test file description needs to have following sections:
# [config]
# - just single instance in file
# - needs to specify:
# 'command' - perf command name
# 'args' - special command arguments
# 'ret' - expected command return value (0 by default)
#
# [eventX:base]
# - one or multiple instances in file
# - expected values assignments
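# An illustrative description file following the layout above (values made up;
# 'base-record' would be a base event file in the same test directory):
#
#   [config]
#   command = record
#   args    = kill >/dev/null 2>&1
#   ret     = 1
#
#   [event:base-record]
#   fd            = 1
#   group_fd      = -1
#   sample_period = 4000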
class Test(object):
def __init__(self, path, options):
parser = ConfigParser.SafeConfigParser()
parser.read(path)
log.warning("running '%s'" % path)
self.path = path
self.test_dir = options.test_dir
self.perf = options.perf
self.command = parser.get('config', 'command')
self.args = parser.get('config', 'args')
try:
self.ret = parser.get('config', 'ret')
except:
self.ret = 0
self.expect = {}
self.result = {}
log.debug(" loading expected events");
self.load_events(path, self.expect)
def is_event(self, name):
if name.find("event") == -1:
return False
else:
return True
def load_events(self, path, events):
parser_event = ConfigParser.SafeConfigParser()
parser_event.read(path)
    # The event record section header contains the 'event' word,
    # optionally followed by ':' allowing the 'parent
    # event' to be loaded first as a base
for section in filter(self.is_event, parser_event.sections()):
parser_items = parser_event.items(section);
base_items = {}
# Read parent event if there's any
if (':' in section):
base = section[section.index(':') + 1:]
parser_base = ConfigParser.SafeConfigParser()
parser_base.read(self.test_dir + '/' + base)
base_items = parser_base.items('event')
e = Event(section, parser_items, base_items)
events[section] = e
def run_cmd(self, tempdir):
cmd = "PERF_TEST_ATTR=%s %s %s -o %s/perf.data %s" % (tempdir,
self.perf, self.command, tempdir, self.args)
ret = os.WEXITSTATUS(os.system(cmd))
log.info(" '%s' ret %d " % (cmd, ret))
if ret != int(self.ret):
raise Unsup(self)
def compare(self, expect, result):
match = {}
log.debug(" compare");
# For each expected event find all matching
# events in result. Fail if there's not any.
for exp_name, exp_event in expect.items():
exp_list = []
log.debug(" matching [%s]" % exp_name)
for res_name, res_event in result.items():
log.debug(" to [%s]" % res_name)
if (exp_event.equal(res_event)):
exp_list.append(res_name)
log.debug(" ->OK")
else:
log.debug(" ->FAIL");
log.debug(" match: [%s] matches %s" % (exp_name, str(exp_list)))
      # we did not find any matching event - fail
if (not exp_list):
exp_event.diff(res_event)
raise Fail(self, 'match failure');
match[exp_name] = exp_list
# For each defined group in the expected events
# check we match the same group in the result.
for exp_name, exp_event in expect.items():
group = exp_event.group
if (group == ''):
continue
for res_name in match[exp_name]:
res_group = result[res_name].group
if res_group not in match[group]:
raise Fail(self, 'group failure')
log.debug(" group: [%s] matches group leader %s" %
(exp_name, str(match[group])))
log.debug(" matched")
def resolve_groups(self, events):
for name, event in events.items():
group_fd = event['group_fd'];
if group_fd == '-1':
continue;
for iname, ievent in events.items():
if (ievent['fd'] == group_fd):
event.group = iname
log.debug('[%s] has group leader [%s]' % (name, iname))
break;
def run(self):
tempdir = tempfile.mkdtemp();
try:
# run the test script
self.run_cmd(tempdir);
# load events expectation for the test
log.debug(" loading result events");
for f in glob.glob(tempdir + '/event*'):
self.load_events(f, self.result);
# resolve group_fd to event names
self.resolve_groups(self.expect);
self.resolve_groups(self.result);
# do the expectation - results matching - both ways
self.compare(self.expect, self.result)
self.compare(self.result, self.expect)
finally:
# cleanup
shutil.rmtree(tempdir)
def run_tests(options):
for f in glob.glob(options.test_dir + '/' + options.test):
try:
Test(f, options).run()
except Unsup, obj:
log.warning("unsupp %s" % obj.getMsg())
def setup_log(verbose):
global log
level = logging.CRITICAL
if verbose == 1:
level = logging.WARNING
if verbose == 2:
level = logging.INFO
if verbose >= 3:
level = logging.DEBUG
log = logging.getLogger('test')
log.setLevel(level)
ch = logging.StreamHandler()
ch.setLevel(level)
formatter = logging.Formatter('%(message)s')
ch.setFormatter(formatter)
log.addHandler(ch)
USAGE = '''%s [OPTIONS]
-d dir # tests dir
-p path # perf binary
-t test # single test
-v # verbose level
''' % sys.argv[0]
def main():
parser = optparse.OptionParser(usage=USAGE)
parser.add_option("-t", "--test",
action="store", type="string", dest="test")
parser.add_option("-d", "--test-dir",
action="store", type="string", dest="test_dir")
parser.add_option("-p", "--perf",
action="store", type="string", dest="perf")
parser.add_option("-v", "--verbose",
action="count", dest="verbose")
options, args = parser.parse_args()
if args:
parser.error('FAILED wrong arguments %s' % ' '.join(args))
return -1
setup_log(options.verbose)
if not options.test_dir:
print 'FAILED no -d option specified'
sys.exit(-1)
if not options.test:
options.test = 'test*'
try:
run_tests(options)
except Fail, obj:
print "FAILED %s" % obj.getMsg();
sys.exit(-1)
sys.exit(0)
if __name__ == '__main__':
main()
| gpl-2.0 | -4,005,801,298,288,467,000 | 27.436747 | 79 | 0.51234 | false |
jelugbo/hebs_master | common/djangoapps/student/tests/test_create_account.py | 15 | 5962 | "Tests for account creation"
import ddt
import unittest
from django.contrib.auth.models import User
from django.test.client import RequestFactory
from django.conf import settings
from django.core.urlresolvers import reverse
from django.contrib.auth.models import AnonymousUser
from django.utils.importlib import import_module
from django.test import TestCase, TransactionTestCase
import mock
from user_api.models import UserPreference
from lang_pref import LANGUAGE_KEY
from edxmako.tests import mako_middleware_process_request
from external_auth.models import ExternalAuthMap
import student
TEST_CS_URL = 'https://comments.service.test:123/'
@ddt.ddt
class TestCreateAccount(TestCase):
"Tests for account creation"
def setUp(self):
self.username = "test_user"
self.url = reverse("create_account")
self.request_factory = RequestFactory()
self.params = {
"username": self.username,
"email": "[email protected]",
"password": "testpass",
"name": "Test User",
"honor_code": "true",
"terms_of_service": "true",
}
@ddt.data("en", "eo")
def test_default_lang_pref_saved(self, lang):
with mock.patch("django.conf.settings.LANGUAGE_CODE", lang):
response = self.client.post(self.url, self.params)
self.assertEqual(response.status_code, 200)
user = User.objects.get(username=self.username)
self.assertEqual(UserPreference.get_preference(user, LANGUAGE_KEY), lang)
@ddt.data("en", "eo")
def test_header_lang_pref_saved(self, lang):
response = self.client.post(self.url, self.params, HTTP_ACCEPT_LANGUAGE=lang)
self.assertEqual(response.status_code, 200)
user = User.objects.get(username=self.username)
self.assertEqual(UserPreference.get_preference(user, LANGUAGE_KEY), lang)
def base_extauth_bypass_sending_activation_email(self, bypass_activation_email_for_extauth_setting):
"""
Tests user creation without sending activation email when
doing external auth
"""
request = self.request_factory.post(self.url, self.params)
# now indicate we are doing ext_auth by setting 'ExternalAuthMap' in the session.
request.session = import_module(settings.SESSION_ENGINE).SessionStore() # empty session
extauth = ExternalAuthMap(external_id='[email protected]',
external_email='[email protected]',
internal_password=self.params['password'],
external_domain='shib:https://idp.stanford.edu/')
request.session['ExternalAuthMap'] = extauth
request.user = AnonymousUser()
mako_middleware_process_request(request)
with mock.patch('django.contrib.auth.models.User.email_user') as mock_send_mail:
student.views.create_account(request)
# check that send_mail is called
if bypass_activation_email_for_extauth_setting:
self.assertFalse(mock_send_mail.called)
else:
self.assertTrue(mock_send_mail.called)
@unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
@mock.patch.dict(settings.FEATURES, {'BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH': True, 'AUTOMATIC_AUTH_FOR_TESTING': False})
def test_extauth_bypass_sending_activation_email_with_bypass(self):
"""
Tests user creation without sending activation email when
settings.FEATURES['BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH']=True and doing external auth
"""
self.base_extauth_bypass_sending_activation_email(True)
@unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
@mock.patch.dict(settings.FEATURES, {'BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH': False, 'AUTOMATIC_AUTH_FOR_TESTING': False})
def test_extauth_bypass_sending_activation_email_without_bypass(self):
"""
Tests user creation without sending activation email when
settings.FEATURES['BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH']=False and doing external auth
"""
self.base_extauth_bypass_sending_activation_email(False)
@mock.patch.dict("student.models.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
@mock.patch("lms.lib.comment_client.User.base_url", TEST_CS_URL)
@mock.patch("lms.lib.comment_client.utils.requests.request", return_value=mock.Mock(status_code=200, text='{}'))
class TestCreateCommentsServiceUser(TransactionTestCase):
def setUp(self):
self.username = "test_user"
self.url = reverse("create_account")
self.params = {
"username": self.username,
"email": "[email protected]",
"password": "testpass",
"name": "Test User",
"honor_code": "true",
"terms_of_service": "true",
}
def test_cs_user_created(self, request):
"If user account creation succeeds, we should create a comments service user"
response = self.client.post(self.url, self.params)
self.assertEqual(response.status_code, 200)
self.assertTrue(request.called)
args, kwargs = request.call_args
self.assertEqual(args[0], 'put')
self.assertTrue(args[1].startswith(TEST_CS_URL))
self.assertEqual(kwargs['data']['username'], self.params['username'])
@mock.patch("student.models.Registration.register", side_effect=Exception)
def test_cs_user_not_created(self, register, request):
"If user account creation fails, we should not create a comments service user"
try:
response = self.client.post(self.url, self.params)
except:
pass
with self.assertRaises(User.DoesNotExist):
User.objects.get(username=self.username)
self.assertTrue(register.called)
self.assertFalse(request.called)
| agpl-3.0 | 1,326,592,727,946,035,200 | 41.892086 | 124 | 0.668064 | false |
Iconoclasteinc/tgit | test/test_announcer.py | 1 | 1124 | import pytest
from flexmock import flexmock
from tgit.announcer import Announcer
pytestmark = pytest.mark.unit
class Listener(object):
def event_occurred(self, event):
pass
@pytest.fixture
def announcer():
return Announcer()
@pytest.fixture
def event():
return "event"
def test_announces_to_all_subscribed_listeners(announcer, event):
_listeners_are_subscribed(announcer, event)
announcer.event_occurred(event)
def test_stops_announcing_to_unregistered_listeners(announcer, event):
should_not_notified = flexmock(Listener())
announcer.addListener(should_not_notified)
_listeners_are_subscribed(announcer, event)
announcer.removeListener(should_not_notified)
should_not_notified.should_receive("event_occurred").never()
announcer.event_occurred(event)
def _listeners_are_subscribed(announcer, event):
for i in range(5):
_subscribe_listener(announcer, event)
def _subscribe_listener(announcer, event):
listener = flexmock(Listener())
listener.should_receive("event_occurred").with_args(event).once()
announcer.addListener(listener)
| gpl-3.0 | -6,832,353,462,879,023,000 | 22.416667 | 70 | 0.738434 | false |
kamyu104/django | tests/auth_tests/test_decorators.py | 279 | 4124 | from django.conf import settings
from django.contrib.auth import models
from django.contrib.auth.decorators import login_required, permission_required
from django.core.exceptions import PermissionDenied
from django.http import HttpResponse
from django.test import TestCase, override_settings
from django.test.client import RequestFactory
from .test_views import AuthViewsTestCase
@override_settings(ROOT_URLCONF='auth_tests.urls')
class LoginRequiredTestCase(AuthViewsTestCase):
"""
Tests the login_required decorators
"""
def testCallable(self):
"""
Check that login_required is assignable to callable objects.
"""
class CallableView(object):
def __call__(self, *args, **kwargs):
pass
login_required(CallableView())
def testView(self):
"""
Check that login_required is assignable to normal views.
"""
def normal_view(request):
pass
login_required(normal_view)
def testLoginRequired(self, view_url='/login_required/', login_url=None):
"""
Check that login_required works on a simple view wrapped in a
login_required decorator.
"""
if login_url is None:
login_url = settings.LOGIN_URL
response = self.client.get(view_url)
self.assertEqual(response.status_code, 302)
self.assertIn(login_url, response.url)
self.login()
response = self.client.get(view_url)
self.assertEqual(response.status_code, 200)
def testLoginRequiredNextUrl(self):
"""
Check that login_required works on a simple view wrapped in a
login_required decorator with a login_url set.
"""
self.testLoginRequired(view_url='/login_required_login_url/',
login_url='/somewhere/')
class PermissionsRequiredDecoratorTest(TestCase):
"""
Tests for the permission_required decorator
"""
def setUp(self):
self.user = models.User.objects.create(username='joe', password='qwerty')
self.factory = RequestFactory()
# Add permissions auth.add_customuser and auth.change_customuser
perms = models.Permission.objects.filter(codename__in=('add_customuser', 'change_customuser'))
self.user.user_permissions.add(*perms)
def test_many_permissions_pass(self):
@permission_required(['auth.add_customuser', 'auth.change_customuser'])
def a_view(request):
return HttpResponse()
request = self.factory.get('/rand')
request.user = self.user
resp = a_view(request)
self.assertEqual(resp.status_code, 200)
def test_many_permissions_in_set_pass(self):
@permission_required({'auth.add_customuser', 'auth.change_customuser'})
def a_view(request):
return HttpResponse()
request = self.factory.get('/rand')
request.user = self.user
resp = a_view(request)
self.assertEqual(resp.status_code, 200)
def test_single_permission_pass(self):
@permission_required('auth.add_customuser')
def a_view(request):
return HttpResponse()
request = self.factory.get('/rand')
request.user = self.user
resp = a_view(request)
self.assertEqual(resp.status_code, 200)
def test_permissioned_denied_redirect(self):
@permission_required(['auth.add_customuser', 'auth.change_customuser', 'non-existent-permission'])
def a_view(request):
return HttpResponse()
request = self.factory.get('/rand')
request.user = self.user
resp = a_view(request)
self.assertEqual(resp.status_code, 302)
def test_permissioned_denied_exception_raised(self):
@permission_required([
'auth.add_customuser', 'auth.change_customuser', 'non-existent-permission'
], raise_exception=True)
def a_view(request):
return HttpResponse()
request = self.factory.get('/rand')
request.user = self.user
self.assertRaises(PermissionDenied, a_view, request)
| bsd-3-clause | 7,135,900,376,741,808,000 | 33.949153 | 106 | 0.645247 | false |
renyi533/tensorflow | tensorflow/python/data/util/options.py | 22 | 4914 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for tf.data options."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def _internal_attr_name(name):
return "_" + name
class OptionsBase(object):
"""Base class for representing a set of tf.data options.
Attributes:
_options: Stores the option values.
"""
def __init__(self):
# NOTE: Cannot use `self._options` here as we override `__setattr__`
object.__setattr__(self, "_options", {})
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
for name in set(self._options) | set(other._options): # pylint: disable=protected-access
if getattr(self, name) != getattr(other, name):
return False
return True
def __ne__(self, other):
if isinstance(other, self.__class__):
return not self.__eq__(other)
else:
return NotImplemented
def __setattr__(self, name, value):
if hasattr(self, name):
object.__setattr__(self, name, value)
else:
raise AttributeError(
"Cannot set the property %s on %s." % (name, type(self).__name__))
def create_option(name, ty, docstring, default_factory=lambda: None):
"""Creates a type-checked property.
Args:
name: The name to use.
ty: The type to use. The type of the property will be validated when it
is set.
docstring: The docstring to use.
default_factory: A callable that takes no arguments and returns a default
value to use if not set.
Returns:
A type-checked property.
"""
def get_fn(option):
# pylint: disable=protected-access
if name not in option._options:
option._options[name] = default_factory()
return option._options.get(name)
def set_fn(option, value):
if not isinstance(value, ty):
raise TypeError("Property \"%s\" must be of type %s, got: %r (type: %r)" %
(name, ty, value, type(value)))
option._options[name] = value # pylint: disable=protected-access
return property(get_fn, set_fn, None, docstring)
def merge_options(*options_list):
"""Merges the given options, returning the result as a new options object.
The input arguments are expected to have a matching type that derives from
`OptionsBase` (and thus each represent a set of options). The method outputs
an object of the same type created by merging the sets of options represented
by the input arguments.
The sets of options can be merged as long as there does not exist an option
with different non-default values.
If an option is an instance of `OptionsBase` itself, then this method is
applied recursively to the set of options represented by this option.
Args:
*options_list: options to merge
Raises:
TypeError: if the input arguments are incompatible or not derived from
`OptionsBase`
ValueError: if the given options cannot be merged
Returns:
A new options object which is the result of merging the given options.
"""
if len(options_list) < 1:
raise ValueError("At least one options should be provided")
result_type = type(options_list[0])
for options in options_list:
if not isinstance(options, result_type):
raise TypeError("Incompatible options type: %r vs %r" % (type(options),
result_type))
if not isinstance(options_list[0], OptionsBase):
raise TypeError("The inputs should inherit from `OptionsBase`")
default_options = result_type()
result = result_type()
for options in options_list:
# Iterate over all set options and merge the into the result.
for name in options._options: # pylint: disable=protected-access
this = getattr(result, name)
that = getattr(options, name)
default = getattr(default_options, name)
if that == default:
continue
elif this == default:
setattr(result, name, that)
elif isinstance(this, OptionsBase):
setattr(result, name, merge_options(this, that))
elif this != that:
raise ValueError(
"Cannot merge incompatible values (%r and %r) of option: %s" %
(this, that, name))
return result
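# Illustrative sketch (not part of the original module): a minimal options
# class built with `create_option`, plus the behavior of `merge_options` on
# two instances.  The option name used here is made up for demonstration.
class _ExampleOptions(OptionsBase):
  """Example options type used only to illustrate the helpers above."""
  buffer_size = create_option(
      name="buffer_size",
      ty=int,
      docstring="Illustrative option: number of elements to buffer.",
      default_factory=lambda: 1)
# Expected behavior of the helpers on the example class:
#   a = _ExampleOptions()
#   a.buffer_size = 8               # explicitly set, type-checked as `int`
#   b = _ExampleOptions()           # left at the default value (1)
#   merged = merge_options(a, b)
#   assert merged.buffer_size == 8  # the non-default value wins in the merge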
| apache-2.0 | -3,389,251,721,259,265,000 | 33.125 | 93 | 0.661579 | false |
ksmaheshkumar/grr | parsers/ie_history.py | 8 | 5599 | #!/usr/bin/env python
# Copyright 2011 Google Inc. All Rights Reserved.
"""Parser for IE index.dat files.
Note that this is a very naive and incomplete implementation and should be
replaced with a more intelligent one. Do not implement anything based on this
code, it is a placeholder for something real.
For anyone who wants a useful reference, see this:
http://heanet.dl.sourceforge.net/project/libmsiecf/Documentation/MSIE%20Cache%20
File%20format/MSIE%20Cache%20File%20%28index.dat%29%20format.pdf
"""
import datetime
import glob
import operator
import os
import struct
import sys
import urlparse
import logging
from grr.lib import parsers
from grr.lib.rdfvalues import webhistory
# Difference between 1 Jan 1601 and 1 Jan 1970, expressed in microseconds.
WIN_UNIX_DIFF_MSECS = 11644473600 * 1e6
class IEHistoryParser(parsers.FileParser):
"""Parse IE index.dat files into BrowserHistoryItem objects."""
output_types = ["BrowserHistoryItem"]
supported_artifacts = ["InternetExplorerHistory"]
def Parse(self, stat, file_object, knowledge_base):
"""Parse the History file."""
_, _ = stat, knowledge_base
# TODO(user): Convert this to use the far more intelligent plaso parser.
ie = IEParser(file_object)
for dat in ie.Parse():
yield webhistory.BrowserHistoryItem(
url=dat["url"], domain=urlparse.urlparse(dat["url"]).netloc,
access_time=dat.get("mtime"),
program_name="Internet Explorer", source_urn=stat.aff4path)
class IEParser(object):
"""Parser object for index.dat files.
The file format for IE index.dat files is somewhat poorly documented.
The following implementation is based on information from:
http://www.forensicswiki.org/wiki/Internet_Explorer_History_File_Format
Returns results in chronological order based on mtime
"""
FILE_HEADER = "Client UrlCache MMF Ver 5.2"
BLOCK_SIZE = 0x80
def __init__(self, input_obj):
"""Initialize.
Args:
input_obj: A file like object to read the index.dat from.
"""
self._file = input_obj
self._entries = []
def Parse(self):
"""Parse the file."""
if not self._file:
logging.error("Couldn't open file")
return
# Limit read size to 5MB.
self.input_dat = self._file.read(1024 * 1024 * 5)
if not self.input_dat.startswith(self.FILE_HEADER):
logging.error("Invalid index.dat file %s", self._file)
return
# Events aren't time ordered in the history file, so we collect them all
# then sort.
events = []
for event in self._DoParse():
events.append(event)
for event in sorted(events, key=operator.itemgetter("mtime")):
yield event
def _GetRecord(self, offset, record_size):
"""Retrieve a single record from the file.
Args:
offset: offset from start of input_dat where header starts
record_size: length of the header according to file (untrusted)
Returns:
A dict containing a single browser history record.
"""
record_header = "<4sLQQL"
get4 = lambda x: struct.unpack("<L", self.input_dat[x:x + 4])[0]
url_offset = struct.unpack("B", self.input_dat[offset + 52:offset + 53])[0]
if url_offset in [0xFF, 0xFE]:
return None
data_offset = get4(offset + 68)
data_size = get4(offset + 72)
start_pos = offset + data_offset
data = struct.unpack("{0}s".format(data_size),
self.input_dat[start_pos:start_pos + data_size])[0]
fmt = record_header
unknown_size = url_offset - struct.calcsize(fmt)
fmt += "{0}s".format(unknown_size)
fmt += "{0}s".format(record_size - struct.calcsize(fmt))
dat = struct.unpack(fmt, self.input_dat[offset:offset + record_size])
header, blocks, mtime, ctime, ftime, _, url = dat
url = url.split(chr(0x00))[0]
if mtime: mtime = mtime/10 - WIN_UNIX_DIFF_MSECS
if ctime: ctime = ctime/10 - WIN_UNIX_DIFF_MSECS
return {"header": header, # the header
"blocks": blocks, # number of blocks
"urloffset": url_offset, # offset of URL in file
"data_offset": data_offset, # offset for start of data
"data_size": data_size, # size of data
"data": data, # actual data
"mtime": mtime, # modified time
"ctime": ctime, # created time
"ftime": ftime, # file time
"url": url # the url visited
}
def _DoParse(self):
"""Parse a file for history records yielding dicts.
Yields:
Dicts containing browser history
"""
get4 = lambda x: struct.unpack("<L", self.input_dat[x:x + 4])[0]
filesize = get4(0x1c)
offset = get4(0x20)
coffset = offset
while coffset < filesize:
etype = struct.unpack("4s", self.input_dat[coffset:coffset + 4])[0]
if etype == "REDR":
pass
elif etype in ["URL "]:
# Found a valid record
reclen = get4(coffset + 4) * self.BLOCK_SIZE
yield self._GetRecord(coffset, reclen)
coffset += self.BLOCK_SIZE
def main(argv):
if len(argv) < 2:
print "Usage: {0} index.dat".format(os.path.basename(argv[0]))
else:
files_to_process = []
for input_glob in argv[1:]:
files_to_process += glob.glob(input_glob)
for input_file in files_to_process:
ie = IEParser(open(input_file))
for dat in ie.Parse():
dat["ctime"] = datetime.datetime.utcfromtimestamp(dat["ctime"] / 1e6)
print "{ctime} {header} {url}".format(**dat)
if __name__ == "__main__":
main(sys.argv)
| apache-2.0 | -2,989,703,434,446,507,500 | 31.364162 | 80 | 0.636721 | false |
KNMI/VERCE | verce-hpc-pe/src/networkx/generators/tests/test_degree_seq.py | 61 | 5734 | #!/usr/bin/env python
from nose.tools import *
import networkx
from networkx import *
from networkx.generators.degree_seq import *
from networkx.utils import uniform_sequence,powerlaw_sequence
def test_configuration_model_empty():
# empty graph has empty degree sequence
deg_seq=[]
G=configuration_model(deg_seq)
assert_equal(G.degree(), {})
def test_configuration_model():
deg_seq=[5,3,3,3,3,2,2,2,1,1,1]
G=configuration_model(deg_seq,seed=12345678)
assert_equal(sorted(G.degree().values(),reverse=True),
[5, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1])
assert_equal(sorted(G.degree(range(len(deg_seq))).values(),
reverse=True),
[5, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1])
# test that fixed seed delivers the same graph
deg_seq=[3,3,3,3,3,3,3,3,3,3,3,3]
G1=configuration_model(deg_seq,seed=1000)
G2=configuration_model(deg_seq,seed=1000)
assert_true(is_isomorphic(G1,G2))
G1=configuration_model(deg_seq,seed=10)
G2=configuration_model(deg_seq,seed=10)
assert_true(is_isomorphic(G1,G2))
@raises(NetworkXError)
def test_configuation_raise():
z=[5,3,3,3,3,2,2,2,1,1,1]
G = configuration_model(z, create_using=DiGraph())
@raises(NetworkXError)
def test_configuation_raise_odd():
z=[5,3,3,3,3,2,2,2,1,1]
G = configuration_model(z, create_using=DiGraph())
@raises(NetworkXError)
def test_directed_configuation_raise_unequal():
zin = [5,3,3,3,3,2,2,2,1,1]
zout = [5,3,3,3,3,2,2,2,1,2]
G = directed_configuration_model(zin, zout)
def test_directed_configuation_mode():
G = directed_configuration_model([],[],seed=0)
assert_equal(len(G),0)
def test_expected_degree_graph_empty():
# empty graph has empty degree sequence
deg_seq=[]
G=expected_degree_graph(deg_seq)
assert_equal(G.degree(), {})
def test_expected_degree_graph():
# test that fixed seed delivers the same graph
deg_seq=[3,3,3,3,3,3,3,3,3,3,3,3]
G1=expected_degree_graph(deg_seq,seed=1000)
G2=expected_degree_graph(deg_seq,seed=1000)
assert_true(is_isomorphic(G1,G2))
G1=expected_degree_graph(deg_seq,seed=10)
G2=expected_degree_graph(deg_seq,seed=10)
assert_true(is_isomorphic(G1,G2))
def test_expected_degree_graph_selfloops():
deg_seq=[3,3,3,3,3,3,3,3,3,3,3,3]
G1=expected_degree_graph(deg_seq,seed=1000, selfloops=False)
G2=expected_degree_graph(deg_seq,seed=1000, selfloops=False)
assert_true(is_isomorphic(G1,G2))
def test_expected_degree_graph_skew():
deg_seq=[10,2,2,2,2]
G1=expected_degree_graph(deg_seq,seed=1000)
G2=expected_degree_graph(deg_seq,seed=1000)
assert_true(is_isomorphic(G1,G2))
def test_havel_hakimi_construction():
G = havel_hakimi_graph([])
assert_equal(len(G),0)
z=[1000,3,3,3,3,2,2,2,1,1,1]
assert_raises(networkx.exception.NetworkXError,
havel_hakimi_graph, z)
z=["A",3,3,3,3,2,2,2,1,1,1]
assert_raises(networkx.exception.NetworkXError,
havel_hakimi_graph, z)
z=[5,4,3,3,3,2,2,2]
G=havel_hakimi_graph(z)
G=configuration_model(z)
z=[6,5,4,4,2,1,1,1]
assert_raises(networkx.exception.NetworkXError,
havel_hakimi_graph, z)
z=[10,3,3,3,3,2,2,2,2,2,2]
G=havel_hakimi_graph(z)
assert_raises(networkx.exception.NetworkXError,
havel_hakimi_graph, z, create_using=DiGraph())
def test_directed_havel_hakimi():
# Test range of valid directed degree sequences
n, r = 100, 10
p = 1.0 / r
for i in range(r):
G1 = nx.erdos_renyi_graph(n,p*(i+1),None,True)
din = list(G1.in_degree().values())
dout = list(G1.out_degree().values())
G2 = nx.directed_havel_hakimi_graph(din, dout)
assert_true(din == list(G2.in_degree().values()))
assert_true(dout == list(G2.out_degree().values()))
# Test non-graphical sequence
dout = [1000,3,3,3,3,2,2,2,1,1,1]
din=[103,102,102,102,102,102,102,102,102,102]
assert_raises(nx.exception.NetworkXError,
nx.directed_havel_hakimi_graph, din, dout)
# Test valid sequences
dout=[1, 1, 1, 1, 1, 2, 2, 2, 3, 4]
din=[2, 2, 2, 2, 2, 2, 2, 2, 0, 2]
G2 = nx.directed_havel_hakimi_graph(din, dout)
assert_true(din == list(G2.in_degree().values()))
assert_true(dout == list(G2.out_degree().values()))
# Test unequal sums
din=[2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
assert_raises(nx.exception.NetworkXError,
nx.directed_havel_hakimi_graph, din, dout)
# Test for negative values
din=[2, 2, 2, 2, 2, 2, 2, 2, 2, 2, -2]
assert_raises(nx.exception.NetworkXError,
nx.directed_havel_hakimi_graph, din, dout)
def test_degree_sequence_tree():
z=[1, 1, 1, 1, 1, 2, 2, 2, 3, 4]
G=degree_sequence_tree(z)
assert_true(len(G.nodes())==len(z))
assert_true(len(G.edges())==sum(z)/2)
assert_raises(networkx.exception.NetworkXError,
degree_sequence_tree, z, create_using=DiGraph())
z=[1, 1, 1, 1, 1, 1, 2, 2, 2, 3, 4]
assert_raises(networkx.exception.NetworkXError,
degree_sequence_tree, z)
def test_random_degree_sequence_graph():
d=[1,2,2,3]
G = nx.random_degree_sequence_graph(d)
assert_equal(d, list(G.degree().values()))
def test_random_degree_sequence_graph_raise():
z=[1, 1, 1, 1, 1, 1, 2, 2, 2, 3, 4]
assert_raises(networkx.exception.NetworkXUnfeasible,
random_degree_sequence_graph, z)
def test_random_degree_sequence_large():
G = nx.fast_gnp_random_graph(100,0.1)
d = G.degree().values()
G = nx.random_degree_sequence_graph(d, seed=0)
assert_equal(sorted(d), sorted(list(G.degree().values())))
| mit | 5,886,845,972,845,971,000 | 32.928994 | 66 | 0.627834 | false |
lewixliu/git-repo | subcmds/upload.py | 4 | 21890 | # -*- coding:utf-8 -*-
#
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import copy
import re
import sys
from command import InteractiveCommand
from editor import Editor
from error import HookError, UploadError
from git_command import GitCommand
from git_refs import R_HEADS
from hooks import RepoHook
from pyversion import is_python3
if not is_python3():
input = raw_input # noqa: F821
else:
unicode = str
UNUSUAL_COMMIT_THRESHOLD = 5
def _ConfirmManyUploads(multiple_branches=False):
if multiple_branches:
    print('ATTENTION: One or more branches have an unusually high number '
'of commits.')
else:
print('ATTENTION: You are uploading an unusually high number of commits.')
print('YOU PROBABLY DO NOT MEAN TO DO THIS. (Did you rebase across '
'branches?)')
answer = input("If you are sure you intend to do this, type 'yes': ").strip()
return answer == "yes"
def _die(fmt, *args):
msg = fmt % args
print('error: %s' % msg, file=sys.stderr)
sys.exit(1)
def _SplitEmails(values):
result = []
for value in values:
result.extend([s.strip() for s in value.split(',')])
return result
class Upload(InteractiveCommand):
common = True
helpSummary = "Upload changes for code review"
helpUsage = """
%prog [--re --cc] [<project>]...
"""
helpDescription = """
The '%prog' command is used to send changes to the Gerrit Code
Review system. It searches for topic branches in local projects
that have not yet been published for review. If multiple topic
branches are found, '%prog' opens an editor to allow the user to
select which branches to upload.
'%prog' searches for uploadable changes in all projects listed at
the command line. Projects can be specified either by name, or by
a relative or absolute path to the project's local directory. If no
projects are specified, '%prog' will search for uploadable changes
in all projects listed in the manifest.
If the --reviewers or --cc options are passed, those emails are
added to the respective list of users, and emails are sent to any
new users. Users passed as --reviewers must already be registered
with the code review system, or the upload will fail.
# Configuration
review.URL.autoupload:
To disable the "Upload ... (y/N)?" prompt, you can set a per-project
or global Git configuration option. If review.URL.autoupload is set
to "true" then repo will assume you always answer "y" at the prompt,
and will not prompt you further. If it is set to "false" then repo
will assume you always answer "n", and will abort.
review.URL.autoreviewer:
To automatically append a user or mailing list to reviews, you can set
a per-project or global Git option to do so.
review.URL.autocopy:
To automatically copy a user or mailing list to all uploaded reviews,
you can set a per-project or global Git option to do so. Specifically,
review.URL.autocopy can be set to a comma separated list of reviewers
who you always want copied on all uploads with a non-empty --re
argument.
review.URL.username:
Override the username used to connect to Gerrit Code Review.
By default the local part of the email address is used.
The URL must match the review URL listed in the manifest XML file,
or in the .git/config within the project. For example:
[remote "origin"]
url = git://git.example.com/project.git
review = http://review.example.com/
[review "http://review.example.com/"]
autoupload = true
autocopy = [email protected],[email protected]
review.URL.uploadtopic:
To add a topic branch whenever uploading a commit, you can set a
per-project or global Git option to do so. If review.URL.uploadtopic
is set to "true" then repo will assume you always want the equivalent
of the -t option to the repo command. If unset or set to "false" then
repo will make use of only the command line option.
review.URL.uploadhashtags:
To add hashtags whenever uploading a commit, you can set a per-project
or global Git option to do so. The value of review.URL.uploadhashtags
will be used as comma delimited hashtags like the --hashtag option.
review.URL.uploadlabels:
To add labels whenever uploading a commit, you can set a per-project
or global Git option to do so. The value of review.URL.uploadlabels
will be used as comma delimited labels like the --label option.
review.URL.uploadnotify:
Control e-mail notifications when uploading.
https://gerrit-review.googlesource.com/Documentation/user-upload.html#notify
# References
Gerrit Code Review: https://www.gerritcodereview.com/
"""
def _Options(self, p):
p.add_option('-t',
dest='auto_topic', action='store_true',
help='Send local branch name to Gerrit Code Review')
p.add_option('--hashtag', '--ht',
dest='hashtags', action='append', default=[],
help='Add hashtags (comma delimited) to the review.')
p.add_option('--hashtag-branch', '--htb',
action='store_true',
help='Add local branch name as a hashtag.')
p.add_option('-l', '--label',
dest='labels', action='append', default=[],
help='Add a label when uploading.')
p.add_option('--re', '--reviewers',
type='string', action='append', dest='reviewers',
help='Request reviews from these people.')
p.add_option('--cc',
type='string', action='append', dest='cc',
help='Also send email to these email addresses.')
p.add_option('--br',
type='string', action='store', dest='branch',
help='Branch to upload.')
p.add_option('--cbr', '--current-branch',
dest='current_branch', action='store_true',
help='Upload current git branch.')
p.add_option('--ne', '--no-emails',
action='store_false', dest='notify', default=True,
help='If specified, do not send emails on upload.')
p.add_option('-p', '--private',
action='store_true', dest='private', default=False,
help='If specified, upload as a private change.')
p.add_option('-w', '--wip',
action='store_true', dest='wip', default=False,
help='If specified, upload as a work-in-progress change.')
p.add_option('-o', '--push-option',
type='string', action='append', dest='push_options',
default=[],
help='Additional push options to transmit')
p.add_option('-D', '--destination', '--dest',
type='string', action='store', dest='dest_branch',
metavar='BRANCH',
help='Submit for review on this target branch.')
p.add_option('-n', '--dry-run',
dest='dryrun', default=False, action='store_true',
help='Do everything except actually upload the CL.')
p.add_option('-y', '--yes',
default=False, action='store_true',
help='Answer yes to all safe prompts.')
p.add_option('--no-cert-checks',
dest='validate_certs', action='store_false', default=True,
help='Disable verifying ssl certs (unsafe).')
# Options relating to upload hook. Note that verify and no-verify are NOT
# opposites of each other, which is why they store to different locations.
# We are using them to match 'git commit' syntax.
#
# Combinations:
# - no-verify=False, verify=False (DEFAULT):
# If stdout is a tty, can prompt about running upload hooks if needed.
# If user denies running hooks, the upload is cancelled. If stdout is
# not a tty and we would need to prompt about upload hooks, upload is
# cancelled.
# - no-verify=False, verify=True:
# Always run upload hooks with no prompt.
# - no-verify=True, verify=False:
# Never run upload hooks, but upload anyway (AKA bypass hooks).
# - no-verify=True, verify=True:
# Invalid
g = p.add_option_group('Upload hooks')
g.add_option('--no-verify',
dest='bypass_hooks', action='store_true',
help='Do not run the upload hook.')
g.add_option('--verify',
dest='allow_all_hooks', action='store_true',
help='Run the upload hook without prompting.')
g.add_option('--ignore-hooks',
dest='ignore_hooks', action='store_true',
help='Do not abort uploading if upload hooks fail.')
def _SingleBranch(self, opt, branch, people):
project = branch.project
name = branch.name
remote = project.GetBranch(name).remote
key = 'review.%s.autoupload' % remote.review
answer = project.config.GetBoolean(key)
if answer is False:
_die("upload blocked by %s = false" % key)
if answer is None:
date = branch.date
commit_list = branch.commits
destination = opt.dest_branch or project.dest_branch or project.revisionExpr
print('Upload project %s/ to remote branch %s%s:' %
(project.relpath, destination, ' (private)' if opt.private else ''))
print(' branch %s (%2d commit%s, %s):' % (
name,
len(commit_list),
len(commit_list) != 1 and 's' or '',
date))
for commit in commit_list:
print(' %s' % commit)
print('to %s (y/N)? ' % remote.review, end='')
# TODO: When we require Python 3, use flush=True w/print above.
sys.stdout.flush()
if opt.yes:
print('<--yes>')
answer = True
else:
answer = sys.stdin.readline().strip().lower()
answer = answer in ('y', 'yes', '1', 'true', 't')
if answer:
if len(branch.commits) > UNUSUAL_COMMIT_THRESHOLD:
answer = _ConfirmManyUploads()
if answer:
self._UploadAndReport(opt, [branch], people)
else:
_die("upload aborted by user")
def _MultipleBranches(self, opt, pending, people):
projects = {}
branches = {}
script = []
script.append('# Uncomment the branches to upload:')
for project, avail in pending:
script.append('#')
script.append('# project %s/:' % project.relpath)
b = {}
for branch in avail:
if branch is None:
continue
name = branch.name
date = branch.date
commit_list = branch.commits
if b:
script.append('#')
destination = opt.dest_branch or project.dest_branch or project.revisionExpr
script.append('# branch %s (%2d commit%s, %s) to remote branch %s:' % (
name,
len(commit_list),
len(commit_list) != 1 and 's' or '',
date,
destination))
for commit in commit_list:
script.append('# %s' % commit)
b[name] = branch
projects[project.relpath] = project
branches[project.name] = b
script.append('')
script = Editor.EditString("\n".join(script)).split("\n")
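    # Parse the edited script back: 'project' lines switch context, and
    # branch lines left uncommented select the branches to upload.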
project_re = re.compile(r'^#?\s*project\s*([^\s]+)/:$')
branch_re = re.compile(r'^\s*branch\s*([^\s(]+)\s*\(.*')
project = None
todo = []
for line in script:
m = project_re.match(line)
if m:
name = m.group(1)
project = projects.get(name)
if not project:
_die('project %s not available for upload', name)
continue
m = branch_re.match(line)
if m:
name = m.group(1)
if not project:
_die('project for branch %s not in script', name)
branch = branches[project.name].get(name)
if not branch:
_die('branch %s not in %s', name, project.relpath)
todo.append(branch)
if not todo:
_die("nothing uncommented for upload")
many_commits = False
for branch in todo:
if len(branch.commits) > UNUSUAL_COMMIT_THRESHOLD:
many_commits = True
break
if many_commits:
if not _ConfirmManyUploads(multiple_branches=True):
_die("upload aborted by user")
self._UploadAndReport(opt, todo, people)
def _AppendAutoList(self, branch, people):
"""
Appends the list of reviewers in the git project's config.
Appends the list of users in the CC list in the git project's config if a
non-empty reviewer list was found.
"""
name = branch.name
project = branch.project
key = 'review.%s.autoreviewer' % project.GetBranch(name).remote.review
raw_list = project.config.GetString(key)
if raw_list is not None:
people[0].extend([entry.strip() for entry in raw_list.split(',')])
key = 'review.%s.autocopy' % project.GetBranch(name).remote.review
raw_list = project.config.GetString(key)
if raw_list is not None and len(people[0]) > 0:
people[1].extend([entry.strip() for entry in raw_list.split(',')])
def _FindGerritChange(self, branch):
last_pub = branch.project.WasPublished(branch.name)
if last_pub is None:
return ""
refs = branch.GetPublishedRefs()
try:
# refs/changes/XYZ/N --> XYZ
return refs.get(last_pub).split('/')[-2]
except (AttributeError, IndexError):
return ""
def _UploadAndReport(self, opt, todo, original_people):
have_errors = False
for branch in todo:
try:
people = copy.deepcopy(original_people)
self._AppendAutoList(branch, people)
# Check if there are local changes that may have been forgotten
changes = branch.project.UncommitedFiles()
if changes:
key = 'review.%s.autoupload' % branch.project.remote.review
answer = branch.project.config.GetBoolean(key)
# if they want to auto upload, let's not ask because it could be automated
if answer is None:
print()
print('Uncommitted changes in %s (did you forget to amend?):'
% branch.project.name)
print('\n'.join(changes))
print('Continue uploading? (y/N) ', end='')
# TODO: When we require Python 3, use flush=True w/print above.
sys.stdout.flush()
if opt.yes:
print('<--yes>')
a = 'yes'
else:
a = sys.stdin.readline().strip().lower()
if a not in ('y', 'yes', 't', 'true', 'on'):
print("skipping upload", file=sys.stderr)
branch.uploaded = False
branch.error = 'User aborted'
continue
# Check if topic branches should be sent to the server during upload
if opt.auto_topic is not True:
key = 'review.%s.uploadtopic' % branch.project.remote.review
opt.auto_topic = branch.project.config.GetBoolean(key)
def _ExpandCommaList(value):
"""Split |value| up into comma delimited entries."""
if not value:
return
for ret in value.split(','):
ret = ret.strip()
if ret:
yield ret
# Check if hashtags should be included.
key = 'review.%s.uploadhashtags' % branch.project.remote.review
hashtags = set(_ExpandCommaList(branch.project.config.GetString(key)))
for tag in opt.hashtags:
hashtags.update(_ExpandCommaList(tag))
if opt.hashtag_branch:
hashtags.add(branch.name)
# Check if labels should be included.
key = 'review.%s.uploadlabels' % branch.project.remote.review
labels = set(_ExpandCommaList(branch.project.config.GetString(key)))
for label in opt.labels:
labels.update(_ExpandCommaList(label))
# Basic sanity check on label syntax.
for label in labels:
if not re.match(r'^.+[+-][0-9]+$', label):
print('repo: error: invalid label syntax "%s": labels use forms '
'like CodeReview+1 or Verified-1' % (label,), file=sys.stderr)
sys.exit(1)
# Handle e-mail notifications.
if opt.notify is False:
notify = 'NONE'
else:
key = 'review.%s.uploadnotify' % branch.project.remote.review
notify = branch.project.config.GetString(key)
destination = opt.dest_branch or branch.project.dest_branch
# Make sure our local branch is not setup to track a different remote branch
merge_branch = self._GetMergeBranch(branch.project)
if destination:
full_dest = destination
if not full_dest.startswith(R_HEADS):
full_dest = R_HEADS + full_dest
if not opt.dest_branch and merge_branch and merge_branch != full_dest:
print('merge branch %s does not match destination branch %s'
% (merge_branch, full_dest))
print('skipping upload.')
print('Please use `--destination %s` if this is intentional'
% destination)
branch.uploaded = False
continue
branch.UploadForReview(people,
dryrun=opt.dryrun,
auto_topic=opt.auto_topic,
hashtags=hashtags,
labels=labels,
private=opt.private,
notify=notify,
wip=opt.wip,
dest_branch=destination,
validate_certs=opt.validate_certs,
push_options=opt.push_options)
branch.uploaded = True
except UploadError as e:
branch.error = e
branch.uploaded = False
have_errors = True
print(file=sys.stderr)
print('----------------------------------------------------------------------', file=sys.stderr)
if have_errors:
for branch in todo:
if not branch.uploaded:
if len(str(branch.error)) <= 30:
fmt = ' (%s)'
else:
fmt = '\n (%s)'
print(('[FAILED] %-15s %-15s' + fmt) % (
branch.project.relpath + '/',
branch.name,
str(branch.error)),
file=sys.stderr)
print()
for branch in todo:
if branch.uploaded:
print('[OK ] %-15s %s' % (
branch.project.relpath + '/',
branch.name),
file=sys.stderr)
if have_errors:
sys.exit(1)
def _GetMergeBranch(self, project):
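    # Resolve the current local branch, then read branch.<name>.merge to
    # find the remote branch it is configured to merge with.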
p = GitCommand(project,
['rev-parse', '--abbrev-ref', 'HEAD'],
capture_stdout=True,
capture_stderr=True)
p.Wait()
local_branch = p.stdout.strip()
p = GitCommand(project,
['config', '--get', 'branch.%s.merge' % local_branch],
capture_stdout=True,
capture_stderr=True)
p.Wait()
merge_branch = p.stdout.strip()
return merge_branch
def Execute(self, opt, args):
project_list = self.GetProjects(args)
pending = []
reviewers = []
cc = []
branch = None
if opt.branch:
branch = opt.branch
for project in project_list:
if opt.current_branch:
cbr = project.CurrentBranch
up_branch = project.GetUploadableBranch(cbr)
if up_branch:
avail = [up_branch]
else:
avail = None
print('ERROR: Current branch (%s) not uploadable. '
'You may be able to type '
'"git branch --set-upstream-to m/master" to fix '
'your branch.' % str(cbr),
file=sys.stderr)
else:
avail = project.GetUploadableBranches(branch)
if avail:
pending.append((project, avail))
if not pending:
if branch is None:
print('repo: error: no branches ready for upload', file=sys.stderr)
else:
print('repo: error: no branches named "%s" ready for upload' %
(branch,), file=sys.stderr)
return 1
if not opt.bypass_hooks:
hook = RepoHook('pre-upload', self.manifest.repo_hooks_project,
self.manifest.topdir,
self.manifest.manifestProject.GetRemote('origin').url,
abort_if_user_denies=True)
pending_proj_names = [project.name for (project, available) in pending]
pending_worktrees = [project.worktree for (project, available) in pending]
passed = True
try:
hook.Run(opt.allow_all_hooks, project_list=pending_proj_names,
worktree_list=pending_worktrees)
except SystemExit:
passed = False
if not opt.ignore_hooks:
raise
except HookError as e:
passed = False
print("ERROR: %s" % str(e), file=sys.stderr)
if not passed:
if opt.ignore_hooks:
print('\nWARNING: pre-upload hooks failed, but uploading anyways.',
file=sys.stderr)
else:
return 1
if opt.reviewers:
reviewers = _SplitEmails(opt.reviewers)
if opt.cc:
cc = _SplitEmails(opt.cc)
people = (reviewers, cc)
if len(pending) == 1 and len(pending[0][1]) == 1:
self._SingleBranch(opt, pending[0][1][0], people)
else:
self._MultipleBranches(opt, pending, people)
| apache-2.0 | 2,546,965,587,339,811,300 | 34.885246 | 100 | 0.598904 | false |
TeradataCenterForHadoop/ambari-presto-service | tests/test_worker.py | 2 | 5422 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mock import MagicMock, patch, mock_open, call
import unittest
import os
import sys
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
from package.scripts.presto_worker import Worker
from package.scripts.params import memory_configs
class TestWorker(unittest.TestCase):
dummy_config_properties = {'pseudo.distributed.enabled': False,
'query.queue-config-file': '',
'http-server.http.port': '8285',
'node-scheduler.include-coordinator': False}
minimal_config_properties = {'pseudo.distributed.enabled': False,
'node-scheduler.include-coordinator': False}
for memory_config in memory_configs:
dummy_config_properties[memory_config] = '123'
def setUp(self):
self.mock_env = MagicMock()
@patch('package.scripts.presto_worker.Worker.configure')
@patch('package.scripts.presto_worker.Execute')
def test_lifecycle_methods_shell_out_to_execute(
self, execute_mock, unused_configure_mock):
presto_worker = Worker()
presto_worker.install(self.mock_env)
assert execute_mock.call_count is 2
assert 'wget' in execute_mock.call_args_list[0][0][0]
assert 'rpm -i' in execute_mock.call_args_list[1][0][0]
assert 'export JAVA8_HOME=' in execute_mock.call_args_list[1][0][0]
execute_mock.reset_mock()
presto_worker.stop(self.mock_env)
assert execute_mock.call_count is 1
assert 'stop' in execute_mock.call_args_list[0][0][0]
execute_mock.reset_mock()
presto_worker.start(self.mock_env)
assert execute_mock.call_count is 1
assert 'start' in execute_mock.call_args_list[0][0][0]
execute_mock.reset_mock()
presto_worker.status(self.mock_env)
assert execute_mock.call_count is 1
assert 'status' in execute_mock.call_args_list[0][0][0]
@patch('package.scripts.presto_worker.Worker.configure')
@patch('package.scripts.presto_worker.Execute')
def test_install_start_configure_presto(
self, unused_execute_mock, configure_mock):
presto_worker = Worker()
presto_worker.install(self.mock_env)
assert configure_mock.called
configure_mock.reset_mock()
presto_worker.start(self.mock_env)
assert configure_mock.called
@patch('package.scripts.presto_worker.create_connectors')
def test_configure_adds_tpch_connector(self, create_connectors_mock):
presto_worker = Worker()
with patch('__builtin__.open'):
presto_worker.configure(self.mock_env)
assert call({}, "{'tpch': ['connector.name=tpch']}") in create_connectors_mock.call_args_list
@patch('package.scripts.presto_worker.create_connectors')
@patch('package.scripts.params.config_properties', new=dummy_config_properties)
def test_configure_ignore_pseudo_distribute_enabled_property(self, create_connectors_mock ):
config = collect_config_vars_written_out(self.mock_env, Worker())
assert 'pseudo.distributed.enabled=true\n' not in config
@patch('package.scripts.presto_worker.create_connectors')
@patch('package.scripts.params.config_properties', new=dummy_config_properties)
def test_configure_ignore_empty_queue_config_file(self, create_connectors_mock):
config = collect_config_vars_written_out(self.mock_env, Worker())
for item in config:
assert not item.startswith('query.queue-config-file')
@patch('package.scripts.presto_worker.create_connectors')
@patch('package.scripts.params.config_properties', new=minimal_config_properties)
def test_constant_properties(self, create_connectors_mock):
config = collect_config_vars_written_out(self.mock_env, Worker())
assert 'coordinator=false\n' in config
assert 'node.data-dir=/var/lib/presto\n' in config
@patch('package.scripts.presto_worker.create_connectors')
@patch('package.scripts.params.config_properties', new=dummy_config_properties)
def test_memory_settings_have_units(self, create_connectors_mock):
from test_coordinator import assert_memory_configs_properly_formatted
config = collect_config_vars_written_out(self.mock_env, Worker())
assert_memory_configs_properly_formatted(config)
def collect_config_vars_written_out(mock_env, obj_under_test):
config = []
open_mock = mock_file_descriptor_write_method(config)
with patch('__builtin__.open', open_mock):
getattr(obj_under_test, 'configure')(mock_env)
return config
def mock_file_descriptor_write_method(list):
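    # Build a mock_open() whose file handle appends every write() payload to
    # the supplied list, so tests can inspect what would have been written.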
def append(item_to_append):
list.append(item_to_append)
open_mock = mock_open()
fd = open_mock()
fd.write = append
return open_mock
| apache-2.0 | -2,642,991,648,735,018,000 | 38.007194 | 101 | 0.681483 | false |
adsabs/ADSPipelineMsg | adsmsg/protobuf/status_pb2.py | 1 | 2108 | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: status.proto
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='status.proto',
package='adsmsg',
syntax='proto3',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x0cstatus.proto\x12\x06\x61\x64smsg*7\n\x06Status\x12\n\n\x06\x61\x63tive\x10\x00\x12\x0b\n\x07\x64\x65leted\x10\x01\x12\x07\n\x03new\x10\x02\x12\x0b\n\x07updated\x10\x03\x62\x06proto3'
)
_STATUS = _descriptor.EnumDescriptor(
name='Status',
full_name='adsmsg.Status',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='active', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='deleted', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='new', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='updated', index=3, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=24,
serialized_end=79,
)
_sym_db.RegisterEnumDescriptor(_STATUS)
Status = enum_type_wrapper.EnumTypeWrapper(_STATUS)
active = 0
deleted = 1
new = 2
updated = 3
DESCRIPTOR.enum_types_by_name['Status'] = _STATUS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# @@protoc_insertion_point(module_scope)
| agpl-3.0 | -6,500,477,714,159,251,000 | 28.277778 | 206 | 0.722011 | false |
edx/edx-ora | peer_grading/migrations/0007_auto__add_unique_calibrationhistory_student_id_location.py | 1 | 5423 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding unique constraint on 'CalibrationHistory', fields ['student_id', 'location']
db.create_unique('peer_grading_calibrationhistory', ['student_id', 'location'])
def backwards(self, orm):
# Removing unique constraint on 'CalibrationHistory', fields ['student_id', 'location']
db.delete_unique('peer_grading_calibrationhistory', ['student_id', 'location'])
models = {
'controller.submission': {
'Meta': {'unique_together': "(('student_response', 'student_id', 'location'),)", 'object_name': 'Submission'},
'answer': ('django.db.models.fields.TextField', [], {'default': "''"}),
'control_fields': ('django.db.models.fields.TextField', [], {'default': "''"}),
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'duplicate_submission_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'grader_settings': ('django.db.models.fields.TextField', [], {'default': "''"}),
'has_been_duplicate_checked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'initial_display': ('django.db.models.fields.TextField', [], {'default': "''"}),
'is_duplicate': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_plagiarized': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'location': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'db_index': 'True'}),
'max_score': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'next_grader_type': ('django.db.models.fields.CharField', [], {'default': "'NA'", 'max_length': '2'}),
'posted_results_back_to_queue': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'preferred_grader_type': ('django.db.models.fields.CharField', [], {'default': "'NA'", 'max_length': '2'}),
'previous_grader_type': ('django.db.models.fields.CharField', [], {'default': "'NA'", 'max_length': '2'}),
'problem_id': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'prompt': ('django.db.models.fields.TextField', [], {'default': "''"}),
'rubric': ('django.db.models.fields.TextField', [], {'default': "''"}),
'skip_basic_checks': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'student_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'student_response': ('django.db.models.fields.TextField', [], {'default': "''"}),
'student_submission_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'xqueue_queue_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128'}),
'xqueue_submission_id': ('django.db.models.fields.CharField', [], {'default': "''", 'unique': 'True', 'max_length': '1024'}),
'xqueue_submission_key': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024'})
},
'peer_grading.calibrationhistory': {
'Meta': {'unique_together': "(('student_id', 'location'),)", 'object_name': 'CalibrationHistory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'db_index': 'True'}),
'problem_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024'}),
'student_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'})
},
'peer_grading.calibrationrecord': {
'Meta': {'object_name': 'CalibrationRecord'},
'actual_score': ('django.db.models.fields.IntegerField', [], {}),
'calibration_history': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['peer_grading.CalibrationHistory']"}),
'feedback': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_pre_calibration': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'rubric_scores': ('django.db.models.fields.TextField', [], {'default': "''"}),
'rubric_scores_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'score': ('django.db.models.fields.IntegerField', [], {}),
'submission': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['controller.Submission']"})
}
}
complete_apps = ['peer_grading'] | agpl-3.0 | 7,517,790,753,235,718,000 | 72.297297 | 137 | 0.570533 | false |
rohitwaghchaure/erpnext_develop | erpnext/healthcare/doctype/physician/test_physician.py | 5 | 1077 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, ESS LLP and Contributors
# See license.txt
from __future__ import unicode_literals
import unittest
import frappe
test_dependencies = ['Physician Schedule']
class TestPhysician(unittest.TestCase):
def tearDown(self):
frappe.delete_doc_if_exists('Physician', '_Testdoctor2', force=1)
def test_schedule_and_time(self):
physician = frappe.new_doc('Physician')
physician.first_name = '_Testdoctor2'
physician.physician_schedule = '_Testdoctor2 Schedule'
self.assertRaises(frappe.ValidationError, physician.insert)
physician.physician_schedule = ''
physician.time_per_appointment = 15
self.assertRaises(frappe.ValidationError, physician.insert)
physician.physician_schedule = '_Testdoctor2 Schedule'
physician.time_per_appointment = 15
physician.insert()
def test_new_physician_without_schedule(self):
physician = frappe.new_doc('Physician')
physician.first_name = '_Testdoctor2'
physician.insert()
self.assertEqual(frappe.get_value('Physician', '_Testdoctor2', 'first_name'), '_Testdoctor2')
| gpl-3.0 | -6,522,493,021,094,342,000 | 28.108108 | 95 | 0.747447 | false |
mads-bertelsen/McCode | meta-pkgs/windows/Support/gnuplot-py-1.8/__init__.py | 4 | 6268 | #! /usr/bin/env python
# $Id: __init__.py 306 2008-05-02 01:09:02Z alford $
# Copyright (C) 1998-2003 Michael Haggerty <[email protected]>
#
# This file is licensed under the GNU Lesser General Public License
# (LGPL). See LICENSE.txt for details.
"""Gnuplot -- A pipe-based interface to the gnuplot plotting program.
This is the main module of the Gnuplot package.
Written by "Michael Haggerty", mailto:[email protected]. Inspired
by and partly derived from an earlier version by "Konrad Hinsen",
mailto:[email protected]. If you find a problem or have a suggestion,
please "let me know", mailto:[email protected]. Other feedback
would also be appreciated.
The Gnuplot.py home page is at
"Gnuplot.py", http://gnuplot-py.sourceforge.net
For information about how to use this module:
1. Check the README file.
2. Look at the example code in demo.py and try running it by typing
'python demo.py' or 'python __init__.py'.
3. For more details see the extensive documentation strings
throughout the python source files, especially this file,
_Gnuplot.py, PlotItems.py, and gp_unix.py.
4. The docstrings have also been turned into html which can be read
"here", http://gnuplot-py.sourceforge.net/doc. However, the
formatting is not perfect; when in doubt, double-check the
docstrings.
You should import this file with 'import Gnuplot', not with 'from
Gnuplot import *', because the module and the main class have the same
name, `Gnuplot'.
To obtain the gnuplot plotting program itself, see "the gnuplot FAQ",
ftp://ftp.gnuplot.vt.edu/pub/gnuplot/faq/index.html. Obviously you
need to have gnuplot installed if you want to use Gnuplot.py.
The old command-based interface to gnuplot (previously supported as
'oldplot.py') has been removed from the package.
Features:
o Allows the creation of two or three dimensional plots from
python.
o A gnuplot session is an instance of class 'Gnuplot'. Multiple
sessions can be open at once. For example::
g1 = Gnuplot.Gnuplot()
g2 = Gnuplot.Gnuplot()
Note that due to limitations on those platforms, opening multiple
simultaneous sessions on Windows or Macintosh may not work
correctly. (Feedback?)
o The implicitly-generated gnuplot commands can be stored to a file
instead of executed immediately::
g = Gnuplot.Gnuplot('commands.txt')
The 'commands.txt' file can then be run later with gnuplot's
'load' command. Beware, however: the plot commands may depend on
the existence of temporary files, which will probably be deleted
before you use the command file.
o Can pass arbitrary commands to the gnuplot command interpreter::
g('set pointsize 2')
(If this is all you want to do, you might consider using the
lightweight GnuplotProcess class defined in gp.py.)
o A Gnuplot object knows how to plot objects of type 'PlotItem'.
Any PlotItem can have optional 'title' and/or 'with' suboptions.
Builtin PlotItem types:
* 'Data(array1)' -- data from a Python list or NumPy array
    (permits additional option 'cols')
  * 'File('filename')' -- data from an existing data file (permits
    additional option 'using')
* 'Func('exp(4.0 * sin(x))')' -- functions (passed as a string,
evaluated by gnuplot)
* 'GridData(m, x, y)' -- data tabulated on a grid of (x,y) values
(usually to be plotted in 3-D)
See the documentation strings for those classes for more details.
o PlotItems are implemented as objects that can be assigned to
variables and plotted repeatedly. Most of their plot options can
  also be changed with the new 'set_option()' member functions, and then
they can be replotted with their new options.
o Communication of commands to gnuplot is via a one-way pipe.
Communication of data from python to gnuplot is via inline data
(through the command pipe) or via temporary files. Temp files are
deleted automatically when their associated 'PlotItem' is deleted.
The PlotItems in use by a Gnuplot object at any given time are
stored in an internal list so that they won't be deleted
prematurely.
o Can use 'replot' method to add datasets to an existing plot.
o Can make persistent gnuplot windows by using the constructor option
'persist=1'. Such windows stay around even after the gnuplot
program is exited. Note that only newer version of gnuplot support
this option.
o Can plot either directly to a postscript printer or to a
postscript file via the 'hardcopy' method.
o Grid data for the splot command can be sent to gnuplot in binary
format, saving time and disk space.
o Should work under Unix, Macintosh, and Windows.
Restrictions:
- Relies on the numpy Python extension. This can be obtained from
the Scipy group at <http://www.scipy.org/Download>. If you're
interested in gnuplot, you would probably also want numpy anyway.
- Only a small fraction of gnuplot functionality is implemented as
explicit method functions. However, you can give arbitrary
commands to gnuplot manually::
g = Gnuplot.Gnuplot()
g('set data style linespoints')
g('set pointsize 5')
- There is no provision for missing data points in array data (which
gnuplot allows via the 'set missing' command).
Bugs:
- No attempt is made to check for errors reported by gnuplot. On
unix any gnuplot error messages simply appear on stderr. (I don't
know what happens under Windows.)
- All of these classes perform their resource deallocation when
'__del__' is called. Normally this works fine, but there are
well-known cases when Python's automatic resource deallocation
fails, which can leave temporary files around.
"""
__version__ = '1.8'
# Other modules that should be loaded for 'from Gnuplot import *':
__all__ = ['utils', 'funcutils', ]
from gp import GnuplotOpts, GnuplotProcess, test_persist
from Errors import Error, OptionError, DataError
from PlotItems import PlotItem, Func, File, Data, GridData
from _Gnuplot import Gnuplot
if __name__ == '__main__':
import demo
demo.demo()
| gpl-2.0 | -2,172,466,027,556,622,000 | 35.022989 | 71 | 0.713625 | false |
joshuahoman/vivisect | vivisect/parsers/blob.py | 4 | 1764 | import envi
import vivisect
import vivisect.parsers as v_parsers
from vivisect.const import *
def parseFd(vw, fd, filename=None):
fd.seek(0)
arch = vw.config.viv.parsers.blob.arch
bigend = vw.config.viv.parsers.blob.bigend
baseaddr = vw.config.viv.parsers.blob.baseaddr
try:
envi.getArchModule(arch)
except Exception, e:
raise Exception('Blob loader *requires* arch option (-O viv.parsers.blob.arch="<archname>")')
vw.setMeta('Architecture', arch)
vw.setMeta('Platform','unknown')
vw.setMeta('Format','blob')
vw.setMeta('bigend', bigend)
bytez = fd.read()
vw.addMemoryMap(baseaddr, 7, filename, bytez)
vw.addSegment( baseaddr, len(bytez), '%.8x' % baseaddr, 'blob' )
def parseFile(vw, filename):
arch = vw.config.viv.parsers.blob.arch
bigend = vw.config.viv.parsers.blob.bigend
baseaddr = vw.config.viv.parsers.blob.baseaddr
try:
envi.getArchModule(arch)
except Exception, e:
raise Exception('Blob loader *requires* arch option (-O viv.parsers.blob.arch="<archname>")')
vw.setMeta('Architecture', arch)
vw.setMeta('Platform','unknown')
vw.setMeta('Format','blob')
vw.setMeta('bigend', bigend)
fname = vw.addFile(filename, baseaddr, v_parsers.md5File(filename))
bytez = file(filename, "rb").read()
vw.addMemoryMap(baseaddr, 7, filename, bytez)
vw.addSegment( baseaddr, len(bytez), '%.8x' % baseaddr, 'blob' )
def parseMemory(vw, memobj, baseaddr):
va,size,perms,fname = memobj.getMemoryMap(baseaddr)
if not fname:
fname = 'map_%.8x' % baseaddr
bytes = memobj.readMemory(va, size)
fname = vw.addFile(fname, baseaddr, v_parsers.md5Bytes(bytes))
vw.addMemoryMap(va, perms, fname, bytes)
| apache-2.0 | 3,921,233,198,066,294,300 | 29.947368 | 101 | 0.670068 | false |
tjma12/pycbc | pycbc/fft/backend_support.py | 1 | 3120 | # Copyright (C) 2012 Josh Willis, Andrew Miller
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
This package provides a front-end to various fast Fourier transform
implementations within PyCBC.
"""
import pycbc
import pycbc.scheme
# These are global variables, that are modified by the various scheme-
# dependent submodules, to maintain a list of all possible backends
# for all possible schemes that are available at runtime. This list
# and dict are then used when parsing command-line options.
_all_backends_list = []
_all_backends_dict = {}
# The following is the function called by each scheme's setup to add whatever new
# backends may have been found to the global list. Since some backends may be
# shared, we must first check to make sure that the item in the list is not already
# in the global list, and we assume that the keys to the dict are in one-to-one
# correspondence with the items in the list.
def _update_global_available(new_list, new_dict, global_list, global_dict):
for item in new_list:
if item not in global_list:
global_list.append(item)
global_dict.update({item:new_dict[item]})
def get_backend_modules():
return _all_backends_dict.values()
def get_backend_names():
return _all_backends_dict.keys()
BACKEND_PREFIX="pycbc.fft.backend_"
@pycbc.scheme.schemed(BACKEND_PREFIX)
def set_backend(backend_list):
err_msg = "This function is a stub that should be overridden using "
err_msg += "the scheme. You shouldn't be seeing this error!"
raise ValueError(err_msg)
@pycbc.scheme.schemed(BACKEND_PREFIX)
def get_backend():
err_msg = "This function is a stub that should be overridden using "
err_msg += "the scheme. You shouldn't be seeing this error!"
raise ValueError(err_msg)
# Import all scheme-dependent backends, to get _all_backends accurate:
for scheme_name in ["cpu", "mkl", "cuda"]:
try:
mod = __import__('pycbc.fft.backend_' + scheme_name, fromlist = ['_alist', '_adict'])
_alist = getattr(mod, "_alist")
_adict = getattr(mod, "_adict")
_update_global_available(_alist, _adict, _all_backends_list,
_all_backends_dict)
except ImportError:
pass
| gpl-3.0 | -1,651,903,887,496,366,000 | 36.590361 | 93 | 0.665705 | false |
cernops/neutron | neutron/api/api_common.py | 17 | 10729 | # Copyright 2011 Citrix System.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import urllib
from oslo.config import cfg
from webob import exc
from neutron.common import constants
from neutron.common import exceptions
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
def get_filters(request, attr_info, skips=[]):
"""Extracts the filters from the request string.
Returns a dict of lists for the filters:
check=a&check=b&name=Bob&
becomes:
{'check': [u'a', u'b'], 'name': [u'Bob']}
"""
res = {}
for key, values in request.GET.dict_of_lists().iteritems():
if key in skips:
continue
values = [v for v in values if v]
key_attr_info = attr_info.get(key, {})
if 'convert_list_to' in key_attr_info:
values = key_attr_info['convert_list_to'](values)
elif 'convert_to' in key_attr_info:
convert_to = key_attr_info['convert_to']
values = [convert_to(v) for v in values]
if values:
res[key] = values
return res
def get_previous_link(request, items, id_key):
params = request.GET.copy()
params.pop('marker', None)
if items:
marker = items[0][id_key]
params['marker'] = marker
params['page_reverse'] = True
return "%s?%s" % (request.path_url, urllib.urlencode(params))
def get_next_link(request, items, id_key):
params = request.GET.copy()
params.pop('marker', None)
if items:
marker = items[-1][id_key]
params['marker'] = marker
params.pop('page_reverse', None)
return "%s?%s" % (request.path_url, urllib.urlencode(params))
def get_limit_and_marker(request):
"""Return marker, limit tuple from request.
:param request: `wsgi.Request` possibly containing 'marker' and 'limit'
GET variables. 'marker' is the id of the last element
the client has seen, and 'limit' is the maximum number
                    of items to return. If limit == 0, pagination is not
                    needed and (None, None) is returned.
"""
max_limit = _get_pagination_max_limit()
limit = _get_limit_param(request, max_limit)
if max_limit > 0:
limit = min(max_limit, limit) or max_limit
if not limit:
return None, None
marker = request.GET.get('marker', None)
return limit, marker
def _get_pagination_max_limit():
max_limit = -1
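    # -1 (no cap) is kept when pagination_max_limit is 'infinite' or cannot
    # be parsed as a positive integer.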
if (cfg.CONF.pagination_max_limit.lower() !=
constants.PAGINATION_INFINITE):
try:
max_limit = int(cfg.CONF.pagination_max_limit)
if max_limit == 0:
raise ValueError()
except ValueError:
LOG.warn(_("Invalid value for pagination_max_limit: %s. It "
"should be an integer greater to 0"),
cfg.CONF.pagination_max_limit)
return max_limit
def _get_limit_param(request, max_limit):
"""Extract integer limit from request or fail."""
try:
limit = int(request.GET.get('limit', 0))
if limit >= 0:
return limit
except ValueError:
pass
msg = _("Limit must be an integer 0 or greater and not '%d'")
raise exceptions.BadRequest(resource='limit', msg=msg)
def list_args(request, arg):
"""Extracts the list of arg from request."""
return [v for v in request.GET.getall(arg) if v]
def get_sorts(request, attr_info):
"""Extract sort_key and sort_dir from request.
Return as: [(key1, value1), (key2, value2)]
"""
sort_keys = list_args(request, "sort_key")
sort_dirs = list_args(request, "sort_dir")
if len(sort_keys) != len(sort_dirs):
msg = _("The number of sort_keys and sort_dirs must be same")
raise exc.HTTPBadRequest(explanation=msg)
valid_dirs = [constants.SORT_DIRECTION_ASC, constants.SORT_DIRECTION_DESC]
absent_keys = [x for x in sort_keys if x not in attr_info]
if absent_keys:
msg = _("%s is invalid attribute for sort_keys") % absent_keys
raise exc.HTTPBadRequest(explanation=msg)
invalid_dirs = [x for x in sort_dirs if x not in valid_dirs]
if invalid_dirs:
msg = (_("%(invalid_dirs)s is invalid value for sort_dirs, "
"valid value is '%(asc)s' and '%(desc)s'") %
{'invalid_dirs': invalid_dirs,
'asc': constants.SORT_DIRECTION_ASC,
'desc': constants.SORT_DIRECTION_DESC})
raise exc.HTTPBadRequest(explanation=msg)
return zip(sort_keys,
[x == constants.SORT_DIRECTION_ASC for x in sort_dirs])
def get_page_reverse(request):
data = request.GET.get('page_reverse', 'False')
return data.lower() == "true"
def get_pagination_links(request, items, limit,
marker, page_reverse, key="id"):
key = key if key else 'id'
links = []
if not limit:
return links
if not (len(items) < limit and not page_reverse):
links.append({"rel": "next",
"href": get_next_link(request, items,
key)})
if not (len(items) < limit and page_reverse):
links.append({"rel": "previous",
"href": get_previous_link(request, items,
key)})
return links
class PaginationHelper(object):
def __init__(self, request, primary_key='id'):
self.request = request
self.primary_key = primary_key
def update_fields(self, original_fields, fields_to_add):
pass
def update_args(self, args):
pass
def paginate(self, items):
return items
def get_links(self, items):
return {}
class PaginationEmulatedHelper(PaginationHelper):
def __init__(self, request, primary_key='id'):
super(PaginationEmulatedHelper, self).__init__(request, primary_key)
self.limit, self.marker = get_limit_and_marker(request)
self.page_reverse = get_page_reverse(request)
def update_fields(self, original_fields, fields_to_add):
if not original_fields:
return
if self.primary_key not in original_fields:
original_fields.append(self.primary_key)
fields_to_add.append(self.primary_key)
def paginate(self, items):
if not self.limit:
return items
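        # Find the position of the marker item, then slice out at most
        # self.limit items immediately before it (page_reverse) or after it.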
i = -1
if self.marker:
for item in items:
i = i + 1
if item[self.primary_key] == self.marker:
break
if self.page_reverse:
return items[i - self.limit:i]
return items[i + 1:i + self.limit + 1]
def get_links(self, items):
return get_pagination_links(
self.request, items, self.limit, self.marker,
self.page_reverse, self.primary_key)
class PaginationNativeHelper(PaginationEmulatedHelper):
def update_args(self, args):
if self.primary_key not in dict(args.get('sorts', [])).keys():
args.setdefault('sorts', []).append((self.primary_key, True))
args.update({'limit': self.limit, 'marker': self.marker,
'page_reverse': self.page_reverse})
def paginate(self, items):
return items
class NoPaginationHelper(PaginationHelper):
pass
class SortingHelper(object):
def __init__(self, request, attr_info):
pass
def update_args(self, args):
pass
def update_fields(self, original_fields, fields_to_add):
pass
def sort(self, items):
return items
class SortingEmulatedHelper(SortingHelper):
def __init__(self, request, attr_info):
super(SortingEmulatedHelper, self).__init__(request, attr_info)
self.sort_dict = get_sorts(request, attr_info)
def update_fields(self, original_fields, fields_to_add):
if not original_fields:
return
for key in dict(self.sort_dict).keys():
if key not in original_fields:
original_fields.append(key)
fields_to_add.append(key)
def sort(self, items):
def cmp_func(obj1, obj2):
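            # Compare field by field in the requested sort order; descending
            # keys invert the comparison result.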
for key, direction in self.sort_dict:
ret = cmp(obj1[key], obj2[key])
if ret:
return ret * (1 if direction else -1)
return 0
return sorted(items, cmp=cmp_func)
class SortingNativeHelper(SortingHelper):
def __init__(self, request, attr_info):
self.sort_dict = get_sorts(request, attr_info)
def update_args(self, args):
args['sorts'] = self.sort_dict
class NoSortingHelper(SortingHelper):
pass
class NeutronController(object):
"""Base controller class for Neutron API."""
# _resource_name will be redefined in sub concrete controller
_resource_name = None
def __init__(self, plugin):
self._plugin = plugin
super(NeutronController, self).__init__()
def _prepare_request_body(self, body, params):
"""Verifies required parameters are in request body.
Sets default value for missing optional parameters.
Body argument must be the deserialized body.
"""
try:
if body is None:
# Initialize empty resource for setting default value
body = {self._resource_name: {}}
data = body[self._resource_name]
except KeyError:
# raise if _resource_name is not in req body.
raise exc.HTTPBadRequest(_("Unable to find '%s' in request body") %
self._resource_name)
for param in params:
param_name = param['param-name']
param_value = data.get(param_name)
# If the parameter wasn't found and it was required, return 400
if param_value is None and param['required']:
msg = (_("Failed to parse request. "
"Parameter '%s' not specified") % param_name)
LOG.error(msg)
raise exc.HTTPBadRequest(msg)
data[param_name] = param_value or param.get('default-value')
return body
| apache-2.0 | -4,326,613,014,784,045,600 | 31.810398 | 79 | 0.595582 | false |
davehorton/drachtio-server | deps/boost_1_69_0/tools/build/test/build_file.py | 7 | 5123 | #!/usr/bin/python
# Copyright (C) 2006. Vladimir Prus
# Copyright (C) 2008. Jurko Gospodnetic
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
# Tests that explicitly request a file (not a target) to be built by
# specifying its name on the command line.
import BoostBuild
###############################################################################
#
# test_building_file_from_specific_project()
# ------------------------------------------
#
###############################################################################
def test_building_file_from_specific_project():
t = BoostBuild.Tester(use_test_config=False)
t.write("jamroot.jam", """\
exe hello : hello.cpp ;
exe hello2 : hello.cpp ;
build-project sub ;
""")
t.write("hello.cpp", "int main() {}\n")
t.write("sub/jamfile.jam", """
exe hello : hello.cpp ;
exe hello2 : hello.cpp ;
exe sub : hello.cpp ;
""")
t.write("sub/hello.cpp", "int main() {}\n")
t.run_build_system(["sub", t.adjust_suffix("hello.obj")])
t.expect_output_lines("*depends on itself*", False)
t.expect_addition("sub/bin/$toolset/debug*/hello.obj")
t.expect_nothing_more()
t.cleanup()
###############################################################################
#
# test_building_file_from_specific_target()
# -----------------------------------------
#
###############################################################################
def test_building_file_from_specific_target():
t = BoostBuild.Tester(use_test_config=False)
t.write("jamroot.jam", """\
exe hello1 : hello1.cpp ;
exe hello2 : hello2.cpp ;
exe hello3 : hello3.cpp ;
""")
t.write("hello1.cpp", "int main() {}\n")
t.write("hello2.cpp", "int main() {}\n")
t.write("hello3.cpp", "int main() {}\n")
t.run_build_system(["hello1", t.adjust_suffix("hello1.obj")])
t.expect_addition("bin/$toolset/debug*/hello1.obj")
t.expect_nothing_more()
t.cleanup()
###############################################################################
#
# test_building_missing_file_from_specific_target()
# -------------------------------------------------
#
###############################################################################
def test_building_missing_file_from_specific_target():
t = BoostBuild.Tester(use_test_config=False)
t.write("jamroot.jam", """\
exe hello1 : hello1.cpp ;
exe hello2 : hello2.cpp ;
exe hello3 : hello3.cpp ;
""")
t.write("hello1.cpp", "int main() {}\n")
t.write("hello2.cpp", "int main() {}\n")
t.write("hello3.cpp", "int main() {}\n")
obj = t.adjust_suffix("hello2.obj")
t.run_build_system(["hello1", obj], status=1)
t.expect_output_lines("don't know how to make*" + obj)
t.expect_nothing_more()
t.cleanup()
###############################################################################
#
# test_building_multiple_files_with_different_names()
# ---------------------------------------------------
#
###############################################################################
def test_building_multiple_files_with_different_names():
t = BoostBuild.Tester(use_test_config=False)
t.write("jamroot.jam", """\
exe hello1 : hello1.cpp ;
exe hello2 : hello2.cpp ;
exe hello3 : hello3.cpp ;
""")
t.write("hello1.cpp", "int main() {}\n")
t.write("hello2.cpp", "int main() {}\n")
t.write("hello3.cpp", "int main() {}\n")
t.run_build_system([t.adjust_suffix("hello1.obj"), t.adjust_suffix(
"hello2.obj")])
t.expect_addition("bin/$toolset/debug*/hello1.obj")
t.expect_addition("bin/$toolset/debug*/hello2.obj")
t.expect_nothing_more()
t.cleanup()
###############################################################################
#
# test_building_multiple_files_with_the_same_name()
# -------------------------------------------------
#
###############################################################################
def test_building_multiple_files_with_the_same_name():
t = BoostBuild.Tester(use_test_config=False)
t.write("jamroot.jam", """\
exe hello : hello.cpp ;
exe hello2 : hello.cpp ;
build-project sub ;
""")
t.write("hello.cpp", "int main() {}\n")
t.write("sub/jamfile.jam", """
exe hello : hello.cpp ;
exe hello2 : hello.cpp ;
exe sub : hello.cpp ;
""")
t.write("sub/hello.cpp", "int main() {}\n")
t.run_build_system([t.adjust_suffix("hello.obj")])
t.expect_output_lines("*depends on itself*", False)
t.expect_addition("bin/$toolset/debug*/hello.obj")
t.expect_addition("sub/bin/$toolset/debug*/hello.obj")
t.expect_nothing_more()
t.cleanup()
###############################################################################
#
# main()
# ------
#
###############################################################################
test_building_file_from_specific_project()
test_building_file_from_specific_target()
test_building_missing_file_from_specific_target()
test_building_multiple_files_with_different_names()
test_building_multiple_files_with_the_same_name()
| mit | 6,968,420,967,926,862,000 | 29.135294 | 79 | 0.496194 | false |
SmartCash/smartcash | qa/rpc-tests/test_framework/blockstore.py | 97 | 5416 | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# BlockStore: a helper class that keeps a map of blocks and implements
# helper functions for responding to getheaders and getdata,
# and for constructing a getheaders message
#
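# Illustrative usage sketch (not part of the original file; assumes a writable
# test datadir, and uses only the BlockStore/TxStore APIs defined below):
#   store = BlockStore("/tmp/testnode")
#   store.add_block(block)               # index a CBlock by its sha256
#   replies = store.get_blocks(inv)      # one msg_generic(b"block", ...) per hit
#   locator = store.get_locator()        # CBlockLocator for the current tip
#   store.close()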
from .mininode import *
from io import BytesIO
import dbm.dumb as dbmd
class BlockStore(object):
def __init__(self, datadir):
self.blockDB = dbmd.open(datadir + "/blocks", 'c')
self.currentBlock = 0
self.headers_map = dict()
def close(self):
self.blockDB.close()
def erase(self, blockhash):
del self.blockDB[repr(blockhash)]
# lookup an entry and return the item as raw bytes
def get(self, blockhash):
value = None
try:
value = self.blockDB[repr(blockhash)]
except KeyError:
return None
return value
# lookup an entry and return it as a CBlock
def get_block(self, blockhash):
ret = None
serialized_block = self.get(blockhash)
if serialized_block is not None:
f = BytesIO(serialized_block)
ret = CBlock()
ret.deserialize(f)
ret.calc_sha256()
return ret
def get_header(self, blockhash):
try:
return self.headers_map[blockhash]
except KeyError:
return None
# Note: this pulls full blocks out of the database just to retrieve
# the headers -- perhaps we could keep a separate data structure
# to avoid this overhead.
def headers_for(self, locator, hash_stop, current_tip=None):
if current_tip is None:
current_tip = self.currentBlock
current_block_header = self.get_header(current_tip)
if current_block_header is None:
return None
response = msg_headers()
headersList = [ current_block_header ]
maxheaders = 2000
while (headersList[0].sha256 not in locator.vHave):
prevBlockHash = headersList[0].hashPrevBlock
prevBlockHeader = self.get_header(prevBlockHash)
if prevBlockHeader is not None:
headersList.insert(0, prevBlockHeader)
else:
break
headersList = headersList[:maxheaders] # truncate if we have too many
hashList = [x.sha256 for x in headersList]
index = len(headersList)
if (hash_stop in hashList):
index = hashList.index(hash_stop)+1
response.headers = headersList[:index]
return response
def add_block(self, block):
block.calc_sha256()
try:
self.blockDB[repr(block.sha256)] = bytes(block.serialize())
except TypeError as e:
print("Unexpected error: ", sys.exc_info()[0], e.args)
self.currentBlock = block.sha256
self.headers_map[block.sha256] = CBlockHeader(block)
def add_header(self, header):
self.headers_map[header.sha256] = header
# lookup the hashes in "inv", and return p2p messages for delivering
# blocks found.
def get_blocks(self, inv):
responses = []
for i in inv:
if (i.type == 2): # MSG_BLOCK
data = self.get(i.hash)
if data is not None:
# Use msg_generic to avoid re-serialization
responses.append(msg_generic(b"block", data))
return responses
def get_locator(self, current_tip=None):
if current_tip is None:
current_tip = self.currentBlock
r = []
counter = 0
step = 1
lastBlock = self.get_block(current_tip)
while lastBlock is not None:
r.append(lastBlock.hashPrevBlock)
for i in range(step):
lastBlock = self.get_block(lastBlock.hashPrevBlock)
if lastBlock is None:
break
counter += 1
if counter > 10:
step *= 2
locator = CBlockLocator()
locator.vHave = r
return locator
class TxStore(object):
def __init__(self, datadir):
self.txDB = dbmd.open(datadir + "/transactions", 'c')
def close(self):
self.txDB.close()
# lookup an entry and return the item as raw bytes
def get(self, txhash):
value = None
try:
value = self.txDB[repr(txhash)]
except KeyError:
return None
return value
def get_transaction(self, txhash):
ret = None
serialized_tx = self.get(txhash)
if serialized_tx is not None:
f = BytesIO(serialized_tx)
ret = CTransaction()
ret.deserialize(f)
ret.calc_sha256()
return ret
def add_transaction(self, tx):
tx.calc_sha256()
try:
self.txDB[repr(tx.sha256)] = bytes(tx.serialize())
except TypeError as e:
print("Unexpected error: ", sys.exc_info()[0], e.args)
def get_transactions(self, inv):
responses = []
for i in inv:
if (i.type == 1): # MSG_TX
tx = self.get(i.hash)
if tx is not None:
responses.append(msg_generic(b"tx", tx))
return responses
| mit | 6,879,593,141,829,968,000 | 32.02439 | 77 | 0.57884 | false |
nmayorov/scipy | scipy/stats/tests/test_rank.py | 4 | 8291 | import numpy as np
from numpy.testing import assert_equal, assert_array_equal
from scipy.stats import rankdata, tiecorrect
import pytest
class TestTieCorrect(object):
def test_empty(self):
"""An empty array requires no correction, should return 1.0."""
ranks = np.array([], dtype=np.float64)
c = tiecorrect(ranks)
assert_equal(c, 1.0)
def test_one(self):
"""A single element requires no correction, should return 1.0."""
ranks = np.array([1.0], dtype=np.float64)
c = tiecorrect(ranks)
assert_equal(c, 1.0)
def test_no_correction(self):
"""Arrays with no ties require no correction."""
ranks = np.arange(2.0)
c = tiecorrect(ranks)
assert_equal(c, 1.0)
ranks = np.arange(3.0)
c = tiecorrect(ranks)
assert_equal(c, 1.0)
def test_basic(self):
"""Check a few basic examples of the tie correction factor."""
# One tie of two elements
ranks = np.array([1.0, 2.5, 2.5])
c = tiecorrect(ranks)
T = 2.0
N = ranks.size
expected = 1.0 - (T**3 - T) / (N**3 - N)
assert_equal(c, expected)
# One tie of two elements (same as above, but tie is not at the end)
ranks = np.array([1.5, 1.5, 3.0])
c = tiecorrect(ranks)
T = 2.0
N = ranks.size
expected = 1.0 - (T**3 - T) / (N**3 - N)
assert_equal(c, expected)
# One tie of three elements
ranks = np.array([1.0, 3.0, 3.0, 3.0])
c = tiecorrect(ranks)
T = 3.0
N = ranks.size
expected = 1.0 - (T**3 - T) / (N**3 - N)
assert_equal(c, expected)
# Two ties, lengths 2 and 3.
ranks = np.array([1.5, 1.5, 4.0, 4.0, 4.0])
c = tiecorrect(ranks)
T1 = 2.0
T2 = 3.0
N = ranks.size
expected = 1.0 - ((T1**3 - T1) + (T2**3 - T2)) / (N**3 - N)
assert_equal(c, expected)
def test_overflow(self):
ntie, k = 2000, 5
a = np.repeat(np.arange(k), ntie)
n = a.size # ntie * k
out = tiecorrect(rankdata(a))
assert_equal(out, 1.0 - k * (ntie**3 - ntie) / float(n**3 - n))
class TestRankData(object):
def test_empty(self):
"""stats.rankdata([]) should return an empty array."""
a = np.array([], dtype=int)
r = rankdata(a)
assert_array_equal(r, np.array([], dtype=np.float64))
r = rankdata([])
assert_array_equal(r, np.array([], dtype=np.float64))
def test_one(self):
"""Check stats.rankdata with an array of length 1."""
data = [100]
a = np.array(data, dtype=int)
r = rankdata(a)
assert_array_equal(r, np.array([1.0], dtype=np.float64))
r = rankdata(data)
assert_array_equal(r, np.array([1.0], dtype=np.float64))
def test_basic(self):
"""Basic tests of stats.rankdata."""
data = [100, 10, 50]
expected = np.array([3.0, 1.0, 2.0], dtype=np.float64)
a = np.array(data, dtype=int)
r = rankdata(a)
assert_array_equal(r, expected)
r = rankdata(data)
assert_array_equal(r, expected)
data = [40, 10, 30, 10, 50]
expected = np.array([4.0, 1.5, 3.0, 1.5, 5.0], dtype=np.float64)
a = np.array(data, dtype=int)
r = rankdata(a)
assert_array_equal(r, expected)
r = rankdata(data)
assert_array_equal(r, expected)
data = [20, 20, 20, 10, 10, 10]
expected = np.array([5.0, 5.0, 5.0, 2.0, 2.0, 2.0], dtype=np.float64)
a = np.array(data, dtype=int)
r = rankdata(a)
assert_array_equal(r, expected)
r = rankdata(data)
assert_array_equal(r, expected)
# The docstring states explicitly that the argument is flattened.
a2d = a.reshape(2, 3)
r = rankdata(a2d)
assert_array_equal(r, expected)
def test_rankdata_object_string(self):
min_rank = lambda a: [1 + sum(i < j for i in a) for j in a]
max_rank = lambda a: [sum(i <= j for i in a) for j in a]
ordinal_rank = lambda a: min_rank([(x, i) for i, x in enumerate(a)])
def average_rank(a):
return [(i + j) / 2.0 for i, j in zip(min_rank(a), max_rank(a))]
def dense_rank(a):
b = np.unique(a)
return [1 + sum(i < j for i in b) for j in a]
rankf = dict(min=min_rank, max=max_rank, ordinal=ordinal_rank,
average=average_rank, dense=dense_rank)
def check_ranks(a):
for method in 'min', 'max', 'dense', 'ordinal', 'average':
out = rankdata(a, method=method)
assert_array_equal(out, rankf[method](a))
val = ['foo', 'bar', 'qux', 'xyz', 'abc', 'efg', 'ace', 'qwe', 'qaz']
check_ranks(np.random.choice(val, 200))
check_ranks(np.random.choice(val, 200).astype('object'))
val = np.array([0, 1, 2, 2.718, 3, 3.141], dtype='object')
check_ranks(np.random.choice(val, 200).astype('object'))
def test_large_int(self):
data = np.array([2**60, 2**60+1], dtype=np.uint64)
r = rankdata(data)
assert_array_equal(r, [1.0, 2.0])
data = np.array([2**60, 2**60+1], dtype=np.int64)
r = rankdata(data)
assert_array_equal(r, [1.0, 2.0])
data = np.array([2**60, -2**60+1], dtype=np.int64)
r = rankdata(data)
assert_array_equal(r, [2.0, 1.0])
def test_big_tie(self):
for n in [10000, 100000, 1000000]:
data = np.ones(n, dtype=int)
r = rankdata(data)
expected_rank = 0.5 * (n + 1)
assert_array_equal(r, expected_rank * data,
"test failed with n=%d" % n)
def test_axis(self):
data = [[0, 2, 1],
[4, 2, 2]]
expected0 = [[1., 1.5, 1.],
[2., 1.5, 2.]]
r0 = rankdata(data, axis=0)
assert_array_equal(r0, expected0)
expected1 = [[1., 3., 2.],
[3., 1.5, 1.5]]
r1 = rankdata(data, axis=1)
assert_array_equal(r1, expected1)
methods = ["average", "min", "max", "dense", "ordinal"]
dtypes = [np.float64] + [np.int_]*4
@pytest.mark.parametrize("axis", [0, 1])
@pytest.mark.parametrize("method, dtype", zip(methods, dtypes))
def test_size_0_axis(self, axis, method, dtype):
shape = (3, 0)
data = np.zeros(shape)
r = rankdata(data, method=method, axis=axis)
assert_equal(r.shape, shape)
assert_equal(r.dtype, dtype)
_cases = (
# values, method, expected
([], 'average', []),
([], 'min', []),
([], 'max', []),
([], 'dense', []),
([], 'ordinal', []),
#
([100], 'average', [1.0]),
([100], 'min', [1.0]),
([100], 'max', [1.0]),
([100], 'dense', [1.0]),
([100], 'ordinal', [1.0]),
#
([100, 100, 100], 'average', [2.0, 2.0, 2.0]),
([100, 100, 100], 'min', [1.0, 1.0, 1.0]),
([100, 100, 100], 'max', [3.0, 3.0, 3.0]),
([100, 100, 100], 'dense', [1.0, 1.0, 1.0]),
([100, 100, 100], 'ordinal', [1.0, 2.0, 3.0]),
#
([100, 300, 200], 'average', [1.0, 3.0, 2.0]),
([100, 300, 200], 'min', [1.0, 3.0, 2.0]),
([100, 300, 200], 'max', [1.0, 3.0, 2.0]),
([100, 300, 200], 'dense', [1.0, 3.0, 2.0]),
([100, 300, 200], 'ordinal', [1.0, 3.0, 2.0]),
#
([100, 200, 300, 200], 'average', [1.0, 2.5, 4.0, 2.5]),
([100, 200, 300, 200], 'min', [1.0, 2.0, 4.0, 2.0]),
([100, 200, 300, 200], 'max', [1.0, 3.0, 4.0, 3.0]),
([100, 200, 300, 200], 'dense', [1.0, 2.0, 3.0, 2.0]),
([100, 200, 300, 200], 'ordinal', [1.0, 2.0, 4.0, 3.0]),
#
([100, 200, 300, 200, 100], 'average', [1.5, 3.5, 5.0, 3.5, 1.5]),
([100, 200, 300, 200, 100], 'min', [1.0, 3.0, 5.0, 3.0, 1.0]),
([100, 200, 300, 200, 100], 'max', [2.0, 4.0, 5.0, 4.0, 2.0]),
([100, 200, 300, 200, 100], 'dense', [1.0, 2.0, 3.0, 2.0, 1.0]),
([100, 200, 300, 200, 100], 'ordinal', [1.0, 3.0, 5.0, 4.0, 2.0]),
#
([10] * 30, 'ordinal', np.arange(1.0, 31.0)),
)
def test_cases():
for values, method, expected in _cases:
r = rankdata(values, method=method)
assert_array_equal(r, expected)
| bsd-3-clause | 5,119,763,102,343,465,000 | 33.40249 | 77 | 0.503317 | false |
enStratus/unix-agent | src/dcm/agent/messaging/reply.py | 3 | 40292 | #
# Copyright (C) 2014 Dell, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
import threading
import signal
import sys
import dcm.agent.exceptions as exceptions
import dcm.agent.logger as dcm_logger
import dcm.agent.messaging.states as states
import dcm.agent.messaging.types as message_types
import dcm.agent.messaging.utils as utils
import dcm.agent.events.state_machine as state_machine
import dcm.agent.utils as agent_util
import dcm.eventlog.tracer as tracer
from dcm.agent.events.globals import global_space as dcm_events
_g_logger = logging.getLogger(__name__)
class ReplyRPC(object):
MISSING_VALUE_STRING = "DEADBEEF"
def __init__(self,
reply_listener,
agent_id,
connection,
request_id,
request_document,
db,
timeout=1.0,
reply_doc=None,
start_state=states.ReplyStates.REQUESTING):
self._agent_id = agent_id
self._request_id = request_id
self._request_document = request_document
self._cancel_callback = None
self._cancel_callback_args = None
self._cancel_callback_kwargs = None
self._reply_message_timer = None
self._reply_listener = reply_listener
self._timeout = timeout
self._conn = connection
self._resend_reply_cnt = 0
self._resend_reply_cnt_threshold = 5
self._lock = threading.RLock()
self._response_doc = reply_doc
self._sm = state_machine.StateMachine(start_state)
self._setup_states()
self._db = db
def get_request_id(self):
return self._request_id
def lock(self):
self._lock.acquire()
def unlock(self):
self._lock.release()
def get_message_payload(self):
return self._request_document["payload"]
def shutdown(self):
with tracer.RequestTracer(self._request_id):
try:
if self._reply_message_timer:
self._reply_message_timer.cancel()
self._reply_listener.message_done(self)
except Exception as ex:
_g_logger.warn("Error shutting down the request", ex)
def kill(self):
with tracer.RequestTracer(self._request_id):
if self._reply_message_timer:
try:
self._reply_message_timer.cancel()
except Exception as ex:
_g_logger.info("an exception occurred when trying to "
"cancel the timer: " + str(ex))
@agent_util.class_method_sync
def ack(self,
cancel_callback, cancel_callback_args, cancel_callback_kwargs):
"""
Indicate to the messaging system that you have successfully received
this message and stored it for processing.
"""
with tracer.RequestTracer(self._request_id):
self._cancel_callback = cancel_callback
self._cancel_callback_args = cancel_callback_args
if self._cancel_callback_args is None:
self._cancel_callback_args = []
self._cancel_callback_args.insert(0, self)
self._cancel_callback_kwargs = cancel_callback_kwargs
self._sm.event_occurred(states.ReplyEvents.USER_ACCEPTS_REQUEST,
message={})
@agent_util.class_method_sync
def nak(self, response_document):
"""
This function is called to outright reject the message. The user
is signifying that this message will not be processed at all.
A call to this function signifies that this object will no longer be
referenced by the user.
"""
with tracer.RequestTracer(self._request_id):
self._sm.event_occurred(states.ReplyEvents.USER_REJECTS_REQUEST,
message=response_document)
@agent_util.class_method_sync
def reply(self, response_document):
"""
Send a reply to this request. This signifies that the user is
done with this object.
"""
with tracer.RequestTracer(self._request_id):
_g_logger.debug("reply() has been called")
self._sm.event_occurred(states.ReplyEvents.USER_REPLIES,
message=response_document)
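# Illustrative lifecycle from a plugin's point of view (a sketch, not part of
# the original module): after the listener hands over a ReplyRPC `msg`, the
# plugin typically calls
#   msg.ack(cancel_cb, cancel_args, cancel_kwargs)   # accept the request
#   ...run the command...
#   msg.reply(response_document)                     # send the result back
# or msg.nak(response_document) to reject the request outright.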
@agent_util.class_method_sync
def reply_timeout(self, message_timer):
with tracer.RequestTracer(self._request_id):
_g_logger.debug("reply timeout occurred, resending.")
self._sm.event_occurred(states.RequesterEvents.TIMEOUT,
message_timer=message_timer)
@agent_util.class_method_sync
def incoming_message(self, json_doc):
with tracer.RequestTracer(self._request_id):
type_to_event = {
message_types.MessageTypes.ACK:
states.ReplyEvents.REPLY_ACK_RECEIVED,
message_types.MessageTypes.NACK:
states.ReplyEvents.REPLY_NACK_RECEIVED,
message_types.MessageTypes.CANCEL:
states.ReplyEvents.CANCEL_RECEIVED,
message_types.MessageTypes.STATUS:
states.ReplyEvents.STATUS_RECEIVED,
message_types.MessageTypes.REQUEST:
states.ReplyEvents.REQUEST_RECEIVED
}
if 'type' not in json_doc:
raise exceptions.MissingMessageParameterException('type')
if json_doc['type'] not in type_to_event:
raise exceptions.InvalidMessageParameterValueException(
'type', json_doc['type'])
# this next call drives the state machine
self._sm.event_occurred(type_to_event[json_doc['type']],
message=json_doc)
def _send_reply_message(self, message_timer):
self._reply_message_timer = message_timer
message_timer.send(self._conn)
###################################################################
# state machine event handlers
# every method that starts with _sm_ is called under the same lock.
###################################################################
def _sm_initial_request_received(self, **kwargs):
"""
This is the initial request, we simply set this to the requesting
state.
"""
pass
def _sm_requesting_retransmission_received(self, **kwargs):
"""
After receiving an initial request we receive a retransmission of it.
The user has not yet acked the message but they have been notified
that the message exists. In this case we do nothing but wait for
the user to ack the message
"""
pass
def _sm_requesting_cancel_received(self, **kwargs):
"""
A cancel message flows over the wire after the request is received
but before it is acknowledged. Here we will tell the user about the
cancel. It is important that the cancel notification comes after
the message received notification.
"""
dcm_events.register_callback(
self._cancel_callback,
args=self._cancel_callback_args,
kwargs=self._cancel_callback_kwargs)
def _sm_requesting_user_accepts(self, **kwargs):
"""
The user decided to accept the message. Here we will send the ack
"""
self._db.new_record(self._request_id,
self._request_document,
None,
states.ReplyStates.ACKED,
self._agent_id)
ack_doc = {'type': message_types.MessageTypes.ACK,
'message_id': utils.new_message_id(),
'request_id': self._request_id,
'entity': "user_accepts",
'agent_id': self._agent_id}
self._conn.send(ack_doc)
def _sm_requesting_user_replies(self, **kwargs):
"""
The user decides to reply before acknowledging the message. Therefore
we just send the reply and it acts as both the ack and the reply.
"""
self._response_doc = kwargs['message']
self._db.update_record(self._request_id,
states.ReplyStates.REPLY,
reply_doc=self._response_doc)
reply_doc = {'type': message_types.MessageTypes.REPLY,
'message_id': utils.new_message_id(),
'request_id': self._request_id,
'payload': self._response_doc,
'entity': "user_replies",
'agent_id': self._agent_id}
message_timer = utils.MessageTimer(self._timeout,
self.reply_timeout,
reply_doc)
self._send_reply_message(message_timer)
def _sm_requesting_user_rejects(self, **kwargs):
"""
The user decides to reject the incoming request so we must send
a nack to the remote side.
"""
self._db.new_record(self._request_id,
self._request_document,
None,
states.ReplyStates.ACKED,
self._agent_id)
nack_doc = {'type': message_types.MessageTypes.NACK,
'message_id': utils.new_message_id(),
'request_id': self._request_id,
'entity': "user_rejects",
'error_message': "The agent rejected the request.",
'agent_id': self._agent_id}
self._conn.send(nack_doc)
def _sm_acked_request_received(self, **kwargs):
"""
In this case a retransmission of the request comes in after the user
acknowledged the message. Here we resend the ack.
"""
# reply using the latest message id
ack_doc = {'type': message_types.MessageTypes.ACK,
'message_id': utils.new_message_id(),
'request_id': self._request_id,
'entity': "request_received",
'agent_id': self._agent_id}
self._conn.send(ack_doc)
def _sm_acked_cancel_received(self, **kwargs):
"""
A cancel is received from the remote end. We simply notify the user
of the request and allow the user to act upon it.
"""
dcm_events.register_callback(
self._cancel_callback,
args=self._cancel_callback_args,
kwargs=self._cancel_callback_kwargs)
def _sm_acked_reply(self, **kwargs):
"""
This is the standard case. A user has accepted the message and is
now replying to it. We send the reply.
"""
self._response_doc = kwargs['message']
self._db.update_record(self._request_id,
states.ReplyStates.REPLY,
reply_doc=self._response_doc)
reply_doc = {'type': message_types.MessageTypes.REPLY,
'message_id': utils.new_message_id(),
'request_id': self._request_id,
'payload': self._response_doc,
'entity': "acked_reply",
'agent_id': self._agent_id}
message_timer = utils.MessageTimer(self._timeout,
self.reply_timeout,
reply_doc)
self._send_reply_message(message_timer)
def _sm_acked_re_reply(self, **kwargs):
self._db.update_record(self._request_id,
states.ReplyStates.REPLY,
reply_doc=self._response_doc)
reply_doc = {'type': message_types.MessageTypes.REPLY,
'message_id': utils.new_message_id(),
'request_id': self._request_id,
'payload': self._response_doc,
'entity': "acked_reply",
'agent_id': self._agent_id}
message_timer = utils.MessageTimer(self._timeout,
self.reply_timeout,
reply_doc)
self._send_reply_message(message_timer)
def _sm_reply_request_retrans(self, **kwargs):
"""
After replying to a message we receive a retransmission of the
original request. This can happen if the remote end never receives
an ack and the reply message is either lost or delayed. Here we
retransmit the reply.
"""
reply_doc = {'type': message_types.MessageTypes.REPLY,
'message_id': utils.new_message_id(),
'request_id': self._request_id,
'payload': self._response_doc,
'entity': "request_retrans",
'agent_id': self._agent_id}
message_timer = utils.MessageTimer(self._timeout,
self.reply_timeout,
reply_doc)
self._send_reply_message(message_timer)
def _sm_reply_cancel_received(self, **kwargs):
"""
This occurs when a cancel is received after a reply is sent. It can
happen if the remote end sends a cancel before the reply is received.
Because we have already finished with this request we simply ignore
this message.
"""
pass
def _sm_reply_ack_received(self, **kwargs):
"""
This is the standard case. A reply is sent and the ack to that
reply is received. At this point we know that the RPC was
successful.
"""
self._db.update_record(self._request_id,
states.ReplyStates.REPLY_ACKED)
self._reply_message_timer.cancel()
self._reply_message_timer = None
self._reply_listener.message_done(self)
_g_logger.debug("Messaging complete. State event transition: "
+ str(self._sm.get_event_list()))
def _sm_reply_nack_received(self, **kwargs):
"""
The reply was nacked. This is probably a result of a
retransmission that was not needed.
"""
self._db.update_record(self._request_id,
states.ReplyStates.REPLY_NACKED)
self._reply_message_timer.cancel()
self._reply_message_timer = None
self._reply_listener.message_done(self)
_g_logger.debug("Reply NACKed, messaging complete. State event "
"transition: " + str(self._sm.get_event_list()))
def _sm_reply_ack_timeout(self, **kwargs):
"""
This happens when, after a given amount of time, an ack has still not
been received. We must therefore re-send the reply.
"""
message_timer = kwargs['message_timer']
# The time out did occur before the message could be acked so we must
# resend it
_g_logger.info("Resending reply")
self._resend_reply_cnt += 1
if self._resend_reply_cnt > self._resend_reply_cnt_threshold:
# TODO punt at some point ?
pass
self._send_reply_message(message_timer)
def _sm_nacked_request_received(self, **kwargs):
"""
This happens when a request is received after it has been nacked.
This will occur if the first nack is lost or delayed. We retransmit
the nack
"""
nack_doc = {'type': message_types.MessageTypes.NACK,
'message_id': utils.new_message_id(),
'request_id': self._request_id,
'entity': "request_received",
'error_message': "The agent already rejected this request",
'agent_id': self._agent_id}
self._conn.send(nack_doc)
def _sm_cancel_waiting_ack(self, **kwargs):
"""
If a cancel is received while in the requesting state we must make sure
that the user does not get the cancel callback until after they have
acked the message. This handler occurs when the user calls ack()
after a cancel has arrived. Here we just register a cancel callback
and let the user react to it how they will.
"""
dcm_events.register_user_callback(
self._cancel_callback,
args=self._cancel_callback_args,
kwargs=self._cancel_callback_kwargs)
def _sm_send_status(self):
status_doc = {'type': message_types.MessageTypes.STATUS,
'message_id': utils.new_message_id(),
'request_id': self._request_id,
'entity': "status send",
'agent_id': self._agent_id,
'state': self._sm._current_state,
'reply': self._response_doc}
self._conn.send(status_doc)
def _sm_reinflated_reply_ack(self):
_g_logger.warn("The agent manager sent a message for this request "
"after it was in the REPLY_ACK state")
def _sm_reinflated_reply_nack(self):
_g_logger.warn("The agent manager sent a message for this request "
"after it was in the REPLY_NACK state")
def _reinflate_done(self):
if self._reply_message_timer:
self._reply_message_timer.cancel()
self._reply_message_timer = None
self._reply_listener.message_done(self)
def _sm_reply_ack_re_acked(self, message=None):
"""
This is called when a re-inflated state had already been reply acked,
and is now acked again. We just take it out of memory.
"""
self._reinflate_done()
def _sm_reply_ack_now_nacked(self, message=None):
"""
This is called when a re-inflated command that had already been reply
acked now receives a NACK. We just take it out of memory.
"""
self._reinflate_done()
def _sm_reply_nack_re_nacked(self, message=None):
"""
This is called when a re-inflated state had already been reply nacked,
and is now nacked again. We just take it out of memory.
"""
self._reinflate_done()
def _sm_reply_nack_now_acked(self, message=None):
"""
This is called whenever a re-inflated command reaches acked state but
it was previously nacked
"""
self._reinflate_done()
def _sm_ack_reply_nack_received(self, message=None):
_g_logger.warn("A NACK was received when in the ACK state "
+ str(message))
# this will be cleaned up when the command replies, which it is
# required to do
def _sm_replied_nacked_reply(self, message=None):
"""
This is called when a request was received but the ACK for that
request received a NACK. However the command finished running
and a reply was sent back. Here we cancel the message and log the
event
"""
_g_logger.warn("A command that was already finished ended "
+ str(message))
self.shutdown()
def _setup_states(self):
self._sm.add_transition(states.ReplyStates.NEW,
states.ReplyEvents.REQUEST_RECEIVED,
states.ReplyStates.REQUESTING,
self._sm_initial_request_received)
self._sm.add_transition(states.ReplyStates.REQUESTING,
states.ReplyEvents.REQUEST_RECEIVED,
states.ReplyStates.REQUESTING,
self._sm_requesting_retransmission_received)
self._sm.add_transition(states.ReplyStates.REQUESTING,
states.ReplyEvents.CANCEL_RECEIVED,
states.ReplyStates.CANCEL_RECEIVED_REQUESTING,
self._sm_requesting_cancel_received)
self._sm.add_transition(states.ReplyStates.REQUESTING,
states.ReplyEvents.USER_ACCEPTS_REQUEST,
states.ReplyStates.ACKED,
self._sm_requesting_user_accepts)
self._sm.add_transition(states.ReplyStates.REQUESTING,
states.ReplyEvents.USER_REPLIES,
states.ReplyStates.REPLY,
self._sm_requesting_user_replies)
self._sm.add_transition(states.ReplyStates.REQUESTING,
states.ReplyEvents.USER_REJECTS_REQUEST,
states.ReplyStates.NACKED,
self._sm_requesting_user_rejects)
self._sm.add_transition(states.ReplyStates.REQUESTING,
states.ReplyEvents.STATUS_RECEIVED,
states.ReplyStates.REQUESTING,
self._sm_send_status)
self._sm.add_transition(states.ReplyStates.CANCEL_RECEIVED_REQUESTING,
states.ReplyEvents.REQUEST_RECEIVED,
states.ReplyStates.CANCEL_RECEIVED_REQUESTING,
self._sm_requesting_retransmission_received)
self._sm.add_transition(states.ReplyStates.CANCEL_RECEIVED_REQUESTING,
states.ReplyEvents.CANCEL_RECEIVED,
states.ReplyStates.CANCEL_RECEIVED_REQUESTING,
None)
self._sm.add_transition(states.ReplyStates.CANCEL_RECEIVED_REQUESTING,
states.ReplyEvents.USER_ACCEPTS_REQUEST,
states.ReplyStates.ACKED,
self._sm_cancel_waiting_ack)
self._sm.add_transition(states.ReplyStates.CANCEL_RECEIVED_REQUESTING,
states.ReplyEvents.USER_REPLIES,
states.ReplyStates.REPLY,
self._sm_requesting_user_replies)
self._sm.add_transition(states.ReplyStates.CANCEL_RECEIVED_REQUESTING,
states.ReplyEvents.USER_REJECTS_REQUEST,
states.ReplyStates.NACKED,
self._sm_requesting_user_rejects)
self._sm.add_transition(states.ReplyStates.CANCEL_RECEIVED_REQUESTING,
states.ReplyEvents.STATUS_RECEIVED,
states.ReplyStates.CANCEL_RECEIVED_REQUESTING,
self._sm_send_status)
self._sm.add_transition(states.ReplyStates.ACKED,
states.ReplyEvents.REQUEST_RECEIVED,
states.ReplyStates.ACKED,
self._sm_acked_request_received)
self._sm.add_transition(states.ReplyStates.ACKED,
states.ReplyEvents.CANCEL_RECEIVED,
states.ReplyStates.ACKED,
self._sm_acked_cancel_received)
self._sm.add_transition(states.ReplyStates.ACKED,
states.ReplyEvents.USER_REPLIES,
states.ReplyStates.REPLY,
self._sm_acked_reply)
self._sm.add_transition(states.ReplyStates.ACKED,
states.ReplyEvents.STATUS_RECEIVED,
states.ReplyStates.ACKED,
self._sm_send_status)
# if the AM receives and ACK but has never heard of the request ID
# it will send a nack. this should not happen in a normal course
# of events. At this point we should just kill the request and
# log a scary message. We also need to kill anything running for that
# that request
# This will happen when the agent manager quits on a request before
# the agent sends the ack. when the AM receives the ack it has already
# canceled the request and thus NACKs the ACK
self._sm.add_transition(states.ReplyStates.ACKED,
states.ReplyEvents.REPLY_NACK_RECEIVED,
states.ReplyStates.REPLY_NACKED,
self._sm_ack_reply_nack_received)
# note, eventually we will want the reply retrans logic to just punt
self._sm.add_transition(states.ReplyStates.REPLY,
states.ReplyEvents.REQUEST_RECEIVED,
states.ReplyStates.REPLY,
self._sm_reply_request_retrans)
self._sm.add_transition(states.ReplyStates.REPLY,
states.ReplyEvents.USER_REPLIES,
states.ReplyStates.REPLY,
self._sm_acked_reply)
self._sm.add_transition(states.ReplyStates.REPLY,
states.ReplyEvents.CANCEL_RECEIVED,
states.ReplyStates.REPLY,
self._sm_reply_cancel_received)
self._sm.add_transition(states.ReplyStates.REPLY,
states.ReplyEvents.REPLY_ACK_RECEIVED,
states.ReplyStates.REPLY_ACKED,
self._sm_reply_ack_received)
self._sm.add_transition(states.ReplyStates.REPLY,
states.ReplyEvents.TIMEOUT,
states.ReplyStates.REPLY,
self._sm_reply_ack_timeout)
self._sm.add_transition(states.ReplyStates.REPLY,
states.ReplyEvents.REPLY_NACK_RECEIVED,
states.ReplyStates.REPLY_NACKED,
self._sm_reply_nack_received)
self._sm.add_transition(states.ReplyStates.REPLY,
states.ReplyEvents.STATUS_RECEIVED,
states.ReplyStates.REPLY,
self._sm_send_status)
self._sm.add_transition(states.ReplyStates.REPLY,
states.ReplyEvents.DB_INFLATE,
states.ReplyStates.REPLY,
self._sm_acked_re_reply)
self._sm.add_transition(states.ReplyStates.NACKED,
states.ReplyEvents.REQUEST_RECEIVED,
states.ReplyStates.NACKED,
self._sm_nacked_request_received)
self._sm.add_transition(states.ReplyStates.NACKED,
states.ReplyEvents.STATUS_RECEIVED,
states.ReplyStates.NACKED,
self._sm_send_status)
self._sm.add_transition(states.ReplyStates.REPLY_ACKED,
states.ReplyEvents.REQUEST_RECEIVED,
states.ReplyStates.REPLY_ACKED,
self._sm_reply_request_retrans)
self._sm.add_transition(states.ReplyStates.REPLY_ACKED,
states.ReplyEvents.REPLY_ACK_RECEIVED,
states.ReplyStates.REPLY_ACKED,
self._sm_reply_ack_re_acked)
self._sm.add_transition(states.ReplyStates.REPLY_ACKED,
states.ReplyEvents.REPLY_NACK_RECEIVED,
states.ReplyStates.REPLY_ACKED,
self._sm_reply_ack_now_nacked)
self._sm.add_transition(states.ReplyStates.REPLY_ACKED,
states.ReplyEvents.CANCEL_RECEIVED,
states.ReplyStates.REPLY_ACKED,
None)
self._sm.add_transition(states.ReplyStates.REPLY_ACKED,
states.ReplyEvents.STATUS_RECEIVED,
states.ReplyStates.REPLY_ACKED,
self._sm_send_status)
self._sm.add_transition(states.ReplyStates.REPLY_ACKED,
states.ReplyEvents.TIMEOUT,
states.ReplyStates.REPLY_ACKED,
None)
# this transition should only occur when the AM makes a mistake
# or messages are received out of order.
self._sm.add_transition(states.ReplyStates.REPLY_ACKED,
states.ReplyEvents.DB_INFLATE,
states.ReplyStates.REPLY_ACKED,
self._sm_reinflated_reply_ack)
self._sm.add_transition(states.ReplyStates.REPLY_NACKED,
states.ReplyEvents.REQUEST_RECEIVED,
states.ReplyStates.REPLY_NACKED,
self._sm_reply_request_retrans)
self._sm.add_transition(states.ReplyStates.REPLY_NACKED,
states.ReplyEvents.REPLY_ACK_RECEIVED,
states.ReplyStates.REPLY_NACKED,
self._sm_reply_nack_re_nacked)
self._sm.add_transition(states.ReplyStates.REPLY_NACKED,
states.ReplyEvents.REPLY_NACK_RECEIVED,
states.ReplyStates.REPLY_NACKED,
self._sm_reply_nack_now_acked)
self._sm.add_transition(states.ReplyStates.REPLY_NACKED,
states.ReplyEvents.CANCEL_RECEIVED,
states.ReplyStates.REPLY_NACKED,
None)
# this will happen when the plugin finishes and thus replies
# to a request that had its ACK NACKed. In this case we
# just cancel the messaging and log a message
self._sm.add_transition(states.ReplyStates.REPLY_NACKED,
states.ReplyEvents.USER_REPLIES,
states.ReplyStates.REPLY_NACKED,
self._sm_replied_nacked_reply)
# this next state should only occur when a message is out
# of order or the agent manager made a mistake
self._sm.add_transition(states.ReplyStates.REPLY_NACKED,
states.ReplyEvents.DB_INFLATE,
states.ReplyStates.REPLY_NACKED,
self._sm_reinflated_reply_ack)
class RequestListener(object):
def __init__(self, conf, sender_connection, dispatcher,
db, id_system=None):
self._conn = sender_connection
self._dispatcher = dispatcher
self._requests = {}
self._messages_processed = 0
self._reply_observers = []
self._timeout = conf.messaging_retransmission_timeout
self._shutdown = False
self._conf = conf
self._db = db
self._id_system = id_system
self._lock = threading.RLock()
self._db.starting_agent()
def get_reply_observers(self):
# get the whole list so that the user can add and remove themselves.
# This sort of thing should be done only with carefully written code
# using carefully written observers that do very lightweight
# nonblocking operations
return self._reply_observers
def _call_reply_observers(self, func_name, argument):
for o in self._reply_observers:
try:
func = getattr(o, func_name)
func(argument)
except:
_g_logger.exception("A bad observer threw an exception.")
# don't let some crappy observer ruin everything
pass
def _process_doc(self, incoming_doc):
if incoming_doc is None:
return
with tracer.RequestTracer(incoming_doc['request_id']):
self._call_reply_observers("incoming_message", incoming_doc)
_g_logger.debug("New message type %s" % incoming_doc['type'])
# if the agent is misbehaving the AM might tell it to kill itself.
# cold.
if incoming_doc["type"] == message_types.MessageTypes.HEMLOCK:
_g_logger.error("HEMLOCK: DCM told me to kill myself.")
os.killpg(0, signal.SIGKILL)
sys.exit(10)
# if it is a alert message short circuit
if incoming_doc["type"] == message_types.MessageTypes.ALERT_ACK:
if self._id_system:
self._id_system.incoming_message(incoming_doc)
return
request_id = incoming_doc["request_id"]
# is this request already in memory?
if request_id in self._requests:
_g_logger.debug("The message was found in the requests.")
# send it through, state machine will deal with it
req = self._requests[request_id]
req.incoming_message(incoming_doc)
return
# if the request id has already been seen by the database
db_record = self._db.lookup_req(request_id)
if db_record:
_g_logger.info("Inflating the record from the DB."
+ request_id)
req = ReplyRPC(
self,
self._conf.agent_id,
self._conn,
request_id,
incoming_doc,
self._db,
timeout=self._timeout,
reply_doc=db_record.reply_doc,
start_state=db_record.state)
# this will probably be used in the near future so get it
# on the memory list
self._requests[request_id] = req
req.incoming_message(incoming_doc)
return
if incoming_doc["type"] == message_types.MessageTypes.REQUEST:
if len(list(self._requests.keys())) >=\
self._conf.messaging_max_at_once > -1:
# short circuit the case where the agent is too busy
dcm_logger.log_to_dcm_console_overloaded(
msg="The new request was rejected because the agent has too many outstanding requests.")
nack_doc = {
'type': message_types.MessageTypes.NACK,
'message_id': utils.new_message_id(),
'request_id': request_id,
'agent_id': self._conf.agent_id,
'error_message': ("The agent can only handle %d "
"commands at once"
% self._conf.messaging_max_at_once)}
self._conn.send(nack_doc)
return
_g_logger.debug("A new request has come in.")
req = ReplyRPC(
self,
self._conf.agent_id,
self._conn,
request_id,
incoming_doc,
self._db,
timeout=self._timeout)
self._call_reply_observers("new_message", req)
# only add the message if processing was successful
self._requests[request_id] = req
try:
self._dispatcher.incoming_request(req)
except Exception:
_g_logger.exception("The dispatcher could not handle a "
"message.")
del self._requests[request_id]
dcm_logger.log_to_dcm_console_messaging_error(
msg="The dispatcher could not handle the message.")
raise
else:
# if we have never heard of the ID and this is not a new
# request we return a courtesy error
_g_logger.debug("Unknown message ID sending a NACK")
nack_doc = {'type': message_types.MessageTypes.NACK,
'message_id': utils.new_message_id(),
'request_id': request_id,
'agent_id': self._conf.agent_id,
'error_message':
"%s is an unknown ID" % request_id}
self._conn.send(nack_doc)
def _validate_doc(self, incoming_doc):
pass
def _send_bad_message_reply(self, incoming_doc, message):
_g_logger.debug("Sending the bad message %s" % message)
# we want to send a NACK for the message; however, the message may be
# malformed and missing message_id or request_id. In this
# case we will send values in that place indicating that *a* message
# was bad. There will be almost no way for the sender to know which
# one
try:
request_id = incoming_doc['request_id']
except KeyError:
request_id = ReplyRPC.MISSING_VALUE_STRING
nack_doc = {'type': message_types.MessageTypes.NACK,
'message_id': utils.new_message_id(),
'request_id': request_id,
'error_message': message,
'agent_id': self._conf.agent_id}
self._conn.send(nack_doc)
def message_done(self, reply_message):
self._lock.acquire()
try:
request_id = reply_message.get_request_id()
del self._requests[request_id]
_g_logger.debug("The message %s has completed and is being "
"removed" % request_id)
self._messages_processed += 1
finally:
self._lock.release()
self._call_reply_observers("message_done", reply_message)
def get_messages_processed(self):
return self._messages_processed
def is_busy(self):
return len(self._requests) != 0
def shutdown(self):
"""
Stop accepting new requests but allow for outstanding messages to
complete.
"""
self._shutdown = True
for req in list(self._requests.values()):
req.kill()
def wait_for_all_nicely(self):
# XXX TODO how long should this block? do we need this?
# looks like just for tests
while self._requests:
dcm_events.poll()
def reply(self, request_id, reply_doc):
reply_req = self._requests[request_id]
reply_req.reply(reply_doc)
def incoming_parent_q_message(self, incoming_doc):
_g_logger.debug("Received message %s" % str(incoming_doc))
try:
self._validate_doc(incoming_doc)
self._process_doc(incoming_doc)
except Exception as ex:
_g_logger.exception(
"Error processing the message: %s" % str(incoming_doc))
self._send_bad_message_reply(incoming_doc, str(ex))
class ReplyObserverInterface(object):
@agent_util.not_implemented_decorator
def new_message(self, reply):
pass
@agent_util.not_implemented_decorator
def message_done(self, reply):
pass
@agent_util.not_implemented_decorator
def incoming_message(self, incoming_doc):
pass
| apache-2.0 | -9,064,698,894,957,441,000 | 42.891068 | 112 | 0.538519 | false |
enikesha/ga-bitbot | libs/call_metrics.py | 18 | 2700 |
"""
call_metrics v0.01
a class function decorator which collects metrics (number of calls and total execution time)
Copyright 2012 Brian Monkaba
This file is part of ga-bitbot.
ga-bitbot is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
ga-bitbot is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with ga-bitbot. If not, see <http://www.gnu.org/licenses/>.
"""
from functools import wraps
import time
_metrics = {}
#decorator which can be used on class methods
#inserts a _metrics dictionary into the object
def class_call_metrics(func):
@wraps(func)
def _decorator(self, *args, **kwargs):
if not hasattr(self, '_metrics'):
self._metrics = {}
start = time.time()
result = func(self, *args, **kwargs)
finish = time.time()
if not self._metrics.has_key(func.__name__):
self._metrics.update({func.__name__:{'total_time':0,'calls':0}})
self._metrics[func.__name__]['total_time'] += finish - start
self._metrics[func.__name__]['calls'] += 1
return result
return _decorator
#decorator which can be used on functions
#uses the global _metrics dictionary
def call_metrics(func):
@wraps(func)
def _decorator(*args, **kwargs):
start = time.time()
result = func(*args, **kwargs)
finish = time.time()
if not _metrics.has_key(func.__name__):
_metrics.update({func.__name__:{'total_time':0,'calls':0}})
_metrics[func.__name__]['total_time'] += finish - start
_metrics[func.__name__]['calls'] += 1
return result
return _decorator
def get_metrics():
return _metrics
if __name__ == '__main__':
class test():
@class_call_metrics
def test(self,data):
"""test method doc string"""
z = 0
for i in range(9999):
for x in range(9999):
z += 1
print "data:",data
return 1
@call_metrics
def function_test(data):
print "data",data
return 2
t = test()
for i in range(10):
print t.test(i)
print t._metrics
print t.test.__doc__
print function_test('function test input')
print get_metrics()
| gpl-3.0 | 8,546,333,074,817,464,000 | 26.835052 | 92 | 0.61037 | false |
nyddle/toster | core/views.py | 1 | 3655 | from django.shortcuts import render, redirect, HttpResponse, get_object_or_404
from django.http import Http404
from django.http import HttpResponseNotFound
from django.views.generic import View
from django.views.generic import ListView
from django.views.generic.edit import FormView, ProcessFormView, CreateView
from rest_framework import viewsets
from .forms import AskQuestionForm
from .models import Question, MyUser
from .serializers import QuestionSerializer, MyUserSerializer
from taggit.models import Tag
from bookmarks.handlers import library
from bookmarks.models import Bookmark
from django.contrib.auth import get_user_model as user_model
from actstream import action
from actstream.models import model_stream, any_stream
class FeedView(View):
def get(self, request):
return render(request, 'core/feed.html', { 'stream' : any_stream(request.user) })
class QuestionView(View):
model = Question
def get(self, request, questionid, slug):
try:
question = Question.objects.get(pk=questionid)
except Question.DoesNotExist:
raise Http404
question.views += 1
question.save()
return render(request, 'core/question.html', {'question': question})
class MyUserView(View):
model = MyUser
def get(self, request, username):
try:
user = MyUser.objects.get(username=username)
except MyUser.DoesNotExist:
raise Http404
return render(request, 'core/user.html', {'user': user})
#TODO: merge with popular view
class QuestionListView(ListView):
model = Question
queryset = Question.objects.order_by('-pub_date')
def get_queryset(self):
queryset = super(QuestionListView, self).get_queryset()
if 'tag' in self.kwargs:
tag = self.kwargs['tag']
return queryset.filter(tags__name__in=[tag,])
q = self.request.GET.get("q")
if q:
return queryset.filter(question__icontains=q)
return queryset
class PopularQuestionListView(ListView):
model = Question
queryset = Question.objects.order_by('-rating')
class MyUserListView(ListView):
model = MyUser
class MyUserQuestionListView(ListView):
model = Question
def get_queryset(self):
self.author = get_object_or_404(MyUser, name=self.kwargs['username'])
return Question.objects.filter(author=self.author)
class QuestionViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows users to be viewed or edited.
"""
queryset = Question.objects.all()
serializer_class = QuestionSerializer
class MyUserViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows users to be viewed or edited.
"""
queryset = MyUser.objects.all()
serializer_class = MyUserSerializer
class HomeView(View):
def get(self, request):
return render(request, 'base.html')
class AskQuestionView(FormView):
template_name = 'core/new_question.html'
form_class = AskQuestionForm
success_url = '/questions'
def form_valid(self, form):
form.save()
return super(AskQuestionView, self).form_valid(form)
class TagListView(ListView):
model = Tag
#TODO: MembersView?
class Members(View):
def get(self, request):
print('user', request.user)
print('session', request.session.keys())
return render(request, 'core/member.html')
class BookmarksView(View):
def get(self, request):
bookmarks = Bookmark.objects.filter_with_contents(user=self.request.user)
return render(request, 'core/bookmark_list.html', { 'bookmarks' : bookmarks })
| mit | -893,408,532,948,083,800 | 25.875 | 89 | 0.691382 | false |
BIDS-collaborative/cega-trace | webscrap/crossref JSON/doi/doi_to_JSON.py | 2 | 1389 | import re
import urllib
import urllib.request
import json
import sys
import codecs
def search_doi(s):
url = "http://api.crossref.org/works/" + s
with urllib.request.urlopen(url) as htmlfile:
htmltext = htmlfile.read().decode('utf-8')
curdata = json.loads(htmltext)
print(htmltext)
return curdata
def decode(parse_file):
with codecs.open(parse_file, 'r+', encoding='utf-8', errors='ignore') as txt_file:
txt = txt_file.readlines()
return txt
def main():
data_ref = []
# get bibliometric data for all the references using DOI search via crossref.
for i in range(0, 568):
try:
name = (str(i) + 'doi.txt')
data = open(name, 'r')
if data:
my_list = data
for line in my_list:
print('reading:' + str(i) + 'doi.txt')
cur_data = search_doi(line)
cur_data["ID"] = str(i)
data_ref.append(cur_data)
data.close()
# Each time we finish searching, overwrite the previous JSON file.
with open("master_data_ref.json", "w") as outfile:
json.dump(data_ref, outfile)
print(str(i) + 'finish searching '+'doi.txt')
except IOError:
pass
except ValueError:
pass
if __name__ == '__main__':
main()
| bsd-2-clause | -6,835,219,122,280,337,000 | 29.195652 | 86 | 0.541397 | false |
bbuckingham/katello | scripts/reindent.py | 69 | 10490 | #! /usr/bin/env python
# Released to the public domain, by Tim Peters, 03 October 2000.
"""reindent [-d][-r][-v] [ path ... ]
-d (--dryrun) Dry run. Analyze, but don't make any changes to, files.
-r (--recurse) Recurse. Search for all .py files in subdirectories too.
-n (--nobackup) No backup. Does not make a ".bak" file before reindenting.
-v (--verbose) Verbose. Print informative msgs; else no output.
-h (--help) Help. Print this usage information and exit.
Change Python (.py) files to use 4-space indents and no hard tab characters.
Also trim excess spaces and tabs from ends of lines, and remove empty lines
at the end of files. Also ensure the last line ends with a newline.
If no paths are given on the command line, reindent operates as a filter,
reading a single source file from standard input and writing the transformed
source to standard output. In this case, the -d, -r and -v flags are
ignored.
You can pass one or more file and/or directory paths. When a directory
path, all .py files within the directory will be examined, and, if the -r
option is given, likewise recursively for subdirectories.
If output is not to standard output, reindent overwrites files in place,
renaming the originals with a .bak extension. If it finds nothing to
change, the file is left alone. If reindent does change a file, the changed
file is a fixed-point for future runs (i.e., running reindent on the
resulting .py file won't change it again).
The hard part of reindenting is figuring out what to do with comment
lines. So long as the input files get a clean bill of health from
tabnanny.py, reindent should do a good job.
The backup file is a copy of the one that is being reindented. The ".bak"
file is generated with shutil.copy(), but some corner cases regarding
user/group and permissions could leave the backup file more readable than
you'd prefer. You can always use the --nobackup option to prevent this.
"""
__version__ = "1"
import tokenize
import os, shutil
import sys
verbose = 0
recurse = 0
dryrun = 0
makebackup = True
def usage(msg=None):
if msg is not None:
print >> sys.stderr, msg
print >> sys.stderr, __doc__
def errprint(*args):
sep = ""
for arg in args:
sys.stderr.write(sep + str(arg))
sep = " "
sys.stderr.write("\n")
def main():
import getopt
global verbose, recurse, dryrun, makebackup
try:
opts, args = getopt.getopt(sys.argv[1:], "drnvh",
["dryrun", "recurse", "nobackup", "verbose", "help"])
except getopt.error, msg:
usage(msg)
return
for o, a in opts:
if o in ('-d', '--dryrun'):
dryrun += 1
elif o in ('-r', '--recurse'):
recurse += 1
elif o in ('-n', '--nobackup'):
makebackup = False
elif o in ('-v', '--verbose'):
verbose += 1
elif o in ('-h', '--help'):
usage()
return
if not args:
r = Reindenter(sys.stdin)
r.run()
r.write(sys.stdout)
return
for arg in args:
check(arg)
def check(file):
if os.path.isdir(file) and not os.path.islink(file):
if verbose:
print "listing directory", file
names = os.listdir(file)
for name in names:
fullname = os.path.join(file, name)
if ((recurse and os.path.isdir(fullname) and
not os.path.islink(fullname) and
not os.path.split(fullname)[1].startswith("."))
or name.lower().endswith(".py")):
check(fullname)
return
if verbose:
print "checking", file, "...",
try:
f = open(file)
except IOError, msg:
errprint("%s: I/O Error: %s" % (file, str(msg)))
return
r = Reindenter(f)
f.close()
if r.run():
if verbose:
print "changed."
if dryrun:
print "But this is a dry run, so leaving it alone."
if not dryrun:
bak = file + ".bak"
if makebackup:
shutil.copyfile(file, bak)
if verbose:
print "backed up", file, "to", bak
f = open(file, "w")
r.write(f)
f.close()
if verbose:
print "wrote new", file
return True
else:
if verbose:
print "unchanged."
return False
def _rstrip(line, JUNK='\n \t'):
"""Return line stripped of trailing spaces, tabs, newlines.
Note that line.rstrip() instead also strips sundry control characters,
but at least one known Emacs user expects to keep junk like that, not
mentioning Barry by name or anything <wink>.
"""
i = len(line)
while i > 0 and line[i-1] in JUNK:
i -= 1
return line[:i]
class Reindenter:
def __init__(self, f):
self.find_stmt = 1 # next token begins a fresh stmt?
self.level = 0 # current indent level
# Raw file lines.
self.raw = f.readlines()
# File lines, rstripped & tab-expanded. Dummy at start is so
# that we can use tokenize's 1-based line numbering easily.
# Note that a line is all-blank iff it's "\n".
self.lines = [_rstrip(line).expandtabs() + "\n"
for line in self.raw]
self.lines.insert(0, None)
self.index = 1 # index into self.lines of next line
# List of (lineno, indentlevel) pairs, one for each stmt and
# comment line. indentlevel is -1 for comment lines, as a
# signal that tokenize doesn't know what to do about them;
# indeed, they're our headache!
self.stats = []
def run(self):
tokenize.tokenize(self.getline, self.tokeneater)
# Remove trailing empty lines.
lines = self.lines
while lines and lines[-1] == "\n":
lines.pop()
# Sentinel.
stats = self.stats
stats.append((len(lines), 0))
# Map count of leading spaces to # we want.
have2want = {}
# Program after transformation.
after = self.after = []
# Copy over initial empty lines -- there's nothing to do until
# we see a line with *something* on it.
i = stats[0][0]
after.extend(lines[1:i])
for i in range(len(stats)-1):
thisstmt, thislevel = stats[i]
nextstmt = stats[i+1][0]
have = getlspace(lines[thisstmt])
want = thislevel * 4
if want < 0:
# A comment line.
if have:
# An indented comment line. If we saw the same
# indentation before, reuse what it most recently
# mapped to.
want = have2want.get(have, -1)
if want < 0:
# Then it probably belongs to the next real stmt.
for j in xrange(i+1, len(stats)-1):
jline, jlevel = stats[j]
if jlevel >= 0:
if have == getlspace(lines[jline]):
want = jlevel * 4
break
if want < 0: # Maybe it's a hanging
# comment like this one,
# in which case we should shift it like its base
# line got shifted.
for j in xrange(i-1, -1, -1):
jline, jlevel = stats[j]
if jlevel >= 0:
want = have + getlspace(after[jline-1]) - \
getlspace(lines[jline])
break
if want < 0:
# Still no luck -- leave it alone.
want = have
else:
want = 0
assert want >= 0
have2want[have] = want
diff = want - have
if diff == 0 or have == 0:
after.extend(lines[thisstmt:nextstmt])
else:
for line in lines[thisstmt:nextstmt]:
if diff > 0:
if line == "\n":
after.append(line)
else:
after.append(" " * diff + line)
else:
remove = min(getlspace(line), -diff)
after.append(line[remove:])
return self.raw != self.after
def write(self, f):
f.writelines(self.after)
# Line-getter for tokenize.
def getline(self):
if self.index >= len(self.lines):
line = ""
else:
line = self.lines[self.index]
self.index += 1
return line
# Line-eater for tokenize.
def tokeneater(self, type, token, (sline, scol), end, line,
INDENT=tokenize.INDENT,
DEDENT=tokenize.DEDENT,
NEWLINE=tokenize.NEWLINE,
COMMENT=tokenize.COMMENT,
NL=tokenize.NL):
if type == NEWLINE:
# A program statement, or ENDMARKER, will eventually follow,
# after some (possibly empty) run of tokens of the form
# (NL | COMMENT)* (INDENT | DEDENT+)?
self.find_stmt = 1
elif type == INDENT:
self.find_stmt = 1
self.level += 1
elif type == DEDENT:
self.find_stmt = 1
self.level -= 1
elif type == COMMENT:
if self.find_stmt:
self.stats.append((sline, -1))
# but we're still looking for a new stmt, so leave
# find_stmt alone
elif type == NL:
pass
elif self.find_stmt:
# This is the first "real token" following a NEWLINE, so it
# must be the first token of the next program statement, or an
# ENDMARKER.
self.find_stmt = 0
if line: # not endmarker
self.stats.append((sline, self.level))
# Count number of leading blanks.
def getlspace(line):
i, n = 0, len(line)
while i < n and line[i] == " ":
i += 1
return i
if __name__ == '__main__':
main()
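# Illustrative usage sketch (not part of the original script; "example.py" is a
# hypothetical path): Reindenter reads a file, run() normalizes indentation to
# 4-space levels and reports whether anything changed, and write() emits the
# transformed lines.
def _example_reindent(path="example.py"):
    f = open(path)
    try:
        r = Reindenter(f)
    finally:
        f.close()
    changed = r.run()
    if changed:
        out = open(path, "w")
        try:
            r.write(out)
        finally:
            out.close()
    return changed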
| gpl-2.0 | 2,023,852,739,583,287,800 | 33.506579 | 77 | 0.525071 | false |
nicholasserra/sentry | src/sentry/services/smtp.py | 36 | 3120 | """
sentry.services.smtp
~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
import asyncore
import email
import logging
from smtpd import SMTPServer, SMTPChannel
from email_reply_parser import EmailReplyParser
from sentry.services.base import Service
from sentry.tasks.email import process_inbound_email
from sentry.utils.email import email_to_group_id
logger = logging.getLogger(__name__)
# HACK(mattrobenolt): literally no idea what I'm doing. Mostly made this up.
# SMTPChannel doesn't support EHLO response, but nginx requires an EHLO.
# EHLO is available in python 3, so this is backported somewhat
def smtp_EHLO(self, arg):
if not arg:
self.push('501 Syntax: EHLO hostname')
return
if self._SMTPChannel__greeting:
self.push('503 Duplicate HELO/EHLO')
else:
self._SMTPChannel__greeting = arg
self.push('250 %s' % self._SMTPChannel__fqdn)
SMTPChannel.smtp_EHLO = smtp_EHLO
STATUS = {
200: '200 Ok',
550: '550 Not found',
552: '552 Message too long',
}
class SentrySMTPServer(Service, SMTPServer):
name = 'smtp'
max_message_length = 20000 # This might be too conservative
def __init__(self, host=None, port=None, debug=False, workers=None):
from django.conf import settings
self.host = host or getattr(settings, 'SENTRY_SMTP_HOST', '0.0.0.0')
self.port = port or getattr(settings, 'SENTRY_SMTP_PORT', 1025)
def process_message(self, peer, mailfrom, rcpttos, raw_message):
logger.info('Incoming message received from %s', mailfrom)
if not len(rcpttos):
logger.info('Incoming email had no recipients. Ignoring.')
return STATUS[550]
if len(raw_message) > self.max_message_length:
logger.info('Inbound email message was too long: %d', len(raw_message))
return STATUS[552]
try:
group_id = email_to_group_id(rcpttos[0])
except Exception:
logger.info('%r is not a valid email address', rcpttos)
return STATUS[550]
message = email.message_from_string(raw_message)
payload = None
if message.is_multipart():
for msg in message.walk():
if msg.get_content_type() == 'text/plain':
payload = msg.get_payload()
break
if payload is None:
# No text/plain part, bailing
return STATUS[200]
else:
payload = message.get_payload()
payload = EmailReplyParser.parse_reply(payload).strip()
if not payload:
# If there's no body, we don't need to go any further
return STATUS[200]
process_inbound_email.delay(mailfrom, group_id, payload)
return STATUS[200]
def run(self):
SMTPServer.__init__(self, (self.host, self.port), None)
try:
asyncore.loop()
except KeyboardInterrupt:
pass
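# Hedged usage sketch (not part of the original module): the class derives from
# Service, so Sentry normally starts it through its service machinery; run on
# its own it binds the socket and enters the asyncore loop. The host and port
# below are made-up examples, and Django settings must already be configured.
def _example_start_smtp(host='127.0.0.1', port=1025):
    SentrySMTPServer(host=host, port=port).run()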
| bsd-3-clause | 5,984,335,884,088,202,000 | 30.515152 | 83 | 0.626282 | false |
seem-sky/kbengine | kbe/res/scripts/common/Lib/lib2to3/pgen2/tokenize.py | 80 | 19321 | # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006 Python Software Foundation.
# All rights reserved.
"""Tokenization help for Python programs.
generate_tokens(readline) is a generator that breaks a stream of
text into Python tokens. It accepts a readline-like method which is called
repeatedly to get the next line of input (or "" for EOF). It generates
5-tuples with these members:
the token type (see token.py)
the token (a string)
the starting (row, column) indices of the token (a 2-tuple of ints)
the ending (row, column) indices of the token (a 2-tuple of ints)
the original line (string)
It is designed to match the working of the Python tokenizer exactly, except
that it produces COMMENT tokens for comments and gives type OP for all
operators
Older entry points
tokenize_loop(readline, tokeneater)
tokenize(readline, tokeneater=printtoken)
are the same, except instead of generating tokens, tokeneater is a callback
function to which the 5 fields described above are passed as 5 arguments,
each time a new token is found."""
__author__ = 'Ka-Ping Yee <[email protected]>'
__credits__ = \
'GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro'
import string, re
from codecs import BOM_UTF8, lookup
from lib2to3.pgen2.token import *
from . import token
__all__ = [x for x in dir(token) if x[0] != '_'] + ["tokenize",
"generate_tokens", "untokenize"]
del token
try:
bytes
except NameError:
# Support bytes type in Python <= 2.5, so 2to3 turns itself into
# valid Python 3 code.
bytes = str
def group(*choices): return '(' + '|'.join(choices) + ')'
def any(*choices): return group(*choices) + '*'
def maybe(*choices): return group(*choices) + '?'
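# Hedged illustration (not part of the original module): the three helpers above
# compose alternation, repetition and optionality into the larger token regexes
# defined below.
def _example_regex_helpers():
    assert group('a', 'b') == '(a|b)'
    assert any('a', 'b') == '(a|b)*'
    assert maybe('a', 'b') == '(a|b)?'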
Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Name = r'[a-zA-Z_]\w*'
Binnumber = r'0[bB][01]*'
Hexnumber = r'0[xX][\da-fA-F]*[lL]?'
Octnumber = r'0[oO]?[0-7]*[lL]?'
Decnumber = r'[1-9]\d*[lL]?'
Intnumber = group(Binnumber, Hexnumber, Octnumber, Decnumber)
Exponent = r'[eE][-+]?\d+'
Pointfloat = group(r'\d+\.\d*', r'\.\d+') + maybe(Exponent)
Expfloat = r'\d+' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r'\d+[jJ]', Floatnumber + r'[jJ]')
Number = group(Imagnumber, Floatnumber, Intnumber)
# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group("[ubUB]?[rR]?'''", '[ubUB]?[rR]?"""')
# Single-line ' or " string.
String = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
# Because of leftmost-then-longest match semantics, be sure to put the
# longest operators first (e.g., if = came before ==, == would get
# recognized as two instances of =).
Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"<>", r"!=",
r"//=?", r"->",
r"[+\-*/%&@|^=<>]=?",
r"~")
Bracket = '[][(){}]'
Special = group(r'\r?\n', r'[:;.,`@]')
Funny = group(Operator, Bracket, Special)
PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken
# First (or only) line of ' or " string.
ContStr = group(r"[uUbB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
group("'", r'\\\r?\n'),
r'[uUbB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
tokenprog, pseudoprog, single3prog, double3prog = list(map(
re.compile, (Token, PseudoToken, Single3, Double3)))
endprogs = {"'": re.compile(Single), '"': re.compile(Double),
"'''": single3prog, '"""': double3prog,
"r'''": single3prog, 'r"""': double3prog,
"u'''": single3prog, 'u"""': double3prog,
"b'''": single3prog, 'b"""': double3prog,
"ur'''": single3prog, 'ur"""': double3prog,
"br'''": single3prog, 'br"""': double3prog,
"R'''": single3prog, 'R"""': double3prog,
"U'''": single3prog, 'U"""': double3prog,
"B'''": single3prog, 'B"""': double3prog,
"uR'''": single3prog, 'uR"""': double3prog,
"Ur'''": single3prog, 'Ur"""': double3prog,
"UR'''": single3prog, 'UR"""': double3prog,
"bR'''": single3prog, 'bR"""': double3prog,
"Br'''": single3prog, 'Br"""': double3prog,
"BR'''": single3prog, 'BR"""': double3prog,
'r': None, 'R': None,
'u': None, 'U': None,
'b': None, 'B': None}
triple_quoted = {}
for t in ("'''", '"""',
"r'''", 'r"""', "R'''", 'R"""',
"u'''", 'u"""', "U'''", 'U"""',
"b'''", 'b"""', "B'''", 'B"""',
"ur'''", 'ur"""', "Ur'''", 'Ur"""',
"uR'''", 'uR"""', "UR'''", 'UR"""',
"br'''", 'br"""', "Br'''", 'Br"""',
"bR'''", 'bR"""', "BR'''", 'BR"""',):
triple_quoted[t] = t
single_quoted = {}
for t in ("'", '"',
"r'", 'r"', "R'", 'R"',
"u'", 'u"', "U'", 'U"',
"b'", 'b"', "B'", 'B"',
"ur'", 'ur"', "Ur'", 'Ur"',
"uR'", 'uR"', "UR'", 'UR"',
"br'", 'br"', "Br'", 'Br"',
"bR'", 'bR"', "BR'", 'BR"', ):
single_quoted[t] = t
tabsize = 8
class TokenError(Exception): pass
class StopTokenizing(Exception): pass
def printtoken(type, token, xxx_todo_changeme, xxx_todo_changeme1, line): # for testing
(srow, scol) = xxx_todo_changeme
(erow, ecol) = xxx_todo_changeme1
print("%d,%d-%d,%d:\t%s\t%s" % \
(srow, scol, erow, ecol, tok_name[type], repr(token)))
def tokenize(readline, tokeneater=printtoken):
"""
The tokenize() function accepts two parameters: one representing the
input stream, and one providing an output mechanism for tokenize().
The first parameter, readline, must be a callable object which provides
the same interface as the readline() method of built-in file objects.
Each call to the function should return one line of input as a string.
The second parameter, tokeneater, must also be a callable object. It is
called once for each token, with five arguments, corresponding to the
tuples generated by generate_tokens().
"""
try:
tokenize_loop(readline, tokeneater)
except StopTokenizing:
pass
# backwards compatible interface
def tokenize_loop(readline, tokeneater):
for token_info in generate_tokens(readline):
tokeneater(*token_info)
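# Hedged usage sketch (not part of the original module): tokenize() drives the
# token stream through a callback; with the default printtoken callback it just
# prints each 5-tuple. The readline callable can come from io.StringIO.
def _example_tokenize(source=u"x = 1 + 2\n"):
    import io
    tokenize(io.StringIO(source).readline)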
class Untokenizer:
def __init__(self):
self.tokens = []
self.prev_row = 1
self.prev_col = 0
def add_whitespace(self, start):
row, col = start
assert row <= self.prev_row
col_offset = col - self.prev_col
if col_offset:
self.tokens.append(" " * col_offset)
def untokenize(self, iterable):
for t in iterable:
if len(t) == 2:
self.compat(t, iterable)
break
tok_type, token, start, end, line = t
self.add_whitespace(start)
self.tokens.append(token)
self.prev_row, self.prev_col = end
if tok_type in (NEWLINE, NL):
self.prev_row += 1
self.prev_col = 0
return "".join(self.tokens)
def compat(self, token, iterable):
startline = False
indents = []
toks_append = self.tokens.append
toknum, tokval = token
if toknum in (NAME, NUMBER):
tokval += ' '
if toknum in (NEWLINE, NL):
startline = True
for tok in iterable:
toknum, tokval = tok[:2]
if toknum in (NAME, NUMBER):
tokval += ' '
if toknum == INDENT:
indents.append(tokval)
continue
elif toknum == DEDENT:
indents.pop()
continue
elif toknum in (NEWLINE, NL):
startline = True
elif startline and indents:
toks_append(indents[-1])
startline = False
toks_append(tokval)
cookie_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)
blank_re = re.compile(br'^[ \t\f]*(?:[#\r\n]|$)', re.ASCII)
def _get_normal_name(orig_enc):
"""Imitates get_normal_name in tokenizer.c."""
# Only care about the first 12 characters.
enc = orig_enc[:12].lower().replace("_", "-")
if enc == "utf-8" or enc.startswith("utf-8-"):
return "utf-8"
if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
return "iso-8859-1"
return orig_enc
def detect_encoding(readline):
"""
The detect_encoding() function is used to detect the encoding that should
be used to decode a Python source file. It requires one argument, readline,
in the same way as the tokenize() generator.
It will call readline a maximum of twice, and return the encoding used
(as a string) and a list of any lines (left as bytes) it has read
in.
It detects the encoding from the presence of a utf-8 bom or an encoding
cookie as specified in pep-0263. If both a bom and a cookie are present, but
disagree, a SyntaxError will be raised. If the encoding cookie is an invalid
charset, raise a SyntaxError. Note that if a utf-8 bom is found,
'utf-8-sig' is returned.
If no encoding is specified, then the default of 'utf-8' will be returned.
"""
bom_found = False
encoding = None
default = 'utf-8'
def read_or_stop():
try:
return readline()
except StopIteration:
return bytes()
def find_cookie(line):
try:
line_string = line.decode('ascii')
except UnicodeDecodeError:
return None
match = cookie_re.match(line_string)
if not match:
return None
encoding = _get_normal_name(match.group(1))
try:
codec = lookup(encoding)
except LookupError:
# This behaviour mimics the Python interpreter
raise SyntaxError("unknown encoding: " + encoding)
if bom_found:
if codec.name != 'utf-8':
# This behaviour mimics the Python interpreter
raise SyntaxError('encoding problem: utf-8')
encoding += '-sig'
return encoding
first = read_or_stop()
if first.startswith(BOM_UTF8):
bom_found = True
first = first[3:]
default = 'utf-8-sig'
if not first:
return default, []
encoding = find_cookie(first)
if encoding:
return encoding, [first]
if not blank_re.match(first):
return default, [first]
second = read_or_stop()
if not second:
return default, [first]
encoding = find_cookie(second)
if encoding:
return encoding, [first, second]
return default, [first, second]
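# Hedged sketch (not part of the original module): detect_encoding() consumes a
# bytes-producing readline, e.g. from a file opened in binary mode or io.BytesIO.
def _example_detect_encoding(data=b"# -*- coding: latin-1 -*-\nx = 1\n"):
    import io
    encoding, consumed = detect_encoding(io.BytesIO(data).readline)
    return encoding, consumed  # ('iso-8859-1', [b'# -*- coding: latin-1 -*-\n'])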
def untokenize(iterable):
"""Transform tokens back into Python source code.
Each element returned by the iterable must be a token sequence
with at least two elements, a token number and token value. If
only two tokens are passed, the resulting output is poor.
Round-trip invariant for full input:
Untokenized source will match input source exactly
    Round-trip invariant for limited input:
        # Output text will tokenize back to the input
t1 = [tok[:2] for tok in generate_tokens(f.readline)]
newcode = untokenize(t1)
readline = iter(newcode.splitlines(1)).next
        t2 = [tok[:2] for tok in generate_tokens(readline)]
assert t1 == t2
"""
ut = Untokenizer()
return ut.untokenize(iterable)
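# Hedged sketch (not part of the original module): a limited round trip as
# described in the docstring above -- keep only (type, value) pairs, then
# rebuild equivalent source text.
def _example_untokenize_roundtrip(source=u"x = 1\n"):
    import io
    pairs = [tok[:2] for tok in generate_tokens(io.StringIO(source).readline)]
    return untokenize(pairs)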
def generate_tokens(readline):
"""
The generate_tokens() generator requires one argument, readline, which
must be a callable object which provides the same interface as the
readline() method of built-in file objects. Each call to the function
should return one line of input as a string. Alternately, readline
can be a callable function terminating with StopIteration:
readline = open(myfile).next # Example of alternate readline
The generator produces 5-tuples with these members: the token type; the
token string; a 2-tuple (srow, scol) of ints specifying the row and
column where the token begins in the source; a 2-tuple (erow, ecol) of
ints specifying the row and column where the token ends in the source;
and the line on which the token was found. The line passed is the
logical line; continuation lines are included.
"""
lnum = parenlev = continued = 0
namechars, numchars = string.ascii_letters + '_', '0123456789'
contstr, needcont = '', 0
contline = None
indents = [0]
while 1: # loop over lines in stream
try:
line = readline()
except StopIteration:
line = ''
lnum = lnum + 1
pos, max = 0, len(line)
if contstr: # continued string
if not line:
raise TokenError("EOF in multi-line string", strstart)
endmatch = endprog.match(line)
if endmatch:
pos = end = endmatch.end(0)
yield (STRING, contstr + line[:end],
strstart, (lnum, end), contline + line)
contstr, needcont = '', 0
contline = None
elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
yield (ERRORTOKEN, contstr + line,
strstart, (lnum, len(line)), contline)
contstr = ''
contline = None
continue
else:
contstr = contstr + line
contline = contline + line
continue
elif parenlev == 0 and not continued: # new statement
if not line: break
column = 0
while pos < max: # measure leading whitespace
if line[pos] == ' ': column = column + 1
elif line[pos] == '\t': column = (column//tabsize + 1)*tabsize
elif line[pos] == '\f': column = 0
else: break
pos = pos + 1
if pos == max: break
if line[pos] in '#\r\n': # skip comments or blank lines
if line[pos] == '#':
comment_token = line[pos:].rstrip('\r\n')
nl_pos = pos + len(comment_token)
yield (COMMENT, comment_token,
(lnum, pos), (lnum, pos + len(comment_token)), line)
yield (NL, line[nl_pos:],
(lnum, nl_pos), (lnum, len(line)), line)
else:
yield ((NL, COMMENT)[line[pos] == '#'], line[pos:],
(lnum, pos), (lnum, len(line)), line)
continue
if column > indents[-1]: # count indents or dedents
indents.append(column)
yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
while column < indents[-1]:
if column not in indents:
raise IndentationError(
"unindent does not match any outer indentation level",
("<tokenize>", lnum, pos, line))
indents = indents[:-1]
yield (DEDENT, '', (lnum, pos), (lnum, pos), line)
else: # continued statement
if not line:
raise TokenError("EOF in multi-line statement", (lnum, 0))
continued = 0
while pos < max:
pseudomatch = pseudoprog.match(line, pos)
if pseudomatch: # scan for tokens
start, end = pseudomatch.span(1)
spos, epos, pos = (lnum, start), (lnum, end), end
token, initial = line[start:end], line[start]
if initial in numchars or \
(initial == '.' and token != '.'): # ordinary number
yield (NUMBER, token, spos, epos, line)
elif initial in '\r\n':
newline = NEWLINE
if parenlev > 0:
newline = NL
yield (newline, token, spos, epos, line)
elif initial == '#':
assert not token.endswith("\n")
yield (COMMENT, token, spos, epos, line)
elif token in triple_quoted:
endprog = endprogs[token]
endmatch = endprog.match(line, pos)
if endmatch: # all on one line
pos = endmatch.end(0)
token = line[start:pos]
yield (STRING, token, spos, (lnum, pos), line)
else:
strstart = (lnum, start) # multiple lines
contstr = line[start:]
contline = line
break
elif initial in single_quoted or \
token[:2] in single_quoted or \
token[:3] in single_quoted:
if token[-1] == '\n': # continued string
strstart = (lnum, start)
endprog = (endprogs[initial] or endprogs[token[1]] or
endprogs[token[2]])
contstr, needcont = line[start:], 1
contline = line
break
else: # ordinary string
yield (STRING, token, spos, epos, line)
elif initial in namechars: # ordinary name
yield (NAME, token, spos, epos, line)
elif initial == '\\': # continued stmt
# This yield is new; needed for better idempotency:
yield (NL, token, spos, (lnum, pos), line)
continued = 1
else:
if initial in '([{': parenlev = parenlev + 1
elif initial in ')]}': parenlev = parenlev - 1
yield (OP, token, spos, epos, line)
else:
yield (ERRORTOKEN, line[pos],
(lnum, pos), (lnum, pos+1), line)
pos = pos + 1
for indent in indents[1:]: # pop remaining indent levels
yield (DEDENT, '', (lnum, 0), (lnum, 0), '')
yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '')
if __name__ == '__main__': # testing
import sys
if len(sys.argv) > 1: tokenize(open(sys.argv[1]).readline)
else: tokenize(sys.stdin.readline)
| lgpl-3.0 | 2,248,472,229,958,328,800 | 37.488048 | 87 | 0.522696 | false |
luoyetx/mxnet | example/ssd/data/demo/download_demo_images.py | 24 | 1663 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
wd = os.path.dirname(os.path.realpath(__file__))
def download(url, target):
    # Fetch `url` and save it to the local path `target` by shelling out to wget.
    os.system("wget {} -O {}".format(url, target))
if __name__ == "__main__":
base_url = "https://cloud.githubusercontent.com/assets/3307514/"
demo_list = {"20012566/cbb53c76-a27d-11e6-9aaa-91939c9a1cd5.jpg":"000001.jpg",
"20012564/cbb43894-a27d-11e6-9619-ba792b66c4ae.jpg": "000002.jpg",
"20012565/cbb53942-a27d-11e6-996c-125bb060a81d.jpg": "000004.jpg",
"20012562/cbb4136e-a27d-11e6-884c-ed83c165b422.jpg": "000010.jpg",
"20012567/cbb60336-a27d-11e6-93ff-cbc3f09f5c9e.jpg": "dog.jpg",
"20012563/cbb41382-a27d-11e6-92a9-18dab4fd1ad3.jpg": "person.jpg",
"20012568/cbc2d6f6-a27d-11e6-94c3-d35a9cb47609.jpg": "street.jpg"}
for k, v in demo_list.items():
download(base_url + k, os.path.join(wd, v))
| apache-2.0 | 556,270,573,290,808,640 | 42.763158 | 82 | 0.721587 | false |
CARocha/addac_fadcanic | encuesta/migrations/0005_auto__add_cultivosanuales__add_productoanimal__add_productoprocesado__.py | 2 | 26202 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'CultivosAnuales'
db.create_table(u'encuesta_cultivosanuales', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('nombre', self.gf('django.db.models.fields.CharField')(max_length=250)),
('unidad', self.gf('django.db.models.fields.CharField')(max_length=100)),
))
db.send_create_signal(u'encuesta', ['CultivosAnuales'])
# Adding model 'ProductoAnimal'
db.create_table(u'encuesta_productoanimal', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('nombre', self.gf('django.db.models.fields.CharField')(max_length=250)),
('unidad', self.gf('django.db.models.fields.CharField')(max_length=100)),
))
db.send_create_signal(u'encuesta', ['ProductoAnimal'])
# Adding model 'ProductoProcesado'
db.create_table(u'encuesta_productoprocesado', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('nombre', self.gf('django.db.models.fields.CharField')(max_length=250)),
('unidad', self.gf('django.db.models.fields.CharField')(max_length=100)),
))
db.send_create_signal(u'encuesta', ['ProductoProcesado'])
# Deleting field 'SeguridadCAnuales.unidad_medida'
db.delete_column(u'encuesta_seguridadcanuales', 'unidad_medida')
# Renaming column for 'SeguridadCAnuales.cultivos' to match new field type.
db.rename_column(u'encuesta_seguridadcanuales', 'cultivos', 'cultivos_id')
# Changing field 'SeguridadCAnuales.cultivos'
db.alter_column(u'encuesta_seguridadcanuales', 'cultivos_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['encuesta.CultivosAnuales']))
# Adding index on 'SeguridadCAnuales', fields ['cultivos']
db.create_index(u'encuesta_seguridadcanuales', ['cultivos_id'])
# Deleting field 'SeguridadPProcesados.unidad_medida'
db.delete_column(u'encuesta_seguridadpprocesados', 'unidad_medida')
# Renaming column for 'SeguridadPProcesados.producto' to match new field type.
db.rename_column(u'encuesta_seguridadpprocesados', 'producto', 'producto_id')
# Changing field 'SeguridadPProcesados.producto'
db.alter_column(u'encuesta_seguridadpprocesados', 'producto_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['encuesta.ProductoProcesado']))
# Adding index on 'SeguridadPProcesados', fields ['producto']
db.create_index(u'encuesta_seguridadpprocesados', ['producto_id'])
# Deleting field 'SeguridadPAnimal.unidad_medida'
db.delete_column(u'encuesta_seguridadpanimal', 'unidad_medida')
# Renaming column for 'SeguridadPAnimal.producto' to match new field type.
db.rename_column(u'encuesta_seguridadpanimal', 'producto', 'producto_id')
# Changing field 'SeguridadPAnimal.producto'
db.alter_column(u'encuesta_seguridadpanimal', 'producto_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['encuesta.ProductoAnimal']))
# Adding index on 'SeguridadPAnimal', fields ['producto']
db.create_index(u'encuesta_seguridadpanimal', ['producto_id'])
def backwards(self, orm):
# Removing index on 'SeguridadPAnimal', fields ['producto']
db.delete_index(u'encuesta_seguridadpanimal', ['producto_id'])
# Removing index on 'SeguridadPProcesados', fields ['producto']
db.delete_index(u'encuesta_seguridadpprocesados', ['producto_id'])
# Removing index on 'SeguridadCAnuales', fields ['cultivos']
db.delete_index(u'encuesta_seguridadcanuales', ['cultivos_id'])
# Deleting model 'CultivosAnuales'
db.delete_table(u'encuesta_cultivosanuales')
# Deleting model 'ProductoAnimal'
db.delete_table(u'encuesta_productoanimal')
# Deleting model 'ProductoProcesado'
db.delete_table(u'encuesta_productoprocesado')
# Adding field 'SeguridadCAnuales.unidad_medida'
db.add_column(u'encuesta_seguridadcanuales', 'unidad_medida',
self.gf('django.db.models.fields.IntegerField')(default=1),
keep_default=False)
# Renaming column for 'SeguridadCAnuales.cultivos' to match new field type.
db.rename_column(u'encuesta_seguridadcanuales', 'cultivos_id', 'cultivos')
# Changing field 'SeguridadCAnuales.cultivos'
db.alter_column(u'encuesta_seguridadcanuales', 'cultivos', self.gf('django.db.models.fields.IntegerField')())
# Adding field 'SeguridadPProcesados.unidad_medida'
db.add_column(u'encuesta_seguridadpprocesados', 'unidad_medida',
self.gf('django.db.models.fields.IntegerField')(default=1),
keep_default=False)
# Renaming column for 'SeguridadPProcesados.producto' to match new field type.
db.rename_column(u'encuesta_seguridadpprocesados', 'producto_id', 'producto')
# Changing field 'SeguridadPProcesados.producto'
db.alter_column(u'encuesta_seguridadpprocesados', 'producto', self.gf('django.db.models.fields.IntegerField')())
# Adding field 'SeguridadPAnimal.unidad_medida'
db.add_column(u'encuesta_seguridadpanimal', 'unidad_medida',
self.gf('django.db.models.fields.IntegerField')(default=1),
keep_default=False)
# Renaming column for 'SeguridadPAnimal.producto' to match new field type.
db.rename_column(u'encuesta_seguridadpanimal', 'producto_id', 'producto')
# Changing field 'SeguridadPAnimal.producto'
db.alter_column(u'encuesta_seguridadpanimal', 'producto', self.gf('django.db.models.fields.IntegerField')())
models = {
u'encuesta.credito': {
'Meta': {'object_name': 'Credito'},
'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'organizacion': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.OrganizacionesDanCredito']"}),
'personas': ('django.db.models.fields.IntegerField', [], {}),
'uso': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['encuesta.UsoCredito']", 'symmetrical': 'False'})
},
u'encuesta.cultivosanuales': {
'Meta': {'object_name': 'CultivosAnuales'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'unidad': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'encuesta.cultivossaf': {
'Meta': {'object_name': 'CultivosSaf'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'unidad': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'encuesta.educacion': {
'Meta': {'ordering': "(u'sexo_edad',)", 'object_name': 'Educacion'},
'circ_estudio_adulto': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}),
'estudiando': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nosabe_leer': ('django.db.models.fields.IntegerField', [], {}),
'num_persona': ('django.db.models.fields.IntegerField', [], {}),
'pri_completa': ('django.db.models.fields.IntegerField', [], {}),
'pri_incompleta': ('django.db.models.fields.IntegerField', [], {}),
'secu_completa': ('django.db.models.fields.IntegerField', [], {}),
'secu_incompleta': ('django.db.models.fields.IntegerField', [], {}),
'sexo_edad': ('django.db.models.fields.IntegerField', [], {}),
'uni_o_tecnico': ('django.db.models.fields.IntegerField', [], {})
},
u'encuesta.encuesta': {
'Meta': {'object_name': 'Encuesta'},
'ano': ('django.db.models.fields.IntegerField', [], {}),
'fecha': ('django.db.models.fields.DateField', [], {}),
'fecha2': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'oficina': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'personas': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'recolector': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Recolector']"})
},
u'encuesta.finca': {
'Meta': {'ordering': "(u'finca',)", 'object_name': 'Finca'},
'animal_aves': ('django.db.models.fields.IntegerField', [], {}),
'animal_bovino': ('django.db.models.fields.IntegerField', [], {}),
'animal_caprino': ('django.db.models.fields.IntegerField', [], {}),
'animal_equino': ('django.db.models.fields.IntegerField', [], {}),
'animal_porcino': ('django.db.models.fields.IntegerField', [], {}),
'area_casa': ('django.db.models.fields.DecimalField', [], {'max_digits': '10', 'decimal_places': '2'}),
'area_finca': ('django.db.models.fields.DecimalField', [], {'max_digits': '10', 'decimal_places': '2'}),
'comunidad': ('smart_selects.db_fields.ChainedForeignKey', [], {'to': u"orm['lugar.Comunidad']"}),
'coordenadas_gps': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '6', 'blank': 'True'}),
'coordenadas_lg': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '6', 'blank': 'True'}),
'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}),
'finca': ('django.db.models.fields.CharField', [], {'max_length': '150', 'null': 'True', 'blank': 'True'}),
'fuente_agua': ('django.db.models.fields.IntegerField', [], {'max_length': '60'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'legalidad': ('django.db.models.fields.IntegerField', [], {'max_length': '60'}),
'microcuenca': ('smart_selects.db_fields.ChainedForeignKey', [], {'to': u"orm['lugar.Microcuenca']"}),
'municipio': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'municipio'", 'to': u"orm['lugar.Municipio']"}),
'nombre_productor': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'productores'", 'to': u"orm['encuesta.Productores']"}),
'propietario': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'tipo_casa': ('django.db.models.fields.IntegerField', [], {'max_length': '60'})
},
u'encuesta.fotos': {
'Meta': {'object_name': 'Fotos'},
'adjunto': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'encuesta.ingresoservicionegocio': {
'Meta': {'object_name': 'IngresoServicioNegocio'},
'cantidad': ('django.db.models.fields.IntegerField', [], {}),
'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ingresos': ('django.db.models.fields.FloatField', [], {}),
'maneja': ('django.db.models.fields.IntegerField', [], {}),
'plan_negocio': ('django.db.models.fields.IntegerField', [], {}),
'precio': ('django.db.models.fields.FloatField', [], {}),
'servicios': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.ServiciosActividades']"})
},
u'encuesta.innovacion': {
'Meta': {'object_name': 'Innovacion'},
'aplica': ('django.db.models.fields.IntegerField', [], {}),
'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'innovacion': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.TipoInnovacion']"})
},
u'encuesta.organizacionesdancredito': {
'Meta': {'object_name': 'OrganizacionesDanCredito'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '250'})
},
u'encuesta.productoanimal': {
'Meta': {'object_name': 'ProductoAnimal'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'unidad': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'encuesta.productoprocesado': {
'Meta': {'object_name': 'ProductoProcesado'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'unidad': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'encuesta.productores': {
'Meta': {'object_name': 'Productores'},
'cedula_productor': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'blank': 'True'}),
'celular': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'contador': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'sexo': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'encuesta.recolector': {
'Meta': {'unique_together': "((u'nombre',),)", 'object_name': 'Recolector'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'encuesta.seguridadalimentaria': {
'Meta': {'object_name': 'SeguridadAlimentaria'},
'alimentos': ('django.db.models.fields.IntegerField', [], {}),
'comprar': ('django.db.models.fields.BooleanField', [], {}),
'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nivel_consumo_suficiente': ('django.db.models.fields.IntegerField', [], {}),
'porcentaje_compran': ('django.db.models.fields.IntegerField', [], {}),
'porcentaje_nivel': ('django.db.models.fields.IntegerField', [], {})
},
u'encuesta.seguridadcanuales': {
'Meta': {'object_name': 'SeguridadCAnuales'},
'area_produccion': ('django.db.models.fields.FloatField', [], {}),
'auto_consumo': ('django.db.models.fields.FloatField', [], {}),
'cultivos': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.CultivosAnuales']"}),
'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'perdidas': ('django.db.models.fields.FloatField', [], {}),
'precio_promedio_no': ('django.db.models.fields.FloatField', [], {}),
'precio_promedio_orga': ('django.db.models.fields.FloatField', [], {}),
'produccion': ('django.db.models.fields.FloatField', [], {}),
'venta_no': ('django.db.models.fields.FloatField', [], {}),
'venta_organizada': ('django.db.models.fields.FloatField', [], {})
},
u'encuesta.seguridadpanimal': {
'Meta': {'object_name': 'SeguridadPAnimal'},
'auto_consumo': ('django.db.models.fields.FloatField', [], {}),
'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'maneja': ('django.db.models.fields.IntegerField', [], {}),
'perdidas': ('django.db.models.fields.FloatField', [], {}),
'plan_negocio': ('django.db.models.fields.IntegerField', [], {}),
'precio_promedio_no': ('django.db.models.fields.FloatField', [], {}),
'precio_promedio_orga': ('django.db.models.fields.FloatField', [], {}),
'produccion': ('django.db.models.fields.FloatField', [], {}),
'producto': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.ProductoAnimal']"}),
'venta_no': ('django.db.models.fields.FloatField', [], {}),
'venta_organizada': ('django.db.models.fields.FloatField', [], {})
},
u'encuesta.seguridadpprocesados': {
'Meta': {'object_name': 'SeguridadPProcesados'},
'auto_consumo': ('django.db.models.fields.FloatField', [], {}),
'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'maneja': ('django.db.models.fields.IntegerField', [], {}),
'perdidas': ('django.db.models.fields.FloatField', [], {}),
'plan_negocio': ('django.db.models.fields.IntegerField', [], {}),
'precio_promedio_no': ('django.db.models.fields.FloatField', [], {}),
'produccion': ('django.db.models.fields.FloatField', [], {}),
'producto': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.ProductoProcesado']"}),
'venta_no': ('django.db.models.fields.FloatField', [], {}),
'venta_organizada': ('django.db.models.fields.FloatField', [], {})
},
u'encuesta.seguridadsaf': {
'Meta': {'object_name': 'SeguridadSaf'},
'area_desarrollo': ('django.db.models.fields.FloatField', [], {}),
'area_produccion': ('django.db.models.fields.FloatField', [], {}),
'auto_consumo': ('django.db.models.fields.FloatField', [], {}),
'cultivos': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.CultivosSaf']"}),
'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'perdidas': ('django.db.models.fields.FloatField', [], {}),
'precio_promedio_no': ('django.db.models.fields.FloatField', [], {}),
'precio_promedio_orga': ('django.db.models.fields.FloatField', [], {}),
'produccion_total': ('django.db.models.fields.FloatField', [], {}),
'rendimiento': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'venta_no': ('django.db.models.fields.FloatField', [], {}),
'venta_organizada': ('django.db.models.fields.FloatField', [], {})
},
u'encuesta.serviciosactividades': {
'Meta': {'object_name': 'ServiciosActividades'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'unidad': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'encuesta.tipoinnovacion': {
'Meta': {'object_name': 'TipoInnovacion'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '250'})
},
u'encuesta.usocredito': {
'Meta': {'object_name': 'UsoCredito'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '250'})
},
u'encuesta.usotierra': {
'Meta': {'object_name': 'UsoTierra'},
'anuales_observacion': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'arboles_observacion': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'bosque_primario': ('django.db.models.fields.DecimalField', [], {'default': "u'0.00'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'bosque_secundario': ('django.db.models.fields.DecimalField', [], {'default': "u'0.00'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'cultivos_anuales': ('django.db.models.fields.DecimalField', [], {'default': "u'0.00'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'cultivos_perennes': ('django.db.models.fields.DecimalField', [], {'default': "u'0.00'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'cultivos_semiperennes': ('django.db.models.fields.DecimalField', [], {'default': "u'0.00'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}),
'forestales_observacion': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'perennes_observacion': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'plantaciones_forestales': ('django.db.models.fields.DecimalField', [], {'default': "u'0.00'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'potrero_arboles': ('django.db.models.fields.DecimalField', [], {'default': "u'0.00'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'potrero_sin_arboles': ('django.db.models.fields.DecimalField', [], {'default': "u'0.00'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'primario_observacion': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'secundario_observacion': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'semiperennes_observacion': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'sin_arboles_observacion': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'tacotal': ('django.db.models.fields.DecimalField', [], {'default': "u'0.00'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'tacotal_observacion': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'total_uso': ('django.db.models.fields.FloatField', [], {})
},
u'lugar.comunidad': {
'Meta': {'object_name': 'Comunidad'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'municipio': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Municipio']"}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
u'lugar.departamento': {
'Meta': {'object_name': 'Departamento'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
u'lugar.microcuenca': {
'Meta': {'object_name': 'Microcuenca'},
'comunidad': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Comunidad']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'lugar.municipio': {
'Meta': {'object_name': 'Municipio'},
'departamento': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Departamento']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '40'})
}
}
complete_apps = ['encuesta'] | gpl-3.0 | 4,813,808,884,950,550,000 | 68.320106 | 184 | 0.576292 | false |
bopo/django-userena | userena/utils.py | 4 | 6483 | from django.conf import settings
from django.utils.encoding import smart_bytes
from django.utils.functional import allow_lazy
from django.utils.http import urlencode
from django.utils.six import text_type
from django.utils.text import Truncator
from userena import settings as userena_settings
from userena.compat import SiteProfileNotAvailable, get_model
from hashlib import sha1, md5
import random, datetime
import warnings
def truncate_words(s, num, end_text='...'):
truncate = end_text and ' %s' % end_text or ''
return Truncator(s).words(num, truncate=truncate)
truncate_words = allow_lazy(truncate_words, text_type)
def get_gravatar(email, size=80, default='identicon'):
""" Get's a Gravatar for a email address.
:param size:
The size in pixels of one side of the Gravatar's square image.
Optional, if not supplied will default to ``80``.
:param default:
Defines what should be displayed if no image is found for this user.
Optional argument which defaults to ``identicon``. The argument can be
a URI to an image or one of the following options:
``404``
Do not load any image if none is associated with the email
hash, instead return an HTTP 404 (File Not Found) response.
``mm``
Mystery-man, a simple, cartoon-style silhouetted outline of a
person (does not vary by email hash).
``identicon``
A geometric pattern based on an email hash.
``monsterid``
A generated 'monster' with different colors, faces, etc.
``wavatar``
Generated faces with differing features and backgrounds
:return: The URI pointing to the Gravatar.
"""
if userena_settings.USERENA_MUGSHOT_GRAVATAR_SECURE:
base_url = 'https://secure.gravatar.com/avatar/'
    else:
        base_url = '//www.gravatar.com/avatar/'
gravatar_url = '%(base_url)s%(gravatar_id)s?' % \
{'base_url': base_url,
'gravatar_id': md5(email.lower().encode('utf-8')).hexdigest()}
gravatar_url += urlencode({
's': str(size),
'd': default
})
return gravatar_url
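# Hedged example (not part of the original module; the address is a made-up
# placeholder): build a 120px Gravatar URI that falls back to the "mystery man"
# image when no avatar is registered for the address.
def _example_gravatar():
    return get_gravatar('[email protected]', size=120, default='mm')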
def signin_redirect(redirect=None, user=None):
"""
Redirect user after successful sign in.
First looks for a ``requested_redirect``. If not supplied will fall-back to
the user specific account page. If all fails, will fall-back to the standard
Django ``LOGIN_REDIRECT_URL`` setting. Returns a string defining the URI to
go next.
:param redirect:
A value normally supplied by ``next`` form field. Gets preference
before the default view which requires the user.
:param user:
A ``User`` object specifying the user who has just signed in.
:return: String containing the URI to redirect to.
"""
    if redirect:
        return redirect
    elif user is not None:
        return userena_settings.USERENA_SIGNIN_REDIRECT_URL % \
               {'username': user.username}
    else:
        return settings.LOGIN_REDIRECT_URL
def generate_sha1(string, salt=None):
"""
Generates a sha1 hash for supplied string. Doesn't need to be very secure
because it's not used for password checking. We got Django for that.
:param string:
The string that needs to be encrypted.
:param salt:
Optionally define your own salt. If none is supplied, will use a random
string of 5 characters.
:return: Tuple containing the salt and hash.
"""
if not isinstance(string, (str, text_type)):
string = str(string)
if not salt:
salt = sha1(str(random.random()).encode('utf-8')).hexdigest()[:5]
salted_bytes = (smart_bytes(salt) + smart_bytes(string))
hash_ = sha1(salted_bytes).hexdigest()
return salt, hash_
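# Hedged sketch (not part of the original module): generate_sha1() returns a
# (salt, hash) pair; passing an explicit salt makes the digest reproducible.
def _example_generate_sha1():
    salt, digest = generate_sha1('some-activation-source', salt='abcde')
    return salt, digest  # digest is a 40-character hexadecimal sha1 string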
def get_profile_model():
"""
Return the model class for the currently-active user profile
model, as defined by the ``AUTH_PROFILE_MODULE`` setting.
:return: The model that is used as profile.
"""
if (not hasattr(settings, 'AUTH_PROFILE_MODULE')) or \
(not settings.AUTH_PROFILE_MODULE):
raise SiteProfileNotAvailable
try:
profile_mod = get_model(*settings.AUTH_PROFILE_MODULE.rsplit('.', 1))
except LookupError:
profile_mod = None
if profile_mod is None:
raise SiteProfileNotAvailable
return profile_mod
def get_user_profile(user):
profile_model = get_profile_model()
try:
profile = user.get_profile()
except AttributeError:
related_name = profile_model._meta.get_field('user')\
.related_query_name()
profile = getattr(user, related_name, None)
except profile_model.DoesNotExist:
profile = None
if profile:
return profile
return profile_model.objects.create(user=user)
def get_protocol():
"""
Returns a string with the current protocol.
This can be either 'http' or 'https' depending on ``USERENA_USE_HTTPS``
setting.
"""
protocol = 'http'
if getattr(settings, 'USERENA_USE_HTTPS', userena_settings.DEFAULT_USERENA_USE_HTTPS):
protocol = 'https'
return protocol
def get_datetime_now():
"""
Returns datetime object with current point in time.
In Django 1.4+ it uses Django's django.utils.timezone.now() which returns
an aware or naive datetime that represents the current point in time
when ``USE_TZ`` in project's settings is True or False respectively.
In older versions of Django it uses datetime.datetime.now().
"""
try:
from django.utils import timezone
return timezone.now() # pragma: no cover
except ImportError: # pragma: no cover
return datetime.datetime.now()
# Django 1.5 compatibility utilities, providing support for custom User models.
# Since get_user_model() causes a circular import if called when app models are
# being loaded, the user_model_label should be used when possible, with calls
# to get_user_model deferred to execution time
user_model_label = getattr(settings, 'AUTH_USER_MODEL', 'auth.User')
def get_user_model():
warnings.warn("Use Django's django.contrib.auth.get_user_model directly. "
"This function will be removed in future versions of "
"django-userena.", DeprecationWarning)
from django.contrib.auth import get_user_model
return get_user_model()
| bsd-3-clause | 1,513,311,831,981,949,700 | 32.076531 | 90 | 0.658954 | false |
mbayon/TFG-MachineLearning | vbig/lib/python2.7/site-packages/django/contrib/sessions/middleware.py | 129 | 3423 | import time
from importlib import import_module
from django.conf import settings
from django.contrib.sessions.backends.base import UpdateError
from django.core.exceptions import SuspiciousOperation
from django.utils.cache import patch_vary_headers
from django.utils.deprecation import MiddlewareMixin
from django.utils.http import cookie_date
class SessionMiddleware(MiddlewareMixin):
def __init__(self, get_response=None):
self.get_response = get_response
engine = import_module(settings.SESSION_ENGINE)
self.SessionStore = engine.SessionStore
def process_request(self, request):
session_key = request.COOKIES.get(settings.SESSION_COOKIE_NAME)
request.session = self.SessionStore(session_key)
def process_response(self, request, response):
"""
If request.session was modified, or if the configuration is to save the
session every time, save the changes and set a session cookie or delete
the session cookie if the session has been emptied.
"""
try:
accessed = request.session.accessed
modified = request.session.modified
empty = request.session.is_empty()
except AttributeError:
pass
else:
# First check if we need to delete this cookie.
# The session should be deleted only if the session is entirely empty
if settings.SESSION_COOKIE_NAME in request.COOKIES and empty:
response.delete_cookie(
settings.SESSION_COOKIE_NAME,
path=settings.SESSION_COOKIE_PATH,
domain=settings.SESSION_COOKIE_DOMAIN,
)
else:
if accessed:
patch_vary_headers(response, ('Cookie',))
if (modified or settings.SESSION_SAVE_EVERY_REQUEST) and not empty:
if request.session.get_expire_at_browser_close():
max_age = None
expires = None
else:
max_age = request.session.get_expiry_age()
expires_time = time.time() + max_age
expires = cookie_date(expires_time)
# Save the session data and refresh the client cookie.
# Skip session save for 500 responses, refs #3881.
if response.status_code != 500:
try:
request.session.save()
except UpdateError:
raise SuspiciousOperation(
"The request's session was deleted before the "
"request completed. The user may have logged "
"out in a concurrent request, for example."
)
response.set_cookie(
settings.SESSION_COOKIE_NAME,
request.session.session_key, max_age=max_age,
expires=expires, domain=settings.SESSION_COOKIE_DOMAIN,
path=settings.SESSION_COOKIE_PATH,
secure=settings.SESSION_COOKIE_SECURE or None,
httponly=settings.SESSION_COOKIE_HTTPONLY or None,
)
return response
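# Hedged note (not part of the original file): the middleware is activated by
# adding its dotted path to a project's MIDDLEWARE setting, e.g.
#
#   MIDDLEWARE = [
#       'django.contrib.sessions.middleware.SessionMiddleware',
#       # ...
#   ]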
| mit | 714,480,233,634,399,700 | 45.890411 | 83 | 0.557698 | false |
douglaskastle/mutagen | tests/test_trueaudio.py | 1 | 1544 | # -*- coding: utf-8 -*-
import os
import shutil
from mutagen.trueaudio import TrueAudio, delete
from mutagen.id3 import TIT1
from tests import TestCase, DATA_DIR
from tempfile import mkstemp
class TTrueAudio(TestCase):
def setUp(self):
self.audio = TrueAudio(os.path.join(DATA_DIR, "empty.tta"))
def test_tags(self):
self.failUnless(self.audio.tags is None)
def test_length(self):
self.failUnlessAlmostEqual(self.audio.info.length, 3.7, 1)
def test_sample_rate(self):
self.failUnlessEqual(44100, self.audio.info.sample_rate)
def test_not_my_file(self):
filename = os.path.join(DATA_DIR, "empty.ogg")
self.failUnlessRaises(IOError, TrueAudio, filename)
def test_module_delete(self):
delete(os.path.join(DATA_DIR, "empty.tta"))
def test_delete(self):
self.audio.delete()
self.failIf(self.audio.tags)
def test_pprint(self):
self.failUnless(self.audio.pprint())
def test_save_reload(self):
try:
fd, filename = mkstemp(suffix='.tta')
os.close(fd)
shutil.copy(self.audio.filename, filename)
audio = TrueAudio(filename)
audio.add_tags()
audio.tags.add(TIT1(encoding=0, text="A Title"))
audio.save()
audio = TrueAudio(filename)
self.failUnlessEqual(audio["TIT1"], "A Title")
finally:
os.unlink(filename)
def test_mime(self):
self.failUnless("audio/x-tta" in self.audio.mime)
| gpl-2.0 | -1,446,213,023,081,084,700 | 27.592593 | 67 | 0.624352 | false |
astropy/astropy | astropy/units/misc.py | 8 | 3393 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package defines miscellaneous units. They are also
available in the `astropy.units` namespace.
"""
from . import si
from astropy.constants import si as _si
from .core import (UnitBase, def_unit, si_prefixes, binary_prefixes,
set_enabled_units)
# To ensure si units of the constants can be interpreted.
set_enabled_units([si])
import numpy as _numpy
_ns = globals()
###########################################################################
# AREAS
def_unit(['barn', 'barn'], 10 ** -28 * si.m ** 2, namespace=_ns, prefixes=True,
doc="barn: unit of area used in HEP")
###########################################################################
# ANGULAR MEASUREMENTS
def_unit(['cycle', 'cy'], 2.0 * _numpy.pi * si.rad,
namespace=_ns, prefixes=False,
doc="cycle: angular measurement, a full turn or rotation")
def_unit(['spat', 'sp'], 4.0 * _numpy.pi * si.sr,
namespace=_ns, prefixes=False,
doc="spat: the solid angle of the sphere, 4pi sr")
##########################################################################
# PRESSURE
def_unit(['bar'], 1e5 * si.Pa, namespace=_ns,
prefixes=[(['m'], ['milli'], 1.e-3)],
doc="bar: pressure")
# The torr is almost the same as mmHg but not quite.
# See https://en.wikipedia.org/wiki/Torr
# Define the unit here despite it not being an astrophysical unit.
# It may be moved if more similar units are created later.
def_unit(['Torr', 'torr'], _si.atm.value/760. * si.Pa, namespace=_ns,
prefixes=[(['m'], ['milli'], 1.e-3)],
doc="Unit of pressure based on an absolute scale, now defined as "
"exactly 1/760 of a standard atmosphere")
###########################################################################
# MASS
def_unit(['M_p'], _si.m_p, namespace=_ns, doc="Proton mass",
format={'latex': r'M_{p}', 'unicode': 'Mₚ'})
def_unit(['M_e'], _si.m_e, namespace=_ns, doc="Electron mass",
format={'latex': r'M_{e}', 'unicode': 'Mₑ'})
# Unified atomic mass unit
def_unit(['u', 'Da', 'Dalton'], _si.u, namespace=_ns,
prefixes=True, exclude_prefixes=['a', 'da'],
doc="Unified atomic mass unit")
###########################################################################
# COMPUTER
def_unit((['bit', 'b'], ['bit']), namespace=_ns,
prefixes=si_prefixes + binary_prefixes)
def_unit((['byte', 'B'], ['byte']), 8 * bit, namespace=_ns,
format={'vounit': 'byte'},
prefixes=si_prefixes + binary_prefixes,
exclude_prefixes=['d'])
def_unit((['pix', 'pixel'], ['pixel']),
format={'ogip': 'pixel', 'vounit': 'pixel'},
namespace=_ns, prefixes=True)
def_unit((['vox', 'voxel'], ['voxel']),
format={'fits': 'voxel', 'ogip': 'voxel', 'vounit': 'voxel'},
namespace=_ns, prefixes=True)
###########################################################################
# CLEANUP
del UnitBase
del def_unit
del si
###########################################################################
# DOCSTRING
# This generates a docstring for this module that describes all of the
# standard units defined here.
from .utils import generate_unit_summary as _generate_unit_summary
if __doc__ is not None:
__doc__ += _generate_unit_summary(globals())
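# Hedged usage sketch (not part of the original module): once astropy is
# installed, the units defined here compose with the quantity framework, e.g.
#
#   from astropy import units as u
#   (3 * u.bar).to(u.Pa)     # -> 300000.0 Pa, since bar is defined as 1e5 Pa
#   (1 * u.cycle).to(u.rad)  # -> 6.283185... rad, a full turn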
| bsd-3-clause | 1,712,555,838,471,403,500 | 32.89 | 79 | 0.518442 | false |
mdaniel/intellij-community | python/helpers/third_party/thriftpy/_shaded_ply/yacc.py | 99 | 135805 | # -----------------------------------------------------------------------------
# ply: yacc.py
#
# Copyright (C) 2001-2015,
# David M. Beazley (Dabeaz LLC)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the David Beazley or Dabeaz LLC may be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
#
# This implements an LR parser that is constructed from grammar rules defined
# as Python functions. The grammar is specified by supplying the BNF inside
# Python documentation strings. The inspiration for this technique was borrowed
# from John Aycock's Spark parsing system. PLY might be viewed as a cross between
# Spark and the GNU bison utility.
#
# The current implementation is only somewhat object-oriented. The
# LR parser itself is defined in terms of an object (which allows multiple
# parsers to co-exist). However, most of the variables used during table
# construction are defined in terms of global variables. Users shouldn't
# notice unless they are trying to define multiple parsers at the same
# time using threads (in which case they should have their head examined).
#
# This implementation supports both SLR and LALR(1) parsing. LALR(1)
# support was originally implemented by Elias Ioup ([email protected]),
# using the algorithm found in Aho, Sethi, and Ullman "Compilers: Principles,
# Techniques, and Tools" (The Dragon Book). LALR(1) has since been replaced
# by the more efficient DeRemer and Pennello algorithm.
#
# :::::::: WARNING :::::::
#
# Construction of LR parsing tables is fairly complicated and expensive.
# To make this module run fast, a *LOT* of work has been put into
# optimization---often at the expense of readability and what might be
# considered good Python "coding style." Modify the code at your
# own risk!
# ----------------------------------------------------------------------------
import re
import types
import sys
import os.path
import inspect
import base64
import warnings
__version__ = '3.8'
__tabversion__ = '3.8'
#-----------------------------------------------------------------------------
# === User configurable parameters ===
#
# Change these to modify the default behavior of yacc (if you wish)
#-----------------------------------------------------------------------------
yaccdebug = True # Debugging mode. If set, yacc generates a
# 'parser.out' file in the current directory
debug_file = 'parser.out' # Default name of the debugging file
tab_module = 'parsetab' # Default name of the table module
default_lr = 'LALR' # Default LR table generation method
error_count = 3 # Number of symbols that must be shifted to leave recovery mode
yaccdevel = False # Set to True if developing yacc. This turns off optimized
# implementations of certain functions.
resultlimit = 40 # Size limit of results when running in debug mode.
pickle_protocol = 0 # Protocol to use when writing pickle files
# String type-checking compatibility
if sys.version_info[0] < 3:
string_types = basestring
else:
string_types = str
MAXINT = sys.maxsize
# This object is a stand-in for a logging object created by the
# logging module. PLY will use this by default to create things
# such as the parser.out file. If a user wants more detailed
# information, they can create their own logging object and pass
# it into PLY.
class PlyLogger(object):
def __init__(self, f):
self.f = f
def debug(self, msg, *args, **kwargs):
self.f.write((msg % args) + '\n')
info = debug
def warning(self, msg, *args, **kwargs):
self.f.write('WARNING: ' + (msg % args) + '\n')
def error(self, msg, *args, **kwargs):
self.f.write('ERROR: ' + (msg % args) + '\n')
critical = debug
# Null logger is used when no output is generated. Does nothing.
class NullLogger(object):
def __getattribute__(self, name):
return self
def __call__(self, *args, **kwargs):
return self
# Exception raised for yacc-related errors
class YaccError(Exception):
pass
# Format the result message that the parser produces when running in debug mode.
def format_result(r):
repr_str = repr(r)
if '\n' in repr_str:
repr_str = repr(repr_str)
if len(repr_str) > resultlimit:
repr_str = repr_str[:resultlimit] + ' ...'
result = '<%s @ 0x%x> (%s)' % (type(r).__name__, id(r), repr_str)
return result
# Format stack entries when the parser is running in debug mode
def format_stack_entry(r):
repr_str = repr(r)
if '\n' in repr_str:
repr_str = repr(repr_str)
if len(repr_str) < 16:
return repr_str
else:
return '<%s @ 0x%x>' % (type(r).__name__, id(r))
# Panic mode error recovery support. This feature is being reworked--much of the
# code here is to offer a deprecation/backwards compatible transition
_errok = None
_token = None
_restart = None
_warnmsg = '''PLY: Don't use global functions errok(), token(), and restart() in p_error().
Instead, invoke the methods on the associated parser instance:
def p_error(p):
...
# Use parser.errok(), parser.token(), parser.restart()
...
parser = yacc.yacc()
'''
def errok():
warnings.warn(_warnmsg)
return _errok()
def restart():
warnings.warn(_warnmsg)
return _restart()
def token():
warnings.warn(_warnmsg)
return _token()
# Utility function to call the p_error() function with some deprecation hacks
def call_errorfunc(errorfunc, token, parser):
global _errok, _token, _restart
_errok = parser.errok
_token = parser.token
_restart = parser.restart
r = errorfunc(token)
try:
del _errok, _token, _restart
except NameError:
pass
return r
#-----------------------------------------------------------------------------
# === LR Parsing Engine ===
#
# The following classes are used for the LR parser itself. These are not
# used during table construction and are independent of the actual LR
# table generation algorithm
#-----------------------------------------------------------------------------
# This class is used to hold non-terminal grammar symbols during parsing.
# It normally has the following attributes set:
# .type = Grammar symbol type
# .value = Symbol value
# .lineno = Starting line number
# .endlineno = Ending line number (optional, set automatically)
# .lexpos = Starting lex position
# .endlexpos = Ending lex position (optional, set automatically)
class YaccSymbol:
def __str__(self):
return self.type
def __repr__(self):
return str(self)
# This class is a wrapper around the objects actually passed to each
# grammar rule. Index lookup and assignment actually assign the
# .value attribute of the underlying YaccSymbol object.
# The lineno() method returns the line number of a given
# item (or 0 if not defined). The linespan() method returns
# a tuple of (startline,endline) representing the range of lines
# for a symbol. The lexspan() method returns a tuple (lexpos,endlexpos)
# representing the range of positional information for a symbol.
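#
# As a purely illustrative sketch (the rule and symbol names below are made up
# and not defined by this module), a grammar rule function receives one of
# these objects as 'p':
#
#     def p_expr_plus(p):
#         'expr : expr PLUS term'
#         p[0] = p[1] + p[3]            # read/assign the .value of each symbol
#         start, end = p.linespan(1)    # line range covered by 'expr'
#         pos, endpos = p.lexspan(3)    # lex position range covered by 'term'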
class YaccProduction:
def __init__(self, s, stack=None):
self.slice = s
self.stack = stack
self.lexer = None
self.parser = None
def __getitem__(self, n):
if isinstance(n, slice):
return [s.value for s in self.slice[n]]
elif n >= 0:
return self.slice[n].value
else:
return self.stack[n].value
def __setitem__(self, n, v):
self.slice[n].value = v
def __getslice__(self, i, j):
return [s.value for s in self.slice[i:j]]
def __len__(self):
return len(self.slice)
def lineno(self, n):
return getattr(self.slice[n], 'lineno', 0)
def set_lineno(self, n, lineno):
self.slice[n].lineno = lineno
def linespan(self, n):
startline = getattr(self.slice[n], 'lineno', 0)
endline = getattr(self.slice[n], 'endlineno', startline)
return startline, endline
def lexpos(self, n):
return getattr(self.slice[n], 'lexpos', 0)
def lexspan(self, n):
startpos = getattr(self.slice[n], 'lexpos', 0)
endpos = getattr(self.slice[n], 'endlexpos', startpos)
return startpos, endpos
def error(self):
raise SyntaxError
# -----------------------------------------------------------------------------
# == LRParser ==
#
# The LR Parsing engine.
# -----------------------------------------------------------------------------
class LRParser:
def __init__(self, lrtab, errorf):
self.productions = lrtab.lr_productions
self.action = lrtab.lr_action
self.goto = lrtab.lr_goto
self.errorfunc = errorf
self.set_defaulted_states()
self.errorok = True
def errok(self):
self.errorok = True
def restart(self):
del self.statestack[:]
del self.symstack[:]
sym = YaccSymbol()
sym.type = '$end'
self.symstack.append(sym)
self.statestack.append(0)
# Defaulted state support.
# This method identifies parser states where there is only one possible reduction action.
# For such states, the parser can choose to make a rule reduction without consuming
# the next look-ahead token. This delayed invocation of the tokenizer can be useful in
# certain kinds of advanced parsing situations where the lexer and parser interact with
# each other or change states (i.e., manipulation of scope, lexer states, etc.).
#
# See: http://www.gnu.org/software/bison/manual/html_node/Default-Reductions.html#Default-Reductions
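#
# Illustrative usage sketch (assumed, not exercised by this module itself): a
# grammar whose lexer switches state inside rule actions can opt back out of
# this optimization after the parser is built:
#
#     parser = yacc.yacc()
#     parser.disable_defaulted_states()   # always fetch a lookahead before reducing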
def set_defaulted_states(self):
self.defaulted_states = {}
for state, actions in self.action.items():
rules = list(actions.values())
if len(rules) == 1 and rules[0] < 0:
self.defaulted_states[state] = rules[0]
def disable_defaulted_states(self):
self.defaulted_states = {}
def parse(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
if debug or yaccdevel:
if isinstance(debug, int):
debug = PlyLogger(sys.stderr)
return self.parsedebug(input, lexer, debug, tracking, tokenfunc)
elif tracking:
return self.parseopt(input, lexer, debug, tracking, tokenfunc)
else:
return self.parseopt_notrack(input, lexer, debug, tracking, tokenfunc)
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# parsedebug().
#
# This is the debugging enabled version of parse(). All changes made to the
# parsing engine should be made here. Optimized versions of this function
# are automatically created by the ply/ygen.py script. This script cuts out
# sections enclosed in markers such as this:
#
# #--! DEBUG
# statements
# #--! DEBUG
#
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
def parsedebug(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
#--! parsedebug-start
lookahead = None # Current lookahead symbol
lookaheadstack = [] # Stack of lookahead symbols
actions = self.action # Local reference to action table (to avoid lookup on self.)
goto = self.goto # Local reference to goto table (to avoid lookup on self.)
prod = self.productions # Local reference to production list (to avoid lookup on self.)
defaulted_states = self.defaulted_states # Local reference to defaulted states
pslice = YaccProduction(None) # Production object passed to grammar rules
errorcount = 0 # Used during error recovery
#--! DEBUG
debug.info('PLY: PARSE DEBUG START')
#--! DEBUG
# If no lexer was given, we will try to use the lex module
if not lexer:
from . import lex
lexer = lex.lexer
# Set up the lexer and parser objects on pslice
pslice.lexer = lexer
pslice.parser = self
# If input was supplied, pass to lexer
if input is not None:
lexer.input(input)
if tokenfunc is None:
# Tokenize function
get_token = lexer.token
else:
get_token = tokenfunc
# Set the parser() token method (sometimes used in error recovery)
self.token = get_token
# Set up the state and symbol stacks
statestack = [] # Stack of parsing states
self.statestack = statestack
symstack = [] # Stack of grammar symbols
self.symstack = symstack
pslice.stack = symstack # Put in the production
errtoken = None # Err token
# The start state is assumed to be (0,$end)
statestack.append(0)
sym = YaccSymbol()
sym.type = '$end'
symstack.append(sym)
state = 0
while True:
# Get the next symbol on the input. If a lookahead symbol
# is already set, we just use that. Otherwise, we'll pull
# the next token off of the lookaheadstack or from the lexer
#--! DEBUG
debug.debug('')
debug.debug('State : %s', state)
#--! DEBUG
if state not in defaulted_states:
if not lookahead:
if not lookaheadstack:
lookahead = get_token() # Get the next token
else:
lookahead = lookaheadstack.pop()
if not lookahead:
lookahead = YaccSymbol()
lookahead.type = '$end'
# Check the action table
ltype = lookahead.type
t = actions[state].get(ltype)
else:
t = defaulted_states[state]
#--! DEBUG
debug.debug('Defaulted state %s: Reduce using %d', state, -t)
#--! DEBUG
#--! DEBUG
debug.debug('Stack : %s',
('%s . %s' % (' '.join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
#--! DEBUG
if t is not None:
if t > 0:
# shift a symbol on the stack
statestack.append(t)
state = t
#--! DEBUG
debug.debug('Action : Shift and goto state %s', t)
#--! DEBUG
symstack.append(lookahead)
lookahead = None
# Decrease error count on successful shift
if errorcount:
errorcount -= 1
continue
if t < 0:
# reduce a symbol on the stack, emit a production
p = prod[-t]
pname = p.name
plen = p.len
# Get production function
sym = YaccSymbol()
sym.type = pname # Production name
sym.value = None
#--! DEBUG
if plen:
debug.info('Action : Reduce rule [%s] with %s and goto state %d', p.str,
'['+','.join([format_stack_entry(_v.value) for _v in symstack[-plen:]])+']',
goto[statestack[-1-plen]][pname])
else:
debug.info('Action : Reduce rule [%s] with %s and goto state %d', p.str, [],
goto[statestack[-1]][pname])
#--! DEBUG
if plen:
targ = symstack[-plen-1:]
targ[0] = sym
#--! TRACKING
if tracking:
t1 = targ[1]
sym.lineno = t1.lineno
sym.lexpos = t1.lexpos
t1 = targ[-1]
sym.endlineno = getattr(t1, 'endlineno', t1.lineno)
sym.endlexpos = getattr(t1, 'endlexpos', t1.lexpos)
#--! TRACKING
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# below as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
del symstack[-plen:]
del statestack[-plen:]
p.callable(pslice)
#--! DEBUG
debug.info('Result : %s', format_result(pslice[0]))
#--! DEBUG
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead)
symstack.pop()
statestack.pop()
state = statestack[-1]
sym.type = 'error'
lookahead = sym
errorcount = error_count
self.errorok = False
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
else:
#--! TRACKING
if tracking:
sym.lineno = lexer.lineno
sym.lexpos = lexer.lexpos
#--! TRACKING
targ = [sym]
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# above as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
p.callable(pslice)
#--! DEBUG
debug.info('Result : %s', format_result(pslice[0]))
#--! DEBUG
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead)
symstack.pop()
statestack.pop()
state = statestack[-1]
sym.type = 'error'
lookahead = sym
errorcount = error_count
self.errorok = False
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
if t == 0:
n = symstack[-1]
result = getattr(n, 'value', None)
#--! DEBUG
debug.info('Done : Returning %s', format_result(result))
debug.info('PLY: PARSE DEBUG END')
#--! DEBUG
return result
if t is None:
#--! DEBUG
debug.error('Error : %s',
('%s . %s' % (' '.join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
#--! DEBUG
# We have some kind of parsing error here. To handle
# this, we are going to push the current token onto
# the tokenstack and replace it with an 'error' token.
# If there are any synchronization rules, they may
# catch it.
#
# In addition to pushing the error token, we call
# the user defined p_error() function if this is the
# first syntax error. This function is only called if
# errorcount == 0.
if errorcount == 0 or self.errorok:
errorcount = error_count
self.errorok = False
errtoken = lookahead
if errtoken.type == '$end':
errtoken = None # End of file!
if self.errorfunc:
if errtoken and not hasattr(errtoken, 'lexer'):
errtoken.lexer = lexer
tok = call_errorfunc(self.errorfunc, errtoken, self)
if self.errorok:
# User must have done some kind of panic
# mode recovery on their own. The
# returned token is the next lookahead
lookahead = tok
errtoken = None
continue
else:
if errtoken:
if hasattr(errtoken, 'lineno'):
lineno = lookahead.lineno
else:
lineno = 0
if lineno:
sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type))
else:
sys.stderr.write('yacc: Syntax error, token=%s' % errtoken.type)
else:
sys.stderr.write('yacc: Parse error in input. EOF\n')
return
else:
errorcount = error_count
# case 1: the statestack only has 1 entry on it. If we're in this state, the
# entire parse has been rolled back and we're completely hosed. The token is
# discarded and we just keep going.
if len(statestack) <= 1 and lookahead.type != '$end':
lookahead = None
errtoken = None
state = 0
# Nuke the pushback stack
del lookaheadstack[:]
continue
# case 2: the statestack has a couple of entries on it, but we're
# at the end of the file. nuke the top entry and generate an error token
# Start nuking entries on the stack
if lookahead.type == '$end':
# Whoa. We're really hosed here. Bail out
return
if lookahead.type != 'error':
sym = symstack[-1]
if sym.type == 'error':
# Hmmm. Error is on top of stack, we'll just nuke input
# symbol and continue
#--! TRACKING
if tracking:
sym.endlineno = getattr(lookahead, 'lineno', sym.lineno)
sym.endlexpos = getattr(lookahead, 'lexpos', sym.lexpos)
#--! TRACKING
lookahead = None
continue
# Create the error symbol for the first time and make it the new lookahead symbol
t = YaccSymbol()
t.type = 'error'
if hasattr(lookahead, 'lineno'):
t.lineno = t.endlineno = lookahead.lineno
if hasattr(lookahead, 'lexpos'):
t.lexpos = t.endlexpos = lookahead.lexpos
t.value = lookahead
lookaheadstack.append(lookahead)
lookahead = t
else:
sym = symstack.pop()
#--! TRACKING
if tracking:
lookahead.lineno = sym.lineno
lookahead.lexpos = sym.lexpos
#--! TRACKING
statestack.pop()
state = statestack[-1]
continue
# Call an error function here
raise RuntimeError('yacc: internal parser error!!!\n')
#--! parsedebug-end
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# parseopt().
#
# Optimized version of parse() method. DO NOT EDIT THIS CODE DIRECTLY!
# This code is automatically generated by the ply/ygen.py script. Make
# changes to the parsedebug() method instead.
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
def parseopt(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
#--! parseopt-start
lookahead = None # Current lookahead symbol
lookaheadstack = [] # Stack of lookahead symbols
actions = self.action # Local reference to action table (to avoid lookup on self.)
goto = self.goto # Local reference to goto table (to avoid lookup on self.)
prod = self.productions # Local reference to production list (to avoid lookup on self.)
defaulted_states = self.defaulted_states # Local reference to defaulted states
pslice = YaccProduction(None) # Production object passed to grammar rules
errorcount = 0 # Used during error recovery
# If no lexer was given, we will try to use the lex module
if not lexer:
from . import lex
lexer = lex.lexer
# Set up the lexer and parser objects on pslice
pslice.lexer = lexer
pslice.parser = self
# If input was supplied, pass to lexer
if input is not None:
lexer.input(input)
if tokenfunc is None:
# Tokenize function
get_token = lexer.token
else:
get_token = tokenfunc
# Set the parser() token method (sometimes used in error recovery)
self.token = get_token
# Set up the state and symbol stacks
statestack = [] # Stack of parsing states
self.statestack = statestack
symstack = [] # Stack of grammar symbols
self.symstack = symstack
pslice.stack = symstack # Put in the production
errtoken = None # Err token
# The start state is assumed to be (0,$end)
statestack.append(0)
sym = YaccSymbol()
sym.type = '$end'
symstack.append(sym)
state = 0
while True:
# Get the next symbol on the input. If a lookahead symbol
# is already set, we just use that. Otherwise, we'll pull
# the next token off of the lookaheadstack or from the lexer
if state not in defaulted_states:
if not lookahead:
if not lookaheadstack:
lookahead = get_token() # Get the next token
else:
lookahead = lookaheadstack.pop()
if not lookahead:
lookahead = YaccSymbol()
lookahead.type = '$end'
# Check the action table
ltype = lookahead.type
t = actions[state].get(ltype)
else:
t = defaulted_states[state]
if t is not None:
if t > 0:
# shift a symbol on the stack
statestack.append(t)
state = t
symstack.append(lookahead)
lookahead = None
# Decrease error count on successful shift
if errorcount:
errorcount -= 1
continue
if t < 0:
# reduce a symbol on the stack, emit a production
p = prod[-t]
pname = p.name
plen = p.len
# Get production function
sym = YaccSymbol()
sym.type = pname # Production name
sym.value = None
if plen:
targ = symstack[-plen-1:]
targ[0] = sym
#--! TRACKING
if tracking:
t1 = targ[1]
sym.lineno = t1.lineno
sym.lexpos = t1.lexpos
t1 = targ[-1]
sym.endlineno = getattr(t1, 'endlineno', t1.lineno)
sym.endlexpos = getattr(t1, 'endlexpos', t1.lexpos)
#--! TRACKING
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# below as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
del symstack[-plen:]
del statestack[-plen:]
p.callable(pslice)
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead)
symstack.pop()
statestack.pop()
state = statestack[-1]
sym.type = 'error'
lookahead = sym
errorcount = error_count
self.errorok = False
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
else:
#--! TRACKING
if tracking:
sym.lineno = lexer.lineno
sym.lexpos = lexer.lexpos
#--! TRACKING
targ = [sym]
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# above as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
p.callable(pslice)
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead)
symstack.pop()
statestack.pop()
state = statestack[-1]
sym.type = 'error'
lookahead = sym
errorcount = error_count
self.errorok = False
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
if t == 0:
n = symstack[-1]
result = getattr(n, 'value', None)
return result
if t is None:
# We have some kind of parsing error here. To handle
# this, we are going to push the current token onto
# the tokenstack and replace it with an 'error' token.
# If there are any synchronization rules, they may
# catch it.
#
# In addition to pushing the error token, we call
# the user defined p_error() function if this is the
# first syntax error. This function is only called if
# errorcount == 0.
if errorcount == 0 or self.errorok:
errorcount = error_count
self.errorok = False
errtoken = lookahead
if errtoken.type == '$end':
errtoken = None # End of file!
if self.errorfunc:
if errtoken and not hasattr(errtoken, 'lexer'):
errtoken.lexer = lexer
tok = call_errorfunc(self.errorfunc, errtoken, self)
if self.errorok:
# User must have done some kind of panic
# mode recovery on their own. The
# returned token is the next lookahead
lookahead = tok
errtoken = None
continue
else:
if errtoken:
if hasattr(errtoken, 'lineno'):
lineno = lookahead.lineno
else:
lineno = 0
if lineno:
sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type))
else:
sys.stderr.write('yacc: Syntax error, token=%s' % errtoken.type)
else:
sys.stderr.write('yacc: Parse error in input. EOF\n')
return
else:
errorcount = error_count
# case 1: the statestack only has 1 entry on it. If we're in this state, the
# entire parse has been rolled back and we're completely hosed. The token is
# discarded and we just keep going.
if len(statestack) <= 1 and lookahead.type != '$end':
lookahead = None
errtoken = None
state = 0
# Nuke the pushback stack
del lookaheadstack[:]
continue
# case 2: the statestack has a couple of entries on it, but we're
# at the end of the file. nuke the top entry and generate an error token
# Start nuking entries on the stack
if lookahead.type == '$end':
# Whoa. We're really hosed here. Bail out
return
if lookahead.type != 'error':
sym = symstack[-1]
if sym.type == 'error':
# Hmmm. Error is on top of stack, we'll just nuke input
# symbol and continue
#--! TRACKING
if tracking:
sym.endlineno = getattr(lookahead, 'lineno', sym.lineno)
sym.endlexpos = getattr(lookahead, 'lexpos', sym.lexpos)
#--! TRACKING
lookahead = None
continue
# Create the error symbol for the first time and make it the new lookahead symbol
t = YaccSymbol()
t.type = 'error'
if hasattr(lookahead, 'lineno'):
t.lineno = t.endlineno = lookahead.lineno
if hasattr(lookahead, 'lexpos'):
t.lexpos = t.endlexpos = lookahead.lexpos
t.value = lookahead
lookaheadstack.append(lookahead)
lookahead = t
else:
sym = symstack.pop()
#--! TRACKING
if tracking:
lookahead.lineno = sym.lineno
lookahead.lexpos = sym.lexpos
#--! TRACKING
statestack.pop()
state = statestack[-1]
continue
# Call an error function here
raise RuntimeError('yacc: internal parser error!!!\n')
#--! parseopt-end
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# parseopt_notrack().
#
# Optimized version of parseopt() with line number tracking removed.
# DO NOT EDIT THIS CODE DIRECTLY. This code is automatically generated
# by the ply/ygen.py script. Make changes to the parsedebug() method instead.
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
def parseopt_notrack(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
#--! parseopt-notrack-start
lookahead = None # Current lookahead symbol
lookaheadstack = [] # Stack of lookahead symbols
actions = self.action # Local reference to action table (to avoid lookup on self.)
goto = self.goto # Local reference to goto table (to avoid lookup on self.)
prod = self.productions # Local reference to production list (to avoid lookup on self.)
defaulted_states = self.defaulted_states # Local reference to defaulted states
pslice = YaccProduction(None) # Production object passed to grammar rules
errorcount = 0 # Used during error recovery
# If no lexer was given, we will try to use the lex module
if not lexer:
from . import lex
lexer = lex.lexer
# Set up the lexer and parser objects on pslice
pslice.lexer = lexer
pslice.parser = self
# If input was supplied, pass to lexer
if input is not None:
lexer.input(input)
if tokenfunc is None:
# Tokenize function
get_token = lexer.token
else:
get_token = tokenfunc
# Set the parser() token method (sometimes used in error recovery)
self.token = get_token
# Set up the state and symbol stacks
statestack = [] # Stack of parsing states
self.statestack = statestack
symstack = [] # Stack of grammar symbols
self.symstack = symstack
pslice.stack = symstack # Put in the production
errtoken = None # Err token
# The start state is assumed to be (0,$end)
statestack.append(0)
sym = YaccSymbol()
sym.type = '$end'
symstack.append(sym)
state = 0
while True:
# Get the next symbol on the input. If a lookahead symbol
# is already set, we just use that. Otherwise, we'll pull
# the next token off of the lookaheadstack or from the lexer
if state not in defaulted_states:
if not lookahead:
if not lookaheadstack:
lookahead = get_token() # Get the next token
else:
lookahead = lookaheadstack.pop()
if not lookahead:
lookahead = YaccSymbol()
lookahead.type = '$end'
# Check the action table
ltype = lookahead.type
t = actions[state].get(ltype)
else:
t = defaulted_states[state]
if t is not None:
if t > 0:
# shift a symbol on the stack
statestack.append(t)
state = t
symstack.append(lookahead)
lookahead = None
# Decrease error count on successful shift
if errorcount:
errorcount -= 1
continue
if t < 0:
# reduce a symbol on the stack, emit a production
p = prod[-t]
pname = p.name
plen = p.len
# Get production function
sym = YaccSymbol()
sym.type = pname # Production name
sym.value = None
if plen:
targ = symstack[-plen-1:]
targ[0] = sym
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# below as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
del symstack[-plen:]
del statestack[-plen:]
p.callable(pslice)
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead)
symstack.pop()
statestack.pop()
state = statestack[-1]
sym.type = 'error'
lookahead = sym
errorcount = error_count
self.errorok = False
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
else:
targ = [sym]
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# above as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
p.callable(pslice)
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead)
symstack.pop()
statestack.pop()
state = statestack[-1]
sym.type = 'error'
lookahead = sym
errorcount = error_count
self.errorok = False
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
if t == 0:
n = symstack[-1]
result = getattr(n, 'value', None)
return result
if t is None:
# We have some kind of parsing error here. To handle
# this, we are going to push the current token onto
# the tokenstack and replace it with an 'error' token.
# If there are any synchronization rules, they may
# catch it.
#
# In addition to pushing the error token, we call
# the user defined p_error() function if this is the
# first syntax error. This function is only called if
# errorcount == 0.
if errorcount == 0 or self.errorok:
errorcount = error_count
self.errorok = False
errtoken = lookahead
if errtoken.type == '$end':
errtoken = None # End of file!
if self.errorfunc:
if errtoken and not hasattr(errtoken, 'lexer'):
errtoken.lexer = lexer
tok = call_errorfunc(self.errorfunc, errtoken, self)
if self.errorok:
# User must have done some kind of panic
# mode recovery on their own. The
# returned token is the next lookahead
lookahead = tok
errtoken = None
continue
else:
if errtoken:
if hasattr(errtoken, 'lineno'):
lineno = lookahead.lineno
else:
lineno = 0
if lineno:
sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type))
else:
sys.stderr.write('yacc: Syntax error, token=%s' % errtoken.type)
else:
sys.stderr.write('yacc: Parse error in input. EOF\n')
return
else:
errorcount = error_count
# case 1: the statestack only has 1 entry on it. If we're in this state, the
# entire parse has been rolled back and we're completely hosed. The token is
# discarded and we just keep going.
if len(statestack) <= 1 and lookahead.type != '$end':
lookahead = None
errtoken = None
state = 0
# Nuke the pushback stack
del lookaheadstack[:]
continue
# case 2: the statestack has a couple of entries on it, but we're
# at the end of the file. nuke the top entry and generate an error token
# Start nuking entries on the stack
if lookahead.type == '$end':
# Whoa. We're really hosed here. Bail out
return
if lookahead.type != 'error':
sym = symstack[-1]
if sym.type == 'error':
# Hmmm. Error is on top of stack, we'll just nuke input
# symbol and continue
lookahead = None
continue
# Create the error symbol for the first time and make it the new lookahead symbol
t = YaccSymbol()
t.type = 'error'
if hasattr(lookahead, 'lineno'):
t.lineno = t.endlineno = lookahead.lineno
if hasattr(lookahead, 'lexpos'):
t.lexpos = t.endlexpos = lookahead.lexpos
t.value = lookahead
lookaheadstack.append(lookahead)
lookahead = t
else:
sym = symstack.pop()
statestack.pop()
state = statestack[-1]
continue
# Call an error function here
raise RuntimeError('yacc: internal parser error!!!\n')
#--! parseopt-notrack-end
# -----------------------------------------------------------------------------
# === Grammar Representation ===
#
# The following functions, classes, and variables are used to represent and
# manipulate the rules that make up a grammar.
# -----------------------------------------------------------------------------
# regex matching identifiers
_is_identifier = re.compile(r'^[a-zA-Z0-9_-]+$')
# -----------------------------------------------------------------------------
# class Production:
#
# This class stores the raw information about a single production or grammar rule.
# A grammar rule refers to a specification such as this:
#
# expr : expr PLUS term
#
# Here are the basic attributes defined on all productions
#
# name - Name of the production. For example 'expr'
# prod - A list of symbols on the right side ['expr','PLUS','term']
# prec - Production precedence level
# number - Production number.
# func - Function that executes on reduce
# file - File where production function is defined
# lineno - Line number where production function is defined
#
# The following attributes are defined or optional.
#
# len - Length of the production (number of symbols on right hand side)
# usyms - Set of unique symbols found in the production
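#
# For example (an illustrative rule, not one defined in this module), the rule
# 'expr : expr PLUS term' would be stored roughly as:
#
#     p = Production(number=1, name='expr', prod=['expr', 'PLUS', 'term'],
#                    precedence=('left', 1), func='p_expr_plus',
#                    file='calc.py', line=42)
#     len(p)    # -> 3, number of right-hand-side symbols
#     p.usyms   # -> ['expr', 'PLUS', 'term'], unique symbols in the production
#     str(p)    # -> 'expr -> expr PLUS term'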
# -----------------------------------------------------------------------------
class Production(object):
reduced = 0
def __init__(self, number, name, prod, precedence=('right', 0), func=None, file='', line=0):
self.name = name
self.prod = tuple(prod)
self.number = number
self.func = func
self.callable = None
self.file = file
self.line = line
self.prec = precedence
# Internal settings used during table construction
self.len = len(self.prod) # Length of the production
# Create a list of unique production symbols used in the production
self.usyms = []
for s in self.prod:
if s not in self.usyms:
self.usyms.append(s)
# List of all LR items for the production
self.lr_items = []
self.lr_next = None
# Create a string representation
if self.prod:
self.str = '%s -> %s' % (self.name, ' '.join(self.prod))
else:
self.str = '%s -> <empty>' % self.name
def __str__(self):
return self.str
def __repr__(self):
return 'Production(' + str(self) + ')'
def __len__(self):
return len(self.prod)
def __nonzero__(self):
return 1
def __getitem__(self, index):
return self.prod[index]
# Return the nth lr_item from the production (or None if at the end)
def lr_item(self, n):
if n > len(self.prod):
return None
p = LRItem(self, n)
# Precompute the list of productions immediately following.
try:
p.lr_after = Prodnames[p.prod[n+1]]
except (IndexError, KeyError):
p.lr_after = []
try:
p.lr_before = p.prod[n-1]
except IndexError:
p.lr_before = None
return p
# Bind the production function name to a callable
def bind(self, pdict):
if self.func:
self.callable = pdict[self.func]
# This class serves as a minimal stand-in for Production objects when
# reading table data from files. It only contains information
# actually used by the LR parsing engine, plus some additional
# debugging information.
class MiniProduction(object):
def __init__(self, str, name, len, func, file, line):
self.name = name
self.len = len
self.func = func
self.callable = None
self.file = file
self.line = line
self.str = str
def __str__(self):
return self.str
def __repr__(self):
return 'MiniProduction(%s)' % self.str
# Bind the production function name to a callable
def bind(self, pdict):
if self.func:
self.callable = pdict[self.func]
# -----------------------------------------------------------------------------
# class LRItem
#
# This class represents a specific stage of parsing a production rule. For
# example:
#
# expr : expr . PLUS term
#
# In the above, the "." represents the current location of the parse. Here
# are the basic attributes:
#
# name - Name of the production. For example 'expr'
# prod - A list of symbols on the right side ['expr','.', 'PLUS','term']
# number - Production number.
#
# lr_next - Next LR item. For example, if we are 'expr -> expr . PLUS term'
# then lr_next refers to 'expr -> expr PLUS . term'
# lr_index - LR item index (location of the ".") in the prod list.
# lookaheads - LALR lookahead symbols for this item
# len - Length of the production (number of symbols on right hand side)
# lr_after - List of all productions that immediately follow
# lr_before - Grammar symbol immediately before
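#
# Illustrative example (same made-up rule as above): the production
# 'expr : expr PLUS term' gives rise to four LR items as the dot advances:
#
#     expr -> . expr PLUS term
#     expr -> expr . PLUS term
#     expr -> expr PLUS . term
#     expr -> expr PLUS term .
#
# The item 'expr -> expr . PLUS term' has lr_index == 1; when the items are
# built by Grammar.build_lritems(), its lr_before attribute is 'expr' (the
# symbol just before the dot) and lr_next points at 'expr -> expr PLUS . term'.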
# -----------------------------------------------------------------------------
class LRItem(object):
def __init__(self, p, n):
self.name = p.name
self.prod = list(p.prod)
self.number = p.number
self.lr_index = n
self.lookaheads = {}
self.prod.insert(n, '.')
self.prod = tuple(self.prod)
self.len = len(self.prod)
self.usyms = p.usyms
def __str__(self):
if self.prod:
s = '%s -> %s' % (self.name, ' '.join(self.prod))
else:
s = '%s -> <empty>' % self.name
return s
def __repr__(self):
return 'LRItem(' + str(self) + ')'
# -----------------------------------------------------------------------------
# rightmost_terminal()
#
# Return the rightmost terminal from a list of symbols. Used in add_production()
# -----------------------------------------------------------------------------
def rightmost_terminal(symbols, terminals):
i = len(symbols) - 1
while i >= 0:
if symbols[i] in terminals:
return symbols[i]
i -= 1
return None
# -----------------------------------------------------------------------------
# === GRAMMAR CLASS ===
#
# The following class represents the contents of the specified grammar along
# with various computed properties such as first sets, follow sets, LR items, etc.
# This data is used for critical parts of the table generation process later.
# -----------------------------------------------------------------------------
class GrammarError(YaccError):
pass
class Grammar(object):
def __init__(self, terminals):
self.Productions = [None] # A list of all of the productions. The first
# entry is always reserved for the purpose of
# building an augmented grammar
self.Prodnames = {} # A dictionary mapping the names of nonterminals to a list of all
# productions of that nonterminal.
self.Prodmap = {} # A dictionary that is only used to detect duplicate
# productions.
self.Terminals = {} # A dictionary mapping the names of terminal symbols to a
# list of the rules where they are used.
for term in terminals:
self.Terminals[term] = []
self.Terminals['error'] = []
self.Nonterminals = {} # A dictionary mapping names of nonterminals to a list
# of rule numbers where they are used.
self.First = {} # A dictionary of precomputed FIRST(x) symbols
self.Follow = {} # A dictionary of precomputed FOLLOW(x) symbols
self.Precedence = {} # Precedence rules for each terminal. Contains tuples of the
# form ('right',level) or ('nonassoc', level) or ('left',level)
self.UsedPrecedence = set() # Precedence rules that were actually used by the grammar.
# This is only used to provide error checking and to generate
# a warning about unused precedence rules.
self.Start = None # Starting symbol for the grammar
def __len__(self):
return len(self.Productions)
def __getitem__(self, index):
return self.Productions[index]
# -----------------------------------------------------------------------------
# set_precedence()
#
# Sets the precedence for a given terminal. assoc is the associativity such as
# 'left','right', or 'nonassoc'. level is a numeric level.
#
# -----------------------------------------------------------------------------
def set_precedence(self, term, assoc, level):
assert self.Productions == [None], 'Must call set_precedence() before add_production()'
if term in self.Precedence:
raise GrammarError('Precedence already specified for terminal %r' % term)
if assoc not in ['left', 'right', 'nonassoc']:
raise GrammarError("Associativity must be one of 'left','right', or 'nonassoc'")
self.Precedence[term] = (assoc, level)
# -----------------------------------------------------------------------------
# add_production()
#
# Given an action function, this function assembles a production rule and
# computes its precedence level.
#
# The production rule is supplied as a list of symbols. For example,
# a rule such as 'expr : expr PLUS term' has a production name of 'expr' and
# symbols ['expr','PLUS','term'].
#
# Precedence is determined by the precedence of the right-most non-terminal
# or the precedence of a terminal specified by %prec.
#
# A variety of error checks are performed to make sure production symbols
# are valid and that %prec is used correctly.
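#
# Illustrative sketch (a made-up unary-minus rule, not defined here): given a
# Grammar instance g whose precedence table contains
#
#     g.set_precedence('PLUS',   'left',  1)
#     g.set_precedence('UMINUS', 'right', 2)
#
# the call
#
#     g.add_production('expr', ['MINUS', 'expr', '%prec', 'UMINUS'],
#                      func='p_expr_uminus', file='calc.py', line=57)
#
# strips the trailing '%prec UMINUS' from the symbol list and records the rule
# with precedence ('right', 2) rather than the precedence of its rightmost
# terminal.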
# -----------------------------------------------------------------------------
def add_production(self, prodname, syms, func=None, file='', line=0):
if prodname in self.Terminals:
raise GrammarError('%s:%d: Illegal rule name %r. Already defined as a token' % (file, line, prodname))
if prodname == 'error':
raise GrammarError('%s:%d: Illegal rule name %r. error is a reserved word' % (file, line, prodname))
if not _is_identifier.match(prodname):
raise GrammarError('%s:%d: Illegal rule name %r' % (file, line, prodname))
# Look for literal tokens
for n, s in enumerate(syms):
if s[0] in "'\"":
try:
c = eval(s)
if (len(c) > 1):
raise GrammarError('%s:%d: Literal token %s in rule %r may only be a single character' %
(file, line, s, prodname))
if c not in self.Terminals:
self.Terminals[c] = []
syms[n] = c
continue
except SyntaxError:
pass
if not _is_identifier.match(s) and s != '%prec':
raise GrammarError('%s:%d: Illegal name %r in rule %r' % (file, line, s, prodname))
# Determine the precedence level
if '%prec' in syms:
if syms[-1] == '%prec':
raise GrammarError('%s:%d: Syntax error. Nothing follows %%prec' % (file, line))
if syms[-2] != '%prec':
raise GrammarError('%s:%d: Syntax error. %%prec can only appear at the end of a grammar rule' %
(file, line))
precname = syms[-1]
prodprec = self.Precedence.get(precname)
if not prodprec:
raise GrammarError('%s:%d: Nothing known about the precedence of %r' % (file, line, precname))
else:
self.UsedPrecedence.add(precname)
del syms[-2:] # Drop %prec from the rule
else:
# If no %prec, precedence is determined by the rightmost terminal symbol
precname = rightmost_terminal(syms, self.Terminals)
prodprec = self.Precedence.get(precname, ('right', 0))
# See if the rule is already in the rulemap
map = '%s -> %s' % (prodname, syms)
if map in self.Prodmap:
m = self.Prodmap[map]
raise GrammarError('%s:%d: Duplicate rule %s. ' % (file, line, m) +
'Previous definition at %s:%d' % (m.file, m.line))
# From this point on, everything is valid. Create a new Production instance
pnumber = len(self.Productions)
if prodname not in self.Nonterminals:
self.Nonterminals[prodname] = []
# Add the production number to Terminals and Nonterminals
for t in syms:
if t in self.Terminals:
self.Terminals[t].append(pnumber)
else:
if t not in self.Nonterminals:
self.Nonterminals[t] = []
self.Nonterminals[t].append(pnumber)
# Create a production and add it to the list of productions
p = Production(pnumber, prodname, syms, prodprec, func, file, line)
self.Productions.append(p)
self.Prodmap[map] = p
# Add to the global productions list
try:
self.Prodnames[prodname].append(p)
except KeyError:
self.Prodnames[prodname] = [p]
# -----------------------------------------------------------------------------
# set_start()
#
# Sets the starting symbol and creates the augmented grammar. Production
# rule 0 is S' -> start where start is the start symbol.
# -----------------------------------------------------------------------------
def set_start(self, start=None):
if not start:
start = self.Productions[1].name
if start not in self.Nonterminals:
raise GrammarError('start symbol %s undefined' % start)
self.Productions[0] = Production(0, "S'", [start])
self.Nonterminals[start].append(0)
self.Start = start
# -----------------------------------------------------------------------------
# find_unreachable()
#
# Find all of the nonterminal symbols that can't be reached from the starting
# symbol. Returns a list of nonterminals that can't be reached.
# -----------------------------------------------------------------------------
def find_unreachable(self):
# Mark all symbols that are reachable from a symbol s
def mark_reachable_from(s):
if s in reachable:
return
reachable.add(s)
for p in self.Prodnames.get(s, []):
for r in p.prod:
mark_reachable_from(r)
reachable = set()
mark_reachable_from(self.Productions[0].prod[0])
return [s for s in self.Nonterminals if s not in reachable]
# -----------------------------------------------------------------------------
# infinite_cycles()
#
# This function looks at the various parsing rules and tries to detect
# infinite recursion cycles (grammar rules where there is no possible way
# to derive a string of only terminals).
# -----------------------------------------------------------------------------
def infinite_cycles(self):
terminates = {}
# Terminals:
for t in self.Terminals:
terminates[t] = True
terminates['$end'] = True
# Nonterminals:
# Initialize to false:
for n in self.Nonterminals:
terminates[n] = False
# Then propagate termination until no change:
while True:
some_change = False
for (n, pl) in self.Prodnames.items():
# Nonterminal n terminates iff any of its productions terminates.
for p in pl:
# Production p terminates iff all of its rhs symbols terminate.
for s in p.prod:
if not terminates[s]:
# The symbol s does not terminate,
# so production p does not terminate.
p_terminates = False
break
else:
# didn't break from the loop,
# so every symbol s terminates
# so production p terminates.
p_terminates = True
if p_terminates:
# symbol n terminates!
if not terminates[n]:
terminates[n] = True
some_change = True
# Don't need to consider any more productions for this n.
break
if not some_change:
break
infinite = []
for (s, term) in terminates.items():
if not term:
if s not in self.Prodnames and s not in self.Terminals and s != 'error':
# s is used-but-not-defined, and we've already warned of that,
# so it would be overkill to say that it's also non-terminating.
pass
else:
infinite.append(s)
return infinite
# -----------------------------------------------------------------------------
# undefined_symbols()
#
# Find all symbols that were used in the grammar, but not defined as tokens or
# grammar rules. Returns a list of tuples (sym, prod) where sym is the symbol
# and prod is the production where the symbol was used.
# -----------------------------------------------------------------------------
def undefined_symbols(self):
result = []
for p in self.Productions:
if not p:
continue
for s in p.prod:
if s not in self.Prodnames and s not in self.Terminals and s != 'error':
result.append((s, p))
return result
# -----------------------------------------------------------------------------
# unused_terminals()
#
# Find all terminals that were defined, but not used by the grammar. Returns
# a list of all symbols.
# -----------------------------------------------------------------------------
def unused_terminals(self):
unused_tok = []
for s, v in self.Terminals.items():
if s != 'error' and not v:
unused_tok.append(s)
return unused_tok
# ------------------------------------------------------------------------------
# unused_rules()
#
# Find all grammar rules that were defined, but not used (maybe not reachable)
# Returns a list of productions.
# ------------------------------------------------------------------------------
def unused_rules(self):
unused_prod = []
for s, v in self.Nonterminals.items():
if not v:
p = self.Prodnames[s][0]
unused_prod.append(p)
return unused_prod
# -----------------------------------------------------------------------------
# unused_precedence()
#
# Returns a list of tuples (term,precedence) corresponding to precedence
# rules that were never used by the grammar. term is the name of the terminal
# on which precedence was applied and precedence is a string such as 'left' or
# 'right' corresponding to the type of precedence.
# -----------------------------------------------------------------------------
def unused_precedence(self):
unused = []
for termname in self.Precedence:
if not (termname in self.Terminals or termname in self.UsedPrecedence):
unused.append((termname, self.Precedence[termname][0]))
return unused
# -------------------------------------------------------------------------
# _first()
#
# Compute the value of FIRST1(beta) where beta is a tuple of symbols.
#
# During execution of compute_first(), the result may be incomplete.
# Afterward (e.g., when called from compute_follow()), it will be complete.
# -------------------------------------------------------------------------
def _first(self, beta):
# We are computing First(x1,x2,x3,...,xn)
result = []
for x in beta:
x_produces_empty = False
# Add all the non-<empty> symbols of First[x] to the result.
for f in self.First[x]:
if f == '<empty>':
x_produces_empty = True
else:
if f not in result:
result.append(f)
if x_produces_empty:
# We have to consider the next x in beta,
# i.e. stay in the loop.
pass
else:
# We don't have to consider any further symbols in beta.
break
else:
# There was no 'break' from the loop,
# so x_produces_empty was true for all x in beta,
# so beta produces empty as well.
result.append('<empty>')
return result
# -------------------------------------------------------------------------
# compute_first()
#
# Compute the value of FIRST1(X) for all symbols
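#
# Small worked example (an assumed toy grammar, not drawn from this file):
# for the rules
#
#     S : A B        A : 'a' | <empty>        B : 'b'
#
# the fixpoint computed here gives First[A] = {'a', '<empty>'}, First[B] = {'b'}
# and First[S] = {'a', 'b'}; because A can derive empty, the first symbol of B
# is also a first symbol of S.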
# -------------------------------------------------------------------------
def compute_first(self):
if self.First:
return self.First
# Terminals:
for t in self.Terminals:
self.First[t] = [t]
self.First['$end'] = ['$end']
# Nonterminals:
# Initialize to the empty set:
for n in self.Nonterminals:
self.First[n] = []
# Then propagate symbols until no change:
while True:
some_change = False
for n in self.Nonterminals:
for p in self.Prodnames[n]:
for f in self._first(p.prod):
if f not in self.First[n]:
self.First[n].append(f)
some_change = True
if not some_change:
break
return self.First
# ---------------------------------------------------------------------
# compute_follow()
#
# Computes all of the follow sets for every non-terminal symbol. The
# follow set is the set of all symbols that might follow a given
# non-terminal. See the Dragon book, 2nd Ed. p. 189.
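#
# Continuing the toy grammar used in the compute_first() example above: with
# start symbol S and rules  S : A B,  A : 'a' | <empty>,  B : 'b',  the result
# is Follow[S] = {'$end'}, Follow[A] = {'b'} (whatever can begin B follows A)
# and Follow[B] = {'$end'} (B ends the only production for S).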
# ---------------------------------------------------------------------
def compute_follow(self, start=None):
# If already computed, return the result
if self.Follow:
return self.Follow
# If first sets not computed yet, do that first.
if not self.First:
self.compute_first()
# Add '$end' to the follow list of the start symbol
for k in self.Nonterminals:
self.Follow[k] = []
if not start:
start = self.Productions[1].name
self.Follow[start] = ['$end']
while True:
didadd = False
for p in self.Productions[1:]:
# Here is the production set
for i, B in enumerate(p.prod):
if B in self.Nonterminals:
# Okay. We got a non-terminal in a production
fst = self._first(p.prod[i+1:])
hasempty = False
for f in fst:
if f != '<empty>' and f not in self.Follow[B]:
self.Follow[B].append(f)
didadd = True
if f == '<empty>':
hasempty = True
if hasempty or i == (len(p.prod)-1):
# Add elements of follow(a) to follow(b)
for f in self.Follow[p.name]:
if f not in self.Follow[B]:
self.Follow[B].append(f)
didadd = True
if not didadd:
break
return self.Follow
# -----------------------------------------------------------------------------
# build_lritems()
#
# This function walks the list of productions and builds a complete set of the
# LR items. The LR items are stored in two ways: First, they are uniquely
# numbered and placed in the list _lritems. Second, a linked list of LR items
# is built for each production. For example:
#
# E -> E PLUS E
#
# Creates the list
#
# [E -> . E PLUS E, E -> E . PLUS E, E -> E PLUS . E, E -> E PLUS E . ]
# -----------------------------------------------------------------------------
def build_lritems(self):
for p in self.Productions:
lastlri = p
i = 0
lr_items = []
while True:
if i > len(p):
lri = None
else:
lri = LRItem(p, i)
# Precompute the list of productions immediately following
try:
lri.lr_after = self.Prodnames[lri.prod[i+1]]
except (IndexError, KeyError):
lri.lr_after = []
try:
lri.lr_before = lri.prod[i-1]
except IndexError:
lri.lr_before = None
lastlri.lr_next = lri
if not lri:
break
lr_items.append(lri)
lastlri = lri
i += 1
p.lr_items = lr_items
# -----------------------------------------------------------------------------
# == Class LRTable ==
#
# This class represents a basic table of LR parsing information.
# Methods for generating the tables are not defined here. They are defined
# in the derived class LRGeneratedTable.
# -----------------------------------------------------------------------------
class VersionError(YaccError):
pass
class LRTable(object):
def __init__(self):
self.lr_action = None
self.lr_goto = None
self.lr_productions = None
self.lr_method = None
def read_table(self, module):
if isinstance(module, types.ModuleType):
parsetab = module
else:
exec('import %s' % module)
parsetab = sys.modules[module]
if parsetab._tabversion != __tabversion__:
raise VersionError('yacc table file version is out of date')
self.lr_action = parsetab._lr_action
self.lr_goto = parsetab._lr_goto
self.lr_productions = []
for p in parsetab._lr_productions:
self.lr_productions.append(MiniProduction(*p))
self.lr_method = parsetab._lr_method
return parsetab._lr_signature
def read_pickle(self, filename):
try:
import cPickle as pickle
except ImportError:
import pickle
if not os.path.exists(filename):
raise ImportError
in_f = open(filename, 'rb')
tabversion = pickle.load(in_f)
if tabversion != __tabversion__:
raise VersionError('yacc table file version is out of date')
self.lr_method = pickle.load(in_f)
signature = pickle.load(in_f)
self.lr_action = pickle.load(in_f)
self.lr_goto = pickle.load(in_f)
productions = pickle.load(in_f)
self.lr_productions = []
for p in productions:
self.lr_productions.append(MiniProduction(*p))
in_f.close()
return signature
# Bind all production function names to callable objects in pdict
def bind_callables(self, pdict):
for p in self.lr_productions:
p.bind(pdict)
# -----------------------------------------------------------------------------
# === LR Generator ===
#
# The following classes and functions are used to generate LR parsing tables on
# a grammar.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# digraph()
# traverse()
#
# The following two functions are used to compute set valued functions
# of the form:
#
# F(x) = F'(x) U U{F(y) | x R y}
#
# This is used to compute the values of Read() sets as well as FOLLOW sets
# in LALR(1) generation.
#
# Inputs: X - An input set
# R - A relation
# FP - Set-valued function
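#
# Small illustrative example (made-up inputs, not taken from table generation):
#
#     X = ['a', 'b', 'c']
#     R = lambda x: {'a': ['b'], 'b': ['c'], 'c': []}[x]      # a R b, b R c
#     FP = lambda x: {'a': [1], 'b': [2], 'c': [3]}[x]
#     digraph(X, R, FP)    # -> {'a': [1, 2, 3], 'b': [2, 3], 'c': [3]}
#
# i.e. each F(x) is F'(x) merged with F(y) for every y reachable from x.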
# ------------------------------------------------------------------------------
def digraph(X, R, FP):
N = {}
for x in X:
N[x] = 0
stack = []
F = {}
for x in X:
if N[x] == 0:
traverse(x, N, stack, F, X, R, FP)
return F
def traverse(x, N, stack, F, X, R, FP):
stack.append(x)
d = len(stack)
N[x] = d
F[x] = FP(x) # F(X) <- F'(x)
rel = R(x) # Get y's related to x
for y in rel:
if N[y] == 0:
traverse(y, N, stack, F, X, R, FP)
N[x] = min(N[x], N[y])
for a in F.get(y, []):
if a not in F[x]:
F[x].append(a)
if N[x] == d:
N[stack[-1]] = MAXINT
F[stack[-1]] = F[x]
element = stack.pop()
while element != x:
N[stack[-1]] = MAXINT
F[stack[-1]] = F[x]
element = stack.pop()
class LALRError(YaccError):
pass
# -----------------------------------------------------------------------------
# == LRGeneratedTable ==
#
# This class implements the LR table generation algorithm. There are no
# public methods except for write()
# -----------------------------------------------------------------------------
class LRGeneratedTable(LRTable):
def __init__(self, grammar, method='LALR', log=None):
if method not in ['SLR', 'LALR']:
raise LALRError('Unsupported method %s' % method)
self.grammar = grammar
self.lr_method = method
# Set up the logger
if not log:
log = NullLogger()
self.log = log
# Internal attributes
self.lr_action = {} # Action table
self.lr_goto = {} # Goto table
self.lr_productions = grammar.Productions # Copy of grammar Production array
self.lr_goto_cache = {} # Cache of computed gotos
self.lr0_cidhash = {} # Cache of closures
self._add_count = 0 # Internal counter used to detect cycles
# Diagonistic information filled in by the table generator
self.sr_conflict = 0
self.rr_conflict = 0
self.conflicts = [] # List of conflicts
self.sr_conflicts = []
self.rr_conflicts = []
# Build the tables
self.grammar.build_lritems()
self.grammar.compute_first()
self.grammar.compute_follow()
self.lr_parse_table()
# Compute the LR(0) closure operation on I, where I is a set of LR(0) items.
def lr0_closure(self, I):
self._add_count += 1
# Add everything in I to J
J = I[:]
didadd = True
while didadd:
didadd = False
for j in J:
for x in j.lr_after:
if getattr(x, 'lr0_added', 0) == self._add_count:
continue
# Add B --> .G to J
J.append(x.lr_next)
x.lr0_added = self._add_count
didadd = True
return J
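    # For example (symbols are illustrative only), an item of the form
    # E -> E . + T contributes nothing new, while an item E -> . T causes every
    # item T -> . gamma to be appended via the precomputed lr_after lists.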
# Compute the LR(0) goto function goto(I,X) where I is a set
# of LR(0) items and X is a grammar symbol. This function is written
# in a way that guarantees uniqueness of the generated goto sets
# (i.e. the same goto set will never be returned as two different Python
# objects). With uniqueness, we can later do fast set comparisons using
# id(obj) instead of element-wise comparison.
def lr0_goto(self, I, x):
# First we look for a previously cached entry
g = self.lr_goto_cache.get((id(I), x))
if g:
return g
# Now we generate the goto set in a way that guarantees uniqueness
# of the result
s = self.lr_goto_cache.get(x)
if not s:
s = {}
self.lr_goto_cache[x] = s
gs = []
for p in I:
n = p.lr_next
if n and n.lr_before == x:
s1 = s.get(id(n))
if not s1:
s1 = {}
s[id(n)] = s1
gs.append(n)
s = s1
g = s.get('$end')
if not g:
if gs:
g = self.lr0_closure(gs)
s['$end'] = g
else:
s['$end'] = gs
self.lr_goto_cache[(id(I), x)] = g
return g
# Compute the LR(0) sets of item function
def lr0_items(self):
C = [self.lr0_closure([self.grammar.Productions[0].lr_next])]
i = 0
for I in C:
self.lr0_cidhash[id(I)] = i
i += 1
# Loop over the items in C and each grammar symbols
i = 0
while i < len(C):
I = C[i]
i += 1
# Collect all of the symbols that could possibly be in the goto(I,X) sets
asyms = {}
for ii in I:
for s in ii.usyms:
asyms[s] = None
for x in asyms:
g = self.lr0_goto(I, x)
if not g or id(g) in self.lr0_cidhash:
continue
self.lr0_cidhash[id(g)] = len(C)
C.append(g)
return C
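    # Each item set in C gets a state number equal to its index; the id()-keyed
    # lr0_cidhash lets later passes map a goto() result back to its state number
    # without comparing item sets element by element.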
# -----------------------------------------------------------------------------
# ==== LALR(1) Parsing ====
#
# LALR(1) parsing is almost exactly the same as SLR except that instead of
# relying upon Follow() sets when performing reductions, a more selective
# lookahead set that incorporates the state of the LR(0) machine is utilized.
# Thus, we mainly just have to focus on calculating the lookahead sets.
#
# The method used here is due to DeRemer and Pennelo (1982).
#
# DeRemer, F. L., and T. J. Pennelo: "Efficient Computation of LALR(1)
# Lookahead Sets", ACM Transactions on Programming Languages and Systems,
# Vol. 4, No. 4, Oct. 1982, pp. 615-649
#
# Further details can also be found in:
#
# J. Tremblay and P. Sorenson, "The Theory and Practice of Compiler Writing",
# McGraw-Hill Book Company, (1985).
#
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# compute_nullable_nonterminals()
#
    # Creates a set containing all of the non-terminals that might produce
    # an empty production.
# -----------------------------------------------------------------------------
def compute_nullable_nonterminals(self):
nullable = set()
num_nullable = 0
while True:
for p in self.grammar.Productions[1:]:
if p.len == 0:
nullable.add(p.name)
continue
for t in p.prod:
if t not in nullable:
break
else:
nullable.add(p.name)
if len(nullable) == num_nullable:
break
num_nullable = len(nullable)
return nullable
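    # Illustration (hypothetical grammar): given the rules
    #     opt_sign : PLUS
    #     opt_sign : <empty>
    # the empty production makes 'opt_sign' nullable, and any nonterminal whose
    # entire right-hand side is nullable is added on a later pass.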
# -----------------------------------------------------------------------------
    # find_nonterminal_transitions(C)
#
    # Given a set of LR(0) items, this function finds all of the non-terminal
# transitions. These are transitions in which a dot appears immediately before
# a non-terminal. Returns a list of tuples of the form (state,N) where state
# is the state number and N is the nonterminal symbol.
#
# The input C is the set of LR(0) items.
# -----------------------------------------------------------------------------
def find_nonterminal_transitions(self, C):
trans = []
for stateno, state in enumerate(C):
for p in state:
if p.lr_index < p.len - 1:
t = (stateno, p.prod[p.lr_index+1])
if t[1] in self.grammar.Nonterminals:
if t not in trans:
trans.append(t)
return trans
# -----------------------------------------------------------------------------
# dr_relation()
#
# Computes the DR(p,A) relationships for non-terminal transitions. The input
# is a tuple (state,N) where state is a number and N is a nonterminal symbol.
#
# Returns a list of terminals.
# -----------------------------------------------------------------------------
def dr_relation(self, C, trans, nullable):
dr_set = {}
state, N = trans
terms = []
g = self.lr0_goto(C[state], N)
for p in g:
if p.lr_index < p.len - 1:
a = p.prod[p.lr_index+1]
if a in self.grammar.Terminals:
if a not in terms:
terms.append(a)
# This extra bit is to handle the start state
if state == 0 and N == self.grammar.Productions[0].prod[0]:
terms.append('$end')
return terms
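    # Put differently, DR(p, A) is the set of terminals that can appear
    # immediately after the dot in the state goto(p, A). For a hypothetical item
    # E -> E . + T reached by the transition on E, '+' would be in DR(p, E).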
# -----------------------------------------------------------------------------
# reads_relation()
#
# Computes the READS() relation (p,A) READS (t,C).
# -----------------------------------------------------------------------------
def reads_relation(self, C, trans, empty):
# Look for empty transitions
rel = []
state, N = trans
g = self.lr0_goto(C[state], N)
j = self.lr0_cidhash.get(id(g), -1)
for p in g:
if p.lr_index < p.len - 1:
a = p.prod[p.lr_index + 1]
if a in empty:
rel.append((j, a))
return rel
# -----------------------------------------------------------------------------
# compute_lookback_includes()
#
# Determines the lookback and includes relations
#
# LOOKBACK:
#
# This relation is determined by running the LR(0) state machine forward.
# For example, starting with a production "N : . A B C", we run it forward
# to obtain "N : A B C ." We then build a relationship between this final
# state and the starting state. These relationships are stored in a dictionary
# lookdict.
#
# INCLUDES:
#
# Computes the INCLUDE() relation (p,A) INCLUDES (p',B).
#
# This relation is used to determine non-terminal transitions that occur
# inside of other non-terminal transition states. (p,A) INCLUDES (p', B)
# if the following holds:
#
# B -> LAT, where T -> epsilon and p' -L-> p
#
# L is essentially a prefix (which may be empty), T is a suffix that must be
# able to derive an empty string. State p' must lead to state p with the string L.
#
# -----------------------------------------------------------------------------
def compute_lookback_includes(self, C, trans, nullable):
lookdict = {} # Dictionary of lookback relations
includedict = {} # Dictionary of include relations
# Make a dictionary of non-terminal transitions
dtrans = {}
for t in trans:
dtrans[t] = 1
# Loop over all transitions and compute lookbacks and includes
for state, N in trans:
lookb = []
includes = []
for p in C[state]:
if p.name != N:
continue
# Okay, we have a name match. We now follow the production all the way
# through the state machine until we get the . on the right hand side
lr_index = p.lr_index
j = state
while lr_index < p.len - 1:
lr_index = lr_index + 1
t = p.prod[lr_index]
# Check to see if this symbol and state are a non-terminal transition
if (j, t) in dtrans:
# Yes. Okay, there is some chance that this is an includes relation
# the only way to know for certain is whether the rest of the
# production derives empty
li = lr_index + 1
while li < p.len:
if p.prod[li] in self.grammar.Terminals:
break # No forget it
if p.prod[li] not in nullable:
break
li = li + 1
else:
# Appears to be a relation between (j,t) and (state,N)
includes.append((j, t))
g = self.lr0_goto(C[j], t) # Go to next set
j = self.lr0_cidhash.get(id(g), -1) # Go to next state
# When we get here, j is the final state, now we have to locate the production
for r in C[j]:
if r.name != p.name:
continue
if r.len != p.len:
continue
i = 0
                    # This loop is comparing a production ". A B C" with "A B C ."
while i < r.lr_index:
if r.prod[i] != p.prod[i+1]:
break
i = i + 1
else:
lookb.append((j, r))
for i in includes:
if i not in includedict:
includedict[i] = []
includedict[i].append((state, N))
lookdict[(state, N)] = lookb
return lookdict, includedict
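    # The two dictionaries returned here feed the LALR lookahead computation:
    # includedict is the relation traversed by compute_follow_sets(), and
    # lookdict tells add_lookaheads() which productions receive the resulting
    # follow sets.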
# -----------------------------------------------------------------------------
# compute_read_sets()
#
# Given a set of LR(0) items, this function computes the read sets.
#
# Inputs: C = Set of LR(0) items
# ntrans = Set of nonterminal transitions
# nullable = Set of empty transitions
#
# Returns a set containing the read sets
# -----------------------------------------------------------------------------
def compute_read_sets(self, C, ntrans, nullable):
FP = lambda x: self.dr_relation(C, x, nullable)
R = lambda x: self.reads_relation(C, x, nullable)
F = digraph(ntrans, R, FP)
return F
# -----------------------------------------------------------------------------
# compute_follow_sets()
#
# Given a set of LR(0) items, a set of non-terminal transitions, a readset,
# and an include set, this function computes the follow sets
#
# Follow(p,A) = Read(p,A) U U {Follow(p',B) | (p,A) INCLUDES (p',B)}
#
# Inputs:
# ntrans = Set of nonterminal transitions
# readsets = Readset (previously computed)
# inclsets = Include sets (previously computed)
#
# Returns a set containing the follow sets
# -----------------------------------------------------------------------------
def compute_follow_sets(self, ntrans, readsets, inclsets):
FP = lambda x: readsets[x]
R = lambda x: inclsets.get(x, [])
F = digraph(ntrans, R, FP)
return F
# -----------------------------------------------------------------------------
# add_lookaheads()
#
# Attaches the lookahead symbols to grammar rules.
#
# Inputs: lookbacks - Set of lookback relations
# followset - Computed follow set
#
# This function directly attaches the lookaheads to productions contained
# in the lookbacks set
# -----------------------------------------------------------------------------
def add_lookaheads(self, lookbacks, followset):
for trans, lb in lookbacks.items():
# Loop over productions in lookback
for state, p in lb:
if state not in p.lookaheads:
p.lookaheads[state] = []
f = followset.get(trans, [])
for a in f:
if a not in p.lookaheads[state]:
p.lookaheads[state].append(a)
# -----------------------------------------------------------------------------
# add_lalr_lookaheads()
#
# This function does all of the work of adding lookahead information for use
# with LALR parsing
# -----------------------------------------------------------------------------
def add_lalr_lookaheads(self, C):
# Determine all of the nullable nonterminals
nullable = self.compute_nullable_nonterminals()
# Find all non-terminal transitions
trans = self.find_nonterminal_transitions(C)
# Compute read sets
readsets = self.compute_read_sets(C, trans, nullable)
# Compute lookback/includes relations
lookd, included = self.compute_lookback_includes(C, trans, nullable)
# Compute LALR FOLLOW sets
followsets = self.compute_follow_sets(trans, readsets, included)
# Add all of the lookaheads
self.add_lookaheads(lookd, followsets)
# -----------------------------------------------------------------------------
# lr_parse_table()
#
# This function constructs the parse tables for SLR or LALR
# -----------------------------------------------------------------------------
def lr_parse_table(self):
Productions = self.grammar.Productions
Precedence = self.grammar.Precedence
goto = self.lr_goto # Goto array
action = self.lr_action # Action array
log = self.log # Logger for output
actionp = {} # Action production array (temporary)
log.info('Parsing method: %s', self.lr_method)
# Step 1: Construct C = { I0, I1, ... IN}, collection of LR(0) items
# This determines the number of states
C = self.lr0_items()
if self.lr_method == 'LALR':
self.add_lalr_lookaheads(C)
# Build the parser table, state by state
st = 0
for I in C:
# Loop over each production in I
actlist = [] # List of actions
st_action = {}
st_actionp = {}
st_goto = {}
log.info('')
log.info('state %d', st)
log.info('')
for p in I:
log.info(' (%d) %s', p.number, p)
log.info('')
for p in I:
if p.len == p.lr_index + 1:
if p.name == "S'":
# Start symbol. Accept!
st_action['$end'] = 0
st_actionp['$end'] = p
else:
# We are at the end of a production. Reduce!
if self.lr_method == 'LALR':
laheads = p.lookaheads[st]
else:
laheads = self.grammar.Follow[p.name]
for a in laheads:
actlist.append((a, p, 'reduce using rule %d (%s)' % (p.number, p)))
r = st_action.get(a)
if r is not None:
# Whoa. Have a shift/reduce or reduce/reduce conflict
if r > 0:
# Need to decide on shift or reduce here
# By default we favor shifting. Need to add
# some precedence rules here.
sprec, slevel = Productions[st_actionp[a].number].prec
rprec, rlevel = Precedence.get(a, ('right', 0))
if (slevel < rlevel) or ((slevel == rlevel) and (rprec == 'left')):
# We really need to reduce here.
st_action[a] = -p.number
st_actionp[a] = p
if not slevel and not rlevel:
log.info(' ! shift/reduce conflict for %s resolved as reduce', a)
self.sr_conflicts.append((st, a, 'reduce'))
Productions[p.number].reduced += 1
elif (slevel == rlevel) and (rprec == 'nonassoc'):
st_action[a] = None
else:
# Hmmm. Guess we'll keep the shift
if not rlevel:
log.info(' ! shift/reduce conflict for %s resolved as shift', a)
self.sr_conflicts.append((st, a, 'shift'))
elif r < 0:
# Reduce/reduce conflict. In this case, we favor the rule
# that was defined first in the grammar file
oldp = Productions[-r]
pp = Productions[p.number]
if oldp.line > pp.line:
st_action[a] = -p.number
st_actionp[a] = p
chosenp, rejectp = pp, oldp
Productions[p.number].reduced += 1
Productions[oldp.number].reduced -= 1
else:
chosenp, rejectp = oldp, pp
self.rr_conflicts.append((st, chosenp, rejectp))
log.info(' ! reduce/reduce conflict for %s resolved using rule %d (%s)',
a, st_actionp[a].number, st_actionp[a])
else:
raise LALRError('Unknown conflict in state %d' % st)
else:
st_action[a] = -p.number
st_actionp[a] = p
Productions[p.number].reduced += 1
else:
i = p.lr_index
a = p.prod[i+1] # Get symbol right after the "."
if a in self.grammar.Terminals:
g = self.lr0_goto(I, a)
j = self.lr0_cidhash.get(id(g), -1)
if j >= 0:
# We are in a shift state
actlist.append((a, p, 'shift and go to state %d' % j))
r = st_action.get(a)
if r is not None:
# Whoa have a shift/reduce or shift/shift conflict
if r > 0:
if r != j:
raise LALRError('Shift/shift conflict in state %d' % st)
elif r < 0:
# Do a precedence check.
# - if precedence of reduce rule is higher, we reduce.
# - if precedence of reduce is same and left assoc, we reduce.
# - otherwise we shift
rprec, rlevel = Productions[st_actionp[a].number].prec
sprec, slevel = Precedence.get(a, ('right', 0))
if (slevel > rlevel) or ((slevel == rlevel) and (rprec == 'right')):
# We decide to shift here... highest precedence to shift
Productions[st_actionp[a].number].reduced -= 1
st_action[a] = j
st_actionp[a] = p
if not rlevel:
log.info(' ! shift/reduce conflict for %s resolved as shift', a)
self.sr_conflicts.append((st, a, 'shift'))
elif (slevel == rlevel) and (rprec == 'nonassoc'):
st_action[a] = None
else:
# Hmmm. Guess we'll keep the reduce
if not slevel and not rlevel:
log.info(' ! shift/reduce conflict for %s resolved as reduce', a)
self.sr_conflicts.append((st, a, 'reduce'))
else:
raise LALRError('Unknown conflict in state %d' % st)
else:
st_action[a] = j
st_actionp[a] = p
# Print the actions associated with each terminal
_actprint = {}
for a, p, m in actlist:
if a in st_action:
if p is st_actionp[a]:
log.info(' %-15s %s', a, m)
_actprint[(a, m)] = 1
log.info('')
# Print the actions that were not used. (debugging)
not_used = 0
for a, p, m in actlist:
if a in st_action:
if p is not st_actionp[a]:
if not (a, m) in _actprint:
log.debug(' ! %-15s [ %s ]', a, m)
not_used = 1
_actprint[(a, m)] = 1
if not_used:
log.debug('')
# Construct the goto table for this state
nkeys = {}
for ii in I:
for s in ii.usyms:
if s in self.grammar.Nonterminals:
nkeys[s] = None
for n in nkeys:
g = self.lr0_goto(I, n)
j = self.lr0_cidhash.get(id(g), -1)
if j >= 0:
st_goto[n] = j
log.info(' %-30s shift and go to state %d', n, j)
action[st] = st_action
actionp[st] = st_actionp
goto[st] = st_goto
st += 1
# -----------------------------------------------------------------------------
# write()
#
# This function writes the LR parsing tables to a file
# -----------------------------------------------------------------------------
def write_table(self, tabmodule, outputdir='', signature=''):
if isinstance(tabmodule, types.ModuleType):
raise IOError("Won't overwrite existing tabmodule")
basemodulename = tabmodule.split('.')[-1]
filename = os.path.join(outputdir, basemodulename) + '.py'
try:
f = open(filename, 'w')
f.write('''
# %s
# This file is automatically generated. Do not edit.
_tabversion = %r
_lr_method = %r
_lr_signature = %r
''' % (os.path.basename(filename), __tabversion__, self.lr_method, signature))
# Change smaller to 0 to go back to original tables
smaller = 1
# Factor out names to try and make smaller
if smaller:
items = {}
for s, nd in self.lr_action.items():
for name, v in nd.items():
i = items.get(name)
if not i:
i = ([], [])
items[name] = i
i[0].append(s)
i[1].append(v)
f.write('\n_lr_action_items = {')
for k, v in items.items():
f.write('%r:([' % k)
for i in v[0]:
f.write('%r,' % i)
f.write('],[')
for i in v[1]:
f.write('%r,' % i)
f.write(']),')
f.write('}\n')
f.write('''
_lr_action = {}
for _k, _v in _lr_action_items.items():
for _x,_y in zip(_v[0],_v[1]):
if not _x in _lr_action: _lr_action[_x] = {}
_lr_action[_x][_k] = _y
del _lr_action_items
''')
else:
f.write('\n_lr_action = { ')
for k, v in self.lr_action.items():
f.write('(%r,%r):%r,' % (k[0], k[1], v))
f.write('}\n')
if smaller:
# Factor out names to try and make smaller
items = {}
for s, nd in self.lr_goto.items():
for name, v in nd.items():
i = items.get(name)
if not i:
i = ([], [])
items[name] = i
i[0].append(s)
i[1].append(v)
f.write('\n_lr_goto_items = {')
for k, v in items.items():
f.write('%r:([' % k)
for i in v[0]:
f.write('%r,' % i)
f.write('],[')
for i in v[1]:
f.write('%r,' % i)
f.write(']),')
f.write('}\n')
f.write('''
_lr_goto = {}
for _k, _v in _lr_goto_items.items():
for _x, _y in zip(_v[0], _v[1]):
if not _x in _lr_goto: _lr_goto[_x] = {}
_lr_goto[_x][_k] = _y
del _lr_goto_items
''')
else:
f.write('\n_lr_goto = { ')
for k, v in self.lr_goto.items():
f.write('(%r,%r):%r,' % (k[0], k[1], v))
f.write('}\n')
# Write production table
f.write('_lr_productions = [\n')
for p in self.lr_productions:
if p.func:
f.write(' (%r,%r,%d,%r,%r,%d),\n' % (p.str, p.name, p.len,
p.func, os.path.basename(p.file), p.line))
else:
f.write(' (%r,%r,%d,None,None,None),\n' % (str(p), p.name, p.len))
f.write(']\n')
f.close()
except IOError as e:
raise
# -----------------------------------------------------------------------------
# pickle_table()
#
# This function pickles the LR parsing tables to a supplied file object
# -----------------------------------------------------------------------------
def pickle_table(self, filename, signature=''):
try:
import cPickle as pickle
except ImportError:
import pickle
with open(filename, 'wb') as outf:
pickle.dump(__tabversion__, outf, pickle_protocol)
pickle.dump(self.lr_method, outf, pickle_protocol)
pickle.dump(signature, outf, pickle_protocol)
pickle.dump(self.lr_action, outf, pickle_protocol)
pickle.dump(self.lr_goto, outf, pickle_protocol)
outp = []
for p in self.lr_productions:
if p.func:
outp.append((p.str, p.name, p.len, p.func, os.path.basename(p.file), p.line))
else:
outp.append((str(p), p.name, p.len, None, None, None))
pickle.dump(outp, outf, pickle_protocol)
# -----------------------------------------------------------------------------
# === INTROSPECTION ===
#
# The following functions and classes are used to implement the PLY
# introspection features followed by the yacc() function itself.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# get_caller_module_dict()
#
# This function returns a dictionary containing all of the symbols defined within
# a caller further down the call stack. This is used to get the environment
# associated with the yacc() call if none was provided.
# -----------------------------------------------------------------------------
def get_caller_module_dict(levels):
f = sys._getframe(levels)
ldict = f.f_globals.copy()
if f.f_globals != f.f_locals:
ldict.update(f.f_locals)
return ldict
# -----------------------------------------------------------------------------
# parse_grammar()
#
# This takes a raw grammar rule string and parses it into production data
# -----------------------------------------------------------------------------
def parse_grammar(doc, file, line):
grammar = []
# Split the doc string into lines
pstrings = doc.splitlines()
lastp = None
dline = line
for ps in pstrings:
dline += 1
p = ps.split()
if not p:
continue
try:
if p[0] == '|':
# This is a continuation of a previous rule
if not lastp:
raise SyntaxError("%s:%d: Misplaced '|'" % (file, dline))
prodname = lastp
syms = p[1:]
else:
prodname = p[0]
lastp = prodname
syms = p[2:]
assign = p[1]
if assign != ':' and assign != '::=':
raise SyntaxError("%s:%d: Syntax error. Expected ':'" % (file, dline))
grammar.append((file, dline, prodname, syms))
except SyntaxError:
raise
except Exception:
raise SyntaxError('%s:%d: Syntax error in rule %r' % (file, dline, ps.strip()))
return grammar
# -----------------------------------------------------------------------------
# ParserReflect()
#
# This class represents information extracted for building a parser including
# start symbol, error function, tokens, precedence list, action functions,
# etc.
# -----------------------------------------------------------------------------
class ParserReflect(object):
def __init__(self, pdict, log=None):
self.pdict = pdict
self.start = None
self.error_func = None
self.tokens = None
self.modules = set()
self.grammar = []
self.error = False
if log is None:
self.log = PlyLogger(sys.stderr)
else:
self.log = log
# Get all of the basic information
def get_all(self):
self.get_start()
self.get_error_func()
self.get_tokens()
self.get_precedence()
self.get_pfunctions()
# Validate all of the information
def validate_all(self):
self.validate_start()
self.validate_error_func()
self.validate_tokens()
self.validate_precedence()
self.validate_pfunctions()
self.validate_modules()
return self.error
# Compute a signature over the grammar
def signature(self):
try:
from hashlib import md5
except ImportError:
from md5 import md5
try:
sig = md5()
if self.start:
sig.update(self.start.encode('latin-1'))
if self.prec:
sig.update(''.join([''.join(p) for p in self.prec]).encode('latin-1'))
if self.tokens:
sig.update(' '.join(self.tokens).encode('latin-1'))
for f in self.pfuncs:
if f[3]:
sig.update(f[3].encode('latin-1'))
except (TypeError, ValueError):
pass
digest = base64.b16encode(sig.digest())
if sys.version_info[0] >= 3:
digest = digest.decode('latin-1')
return digest
# -----------------------------------------------------------------------------
# validate_modules()
#
# This method checks to see if there are duplicated p_rulename() functions
# in the parser module file. Without this function, it is really easy for
# users to make mistakes by cutting and pasting code fragments (and it's a real
# bugger to try and figure out why the resulting parser doesn't work). Therefore,
# we just do a little regular expression pattern matching of def statements
# to try and detect duplicates.
# -----------------------------------------------------------------------------
def validate_modules(self):
# Match def p_funcname(
fre = re.compile(r'\s*def\s+(p_[a-zA-Z_0-9]*)\(')
for module in self.modules:
lines, linen = inspect.getsourcelines(module)
counthash = {}
for linen, line in enumerate(lines):
linen += 1
m = fre.match(line)
if m:
name = m.group(1)
prev = counthash.get(name)
if not prev:
counthash[name] = linen
else:
filename = inspect.getsourcefile(module)
self.log.warning('%s:%d: Function %s redefined. Previously defined on line %d',
filename, linen, name, prev)
# Get the start symbol
def get_start(self):
self.start = self.pdict.get('start')
# Validate the start symbol
def validate_start(self):
if self.start is not None:
if not isinstance(self.start, string_types):
self.log.error("'start' must be a string")
# Look for error handler
def get_error_func(self):
self.error_func = self.pdict.get('p_error')
# Validate the error function
def validate_error_func(self):
if self.error_func:
if isinstance(self.error_func, types.FunctionType):
ismethod = 0
elif isinstance(self.error_func, types.MethodType):
ismethod = 1
else:
self.log.error("'p_error' defined, but is not a function or method")
self.error = True
return
eline = self.error_func.__code__.co_firstlineno
efile = self.error_func.__code__.co_filename
module = inspect.getmodule(self.error_func)
self.modules.add(module)
argcount = self.error_func.__code__.co_argcount - ismethod
if argcount != 1:
self.log.error('%s:%d: p_error() requires 1 argument', efile, eline)
self.error = True
# Get the tokens map
def get_tokens(self):
tokens = self.pdict.get('tokens')
if not tokens:
self.log.error('No token list is defined')
self.error = True
return
if not isinstance(tokens, (list, tuple)):
self.log.error('tokens must be a list or tuple')
self.error = True
return
if not tokens:
self.log.error('tokens is empty')
self.error = True
return
self.tokens = tokens
# Validate the tokens
def validate_tokens(self):
# Validate the tokens.
if 'error' in self.tokens:
self.log.error("Illegal token name 'error'. Is a reserved word")
self.error = True
return
terminals = set()
for n in self.tokens:
if n in terminals:
self.log.warning('Token %r multiply defined', n)
terminals.add(n)
# Get the precedence map (if any)
def get_precedence(self):
self.prec = self.pdict.get('precedence')
# Validate and parse the precedence map
def validate_precedence(self):
preclist = []
if self.prec:
if not isinstance(self.prec, (list, tuple)):
self.log.error('precedence must be a list or tuple')
self.error = True
return
for level, p in enumerate(self.prec):
if not isinstance(p, (list, tuple)):
self.log.error('Bad precedence table')
self.error = True
return
if len(p) < 2:
self.log.error('Malformed precedence entry %s. Must be (assoc, term, ..., term)', p)
self.error = True
return
assoc = p[0]
if not isinstance(assoc, string_types):
self.log.error('precedence associativity must be a string')
self.error = True
return
for term in p[1:]:
if not isinstance(term, string_types):
self.log.error('precedence items must be strings')
self.error = True
return
preclist.append((term, assoc, level+1))
self.preclist = preclist
# Get all p_functions from the grammar
def get_pfunctions(self):
p_functions = []
for name, item in self.pdict.items():
if not name.startswith('p_') or name == 'p_error':
continue
if isinstance(item, (types.FunctionType, types.MethodType)):
line = item.__code__.co_firstlineno
module = inspect.getmodule(item)
p_functions.append((line, module, name, item.__doc__))
# Sort all of the actions by line number; make sure to stringify
# modules to make them sortable, since `line` may not uniquely sort all
# p functions
p_functions.sort(key=lambda p_function: (
p_function[0],
str(p_function[1]),
p_function[2],
p_function[3]))
self.pfuncs = p_functions
# Validate all of the p_functions
def validate_pfunctions(self):
grammar = []
# Check for non-empty symbols
if len(self.pfuncs) == 0:
self.log.error('no rules of the form p_rulename are defined')
self.error = True
return
for line, module, name, doc in self.pfuncs:
file = inspect.getsourcefile(module)
func = self.pdict[name]
if isinstance(func, types.MethodType):
reqargs = 2
else:
reqargs = 1
if func.__code__.co_argcount > reqargs:
self.log.error('%s:%d: Rule %r has too many arguments', file, line, func.__name__)
self.error = True
elif func.__code__.co_argcount < reqargs:
self.log.error('%s:%d: Rule %r requires an argument', file, line, func.__name__)
self.error = True
elif not func.__doc__:
self.log.warning('%s:%d: No documentation string specified in function %r (ignored)',
file, line, func.__name__)
else:
try:
parsed_g = parse_grammar(doc, file, line)
for g in parsed_g:
grammar.append((name, g))
except SyntaxError as e:
self.log.error(str(e))
self.error = True
# Looks like a valid grammar rule
# Mark the file in which defined.
self.modules.add(module)
# Secondary validation step that looks for p_ definitions that are not functions
# or functions that look like they might be grammar rules.
for n, v in self.pdict.items():
if n.startswith('p_') and isinstance(v, (types.FunctionType, types.MethodType)):
continue
if n.startswith('t_'):
continue
if n.startswith('p_') and n != 'p_error':
self.log.warning('%r not defined as a function', n)
if ((isinstance(v, types.FunctionType) and v.__code__.co_argcount == 1) or
(isinstance(v, types.MethodType) and v.__func__.__code__.co_argcount == 2)):
if v.__doc__:
try:
doc = v.__doc__.split(' ')
if doc[1] == ':':
self.log.warning('%s:%d: Possible grammar rule %r defined without p_ prefix',
v.__code__.co_filename, v.__code__.co_firstlineno, n)
except IndexError:
pass
self.grammar = grammar
# -----------------------------------------------------------------------------
# yacc(module)
#
# Build a parser
# -----------------------------------------------------------------------------
def yacc(method='LALR', debug=yaccdebug, module=None, tabmodule=tab_module, start=None,
check_recursion=True, optimize=False, write_tables=True, debugfile=debug_file,
outputdir=None, debuglog=None, errorlog=None, picklefile=None):
if tabmodule is None:
tabmodule = tab_module
# Reference to the parsing method of the last built parser
global parse
# If pickling is enabled, table files are not created
if picklefile:
write_tables = 0
if errorlog is None:
errorlog = PlyLogger(sys.stderr)
# Get the module dictionary used for the parser
if module:
_items = [(k, getattr(module, k)) for k in dir(module)]
pdict = dict(_items)
# If no __file__ attribute is available, try to obtain it from the __module__ instead
if '__file__' not in pdict:
pdict['__file__'] = sys.modules[pdict['__module__']].__file__
else:
pdict = get_caller_module_dict(2)
if outputdir is None:
# If no output directory is set, the location of the output files
# is determined according to the following rules:
# - If tabmodule specifies a package, files go into that package directory
# - Otherwise, files go in the same directory as the specifying module
if isinstance(tabmodule, types.ModuleType):
srcfile = tabmodule.__file__
else:
if '.' not in tabmodule:
srcfile = pdict['__file__']
else:
parts = tabmodule.split('.')
pkgname = '.'.join(parts[:-1])
exec('import %s' % pkgname)
srcfile = getattr(sys.modules[pkgname], '__file__', '')
outputdir = os.path.dirname(srcfile)
    # Determine if the module is part of a package or not.
# If so, fix the tabmodule setting so that tables load correctly
pkg = pdict.get('__package__')
if pkg and isinstance(tabmodule, str):
if '.' not in tabmodule:
tabmodule = pkg + '.' + tabmodule
# Set start symbol if it's specified directly using an argument
if start is not None:
pdict['start'] = start
# Collect parser information from the dictionary
pinfo = ParserReflect(pdict, log=errorlog)
pinfo.get_all()
if pinfo.error:
raise YaccError('Unable to build parser')
# Check signature against table files (if any)
signature = pinfo.signature()
# Read the tables
try:
lr = LRTable()
if picklefile:
read_signature = lr.read_pickle(picklefile)
else:
read_signature = lr.read_table(tabmodule)
if optimize or (read_signature == signature):
try:
lr.bind_callables(pinfo.pdict)
parser = LRParser(lr, pinfo.error_func)
parse = parser.parse
return parser
except Exception as e:
errorlog.warning('There was a problem loading the table file: %r', e)
except VersionError as e:
errorlog.warning(str(e))
except ImportError:
pass
if debuglog is None:
if debug:
try:
debuglog = PlyLogger(open(os.path.join(outputdir, debugfile), 'w'))
except IOError as e:
errorlog.warning("Couldn't open %r. %s" % (debugfile, e))
debuglog = NullLogger()
else:
debuglog = NullLogger()
debuglog.info('Created by PLY version %s (http://www.dabeaz.com/ply)', __version__)
errors = False
# Validate the parser information
if pinfo.validate_all():
raise YaccError('Unable to build parser')
if not pinfo.error_func:
errorlog.warning('no p_error() function is defined')
# Create a grammar object
grammar = Grammar(pinfo.tokens)
# Set precedence level for terminals
for term, assoc, level in pinfo.preclist:
try:
grammar.set_precedence(term, assoc, level)
except GrammarError as e:
errorlog.warning('%s', e)
# Add productions to the grammar
for funcname, gram in pinfo.grammar:
file, line, prodname, syms = gram
try:
grammar.add_production(prodname, syms, funcname, file, line)
except GrammarError as e:
errorlog.error('%s', e)
errors = True
# Set the grammar start symbols
try:
if start is None:
grammar.set_start(pinfo.start)
else:
grammar.set_start(start)
except GrammarError as e:
errorlog.error(str(e))
errors = True
if errors:
raise YaccError('Unable to build parser')
# Verify the grammar structure
undefined_symbols = grammar.undefined_symbols()
for sym, prod in undefined_symbols:
errorlog.error('%s:%d: Symbol %r used, but not defined as a token or a rule', prod.file, prod.line, sym)
errors = True
unused_terminals = grammar.unused_terminals()
if unused_terminals:
debuglog.info('')
debuglog.info('Unused terminals:')
debuglog.info('')
for term in unused_terminals:
errorlog.warning('Token %r defined, but not used', term)
debuglog.info(' %s', term)
# Print out all productions to the debug log
if debug:
debuglog.info('')
debuglog.info('Grammar')
debuglog.info('')
for n, p in enumerate(grammar.Productions):
debuglog.info('Rule %-5d %s', n, p)
# Find unused non-terminals
unused_rules = grammar.unused_rules()
for prod in unused_rules:
errorlog.warning('%s:%d: Rule %r defined, but not used', prod.file, prod.line, prod.name)
if len(unused_terminals) == 1:
errorlog.warning('There is 1 unused token')
if len(unused_terminals) > 1:
errorlog.warning('There are %d unused tokens', len(unused_terminals))
if len(unused_rules) == 1:
errorlog.warning('There is 1 unused rule')
if len(unused_rules) > 1:
errorlog.warning('There are %d unused rules', len(unused_rules))
if debug:
debuglog.info('')
debuglog.info('Terminals, with rules where they appear')
debuglog.info('')
terms = list(grammar.Terminals)
terms.sort()
for term in terms:
debuglog.info('%-20s : %s', term, ' '.join([str(s) for s in grammar.Terminals[term]]))
debuglog.info('')
debuglog.info('Nonterminals, with rules where they appear')
debuglog.info('')
nonterms = list(grammar.Nonterminals)
nonterms.sort()
for nonterm in nonterms:
debuglog.info('%-20s : %s', nonterm, ' '.join([str(s) for s in grammar.Nonterminals[nonterm]]))
debuglog.info('')
if check_recursion:
unreachable = grammar.find_unreachable()
for u in unreachable:
errorlog.warning('Symbol %r is unreachable', u)
infinite = grammar.infinite_cycles()
for inf in infinite:
errorlog.error('Infinite recursion detected for symbol %r', inf)
errors = True
unused_prec = grammar.unused_precedence()
for term, assoc in unused_prec:
errorlog.error('Precedence rule %r defined for unknown symbol %r', assoc, term)
errors = True
if errors:
raise YaccError('Unable to build parser')
# Run the LRGeneratedTable on the grammar
if debug:
errorlog.debug('Generating %s tables', method)
lr = LRGeneratedTable(grammar, method, debuglog)
if debug:
num_sr = len(lr.sr_conflicts)
# Report shift/reduce and reduce/reduce conflicts
if num_sr == 1:
errorlog.warning('1 shift/reduce conflict')
elif num_sr > 1:
errorlog.warning('%d shift/reduce conflicts', num_sr)
num_rr = len(lr.rr_conflicts)
if num_rr == 1:
errorlog.warning('1 reduce/reduce conflict')
elif num_rr > 1:
errorlog.warning('%d reduce/reduce conflicts', num_rr)
# Write out conflicts to the output file
if debug and (lr.sr_conflicts or lr.rr_conflicts):
debuglog.warning('')
debuglog.warning('Conflicts:')
debuglog.warning('')
for state, tok, resolution in lr.sr_conflicts:
debuglog.warning('shift/reduce conflict for %s in state %d resolved as %s', tok, state, resolution)
already_reported = set()
for state, rule, rejected in lr.rr_conflicts:
if (state, id(rule), id(rejected)) in already_reported:
continue
debuglog.warning('reduce/reduce conflict in state %d resolved using rule (%s)', state, rule)
debuglog.warning('rejected rule (%s) in state %d', rejected, state)
errorlog.warning('reduce/reduce conflict in state %d resolved using rule (%s)', state, rule)
errorlog.warning('rejected rule (%s) in state %d', rejected, state)
already_reported.add((state, id(rule), id(rejected)))
warned_never = []
for state, rule, rejected in lr.rr_conflicts:
if not rejected.reduced and (rejected not in warned_never):
debuglog.warning('Rule (%s) is never reduced', rejected)
errorlog.warning('Rule (%s) is never reduced', rejected)
warned_never.append(rejected)
# Write the table file if requested
if write_tables:
try:
lr.write_table(tabmodule, outputdir, signature)
except IOError as e:
errorlog.warning("Couldn't create %r. %s" % (tabmodule, e))
# Write a pickled version of the tables
if picklefile:
try:
lr.pickle_table(picklefile, signature)
except IOError as e:
errorlog.warning("Couldn't create %r. %s" % (picklefile, e))
# Build the parser
lr.bind_callables(pinfo.pdict)
parser = LRParser(lr, pinfo.error_func)
parse = parser.parse
return parser
| apache-2.0 | -3,906,361,159,611,087,000 | 38.125612 | 119 | 0.467509 | false |
eonum/medword | model_validation.py | 1 | 11972 | import numpy as np
import preprocess as pp
import os
from random import randint
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import csv
def validate_model(embedding, emb_model_dir, emb_model_fn):
print("Start validation. Loading model. \n")
# load config
config = embedding.config
# load model
embedding.load_model(emb_model_dir, emb_model_fn)
# directories and filenames
val_dir = config.config['val_data_dir']
doesntfit_fn = config.config['doesntfit_file']
doesntfit_src = os.path.join(val_dir, doesntfit_fn)
synonyms_fn = config.config['synonyms_file']
syn_file_src = os.path.join(val_dir, synonyms_fn)
# test with doesn't fit questions
test_doesntfit(embedding, doesntfit_src)
# test with synonyms
# TODO get better syn file (slow, contains many non-significant instances)
# test_synonyms(embedding, syn_file_src)
# test with human similarity TODO remove hardcoding
human_sim_file_src = 'data/validation_data/human_similarity.csv'
test_human_similarity(embedding, human_sim_file_src)
#### Doesn't Fit Validation ####
def doesntfit(embedding, word_list):
"""
    - compares each word-vector to the mean of all word-vectors of word_list using the vector dot-product
    - vector with lowest dot-product to mean-vector is regarded as the one that doesn't fit
"""
used_words = [word for word in word_list if embedding.may_construct_word_vec(word)]
n_used_words = len(used_words)
n_words = len(word_list)
if n_used_words != n_words:
ignored_words = set(word_list) - set(used_words)
print("vectors for words %s are not present in the model, ignoring these words: ", ignored_words)
if not used_words:
print("cannot select a word from an empty list.")
    vectors = np.vstack([embedding.word_vec(word) for word in used_words])
mean = np.mean(vectors, axis=0)
dists = np.dot(vectors, mean)
return sorted(zip(dists, used_words))[0][1]
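# Illustrative use (assuming a trained embedding covering these words):
#     doesntfit(embedding, ["Auto", "Motorrad", "Fahrrad", "Ampel"])
# would be expected to return "Ampel", the word whose vector is least aligned
# with the mean vector of the group.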
def test_doesntfit(embedding, file_src):
"""
- tests all doesntfit-questions (lines) of file
- a doesnt-fit question is of the format "word_1 word_2 ... word_N word_NotFitting"
where word_1 to word_n are members of a category but word_NotFitting isn't
eg. "Auto Motorrad Fahrrad Ampel"
"""
# load config
config = embedding.config
print("Validating 'doesntfit' with file", file_src)
num_lines = sum(1 for line in open(file_src))
num_questions = 0
num_right = 0
tokenizer = pp.get_tokenizer(config)
# get questions
with open(file_src) as f:
questions = f.read().splitlines()
tk_questions = [tokenizer.tokenize(q) for q in questions]
    # TODO: check if the tokenizer has split one word into multiple words and handle it.
    # So far no word in the doesnt_fit testfile should be split
# vocab used to speed checking if word is in vocabulary
# (also checked by embedding.may_construct_word_vec(word))
vocab = embedding.get_vocab()
# test each question
for question in tk_questions:
# check if all words exist in vocabulary
if all(((word in vocab) or (embedding.may_construct_word_vec(word))) for word in question):
num_questions += 1
if doesntfit(embedding, question) == question[-1]:
num_right += 1
# calculate result
correct_matches = np.round(num_right/np.float(num_questions)*100, 1) if num_questions>0 else 0.0
coverage = np.round(num_questions/np.float(num_lines)*100, 1) if num_lines>0 else 0.0
# log result
print("\n*** Doesn't fit ***")
print('Doesn\'t fit correct: {0}% ({1}/{2})'.format(str(correct_matches), str(num_right), str(num_questions)))
print('Doesn\'t fit coverage: {0}% ({1}/{2}) \n'.format(str(coverage), str(num_questions), str(num_lines)))
#### Synonyms Validation ####
def test_synonyms(embedding, file_src):
"""
- tests all synonym-questions (lines) of file
- a synonym-question is of the format "word_1 word_2"
where word_1 and word_2 are synonyms
eg. "Blutgerinnsel Thrombus"
- for word_1 check if it appears in the n closest words of word_2 using "model.cosine(word, n)"
and vice-versa
- for each synonym-pair TWO CHECKS are made therefore (non-symmetric problem)
"""
print("Validating 'synonyms' with file", file_src)
config = embedding.config
num_lines = sum(1 for line in open(file_src))
num_questions = 0
cos_sim_sum_synonyms = 0
tokenizer = pp.get_tokenizer(config)
    # get questions which are still of length 2 after tokenization
    # TODO: improve for compound words (aaa-bbb) which are split by the tokenizer
tk_questions = []
with open(file_src, 'r') as f:
questions = f.read().splitlines()
for q in questions:
# synonyms = q.split(';')#tokenizer.tokenize(q)
# synonyms = [" ".join(tokenizer.tokenize(synonym)) for synonym in
# synonyms]
synonyms = tokenizer.tokenize(q)
if len(synonyms) == 2:
tk_questions.append(synonyms)
vocab = embedding.get_vocab()
# test each question
for tk_quest in tk_questions:
# check if all words exist in vocabulary
if all(((word in vocab) or embedding.may_construct_word_vec(word)) for word in tk_quest):
num_questions += 1
w1 = tk_quest[0]
w2 = tk_quest[1]
cos_sim_sum_synonyms += embedding.similarity(w1, w2)
# compute avg cosine similarity for random vectors to relate to avg_cosine_similarity of synonyms
vocab_size = len(vocab)
n_vals = 1000
similarity_sum_rand_vec = 0
vals1 = [randint(0, vocab_size -1) for i in range(n_vals)]
vals2 = [randint(0, vocab_size -1) for i in range(n_vals)]
for v1, v2 in zip(vals1, vals2):
similarity_sum_rand_vec += embedding.similarity(vocab[v1], vocab[v2])
avg_cosine_similarity_rand_vec = similarity_sum_rand_vec / np.float(n_vals)
# calculate result
avg_cosine_similarity_synonyms = (cos_sim_sum_synonyms / num_questions) if num_questions>0 else 0.0
coverage = np.round(num_questions/np.float(num_lines)*100, 1) if num_lines>0 else 0.0
# log result
print("\n*** Cosine-Similarity ***")
print("Synonyms avg-cos-similarity (SACS):", avg_cosine_similarity_synonyms, "\nRandom avg-cos-similarity (RACS):", avg_cosine_similarity_rand_vec,
"\nRatio SACS/RACS:", avg_cosine_similarity_synonyms/float(avg_cosine_similarity_rand_vec))
print("\n*** Word Coverage ***")
print("Synonyms: {0} pairs in input. {1} pairs after tokenization. {2} pairs could be constructed from model-vocabulary.".format(str(num_lines), str(len(tk_questions)), str(num_questions)))
print("Synonyms coverage: {0}% ({1}/{2})\n".format(str(coverage), str(2*num_questions), str(2*num_lines), ))
def get_human_rating_deviation(embedding, word1, word2, human_similarity):
# compute deviation of human similarity from cosine similarity
# cosine similarity
cosine_similarity = embedding.similarity(word1, word2)
return np.abs(cosine_similarity - human_similarity)
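# Example with made-up numbers: if the model's cosine similarity for a word pair
# is 0.80 and the human rating read from the file is 0.65, this pair contributes
# a deviation of 0.15.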
def test_human_similarity(embedding, file_src):
"""
Compare cosine similarity of 2 word-vectors against a similarity value
based on human ratings.
Each line in the file contains two words and the similarity value,
separated by ':'.
The datasets were obtained by asking human subjects to assign a similarity
or relatedness judgment to a number of German word pairs.
https://www.ukp.tu-darmstadt.de/data/semantic-relatedness/german-relatedness-datasets/
"""
config = embedding.config
tokenizer = pp.get_tokenizer(config)
vocab = embedding.get_vocab()
vocab_size = len(vocab)
# accumulate error and count test instances
summed_error = 0.0
n_test_instances = 0
n_skipped_instances = 0
summed_random_error = 0.0
# load file to lines
with open(file_src, 'r') as csvfile:
filereader = csv.reader(csvfile, delimiter=':',)
next(filereader)
# process line by line
for line in filereader:
n_test_instances += 1
# split lines to instances
word1 = tokenizer.tokenize(line[0])[0]
word2 = tokenizer.tokenize(line[1])[0]
human_similarity = np.float32(line[2])
# check if both words are in vocab
if (word1 in embedding.get_vocab()
and word2 in embedding.get_vocab()):
# add current deviation to error
deviation = get_human_rating_deviation(embedding, word1, word2,
human_similarity)
summed_error += deviation
# get a random error for comparison
rand_word1 = vocab[randint(0, vocab_size -1)]
rand_word2 = vocab[randint(0, vocab_size -1)]
random_dev = get_human_rating_deviation(embedding, rand_word1,
rand_word2,
human_similarity)
summed_random_error += random_dev
else:
n_skipped_instances += 1
# print results
print("\n*** Human-Similarity ***")
print("Number of instances: {0}, skipped: {1}"
.format(str(n_test_instances), str(n_skipped_instances)))
# check whether we found any valid test instance
n_processed_instances = n_test_instances - n_skipped_instances
if (n_processed_instances == 0):
print("Error: No instance could be computed with this model.")
else:
mean_error = summed_error / n_processed_instances
random_error = summed_random_error / n_processed_instances
print("random error: {0}, mean error: {1}"
.format(str(random_error), str(mean_error)))
#### Visualization ####
def visualize_words(embedding, word_list, n_nearest_neighbours):
# get indexes and words that you want to visualize
words_to_visualize = []
# word_indexes_to_visualize = []
# get all words and neighbors that you want to visualize
for word in word_list:
if not embedding.may_construct_word_vec(word):
continue
words_to_visualize.append(word)
# word_indexes_to_visualize.append(model.ix(word))
# get neighbours of word
neighbours = [n for (n, m) in embedding.most_similar_n(word, n_nearest_neighbours)]
words_to_visualize.extend(neighbours)
#word_indexes_to_visualize.extend(indexes)
# get vectors from indexes to visualize
if words_to_visualize == []:
print("No word found to show.")
return
emb_vectors = np.vstack([embedding.word_vec(word) for word in words_to_visualize])
# project down to 2D
pca = PCA(n_components=2)
emb_vec_2D = pca.fit_transform(emb_vectors)
n_inputs = len(word_list)
for i in range(n_inputs):
# group word and it's neighbours together (results in different color in plot)
lower = i*n_nearest_neighbours + i
upper = (i+1)*n_nearest_neighbours + (i+1)
# plot 2D
plt.scatter(emb_vec_2D[lower:upper, 0], emb_vec_2D[lower:upper, 1])
for label, x, y in zip(words_to_visualize, emb_vec_2D[:, 0], emb_vec_2D[:, 1]):
plt.annotate(label, xy=(x, y), xytext=(0, 0), textcoords='offset points')
# find nice axes for plot
lower_x = min(emb_vec_2D[:, 0])
upper_x = max(emb_vec_2D[:, 0])
lower_y = min(emb_vec_2D[:, 1])
upper_y = max(emb_vec_2D[:, 1])
# 10% of padding on all sides
pad_x = 0.1 * abs(upper_x - lower_x)
pad_y = 0.1 * abs(upper_y - lower_y)
plt.xlim([lower_x - pad_x, upper_x + pad_x])
plt.ylim([lower_y - pad_y, upper_y + pad_y])
plt.show()
| mit | 407,590,782,591,609,300 | 33.501441 | 193 | 0.634648 | false |
bimbam23/tools-iuc | datatypes/snpsift_dbnsfp_datatypes/snpsift_dbnsfp.py | 9 | 3924 | """
SnpSift dbNSFP datatypes
"""
import gzip
import logging
import os
import os.path
import sys
import traceback
from galaxy.datatypes.data import Text
from galaxy.datatypes.metadata import MetadataElement
log = logging.getLogger(__name__)
class SnpSiftDbNSFP( Text ):
"""Class describing a dbNSFP database prepared fpr use by SnpSift dbnsfp """
MetadataElement( name='reference_name', default='dbSNFP', desc='Reference Name', readonly=True, visible=True, set_in_upload=True, no_value='dbSNFP' )
MetadataElement( name="bgzip", default=None, desc="dbNSFP bgzip", readonly=True, visible=True, no_value=None )
MetadataElement( name="index", default=None, desc="Tabix Index File", readonly=True, visible=True, no_value=None)
MetadataElement( name="annotation", default=[], desc="Annotation Names", readonly=True, visible=True, no_value=[] )
file_ext = "snpsiftdbnsfp"
composite_type = 'auto_primary_file'
allow_datatype_change = False
"""
## The dbNSFP file is a tabular file with 1 header line
## The first 4 columns are required to be: chrom pos ref alt
## These match columns 1,2,4,5 of the VCF file
    ## SnpSift requires the file to be block-gzipped and then indexed with samtools tabix
## Example:
## Compress using block-gzip algorithm
bgzip dbNSFP2.3.txt
## Create tabix index
tabix -s 1 -b 2 -e 2 dbNSFP2.3.txt.gz
"""
def __init__( self, **kwd ):
Text.__init__( self, **kwd )
self.add_composite_file('%s.grp', description='Group File', substitute_name_with_metadata='reference_name', is_binary=False)
self.add_composite_file('%s.ti', description='', substitute_name_with_metadata='reference_name', is_binary=False)
def init_meta( self, dataset, copy_from=None ):
Text.init_meta( self, dataset, copy_from=copy_from )
def generate_primary_file(self, dataset=None):
"""
This is called only at upload to write the html file
cannot rename the datasets here - they come with the default unfortunately
"""
self.regenerate_primary_file(dataset)
def regenerate_primary_file(self, dataset):
"""
cannot do this until we are setting metadata
"""
annotations = "dbNSFP Annotations: %s\n" % ','.join(dataset.metadata.annotation)
f = open(dataset.file_name, 'a')
if dataset.metadata.bgzip:
bn = dataset.metadata.bgzip
f.write(bn)
f.write('\n')
f.write(annotations)
f.close()
def set_meta( self, dataset, overwrite=True, **kwd ):
try:
efp = dataset.extra_files_path
if os.path.exists(efp):
flist = os.listdir(efp)
for i, fname in enumerate(flist):
if fname.endswith('.gz'):
dataset.metadata.bgzip = fname
try:
fh = gzip.open(os.path.join(efp, fname), 'r')
buf = fh.read(5000)
lines = buf.splitlines()
headers = lines[0].split('\t')
dataset.metadata.annotation = headers[4:]
except Exception as e:
log.warn("set_meta fname: %s %s" % (fname, str(e)))
traceback.print_stack(file=sys.stderr)
finally:
fh.close()
if fname.endswith('.tbi'):
dataset.metadata.index = fname
self.regenerate_primary_file(dataset)
except Exception as e:
log.warn("set_meta fname: %s %s" % (dataset.file_name if dataset and dataset.file_name else 'Unkwown', str(e)))
traceback.print_stack(file=sys.stderr)
if __name__ == '__main__':
import doctest
doctest.testmod(sys.modules[__name__])
| mit | 6,438,121,658,966,277,000 | 40.744681 | 153 | 0.590214 | false |
shail2810/nova | nova/tests/functional/api_sample_tests/test_quota_sets.py | 2 | 4070 | # Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from nova.tests.functional.api_sample_tests import api_sample_base
CONF = cfg.CONF
CONF.import_opt('osapi_compute_extension',
'nova.api.openstack.compute.legacy_v2.extensions')
class QuotaSetsSampleJsonTests(api_sample_base.ApiSampleTestBaseV3):
ADMIN_API = True
extension_name = "os-quota-sets"
_api_version = 'v2'
def _get_flags(self):
f = super(QuotaSetsSampleJsonTests, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
f['osapi_compute_extension'].append('nova.api.openstack.compute.'
'contrib.server_group_quotas.'
'Server_group_quotas')
f['osapi_compute_extension'].append('nova.api.openstack.compute.'
'contrib.quotas.Quotas')
f['osapi_compute_extension'].append('nova.api.openstack.compute.'
'contrib.extended_quotas.Extended_quotas')
f['osapi_compute_extension'].append('nova.api.openstack.compute.'
'contrib.user_quotas.User_quotas')
return f
def test_show_quotas(self):
# Get api sample to show quotas.
response = self._do_get('os-quota-sets/fake_tenant')
self._verify_response('quotas-show-get-resp', {}, response, 200)
def test_show_quotas_defaults(self):
# Get api sample to show quotas defaults.
response = self._do_get('os-quota-sets/fake_tenant/defaults')
self._verify_response('quotas-show-defaults-get-resp',
{}, response, 200)
def test_update_quotas(self):
# Get api sample to update quotas.
response = self._do_put('os-quota-sets/fake_tenant',
'quotas-update-post-req',
{})
self._verify_response('quotas-update-post-resp', {}, response, 200)
def test_delete_quotas(self):
# Get api sample to delete quota.
response = self._do_delete('os-quota-sets/fake_tenant')
self.assertEqual(response.status_code, 202)
self.assertEqual(response.content, '')
def test_update_quotas_force(self):
# Get api sample to update quotas.
response = self._do_put('os-quota-sets/fake_tenant',
'quotas-update-force-post-req',
{})
return self._verify_response('quotas-update-force-post-resp', {},
response, 200)
def test_show_quotas_for_user(self):
# Get api sample to show quotas for user.
response = self._do_get('os-quota-sets/fake_tenant?user_id=1')
self._verify_response('user-quotas-show-get-resp', {}, response, 200)
def test_delete_quotas_for_user(self):
response = self._do_delete('os-quota-sets/fake_tenant?user_id=1')
self.assertEqual(response.status_code, 202)
self.assertEqual(response.content, '')
def test_update_quotas_for_user(self):
# Get api sample to update quotas for user.
response = self._do_put('os-quota-sets/fake_tenant?user_id=1',
'user-quotas-update-post-req',
{})
return self._verify_response('user-quotas-update-post-resp', {},
response, 200)
| apache-2.0 | 8,666,084,188,728,911,000 | 43.23913 | 78 | 0.597789 | false |
mihaic/brainiak | brainiak/utils/utils.py | 1 | 36227 | # Copyright 2016 Intel Corporation, Princeton University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import re
import warnings
import os.path
import psutil
from .fmrisim import generate_stimfunction, _double_gamma_hrf, convolve_hrf
from scipy.fftpack import fft, ifft
import logging
logger = logging.getLogger(__name__)
"""
Some utility functions that can be used by different algorithms
"""
__all__ = [
"array_correlation",
"center_mass_exp",
"concatenate_not_none",
"cov2corr",
"from_tri_2_sym",
"from_sym_2_tri",
"gen_design",
"phase_randomize",
"p_from_null",
"ReadDesign",
"sumexp_stable",
"usable_cpu_count",
]
def circ_dist(x, y):
"""
Computes the pairwise circular distance between two arrays of
points (in radians).
Parameters
----------
x: numpy vector of positions on a circle, in radians.
y: numpy vector of positions on a circle, in radians.
Returns
-------
r: numpy vector of distances between inputs.
"""
if x.size != y.size:
raise ValueError("Input sizes must match to compute pairwise "
"comparisons.")
r = np.angle(np.exp(x*1j) / np.exp(y*1j))
return r
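# --- Editor's note: illustrative sketch, not part of the original module ---
# Minimal usage of circ_dist on two hypothetical arrays of angles (radians);
# distances wrap around the circle, so pi and -pi are zero apart.
def _example_circ_dist():
    import numpy as np
    a = np.array([0.1, np.pi, -3.0])
    b = np.array([0.2, -np.pi, 3.0])
    return circ_dist(a, b)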
def from_tri_2_sym(tri, dim):
"""convert a upper triangular matrix in 1D format
to 2D symmetric matrix
Parameters
----------
tri: 1D array
Contains elements of upper triangular matrix
dim : int
The dimension of target matrix.
Returns
-------
symm : 2D array
Symmetric matrix in shape=[dim, dim]
"""
symm = np.zeros((dim, dim))
symm[np.triu_indices(dim)] = tri
return symm
def from_sym_2_tri(symm):
"""convert a 2D symmetric matrix to an upper
triangular matrix in 1D format
Parameters
----------
symm : 2D array
Symmetric matrix
Returns
-------
tri: 1D array
Contains elements of upper triangular matrix
"""
inds = np.triu_indices_from(symm)
tri = symm[inds]
return tri
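# --- Editor's note: illustrative sketch, not part of the original module ---
# Round trip between the two helpers above: a 3x3 symmetric matrix has six
# upper-triangular elements; from_tri_2_sym restores the upper triangle only.
def _example_tri_sym_roundtrip():
    import numpy as np
    symm = np.array([[1.0, 2.0, 3.0],
                     [2.0, 4.0, 5.0],
                     [3.0, 5.0, 6.0]])
    tri = from_sym_2_tri(symm)        # length-6 vector
    upper = from_tri_2_sym(tri, 3)    # upper triangle filled, lower left at 0
    return tri, upper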
def sumexp_stable(data):
"""Compute the sum of exponents for a list of samples
Parameters
----------
data : array, shape=[features, samples]
A data array containing samples.
Returns
-------
result_sum : array, shape=[samples,]
The sum of exponents for each sample divided by the exponent
of the maximum feature value in the sample.
max_value : array, shape=[samples,]
The maximum feature value for each sample.
result_exp : array, shape=[features, samples]
The exponent of each element in each sample divided by the exponent
of the maximum feature value in the sample.
Note
----
This function is more stable than computing the sum(exp(v)).
It is useful for computing the softmax_i(v)=exp(v_i)/sum(exp(v)) function.
"""
max_value = data.max(axis=0)
result_exp = np.exp(data - max_value)
result_sum = np.sum(result_exp, axis=0)
return result_sum, max_value, result_exp
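# --- Editor's note: illustrative sketch, not part of the original module ---
# sumexp_stable supports a numerically stable softmax over the feature axis,
# as the note above suggests; the data below are hypothetical and would
# overflow a naive exp().
def _example_stable_softmax():
    import numpy as np
    data = np.array([[1000.0, -2.0],
                     [1001.0, 0.0],
                     [999.0, 3.0]])  # shape [features, samples]
    result_sum, max_value, result_exp = sumexp_stable(data)
    softmax = result_exp / result_sum  # each column sums to 1
    return softmax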
def concatenate_not_none(data, axis=0):
"""Construct a numpy array by stacking not-None arrays in a list
Parameters
----------
data : list of arrays
The list of arrays to be concatenated. Arrays have same shape in all
but one dimension or are None, in which case they are ignored.
axis : int, default = 0
Axis for the concatenation
Returns
-------
data_stacked : array
The resulting concatenated array.
"""
# Get the indexes of the arrays in the list
mask = []
for i in range(len(data)):
if data[i] is not None:
mask.append(i)
# Concatenate them
stacked = np.concatenate([data[i] for i in mask], axis=axis)
return stacked
def cov2corr(cov):
"""Calculate the correlation matrix based on a
covariance matrix
Parameters
----------
cov: 2D array
Returns
-------
corr: 2D array
correlation converted from the covariance matrix
"""
assert cov.ndim == 2, 'covariance matrix should be 2D array'
inv_sd = 1 / np.sqrt(np.diag(cov))
corr = cov * inv_sd[None, :] * inv_sd[:, None]
return corr
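# --- Editor's note: illustrative sketch, not part of the original module ---
# Converting a hypothetical 2x2 covariance matrix to a correlation matrix;
# the diagonal becomes 1.0 and the off-diagonal entry 2/(2*3) = 0.333...
def _example_cov2corr():
    import numpy as np
    cov = np.array([[4.0, 2.0],
                    [2.0, 9.0]])
    return cov2corr(cov)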
class ReadDesign:
"""A class which has the ability of reading in design matrix in .1D file,
generated by AFNI's 3dDeconvolve.
Parameters
----------
fname: string, the address of the file to read.
include_orth: Boolean, whether to include "orthogonal" regressors in
the nuisance regressors which are usually head motion parameters.
All the columns of the design matrix are still going to be read in,
but the attribute cols_used will reflect whether these orthogonal
regressors are to be included for further analysis.
Note that these are not entered into the design_task attribute which
includes only regressors related to task conditions.
include_pols: Boolean, whether to include polynomial regressors in
the nuisance regressors which are used to capture slow drift of
signals.
Attributes
----------
design: 2d array. The design matrix read in from the .1D file.
design_task: 2d array. The part of design matrix corresponding to
task conditions.
n_col: number of total columns in the design matrix.
column_types: 1d array. the types of each column in the design matrix.
0 for orthogonal regressors (usually head motion parameters),
-1 for polynomial basis (capturing slow drift of signals),
values > 0 for stimulus conditions
n_basis: scalar. The number of polynomial bases in the design matrix.
n_stim: scalar. The number of stimulus conditions.
n_orth: scalar. The number of orthogonal regressors (usually head
motions)
StimLabels: list. The names of each column in the design matrix.
"""
def __init__(self, fname=None, include_orth=True, include_pols=True):
if fname is None:
# fname is the name of the file to read in the design matrix
self.design = np.zeros([0, 0])
self.n_col = 0
# number of columns (conditions) in the design matrix
self.column_types = np.ones(0)
self.n_basis = 0
self.n_stim = 0
self.n_orth = 0
self.StimLabels = []
else:
# isAFNI = re.match(r'.+[.](1D|1d|txt)$', fname)
filename, ext = os.path.splitext(fname)
# We assume all AFNI 1D files have extension of 1D or 1d or txt
if ext in ['.1D', '.1d', '.txt']:
self.read_afni(fname=fname)
self.include_orth = include_orth
self.include_pols = include_pols
# The two flags above dictates whether columns corresponding to
# baseline drift modeled by polynomial functions of time and
# columns corresponding to other orthogonal signals (usually motion)
# are included in nuisance regressors.
self.cols_task = np.where(self.column_types == 1)[0]
self.design_task = self.design[:, self.cols_task]
if np.ndim(self.design_task) == 1:
self.design_task = self.design_task[:, None]
# part of the design matrix related to task conditions.
self.n_TR = np.size(self.design_task, axis=0)
self.cols_nuisance = np.array([])
if self.include_orth:
self.cols_nuisance = np.int0(
np.sort(np.append(self.cols_nuisance,
np.where(self.column_types == 0)[0])))
if self.include_pols:
self.cols_nuisance = np.int0(
np.sort(np.append(self.cols_nuisance,
np.where(self.column_types == -1)[0])))
if np.size(self.cols_nuisance) > 0:
self.reg_nuisance = self.design[:, self.cols_nuisance]
if np.ndim(self.reg_nuisance) == 1:
self.reg_nuisance = self.reg_nuisance[:, None]
else:
self.reg_nuisance = None
# Nuisance regressors for motion, baseline, etc.
def read_afni(self, fname):
# Read design file written by AFNI
self.n_basis = 0
self.n_stim = 0
self.n_orth = 0
self.StimLabels = []
self.design = np.loadtxt(fname, ndmin=2)
with open(fname) as f:
all_text = f.read()
find_n_column = re.compile(
r'^#[ ]+ni_type[ ]+=[ ]+"(?P<n_col>\d+)[*]', re.MULTILINE)
n_col_found = find_n_column.search(all_text)
if n_col_found:
self.n_col = int(n_col_found.group('n_col'))
if self.n_col != np.size(self.design, axis=1):
warnings.warn(
'The number of columns in the design matrix'
+ 'does not match the header information')
self.n_col = np.size(self.design, axis=1)
else:
self.n_col = np.size(self.design, axis=1)
self.column_types = np.ones(self.n_col)
# default that all columns are conditions of interest
find_ColumnGroups = re.compile(
r'^#[ ]+ColumnGroups[ ]+=[ ]+"(?P<CGtext>.+)"', re.MULTILINE)
CG_found = find_ColumnGroups.search(all_text)
if CG_found:
CG_text = re.split(',', CG_found.group('CGtext'))
curr_idx = 0
for CG in CG_text:
split_by_at = re.split('@', CG)
if len(split_by_at) == 2:
# the first tells the number of columns in this condition
# the second tells the condition type
n_this_cond = int(split_by_at[0])
self.column_types[curr_idx:curr_idx + n_this_cond] = \
int(split_by_at[1])
curr_idx += n_this_cond
elif len(split_by_at) == 1 and \
not re.search(r'\..', split_by_at[0]):
# Just a number, and not the type like '1..4'
self.column_types[curr_idx] = int(split_by_at[0])
curr_idx += 1
else: # must be a single stimulus condition
split_by_dots = re.split(r'\..', CG)
n_this_cond = int(split_by_dots[1])
self.column_types[curr_idx:curr_idx + n_this_cond] = 1
curr_idx += n_this_cond
self.n_basis = np.sum(self.column_types == -1)
self.n_stim = np.sum(self.column_types > 0)
self.n_orth = np.sum(self.column_types == 0)
find_StimLabels = re.compile(
r'^#[ ]+StimLabels[ ]+=[ ]+"(?P<SLtext>.+)"', re.MULTILINE)
StimLabels_found = find_StimLabels.search(all_text)
if StimLabels_found:
self.StimLabels = \
re.split(r'[ ;]+', StimLabels_found.group('SLtext'))
else:
self.StimLabels = []
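# --- Editor's note: illustrative sketch, not part of the original module ---
# Typical use of ReadDesign on an AFNI 3dDeconvolve design file; the file
# name 'X.xmat.1D' is hypothetical and must point at a real file to run.
def _example_read_design():
    design = ReadDesign(fname='X.xmat.1D', include_orth=True,
                        include_pols=True)
    # Task regressors and nuisance regressors as numpy arrays.
    return design.design_task, design.reg_nuisance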
def gen_design(stimtime_files, scan_duration, TR, style='FSL',
temp_res=0.01,
hrf_para={'response_delay': 6, 'undershoot_delay': 12,
'response_dispersion': 0.9,
'undershoot_dispersion': 0.9,
'undershoot_scale': 0.035}):
""" Generate design matrix based on a list of names of stimulus
timing files. The function will read each file, and generate
a numpy array of size [time_points \\* condition], where
time_points equals duration / TR, and condition is the size of
stimtime_filenames. Each column is the hypothetical fMRI response
based on the stimulus timing in the corresponding file
of stimtime_files.
This function uses generate_stimfunction and double_gamma_hrf
of brainiak.utils.fmrisim.
Parameters
----------
stimtime_files: a string or a list of string.
Each string is the name of the file storing
the stimulus timing information of one task condition.
The contents of the files will be interpreted
based on the style parameter.
Details are explained under the style parameter.
scan_duration: float or a list (or a 1D numpy array) of numbers.
Total duration of each fMRI scan, in unit of seconds.
If there are multiple runs, the duration should be
a list (or 1-d numpy array) of numbers.
If it is a list, then each number in the list
represents the duration of the corresponding scan
in the stimtime_files.
If only a number is provided, it is assumed that
there is only one fMRI scan lasting for scan_duration.
TR: float.
The sampling period of fMRI, in unit of seconds.
style: string, default: 'FSL'
Acceptable inputs: 'FSL', 'AFNI'
The formatting style of the stimtime_files.
'FSL' style has one line for each event of the same condition.
Each line contains three numbers. The first number is the onset
of the event relative to the onset of the first scan,
in units of seconds.
(Multiple scans should be treated as a concatenated long scan
for the purpose of calculating onsets.
However, the design matrix from one scan won't leak into the next).
The second number is the duration of the event,
in unit of seconds.
The third number is the amplitude modulation (or weight)
of the response.
It is acceptable to not provide the weight,
or not provide both duration and weight.
In such cases, these parameters will default to 1.0.
This code will accept timing files with only 1 or 2 columns for
convenience but please note that the FSL package does not allow this
'AFNI' style has one line for each scan (run).
Each line has a few triplets in the format of
stim_onsets*weight:duration
(or simpler, see below), separated by spaces.
For example, 3.2\\*2.0:1.5 means that one event starts at 3.2s,
modulated by weight of 2.0 and lasts for 1.5s.
If some run does not include a single event
of a condition (stimulus type), then you can put \\*,
or a negative number, or a very large number in that line.
Either duration or weight can be neglected. In such
cases, they will default to 1.0.
For example, 3.0, 3.0\\*1.0, 3.0:1.0 and 3.0\\*1.0:1.0 all
means an event starting at 3.0s, lasting for 1.0s, with
amplitude modulation of 1.0.
temp_res: float, default: 0.01
Temporal resolution of fMRI, in second.
hrf_para: dictionary
The parameters of the double-Gamma hemodynamic response function.
To set different parameters, supply a dictionary with
the same set of keys as the default, and replace the corresponding
values with the new values.
Returns
-------
design: 2D numpy array
design matrix. Each time row represents one TR
(fMRI sampling time point) and each column represents
one experiment condition, in the order in stimtime_files
"""
if np.ndim(scan_duration) == 0:
scan_duration = [scan_duration]
scan_duration = np.array(scan_duration)
assert np.all(scan_duration > TR), \
'scan duration should be longer than a TR'
if type(stimtime_files) is str:
stimtime_files = [stimtime_files]
assert TR > 0, 'TR should be positive'
assert style == 'FSL' or style == 'AFNI', 'style can only be FSL or AFNI'
n_C = len(stimtime_files) # number of conditions
n_S = np.size(scan_duration) # number of scans
if n_S > 1:
design = [np.empty([int(np.round(duration / TR)), n_C])
for duration in scan_duration]
else:
design = [np.empty([int(np.round(scan_duration / TR)), n_C])]
scan_onoff = np.insert(np.cumsum(scan_duration), 0, 0)
if style == 'FSL':
design_info = _read_stimtime_FSL(stimtime_files, n_C, n_S, scan_onoff)
elif style == 'AFNI':
design_info = _read_stimtime_AFNI(stimtime_files, n_C, n_S, scan_onoff)
response_delay = hrf_para['response_delay']
undershoot_delay = hrf_para['undershoot_delay']
response_disp = hrf_para['response_dispersion']
undershoot_disp = hrf_para['undershoot_dispersion']
undershoot_scale = hrf_para['undershoot_scale']
# generate design matrix
for i_s in range(n_S):
for i_c in range(n_C):
if len(design_info[i_s][i_c]['onset']) > 0:
stimfunction = generate_stimfunction(
onsets=design_info[i_s][i_c]['onset'],
event_durations=design_info[i_s][i_c]['duration'],
total_time=scan_duration[i_s],
weights=design_info[i_s][i_c]['weight'],
temporal_resolution=1.0/temp_res)
hrf = _double_gamma_hrf(response_delay=response_delay,
undershoot_delay=undershoot_delay,
response_dispersion=response_disp,
undershoot_dispersion=undershoot_disp,
undershoot_scale=undershoot_scale,
temporal_resolution=1.0/temp_res)
design[i_s][:, i_c] = convolve_hrf(
stimfunction, TR, hrf_type=hrf, scale_function=False,
temporal_resolution=1.0 / temp_res).transpose() * temp_res
else:
design[i_s][:, i_c] = 0.0
# We multiply the resulting design matrix with
# the temporal resolution to normalize it.
# We do not use the internal normalization
# in double_gamma_hrf because it does not guarantee
# normalizing with the same constant.
return np.concatenate(design, axis=0)
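# --- Editor's note: illustrative sketch, not part of the original module ---
# Building a design matrix from FSL-style timing files as described in the
# gen_design docstring; file names, onsets and durations are hypothetical.
# Each FSL line is "onset duration weight" in seconds.
def _example_gen_design():
    with open('cond_A.txt', 'w') as f:
        f.write("10.0 1.5 1.0\n40.0 1.5 1.0\n")
    with open('cond_B.txt', 'w') as f:
        f.write("25.0 1.5 1.0\n")
    design = gen_design(stimtime_files=['cond_A.txt', 'cond_B.txt'],
                        scan_duration=60.0, TR=2.0, style='FSL')
    return design  # shape (30, 2): 30 TRs by 2 conditions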
def _read_stimtime_FSL(stimtime_files, n_C, n_S, scan_onoff):
""" Utility called by gen_design. It reads in one or more
stimulus timing file conforming to FSL style,
and return a list (size of [#run \\* #condition])
of dictionary including onsets, durations and weights of each event.
Parameters
----------
stimtime_files: a string or a list of string.
Each string is the name of the file storing the stimulus
timing information of one task condition.
The contents in the files should follow the style of FSL
stimulus timing files, refer to gen_design.
n_C: integer, number of task conditions
n_S: integer, number of scans
scan_onoff: list of numbers.
The onset of each scan after concatenating all scans,
together with the offset of the last scan.
For example, if 3 scans of duration 100s, 150s, 120s are run,
scan_onoff is [0, 100, 250, 370]
Returns
-------
design_info: list of stimulus information
The first level of the list corresponds to different scans.
The second level of the list corresponds to different conditions.
Each item in the list is a dictionary with keys "onset",
"duration" and "weight". If one condition includes no event
in a scan, the values of these keys in that scan of the condition
are empty lists.
See also
--------
gen_design
"""
design_info = [[{'onset': [], 'duration': [], 'weight': []}
for i_c in range(n_C)] for i_s in range(n_S)]
# Read stimulus timing files
for i_c in range(n_C):
with open(stimtime_files[i_c]) as f:
for line in f.readlines():
tmp = line.strip().split()
i_s = np.where(
np.logical_and(scan_onoff[:-1] <= float(tmp[0]),
scan_onoff[1:] > float(tmp[0])))[0]
if len(i_s) == 1:
i_s = i_s[0]
design_info[i_s][i_c]['onset'].append(float(tmp[0])
- scan_onoff[i_s])
if len(tmp) >= 2:
design_info[i_s][i_c]['duration'].append(float(tmp[1]))
else:
design_info[i_s][i_c]['duration'].append(1.0)
if len(tmp) >= 3:
design_info[i_s][i_c]['weight'].append(float(tmp[2]))
else:
design_info[i_s][i_c]['weight'].append(1.0)
return design_info
def _read_stimtime_AFNI(stimtime_files, n_C, n_S, scan_onoff):
""" Utility called by gen_design. It reads in one or more stimulus timing
file conforming to AFNI style, and return a list
(size of ``[number of runs \\* number of conditions]``)
of dictionary including onsets, durations and weights of each event.
Parameters
----------
stimtime_files: a string or a list of string.
Each string is the name of the file storing the stimulus
timing information of one task condition.
The contents in the files should follow the style of AFNI
stimulus timing files, refer to gen_design.
n_C: integer, number of task conditions
n_S: integer, number of scans
scan_onoff: list of numbers.
The onset of each scan after concatenating all scans,
together with the offset of the last scan.
For example, if 3 scans of duration 100s, 150s, 120s are run,
scan_onoff is [0, 100, 250, 370]
Returns
-------
design_info: list of stimulus information
The first level of the list corresponds to different scans.
The second level of the list corresponds to different conditions.
Each item in the list is a dictionary with keys "onset",
"duration" and "weight". If one condition includes no event
in a scan, the values of these keys in that scan of the condition
are empty lists.
See also
--------
gen_design
"""
design_info = [[{'onset': [], 'duration': [], 'weight': []}
for i_c in range(n_C)] for i_s in range(n_S)]
# Read stimulus timing files
for i_c in range(n_C):
with open(stimtime_files[i_c]) as f:
text = f.readlines()
assert len(text) == n_S, \
'Number of lines does not match number of runs!'
for i_s, line in enumerate(text):
events = line.strip().split()
if events[0] == '*':
continue
for event in events:
assert event != '*'
tmp = str.split(event, ':')
if len(tmp) == 2:
duration = float(tmp[1])
else:
duration = 1.0
tmp = str.split(tmp[0], '*')
if len(tmp) == 2:
weight = float(tmp[1])
else:
weight = 1.0
if (float(tmp[0]) >= 0
and float(tmp[0])
< scan_onoff[i_s + 1] - scan_onoff[i_s]):
design_info[i_s][i_c]['onset'].append(float(tmp[0]))
design_info[i_s][i_c]['duration'].append(duration)
design_info[i_s][i_c]['weight'].append(weight)
return design_info
def center_mass_exp(interval, scale=1.0):
""" Calculate the center of mass of negative exponential distribution
p(x) = exp(-x / scale) / scale
in the interval of (interval_left, interval_right).
scale is the same scale parameter as scipy.stats.expon.pdf
Parameters
----------
interval: size 2 tuple, float
interval must be in the form of (interval_left, interval_right),
where interval_left/interval_right is the starting/end point of the
interval in which the center of mass is calculated for exponential
distribution.
Note that interval_left must be non-negative, since exponential is
not supported in the negative domain, and interval_right must be
bigger than interval_left (thus positive) to form a well-defined
interval.
scale: float, positive
The scale parameter of the exponential distribution. See above.
Returns
-------
m: float
The center of mass in the interval of (interval_left,
interval_right) for exponential distribution.
"""
assert isinstance(interval, tuple), 'interval must be a tuple'
assert len(interval) == 2, 'interval must be length two'
(interval_left, interval_right) = interval
assert interval_left >= 0, 'interval_left must be non-negative'
assert interval_right > interval_left, \
'interval_right must be bigger than interval_left'
assert scale > 0, 'scale must be positive'
if interval_right < np.inf:
return ((interval_left + scale) * np.exp(-interval_left / scale) - (
scale + interval_right) * np.exp(-interval_right / scale)) / (
np.exp(-interval_left / scale) - np.exp(-interval_right / scale))
else:
return interval_left + scale
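# --- Editor's note: illustrative sketch, not part of the original module ---
# For the standard exponential (scale=1.0) the centre of mass over (0, inf)
# is the mean, 1.0; over (0, 1) it is pulled toward the left endpoint
# (roughly 0.42).
def _example_center_mass_exp():
    import numpy as np
    whole_line = center_mass_exp((0.0, np.inf), scale=1.0)
    truncated = center_mass_exp((0.0, 1.0), scale=1.0)
    return whole_line, truncated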
def usable_cpu_count():
"""Get number of CPUs usable by the current process.
Takes into consideration cpusets restrictions.
Returns
-------
int
"""
try:
result = len(os.sched_getaffinity(0))
except AttributeError:
try:
result = len(psutil.Process().cpu_affinity())
except AttributeError:
result = os.cpu_count()
return result
def phase_randomize(data, voxelwise=False, random_state=None):
"""Randomize phase of time series across subjects
For each subject, apply Fourier transform to voxel time series
and then randomly shift the phase of each frequency before inverting
back into the time domain. This yields time series with the same power
spectrum (and thus the same autocorrelation) as the original time series
but will remove any meaningful temporal relationships among time series
across subjects. By default (voxelwise=False), the same phase shift is
applied across all voxels; however if voxelwise=True, different random
phase shifts are applied to each voxel. The typical input is a time by
voxels by subjects ndarray. The first dimension is assumed to be the
time dimension and will be phase randomized. If a 2-dimensional ndarray
is provided, the last dimension is assumed to be subjects, and different
phase randomizations will be applied to each subject.
The implementation is based on the work in [Lerner2011]_ and
[Simony2016]_.
Parameters
----------
data : ndarray (n_TRs x n_voxels x n_subjects)
Data to be phase randomized (per subject)
voxelwise : bool, default: False
Apply same (False) or different (True) randomizations across voxels
random_state : RandomState or an int seed (0 by default)
A random number generator instance to define the state of the
random permutations generator.
Returns
----------
shifted_data : ndarray (n_TRs x n_voxels x n_subjects)
Phase-randomized time series
"""
# Check if input is 2-dimensional
data_ndim = data.ndim
# Get basic shape of data
data, n_TRs, n_voxels, n_subjects = _check_timeseries_input(data)
# Random seed to be deterministically re-randomized at each iteration
if isinstance(random_state, np.random.RandomState):
prng = random_state
else:
prng = np.random.RandomState(random_state)
# Get randomized phase shifts
if n_TRs % 2 == 0:
# Why are we indexing from 1 not zero here? n_TRs / -1 long?
pos_freq = np.arange(1, data.shape[0] // 2)
neg_freq = np.arange(data.shape[0] - 1, data.shape[0] // 2, -1)
else:
pos_freq = np.arange(1, (data.shape[0] - 1) // 2 + 1)
neg_freq = np.arange(data.shape[0] - 1,
(data.shape[0] - 1) // 2, -1)
if not voxelwise:
phase_shifts = (prng.rand(len(pos_freq), 1, n_subjects)
* 2 * np.math.pi)
else:
phase_shifts = (prng.rand(len(pos_freq), n_voxels, n_subjects)
* 2 * np.math.pi)
# Fast Fourier transform along time dimension of data
fft_data = fft(data, axis=0)
# Shift pos and neg frequencies symmetrically, to keep signal real
fft_data[pos_freq, :, :] *= np.exp(1j * phase_shifts)
fft_data[neg_freq, :, :] *= np.exp(-1j * phase_shifts)
# Inverse FFT to put data back in time domain
shifted_data = np.real(ifft(fft_data, axis=0))
# Go back to 2-dimensions if input was 2-dimensional
if data_ndim == 2:
shifted_data = shifted_data[:, 0, :]
return shifted_data
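# --- Editor's note: illustrative sketch, not part of the original module ---
# Phase-randomizing a small synthetic dataset (10 TRs x 2 voxels x 3
# subjects); the output keeps each voxel's power spectrum but scrambles
# temporal alignment across subjects.
def _example_phase_randomize():
    import numpy as np
    data = np.random.RandomState(0).randn(10, 2, 3)
    shifted = phase_randomize(data, voxelwise=False, random_state=0)
    return shifted.shape  # (10, 2, 3)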
def p_from_null(observed, distribution,
side='two-sided', exact=False,
axis=None):
"""Compute p-value from null distribution
Returns the p-value for an observed test statistic given a null
distribution. Performs either a 'two-sided' (i.e., two-tailed)
test (default) or a one-sided (i.e., one-tailed) test for either the
'left' or 'right' side. For an exact test (exact=True), does not adjust
for the observed test statistic; otherwise, adjusts for observed
test statistic (prevents p-values of zero). If a multidimensional
distribution is provided, use axis argument to specify which axis indexes
resampling iterations.
The implementation is based on the work in [PhipsonSmyth2010]_.
.. [PhipsonSmyth2010] "Permutation p-values should never be zero:
calculating exact p-values when permutations are randomly drawn.",
B. Phipson, G. K., Smyth, 2010, Statistical Applications in Genetics
and Molecular Biology, 9, 1544-6115.
https://doi.org/10.2202/1544-6115.1585
Parameters
----------
observed : float
Observed test statistic
distribution : ndarray
Null distribution of test statistic
side : str, default: 'two-sided'
Perform one-sided ('left' or 'right') or 'two-sided' test
axis: None or int, default: None
Axis indicating resampling iterations in input distribution
Returns
-------
p : float
p-value for observed test statistic based on null distribution
"""
if side not in ('two-sided', 'left', 'right'):
raise ValueError("The value for 'side' must be either "
"'two-sided', 'left', or 'right', got {0}".
format(side))
n_samples = len(distribution)
logger.info("Assuming {0} resampling iterations".format(n_samples))
if side == 'two-sided':
# Numerator for two-sided test
numerator = np.sum(np.abs(distribution) >= np.abs(observed), axis=axis)
elif side == 'left':
# Numerator for one-sided test in left tail
numerator = np.sum(distribution <= observed, axis=axis)
elif side == 'right':
# Numerator for one-sided test in right tail
numerator = np.sum(distribution >= observed, axis=axis)
# If exact test all possible permutations and do not adjust
if exact:
p = numerator / n_samples
# If not exact test, adjust number of samples to account for
# observed statistic; prevents p-value from being zero
else:
p = (numerator + 1) / (n_samples + 1)
return p
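# --- Editor's note: illustrative sketch, not part of the original module ---
# Permutation p-value for a hypothetical observed statistic of 2.5 against a
# null distribution of 1000 resampled values.
def _example_p_from_null():
    import numpy as np
    null = np.random.RandomState(0).randn(1000)
    return p_from_null(2.5, null, side='two-sided', exact=False)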
def _check_timeseries_input(data):
"""Checks response time series input data (e.g., for ISC analysis)
Input data should be a n_TRs by n_voxels by n_subjects ndarray
(e.g., brainiak.image.MaskedMultiSubjectData) or a list where each
item is a n_TRs by n_voxels ndarray for a given subject. Multiple
input ndarrays must be the same shape. If a 2D array is supplied,
the last dimension is assumed to correspond to subjects. This
function is generally intended to be used internally by other
functions in this package (e.g., isc, isfc in brainiak.isc).
Parameters
----------
data : ndarray or list
Time series data
Returns
-------
data : ndarray
Input time series data with standardized structure
n_TRs : int
Number of time points (TRs)
n_voxels : int
Number of voxels (or ROIs)
n_subjects : int
Number of subjects
"""
# Convert list input to 3d and check shapes
if type(data) == list:
data_shape = data[0].shape
for i, d in enumerate(data):
if d.shape != data_shape:
raise ValueError("All ndarrays in input list "
"must be the same shape!")
if d.ndim == 1:
data[i] = d[:, np.newaxis]
data = np.dstack(data)
# Convert input ndarray to 3d and check shape
elif isinstance(data, np.ndarray):
if data.ndim == 2:
data = data[:, np.newaxis, :]
elif data.ndim == 3:
pass
else:
raise ValueError("Input ndarray should have 2 "
"or 3 dimensions (got {0})!".format(data.ndim))
# Infer subjects, TRs, voxels and log for user to check
n_TRs, n_voxels, n_subjects = data.shape
logger.info("Assuming {0} subjects with {1} time points "
"and {2} voxel(s) or ROI(s) for ISC analysis.".format(
n_subjects, n_TRs, n_voxels))
return data, n_TRs, n_voxels, n_subjects
def array_correlation(x, y, axis=0):
"""Column- or row-wise Pearson correlation between two arrays
Computes sample Pearson correlation between two 1D or 2D arrays (e.g.,
two n_TRs by n_voxels arrays). For 2D arrays, computes correlation
between each corresponding column (axis=0) or row (axis=1) where axis
indexes observations. If axis=0 (default), each column is considered to
be a variable and each row is an observation; if axis=1, each row is a
variable and each column is an observation (equivalent to transposing
the input arrays). Input arrays must be the same shape with corresponding
variables and observations. This is intended to be an efficient method
for computing correlations between two corresponding arrays with many
variables (e.g., many voxels).
Parameters
----------
x : 1D or 2D ndarray
Array of observations for one or more variables
y : 1D or 2D ndarray
Array of observations for one or more variables (same shape as x)
axis : int (0 or 1), default: 0
Correlation between columns (axis=0) or rows (axis=1)
Returns
-------
r : float or 1D ndarray
Pearson correlation values for input variables
"""
# Accommodate array-like inputs
if not isinstance(x, np.ndarray):
x = np.asarray(x)
if not isinstance(y, np.ndarray):
y = np.asarray(y)
# Check that inputs are same shape
if x.shape != y.shape:
raise ValueError("Input arrays must be the same shape")
# Transpose if axis=1 requested (to avoid broadcasting
# issues introduced by switching axis in mean and sum)
if axis == 1:
x, y = x.T, y.T
# Center (de-mean) input variables
x_demean = x - np.mean(x, axis=0)
y_demean = y - np.mean(y, axis=0)
# Compute summed product of centered variables
numerator = np.sum(x_demean * y_demean, axis=0)
# Compute sum squared error
denominator = np.sqrt(np.sum(x_demean ** 2, axis=0) *
np.sum(y_demean ** 2, axis=0))
return numerator / denominator
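# --- Editor's note: illustrative sketch, not part of the original module ---
# Column-wise correlations between two hypothetical 100 x 5 arrays; y is a
# noisy copy of x, so all five values come out well above zero.
def _example_array_correlation():
    import numpy as np
    rng = np.random.RandomState(0)
    x = rng.randn(100, 5)
    y = x + 0.5 * rng.randn(100, 5)
    return array_correlation(x, y, axis=0)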
| apache-2.0 | 5,286,036,411,698,096,000 | 35.445674 | 79 | 0.60143 | false |
beiko-lab/gengis | bin/Lib/site-packages/scipy/interpolate/__init__.py | 1 | 3385 | """
========================================
Interpolation (:mod:`scipy.interpolate`)
========================================
.. currentmodule:: scipy.interpolate
Sub-package for objects used in interpolation.
As listed below, this sub-package contains spline functions and classes,
one-dimensional and multi-dimensional (univariate and multivariate)
interpolation classes, Lagrange and Taylor polynomial interpolators, and
wrappers for `FITPACK <http://www.cisl.ucar.edu/softlib/FITPACK.html>`_
and DFITPACK functions.
Univariate interpolation
========================
.. autosummary::
:toctree: generated/
interp1d
BarycentricInterpolator
KroghInterpolator
PiecewisePolynomial
PchipInterpolator
barycentric_interpolate
krogh_interpolate
piecewise_polynomial_interpolate
pchip_interpolate
Multivariate interpolation
==========================
Unstructured data:
.. autosummary::
:toctree: generated/
griddata
LinearNDInterpolator
NearestNDInterpolator
CloughTocher2DInterpolator
Rbf
interp2d
For data on a grid:
.. autosummary::
RectBivariateSpline
.. seealso:: `scipy.ndimage.map_coordinates`
1-D Splines
===========
.. autosummary::
:toctree: generated/
UnivariateSpline
InterpolatedUnivariateSpline
LSQUnivariateSpline
The above univariate spline classes have the following methods:
.. autosummary::
UnivariateSpline.__call__
UnivariateSpline.derivatives
UnivariateSpline.integral
UnivariateSpline.roots
UnivariateSpline.derivative
UnivariateSpline.antiderivative
UnivariateSpline.get_coeffs
UnivariateSpline.get_knots
UnivariateSpline.get_residual
UnivariateSpline.set_smoothing_factor
Functional interface to FITPACK functions:
.. autosummary::
:toctree: generated/
splrep
splprep
splev
splint
sproot
spalde
splder
splantider
bisplrep
bisplev
2-D Splines
===========
For data on a grid:
.. autosummary::
:toctree: generated/
RectBivariateSpline
RectSphereBivariateSpline
For unstructured data:
.. autosummary::
:toctree: generated/
BivariateSpline
SmoothBivariateSpline
SmoothSphereBivariateSpline
LSQBivariateSpline
LSQSphereBivariateSpline
Low-level interface to FITPACK functions:
.. autosummary::
:toctree: generated/
bisplrep
bisplev
Additional tools
================
.. autosummary::
:toctree: generated/
lagrange
approximate_taylor_polynomial
.. seealso::
`scipy.ndimage.map_coordinates`,
`scipy.ndimage.spline_filter`,
`scipy.signal.resample`,
`scipy.signal.bspline`,
`scipy.signal.gauss_spline`,
`scipy.signal.qspline1d`,
`scipy.signal.cspline1d`,
`scipy.signal.qspline1d_eval`,
`scipy.signal.cspline1d_eval`,
`scipy.signal.qspline2d`,
`scipy.signal.cspline2d`.
"""
from __future__ import division, print_function, absolute_import
from .interpolate import *
from .fitpack import *
# New interface to fitpack library:
from .fitpack2 import *
from .rbf import Rbf
from .polyint import *
from .ndgriddata import *
__all__ = [s for s in dir() if not s.startswith('_')]
from numpy.testing import Tester
test = Tester().test
bench = Tester().bench
| gpl-3.0 | 5,469,905,734,267,368,000 | 17.795322 | 72 | 0.672083 | false |
pyfisch/servo | tests/wpt/web-platform-tests/tools/third_party/py/py/_io/saferepr.py | 273 | 2483 | import py
import sys
builtin_repr = repr
reprlib = py.builtin._tryimport('repr', 'reprlib')
class SafeRepr(reprlib.Repr):
""" subclass of repr.Repr that limits the resulting size of repr()
and includes information on exceptions raised during the call.
"""
def repr(self, x):
return self._callhelper(reprlib.Repr.repr, self, x)
def repr_unicode(self, x, level):
# Strictly speaking wrong on narrow builds
def repr(u):
if "'" not in u:
return py.builtin._totext("'%s'") % u
elif '"' not in u:
return py.builtin._totext('"%s"') % u
else:
return py.builtin._totext("'%s'") % u.replace("'", r"\'")
s = repr(x[:self.maxstring])
if len(s) > self.maxstring:
i = max(0, (self.maxstring-3)//2)
j = max(0, self.maxstring-3-i)
s = repr(x[:i] + x[len(x)-j:])
s = s[:i] + '...' + s[len(s)-j:]
return s
def repr_instance(self, x, level):
return self._callhelper(builtin_repr, x)
def _callhelper(self, call, x, *args):
try:
# Try the vanilla repr and make sure that the result is a string
s = call(x, *args)
except py.builtin._sysex:
raise
except:
cls, e, tb = sys.exc_info()
exc_name = getattr(cls, '__name__', 'unknown')
try:
exc_info = str(e)
except py.builtin._sysex:
raise
except:
exc_info = 'unknown'
return '<[%s("%s") raised in repr()] %s object at 0x%x>' % (
exc_name, exc_info, x.__class__.__name__, id(x))
else:
if len(s) > self.maxsize:
i = max(0, (self.maxsize-3)//2)
j = max(0, self.maxsize-3-i)
s = s[:i] + '...' + s[len(s)-j:]
return s
def saferepr(obj, maxsize=240):
""" return a size-limited safe repr-string for the given object.
Failing __repr__ functions of user instances will be represented
with a short exception info and 'saferepr' generally takes
care to never raise exceptions itself. This function is a wrapper
around the Repr/reprlib functionality of the standard 2.6 lib.
"""
# review exception handling
srepr = SafeRepr()
srepr.maxstring = maxsize
srepr.maxsize = maxsize
srepr.maxother = 160
return srepr.repr(obj)
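# --- Editor's note: illustrative sketch, not part of the original module ---
# saferepr never lets a broken __repr__ escape and truncates long output;
# the Broken class below is a hypothetical example.
def _example_saferepr():
    class Broken(object):
        def __repr__(self):
            raise RuntimeError("boom")
    # First value looks like '<[RuntimeError("boom") raised in repr()] ...>',
    # second is a truncated repr of a 1000-character string.
    return saferepr(Broken()), saferepr("x" * 1000, maxsize=40)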
| mpl-2.0 | -220,596,362,994,338,340 | 33.971831 | 76 | 0.531615 | false |
azoft-dev-team/imagrium | env/Lib/test/test_format.py | 12 | 10507 | from test.test_support import verbose, have_unicode, TestFailed, is_jython
import sys
# test string formatting operator (I am not sure if this is being tested
# elsewhere but, surely, some of the given cases are *not* tested because
# they crash python)
# test on unicode strings as well
overflowok = 1
def testformat(formatstr, args, output=None):
if verbose:
if output:
print "%s %% %s =? %s ..." %\
(repr(formatstr), repr(args), repr(output)),
else:
print "%s %% %s works? ..." % (repr(formatstr), repr(args)),
try:
result = formatstr % args
except OverflowError:
if not overflowok:
raise
if verbose:
print 'overflow (this is fine)'
else:
if output and result != output:
if verbose:
print 'no'
print "%s %% %s == %s != %s" %\
(repr(formatstr), repr(args), repr(result), repr(output))
else:
if verbose:
print 'yes'
def testboth(formatstr, *args):
testformat(formatstr, *args)
if have_unicode:
testformat(unicode(formatstr), *args)
testboth("%.1d", (1,), "1")
testboth("%.*d", (sys.maxint,1)) # expect overflow
testboth("%.100d", (1,), '0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001')
testboth("%#.117x", (1,), '0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001')
testboth("%#.118x", (1,), '0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001')
testboth("%f", (1.0,), "1.000000")
# these are trying to test the limits of the internal magic-number-length
# formatting buffer; if that number changes then these tests are less
# effective
testboth("%#.*g", (109, -1.e+49/3.))
testboth("%#.*g", (110, -1.e+49/3.))
testboth("%#.*g", (110, -1.e+100/3.))
# test some ridiculously large precision, expect overflow
testboth('%12.*f', (123456, 1.0))
# Formatting of long integers. Overflow is not ok
overflowok = 0
testboth("%x", 10L, "a")
testboth("%x", 100000000000L, "174876e800")
testboth("%o", 10L, "12")
testboth("%o", 100000000000L, "1351035564000")
testboth("%d", 10L, "10")
testboth("%d", 100000000000L, "100000000000")
big = 123456789012345678901234567890L
testboth("%d", big, "123456789012345678901234567890")
testboth("%d", -big, "-123456789012345678901234567890")
testboth("%5d", -big, "-123456789012345678901234567890")
testboth("%31d", -big, "-123456789012345678901234567890")
testboth("%32d", -big, " -123456789012345678901234567890")
testboth("%-32d", -big, "-123456789012345678901234567890 ")
testboth("%032d", -big, "-0123456789012345678901234567890")
testboth("%-032d", -big, "-123456789012345678901234567890 ")
testboth("%034d", -big, "-000123456789012345678901234567890")
testboth("%034d", big, "0000123456789012345678901234567890")
testboth("%0+34d", big, "+000123456789012345678901234567890")
testboth("%+34d", big, " +123456789012345678901234567890")
testboth("%34d", big, " 123456789012345678901234567890")
testboth("%.2d", big, "123456789012345678901234567890")
testboth("%.30d", big, "123456789012345678901234567890")
testboth("%.31d", big, "0123456789012345678901234567890")
testboth("%32.31d", big, " 0123456789012345678901234567890")
big = 0x1234567890abcdef12345L # 21 hex digits
testboth("%x", big, "1234567890abcdef12345")
testboth("%x", -big, "-1234567890abcdef12345")
testboth("%5x", -big, "-1234567890abcdef12345")
testboth("%22x", -big, "-1234567890abcdef12345")
testboth("%23x", -big, " -1234567890abcdef12345")
testboth("%-23x", -big, "-1234567890abcdef12345 ")
testboth("%023x", -big, "-01234567890abcdef12345")
testboth("%-023x", -big, "-1234567890abcdef12345 ")
testboth("%025x", -big, "-0001234567890abcdef12345")
testboth("%025x", big, "00001234567890abcdef12345")
testboth("%0+25x", big, "+0001234567890abcdef12345")
testboth("%+25x", big, " +1234567890abcdef12345")
testboth("%25x", big, " 1234567890abcdef12345")
testboth("%.2x", big, "1234567890abcdef12345")
testboth("%.21x", big, "1234567890abcdef12345")
testboth("%.22x", big, "01234567890abcdef12345")
testboth("%23.22x", big, " 01234567890abcdef12345")
testboth("%-23.22x", big, "01234567890abcdef12345 ")
testboth("%X", big, "1234567890ABCDEF12345")
testboth("%#X", big, "0X1234567890ABCDEF12345")
testboth("%#x", big, "0x1234567890abcdef12345")
testboth("%#x", -big, "-0x1234567890abcdef12345")
testboth("%#.23x", -big, "-0x001234567890abcdef12345")
testboth("%#+.23x", big, "+0x001234567890abcdef12345")
testboth("%# .23x", big, " 0x001234567890abcdef12345")
testboth("%#+.23X", big, "+0X001234567890ABCDEF12345")
testboth("%#-+.23X", big, "+0X001234567890ABCDEF12345")
testboth("%#-+26.23X", big, "+0X001234567890ABCDEF12345")
testboth("%#-+27.23X", big, "+0X001234567890ABCDEF12345 ")
testboth("%#+27.23X", big, " +0X001234567890ABCDEF12345")
# next one gets two leading zeroes from precision, and another from the
# 0 flag and the width
testboth("%#+027.23X", big, "+0X0001234567890ABCDEF12345")
# same, except no 0 flag
testboth("%#+27.23X", big, " +0X001234567890ABCDEF12345")
big = 012345670123456701234567012345670L # 32 octal digits
testboth("%o", big, "12345670123456701234567012345670")
testboth("%o", -big, "-12345670123456701234567012345670")
testboth("%5o", -big, "-12345670123456701234567012345670")
testboth("%33o", -big, "-12345670123456701234567012345670")
testboth("%34o", -big, " -12345670123456701234567012345670")
testboth("%-34o", -big, "-12345670123456701234567012345670 ")
testboth("%034o", -big, "-012345670123456701234567012345670")
testboth("%-034o", -big, "-12345670123456701234567012345670 ")
testboth("%036o", -big, "-00012345670123456701234567012345670")
testboth("%036o", big, "000012345670123456701234567012345670")
testboth("%0+36o", big, "+00012345670123456701234567012345670")
testboth("%+36o", big, " +12345670123456701234567012345670")
testboth("%36o", big, " 12345670123456701234567012345670")
testboth("%.2o", big, "12345670123456701234567012345670")
testboth("%.32o", big, "12345670123456701234567012345670")
testboth("%.33o", big, "012345670123456701234567012345670")
testboth("%34.33o", big, " 012345670123456701234567012345670")
testboth("%-34.33o", big, "012345670123456701234567012345670 ")
testboth("%o", big, "12345670123456701234567012345670")
testboth("%#o", big, "012345670123456701234567012345670")
testboth("%#o", -big, "-012345670123456701234567012345670")
testboth("%#.34o", -big, "-0012345670123456701234567012345670")
testboth("%#+.34o", big, "+0012345670123456701234567012345670")
testboth("%# .34o", big, " 0012345670123456701234567012345670")
testboth("%#+.34o", big, "+0012345670123456701234567012345670")
testboth("%#-+.34o", big, "+0012345670123456701234567012345670")
testboth("%#-+37.34o", big, "+0012345670123456701234567012345670 ")
testboth("%#+37.34o", big, " +0012345670123456701234567012345670")
# next one gets one leading zero from precision
testboth("%.33o", big, "012345670123456701234567012345670")
# base marker shouldn't change that, since "0" is redundant
testboth("%#.33o", big, "012345670123456701234567012345670")
# but reduce precision, and base marker should add a zero
testboth("%#.32o", big, "012345670123456701234567012345670")
# one leading zero from precision, and another from "0" flag & width
testboth("%034.33o", big, "0012345670123456701234567012345670")
# base marker shouldn't change that
testboth("%0#34.33o", big, "0012345670123456701234567012345670")
# Some small ints, in both Python int and long flavors).
testboth("%d", 42, "42")
testboth("%d", -42, "-42")
testboth("%d", 42L, "42")
testboth("%d", -42L, "-42")
testboth("%#x", 1, "0x1")
testboth("%#x", 1L, "0x1")
testboth("%#X", 1, "0X1")
testboth("%#X", 1L, "0X1")
testboth("%#o", 1, "01")
testboth("%#o", 1L, "01")
testboth("%#o", 0, "0")
testboth("%#o", 0L, "0")
testboth("%o", 0, "0")
testboth("%o", 0L, "0")
testboth("%d", 0, "0")
testboth("%d", 0L, "0")
testboth("%#x", 0, "0x0")
testboth("%#x", 0L, "0x0")
testboth("%#X", 0, "0X0")
testboth("%#X", 0L, "0X0")
testboth("%x", 0x42, "42")
testboth("%x", -0x42, "-42")
testboth("%x", 0x42L, "42")
testboth("%x", -0x42L, "-42")
testboth("%o", 042, "42")
testboth("%o", -042, "-42")
testboth("%o", 042L, "42")
testboth("%o", -042L, "-42")
# Test exception for unknown format characters
if verbose:
print 'Testing exceptions'
def test_exc(formatstr, args, exception, excmsg):
try:
testformat(formatstr, args)
except exception, exc:
if str(exc) == excmsg:
if verbose:
print "yes"
else:
if verbose: print 'no'
print 'Unexpected ', exception, ':', repr(str(exc))
except:
if verbose: print 'no'
print 'Unexpected exception'
raise
else:
raise TestFailed, 'did not get expected exception: %s' % excmsg
test_exc('abc %a', 1, ValueError,
"unsupported format character 'a' (0x61) at index 5")
if have_unicode:
test_exc(unicode('abc %\u3000','raw-unicode-escape'), 1, ValueError,
"unsupported format character '?' (0x3000) at index 5")
test_exc('%d', '1', TypeError, "int argument required")
test_exc('%g', '1', TypeError, "float argument required")
test_exc('no format', '1', TypeError,
"not all arguments converted during string formatting")
test_exc('no format', u'1', TypeError,
"not all arguments converted during string formatting")
test_exc(u'no format', '1', TypeError,
"not all arguments converted during string formatting")
test_exc(u'no format', u'1', TypeError,
"not all arguments converted during string formatting")
# for Jython, do we really need to support this? what's the use case
# here! the problem in a nutshell is that it changes __oct__, __hex__
# such that they don't return a string, but later on the exception
# will occur anyway. so seems like a lot of work for no value
# class Foobar(long):
# def __oct__(self):
# # Returning a non-string should not blow up.
# return self + 1
#test_exc('%o', Foobar(), TypeError,
# "expected string or Unicode object, long found")
if sys.maxint == 2**31-1 and not is_jython:
# crashes 2.2.1 and earlier:
try:
"%*d"%(sys.maxint, -127)
except MemoryError:
pass
else:
raise TestFailed, '"%*d"%(sys.maxint, -127) should fail'
| mit | 1,345,069,855,313,016,800 | 40.529644 | 149 | 0.685733 | false |
vlinhd11/vlinhd11-android-scripting | python/src/Tools/modulator/ScrolledListbox.py | 37 | 1477 | # A ScrolledList widget feels like a list widget but also has a
# vertical scroll bar on its right. (Later, options may be added to
# add a horizontal bar as well, to make the bars disappear
# automatically when not needed, to move them to the other side of the
# window, etc.)
#
# Configuration options are passed to the List widget.
# A Frame widget is inserted between the master and the list, to hold
# the Scrollbar widget.
# Most method calls are inherited from the List widget; Pack methods
# are redirected to the Frame widget however.
from Tkinter import *
from Tkinter import _cnfmerge
class ScrolledListbox(Listbox):
def __init__(self, master=None, cnf={}):
cnf = _cnfmerge(cnf)
fcnf = {}
vcnf = {'name': 'vbar',
Pack: {'side': 'right', 'fill': 'y'},}
for k in cnf.keys():
if type(k) == ClassType or k == 'name':
fcnf[k] = cnf[k]
del cnf[k]
self.frame = Frame(master, fcnf)
self.vbar = Scrollbar(self.frame, vcnf)
cnf[Pack] = {'side': 'left', 'fill': 'both', 'expand': 'yes'}
cnf['name'] = 'list'
Listbox.__init__(self, self.frame, cnf)
self['yscrollcommand'] = (self.vbar, 'set')
self.vbar['command'] = (self, 'yview')
# Copy Pack methods of self.frame -- hack!
for m in Pack.__dict__.keys():
if m[0] != '_' and m != 'config':
setattr(self, m, getattr(self.frame, m))
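# --- Editor's note: illustrative sketch, not part of the original module ---
# Minimal interactive use, following this module's old-style cnf/Pack
# convention (legacy Python 2 Tkinter); opens a Tk window when run.
def _example_scrolled_listbox():
    root = Tk()
    box = ScrolledListbox(root, {Pack: {'side': 'top', 'fill': 'both',
                                        'expand': 1}})
    for i in range(50):
        box.insert('end', 'item %d' % i)
    root.mainloop()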
| apache-2.0 | -4,334,048,460,384,505,300 | 38.918919 | 70 | 0.595125 | false |
mhum/ynab-enhanced | src/common/res/features/l10n/init.py | 1 | 3511 | #!/usr/bin/env python
"""Prepare and download l10ns."""
import urllib, urllib2
import shutil
import os
import zipfile
import json
import sys
import math
if len(sys.argv) != 2:
print ''
print 'ERROR:'
print ''
print 'Please supply a Crowdin API key, obtained on this page:'
print 'http://translate.toolkitforynab.com/project/toolkit-for-ynab/settings#integration\n'
print 'Example: ./get_l10ns <api key>'
print ''
exit(1)
ID = 'toolkit-for-ynab'
KEY = sys.argv[1:][0]
API_PREFIX = 'https://api.crowdin.com/api/project/%s/' % ID
KEY_SUFFIX = '?key=%s' % KEY
FILENAME = 'all.zip'
DEST_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'locales')
def export_l10ns():
"""Force crowding to export l10ns."""
url = API_PREFIX + 'export' + KEY_SUFFIX
response = urllib2.urlopen(url)
html = response.read()
return (html.find('success status') >= 0)
def download_l10ns():
"""Download all l10ns in zip archive."""
url = API_PREFIX + 'download/' + FILENAME + KEY_SUFFIX
l10ns_file = urllib2.urlopen(url)
with open('all.zip','wb') as f:
f.write(l10ns_file.read())
return True
def get_l10ns_stats():
url = API_PREFIX + "status" + KEY_SUFFIX + "&json=true"
response = urllib2.urlopen(url)
j = response.read()
lang_completed = {}
for i in json.loads(j):
lang_completed[i['name']] = int(math.ceil(int(i["words_translated"])/float(i["words"])*100))
return lang_completed
def unpack(lang_completed):
"""Unpack l10ns, move to one folder, add js initializer."""
os.path.isdir(DEST_DIR) and shutil.rmtree(DEST_DIR)
zipfile.ZipFile(FILENAME).extractall(DEST_DIR)
for root, dirs, files in os.walk(DEST_DIR):
for name in files:
if lang_completed[name.split('.')[0]] != 0:
shutil.move(os.path.join(root, name), DEST_DIR)
# Prepend all JSONs with Ember declaration.
with open(os.path.join(DEST_DIR, name), 'r+') as f:
content = f.read()
f.seek(0, 0)
f.write('ynabToolKit.l10nData = ' + content)
for root, dirs, files in os.walk(DEST_DIR):
for name in dirs:
shutil.rmtree(os.path.join(root, name))
os.remove(FILENAME)
def create_settings(lang_completed):
"""Generate settings.json file."""
settings = {
"name": "l10n",
"type": "select",
"default": "0",
"section": "general",
"title": "Localization of YNAB",
"description": "Localization of interface.",
"options": [
{ "name": "Default", "value": "0" }
],
"actions": {}}
for root, dirs, files in os.walk(DEST_DIR):
for name in files:
if lang_completed[name.split('.')[0]] != 0:
value = name.split('.')[0].lower()
percent = ' (%s%%)' % str(int(lang_completed[name.split('.')[0]]))
settings['options'].append({
"name": name.split('.')[0] + percent,
"value": value })
settings['actions'][value] = ["injectScript", "locales/" + name,
"injectScript", "main.js"]
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'settings.json'), 'w') as f:
json.dump(settings, f, indent=4)
lang_completed = get_l10ns_stats()
export_l10ns()
download_l10ns()
unpack(lang_completed)
create_settings(lang_completed)
| mit | -3,303,568,522,253,198,000 | 34.11 | 100 | 0.578183 | false |
sssstest/GameEditor | GameTrigger.py | 1 | 1438 | #!/usr/bin/env python
#@section License
#
#Copyright (C) 2013 ssss
#This file is a part of the GameEditor.
#
#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
from GameResource import *
class GameTrigger(GameResource):
if Class:
MomentMiddle=0
MomentBegin=1
MomentEnd=2
defaults={"id":-1,"name":"noname","condition":"","momentOfChecking":MomentBegin,"constantName":""}
def __init__(self, gameFile, id):
GameResource.__init__(self, gameFile, id)
def ReadGmk(self, stream):
triggerStream = stream.Deserialize()
if not triggerStream.ReadBoolean():
self.exists = False
return
triggerStream.ReadDword()
self.setMember("name", triggerStream.ReadString())
self.setMember("condition", triggerStream.ReadString())
self.setMember("momentOfChecking", triggerStream.ReadDword())
self.setMember("constantName", triggerStream.ReadString())
| gpl-3.0 | 5,770,379,173,756,206,000 | 30.955556 | 99 | 0.751739 | false |
lmorchard/django-allauth | allauth/socialaccount/south_migrations/0011_auto__chg_field_socialtoken_token.py | 77 | 6468 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'SocialToken.token'
db.alter_column('socialaccount_socialtoken', 'token', self.gf('django.db.models.fields.TextField')())
def backwards(self, orm):
# Changing field 'SocialToken.token'
db.alter_column('socialaccount_socialtoken', 'token', self.gf('django.db.models.fields.CharField')(max_length=255))
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'socialaccount.socialaccount': {
'Meta': {'unique_together': "(('provider', 'uid'),)", 'object_name': 'SocialAccount'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'extra_data': ('allauth.socialaccount.fields.JSONField', [], {'default': "'{}'"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'uid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'socialaccount.socialapp': {
'Meta': {'object_name': 'SocialApp'},
'client_id': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'secret': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sites.Site']", 'symmetrical': 'False', 'blank': 'True'})
},
'socialaccount.socialtoken': {
'Meta': {'unique_together': "(('app', 'account'),)", 'object_name': 'SocialToken'},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['socialaccount.SocialAccount']"}),
'app': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['socialaccount.SocialApp']"}),
'expires_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'token': ('django.db.models.fields.TextField', [], {}),
'token_secret': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'})
}
}
complete_apps = ['socialaccount'] | mit | 8,510,599,914,403,886,000 | 67.819149 | 182 | 0.55334 | false |
partofthething/home-assistant | tests/components/ruckus_unleashed/test_init.py | 3 | 3616 | """Test the Ruckus Unleashed config flow."""
from unittest.mock import patch
from pyruckus.exceptions import AuthenticationError
from homeassistant.components.ruckus_unleashed import (
API_AP,
API_DEVICE_NAME,
API_ID,
API_MAC,
API_MODEL,
API_SYSTEM_OVERVIEW,
API_VERSION,
DOMAIN,
MANUFACTURER,
)
from homeassistant.config_entries import (
ENTRY_STATE_LOADED,
ENTRY_STATE_NOT_LOADED,
ENTRY_STATE_SETUP_RETRY,
)
from homeassistant.helpers.device_registry import CONNECTION_NETWORK_MAC
from tests.components.ruckus_unleashed import (
DEFAULT_AP_INFO,
DEFAULT_SYSTEM_INFO,
DEFAULT_TITLE,
init_integration,
mock_config_entry,
)
async def test_setup_entry_login_error(hass):
"""Test entry setup failed due to login error."""
entry = mock_config_entry()
with patch(
"homeassistant.components.ruckus_unleashed.Ruckus",
side_effect=AuthenticationError,
):
entry.add_to_hass(hass)
result = await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
assert result is False
async def test_setup_entry_connection_error(hass):
"""Test entry setup failed due to connection error."""
entry = mock_config_entry()
with patch(
"homeassistant.components.ruckus_unleashed.Ruckus",
side_effect=ConnectionError,
):
entry.add_to_hass(hass)
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
assert entry.state == ENTRY_STATE_SETUP_RETRY
async def test_router_device_setup(hass):
"""Test a router device is created."""
await init_integration(hass)
device_info = DEFAULT_AP_INFO[API_AP][API_ID]["1"]
device_registry = await hass.helpers.device_registry.async_get_registry()
device = device_registry.async_get_device(
identifiers={(CONNECTION_NETWORK_MAC, device_info[API_MAC])},
connections={(CONNECTION_NETWORK_MAC, device_info[API_MAC])},
)
assert device
assert device.manufacturer == MANUFACTURER
assert device.model == device_info[API_MODEL]
assert device.name == device_info[API_DEVICE_NAME]
assert device.sw_version == DEFAULT_SYSTEM_INFO[API_SYSTEM_OVERVIEW][API_VERSION]
assert device.via_device_id is None
async def test_unload_entry(hass):
"""Test successful unload of entry."""
entry = await init_integration(hass)
assert len(hass.config_entries.async_entries(DOMAIN)) == 1
assert entry.state == ENTRY_STATE_LOADED
assert await hass.config_entries.async_unload(entry.entry_id)
await hass.async_block_till_done()
assert entry.state == ENTRY_STATE_NOT_LOADED
assert not hass.data.get(DOMAIN)
async def test_config_not_ready_during_setup(hass):
"""Test we throw a ConfigNotReady if Coordinator update fails."""
entry = mock_config_entry()
with patch(
"homeassistant.components.ruckus_unleashed.Ruckus.connect",
return_value=None,
), patch(
"homeassistant.components.ruckus_unleashed.Ruckus.mesh_name",
return_value=DEFAULT_TITLE,
), patch(
"homeassistant.components.ruckus_unleashed.Ruckus.system_info",
return_value=DEFAULT_SYSTEM_INFO,
), patch(
"homeassistant.components.ruckus_unleashed.RuckusUnleashedDataUpdateCoordinator._async_update_data",
side_effect=ConnectionError,
):
entry.add_to_hass(hass)
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
assert entry.state == ENTRY_STATE_SETUP_RETRY
| mit | 3,719,311,980,649,539,000 | 30.443478 | 108 | 0.694414 | false |
wzbozon/statsmodels | statsmodels/tools/print_version.py | 23 | 7951 | #!/usr/bin/env python
from __future__ import print_function
from statsmodels.compat.python import reduce
import sys
from os.path import dirname
def safe_version(module, attr='__version__'):
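    """Return the version of ``module`` found at attribute path ``attr``.
    ``attr`` may be a single attribute name or a list of nested names
    (e.g. ``['version', 'version']``); a placeholder string is returned
    when the attribute chain cannot be resolved.
    """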
if not isinstance(attr, list):
attr = [attr]
try:
return reduce(getattr, [module] + attr)
except AttributeError:
return "Cannot detect version"
def _show_versions_only():
print("\nINSTALLED VERSIONS")
print("------------------")
print("Python: %d.%d.%d.%s.%s" % sys.version_info[:])
try:
import os
(sysname, nodename, release, version, machine) = os.uname()
print("OS: %s %s %s %s" % (sysname, release, version, machine))
print("byteorder: %s" % sys.byteorder)
print("LC_ALL: %s" % os.environ.get('LC_ALL', "None"))
print("LANG: %s" % os.environ.get('LANG', "None"))
    except Exception:
pass
try:
from statsmodels import version
has_sm = True
except ImportError:
has_sm = False
print('\nStatsmodels\n===========\n')
if has_sm:
print('Installed: %s' % safe_version(version, 'full_version'))
else:
print('Not installed')
print("\nRequired Dependencies\n=====================\n")
try:
import Cython
print("cython: %s" % safe_version(Cython))
except ImportError:
print("cython: Not installed")
try:
import numpy
print("numpy: %s" % safe_version(numpy, ['version', 'version']))
except ImportError:
print("numpy: Not installed")
try:
import scipy
print("scipy: %s" % safe_version(scipy, ['version', 'version']))
except ImportError:
print("scipy: Not installed")
try:
import pandas
print("pandas: %s" % safe_version(pandas, ['version', 'version']))
except ImportError:
print("pandas: Not installed")
try:
import dateutil
print(" dateutil: %s" % safe_version(dateutil))
except ImportError:
print(" dateutil: not installed")
try:
import patsy
print("patsy: %s" % safe_version(patsy))
except ImportError:
print("patsy: Not installed")
print("\nOptional Dependencies\n=====================\n")
try:
import matplotlib as mpl
print("matplotlib: %s" % safe_version(mpl))
except ImportError:
print("matplotlib: Not installed")
try:
from cvxopt import info
print("cvxopt: %s" % safe_version(info, 'version'))
except ImportError:
print("cvxopt: Not installed")
print("\nDeveloper Tools\n================\n")
try:
import IPython
print("IPython: %s" % safe_version(IPython))
except ImportError:
print("IPython: Not installed")
try:
import jinja2
print(" jinja2: %s" % safe_version(jinja2))
except ImportError:
print(" jinja2: Not installed")
try:
import sphinx
print("sphinx: %s" % safe_version(sphinx))
except ImportError:
print("sphinx: Not installed")
try:
import pygments
print(" pygments: %s" % safe_version(pygments))
except ImportError:
print(" pygments: Not installed")
try:
import nose
print("nose: %s" % safe_version(nose))
except ImportError:
print("nose: Not installed")
try:
import virtualenv
print("virtualenv: %s" % safe_version(virtualenv))
except ImportError:
print("virtualenv: Not installed")
print("\n")
def show_versions(show_dirs=True):
    if not show_dirs:
        _show_versions_only()
        return
print("\nINSTALLED VERSIONS")
print("------------------")
print("Python: %d.%d.%d.%s.%s" % sys.version_info[:])
try:
import os
(sysname, nodename, release, version, machine) = os.uname()
print("OS: %s %s %s %s" % (sysname, release, version, machine))
print("byteorder: %s" % sys.byteorder)
print("LC_ALL: %s" % os.environ.get('LC_ALL', "None"))
print("LANG: %s" % os.environ.get('LANG', "None"))
    except Exception:
pass
try:
import statsmodels
from statsmodels import version
has_sm = True
except ImportError:
has_sm = False
print('\nStatsmodels\n===========\n')
if has_sm:
print('Installed: %s (%s)' % (safe_version(version, 'full_version'),
dirname(statsmodels.__file__)))
else:
print('Not installed')
print("\nRequired Dependencies\n=====================\n")
try:
import Cython
print("cython: %s (%s)" % (safe_version(Cython),
dirname(Cython.__file__)))
except ImportError:
print("cython: Not installed")
try:
import numpy
print("numpy: %s (%s)" % (safe_version(numpy, ['version', 'version']),
dirname(numpy.__file__)))
except ImportError:
print("numpy: Not installed")
try:
import scipy
print("scipy: %s (%s)" % (safe_version(scipy, ['version', 'version']),
dirname(scipy.__file__)))
except ImportError:
print("scipy: Not installed")
try:
import pandas
print("pandas: %s (%s)" % (safe_version(pandas, ['version',
'version']),
dirname(pandas.__file__)))
except ImportError:
print("pandas: Not installed")
try:
import dateutil
print(" dateutil: %s (%s)" % (safe_version(dateutil),
dirname(dateutil.__file__)))
except ImportError:
print(" dateutil: not installed")
try:
import patsy
print("patsy: %s (%s)" % (safe_version(patsy),
dirname(patsy.__file__)))
except ImportError:
print("patsy: Not installed")
print("\nOptional Dependencies\n=====================\n")
try:
import matplotlib as mpl
print("matplotlib: %s (%s)" % (safe_version(mpl),
dirname(mpl.__file__)))
except ImportError:
print("matplotlib: Not installed")
try:
from cvxopt import info
print("cvxopt: %s (%s)" % (safe_version(info, 'version'),
dirname(info.__file__)))
except ImportError:
print("cvxopt: Not installed")
print("\nDeveloper Tools\n================\n")
try:
import IPython
print("IPython: %s (%s)" % (safe_version(IPython),
dirname(IPython.__file__)))
except ImportError:
print("IPython: Not installed")
try:
import jinja2
print(" jinja2: %s (%s)" % (safe_version(jinja2),
dirname(jinja2.__file__)))
except ImportError:
print(" jinja2: Not installed")
try:
import sphinx
print("sphinx: %s (%s)" % (safe_version(sphinx),
dirname(sphinx.__file__)))
except ImportError:
print("sphinx: Not installed")
try:
import pygments
print(" pygments: %s (%s)" % (safe_version(pygments),
dirname(pygments.__file__)))
except ImportError:
print(" pygments: Not installed")
try:
import nose
print("nose: %s (%s)" % (safe_version(nose), dirname(nose.__file__)))
except ImportError:
print("nose: Not installed")
try:
import virtualenv
print("virtualenv: %s (%s)" % (safe_version(virtualenv),
dirname(virtualenv.__file__)))
except ImportError:
print("virtualenv: Not installed")
print("\n")
if __name__ == "__main__":
show_versions()
| bsd-3-clause | -70,192,191,835,726,800 | 28.66791 | 78 | 0.521695 | false |
cjhak/b2share | invenio/modules/uploader/uploader_tasks.py | 13 | 13474 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Uploader workflow tasks.
These are the main/common tasks that the uploader uses; they are wired into
the workflows defined in :py:mod:`~invenio.modules.uploader.workflows`.
See: `Simple workflows for Python <https://pypi.python.org/pypi/workflow/1.0>`_
"""
import os
from invenio.base.globals import cfg
from invenio.modules.pidstore.models import PersistentIdentifier
from .errors import UploaderWorkflowException
###########################################################
############## Pre tasks #################
###########################################################
def create_records_for_workflow(records, **kwargs):
"""Create the record object from the json.
    :param records: List of records to be processed.
    :param kwargs: Additional keyword arguments (unused).
"""
from invenio.modules.records.api import Record
for i, obj in enumerate(records):
records[i] = (obj[0], Record(json=obj[1]))
###########################################################
############## Post tasks #################
###########################################################
def return_recordids_only(records, **kwargs):
"""Retrieve from the records only the record ID to return them.
:param records: Processed list of records
:parma kwargs:
"""
for i, obj in enumerate(records):
records[i] = obj[1].get('recid')
###########################################################
############## Workflow tasks #################
###########################################################
def raise_(ex):
"""Helper task to raise an exception."""
def _raise_(obj, eng):
raise ex
return _raise_
def validate(step):
"""Validate the record.
Validate the record using the `validate` method present in each record and
the validation mode, either from the command line options or from
`UPLOADER_VALIDATION_MODE`.
    The validation uses the `schema` information from the field definition;
    see `invenio.modules.jsonalchemy.jsonext.parsers.schema_parser`.
"""
def _validate(obj, eng):
record = obj[1]
mode = eng.getVar('options', {}).get('validation_mode',
cfg['UPLOADER_VALIDATION_MODE'])
eng.log.info("Validating record using mode: '%s'", (mode, ))
if not hasattr(record, 'validate'):
raise UploaderWorkflowException(
step, msg="An 'validate' method is needed")
validator_errors = record.validate()
eng.log.info('Validation errors: %s' % (str(validator_errors), ))
if mode.lower() == 'strict' and validator_errors:
raise UploaderWorkflowException(
step, msg="One or more validation errors have occurred, please"
" check them or change the 'validation_mode' to "
"'permissive'.\n%s" % (str(validator_errors), ))
eng.log.info('Finish validating the current record')
return _validate
def retrieve_record_id_from_pids(step):
"""Retrieve the record identifier from a record using its PIDS.
If any PID matches with any in the DB then the record id found is set to
the current `record`
"""
def _retrieve_record_id_from_pids(obj, eng):
record = obj[1]
eng.log.info('Look for PIDs inside the current record')
if not hasattr(record, 'persistent_identifiers'):
raise UploaderWorkflowException(
step, msg="An 'persistent_identifiers' method is needed")
for pid_name, pid_values in record.persistent_identifiers:
eng.log.info("Found PID '%s' trying to match it", (pid_name, ))
matching_recids = set()
for possible_pid in pid_values:
eng.log.info("Looking for PID %s", (possible_pid, ))
pid = PersistentIdentifier.get(
possible_pid.get('type'), possible_pid.get('value'),
possible_pid.get('provider'))
if pid:
eng.log.info("PID found in the data base %s",
(pid.object_value, ))
matching_recids.add(pid.object_value)
if len(matching_recids) > 1:
raise UploaderWorkflowException(
step, msg="Found multiple match in the database, %s "
"for '%s'" % (repr(matching_recids), pid_name))
elif matching_recids:
record['recid'] = matching_recids.pop()
eng.log.info(
'Finish looking for PIDs inside the current record')
break
eng.log.info('Finish looking for PIDs inside the current record')
return _retrieve_record_id_from_pids
def reserve_record_id(step):
"""Reserve a new record id for the current object and set it inside."""
# TODO: manage exceptions in a better way
def _reserve_record_id(obj, eng):
record = obj[1]
eng.log.info('Reserve a recid for the new record')
try:
pid = PersistentIdentifier.create('recid', pid_value=None,
pid_provider='invenio')
record['recid'] = int(pid.pid_value)
pid.reserve()
eng.log.info("Finish reserving a recid '%s' for the new record",
(pid.pid_value, ))
except Exception as e:
raise UploaderWorkflowException(step, e.message)
return _reserve_record_id
def save_record(step):
"""Save the record to the DB using the `_save` method from it."""
def _save(obj, eng):
record = obj[1]
eng.log.info('Saving record to DB')
if not hasattr(record, '_save'):
raise UploaderWorkflowException(
step, msg="An '_save' method is needed")
try:
record._save()
eng.log.info('Record saved to DB')
except Exception as e:
raise UploaderWorkflowException(step, e.message)
return _save
def save_master_format(step):
"""Put the master format info the `bfmt` DB table."""
def _save_master_format(obj, eng):
from invenio.base.helpers import utf8ifier
from invenio.modules.formatter.models import Bibfmt
from invenio.ext.sqlalchemy import db
from zlib import compress
eng.log.info('Saving master record to DB')
bibfmt = Bibfmt(id_bibrec=obj[1]['recid'],
format=obj[1].additional_info.master_format,
kind='master',
last_updated=obj[1]['modification_date'],
value=compress(utf8ifier(
obj[0]
if obj[1].additional_info.master_format == 'marc'
else obj[1].legacy_export_as_marc()
)))
db.session.add(bibfmt)
db.session.commit()
eng.log.info('Master record saved to DB')
return _save_master_format
def update_pidstore(step):
"""Save each PID present in the record to the PID storage."""
# TODO: manage exceptions
def _update_pidstore(obj, eng):
record = obj[1]
eng.log.info('Look for PIDs inside the current record and register '
'them in the DB')
if not hasattr(record, 'persistent_identifiers'):
raise UploaderWorkflowException(
step, msg="An 'persistent_identifiers' method is needed")
eng.log.info("Found PIDs '%s'", (record.persistent_identifiers, ))
for pid_name, pid_values in record.persistent_identifiers:
eng.log.info("Found PID '%s'", (pid_name, ))
for pid_value in pid_values:
pid = PersistentIdentifier.get(
pid_value.get('type'), pid_value.get('value'),
pid_value.get('provider'))
if pid is None:
pid = PersistentIdentifier.create(
pid_value.get('type'), pid_value.get('value'),
pid_value.get('provider'))
if not pid.has_object('rec', record['recid']):
pid.assign('rec', record['recid'])
eng.log.info('Finish looking for PIDs inside the current record and '
'register them in the DB')
return _update_pidstore
def manage_attached_documents(step):
"""Attach and treat all the documents embeded in the input filex."""
from invenio.modules.documents import api
from invenio.modules.documents.tasks import set_document_contents
from invenio.modules.records.utils import name_generator
def _manage_attached_documents(obj, eng):
record = obj[1]
filename = eng.getVar('options').get('filename')
dirname = os.path.abspath(os.path.dirname(filename)) \
if filename is not None else os.curdir
def _check_path(source):
"""Check if the ``source`` path.
If it is relative path than the directory path of original blob
filename, if defined, or the current directory will be prepended.
"""
if not os.path.isabs(source):
new_source = os.path.join(dirname, source)
if os.path.exists(new_source):
return new_source
eng.log.error('File %s does not exist.', (new_source,))
return source
eng.log.info('Look documents to manage')
def _create_document(metadata, record):
metadata['source'] = _check_path(metadata['source'])
if '_documents' not in record:
record['_documents'] = []
model = metadata.pop('model', 'record_document_base')
if 'recids' not in metadata:
metadata['recids'] = list()
if record.get('recid', -1) not in metadata['recids']:
metadata['recids'].append(record.get('recid', -1), )
document = api.Document.create(metadata, model=model)
eng.log.info('Document %s created', (document['_id'],))
record['_documents'].append((document['title'], document['_id']))
return document
if 'files_to_upload' in record:
eng.log.info('Documents to upload found')
files_to_upload = record.get('files_to_upload', [])
for file_to_upload in files_to_upload:
document = _create_document(file_to_upload, record)
set_document_contents.delay(
document['_id'],
document['source'],
name_generator(document)
)
eng.log.info('Finish creating documents, delete temporary key')
del record['files_to_upload']
if 'files_to_link' in record:
eng.log.info('Documents to link found')
files_to_link = record.get('files_to_link', [])
for file_to_link in files_to_link:
_create_document(file_to_link, record)
eng.log.info('Finish linking documents, delete temporary key')
del record['files_to_link']
return _manage_attached_documents
def legacy(step):
"""Update legacy bibxxx tables."""
def _legacy(obj, eng):
record = obj[1]
if record.additional_info.master_format != 'marc':
return
import marshal
from invenio.legacy.bibupload.engine import (
CFG_BIBUPLOAD_DISABLE_RECORD_REVISIONS,
CFG_BIBUPLOAD_SERIALIZE_RECORD_STRUCTURE,
archive_marcxml_for_history,
update_bibfmt_format,
update_database_with_metadata,
)
modification_date = record['modification_date'].strftime(
'%Y-%m-%d %H:%M:%S')
update_bibfmt_format(
record['recid'],
record.legacy_export_as_marc(),
'xm',
modification_date
)
if CFG_BIBUPLOAD_SERIALIZE_RECORD_STRUCTURE:
update_bibfmt_format(
record['recid'],
marshal.dumps(record.legacy_create_recstruct()),
'recstruct',
modification_date
)
if not CFG_BIBUPLOAD_DISABLE_RECORD_REVISIONS:
archive_marcxml_for_history(
record['recid'], affected_fields={}
)
update_database_with_metadata(
record.legacy_create_recstruct(),
record['recid']
)
eng.log.info(
'Finishing legacy task for record {0}'.format(record['recid'])
)
return _legacy
| gpl-2.0 | -1,170,732,406,976,718,300 | 37.062147 | 79 | 0.562936 | false |
PongPi/isl-odoo | openerp/addons/base/__openerp__.py | 336 | 3703 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2012 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Base',
'version': '1.3',
'category': 'Hidden',
'description': """
The kernel of OpenERP, needed for all installations.
=====================================================
""",
'author': 'OpenERP SA',
'maintainer': 'OpenERP SA',
'website': 'http://www.openerp.com',
'depends': [],
'data': [
'base_data.xml',
'res/res_currency_data.xml',
'res/res_country_data.xml',
'security/base_security.xml',
'base_menu.xml',
'res/res_config.xml',
'res/res.country.state.csv',
'ir/ir_actions.xml',
'ir/ir_config_parameter_view.xml',
'ir/ir_cron_view.xml',
'ir/ir_filters.xml',
'ir/ir_mail_server_view.xml',
'ir/ir_model_view.xml',
'ir/ir_attachment_view.xml',
'ir/ir_rule_view.xml',
'ir/ir_sequence_view.xml',
'ir/ir_translation_view.xml',
'ir/ir_ui_menu_view.xml',
'ir/ir_ui_view_view.xml',
'ir/ir_values_view.xml',
'ir/osv_memory_autovacuum.xml',
'ir/ir_model_report.xml',
'ir/ir_logging_view.xml',
'ir/ir_qweb.xml',
'workflow/workflow_view.xml',
'module/module_view.xml',
'module/module_data.xml',
'module/module_report.xml',
'module/wizard/base_module_update_view.xml',
'module/wizard/base_language_install_view.xml',
'module/wizard/base_import_language_view.xml',
'module/wizard/base_module_upgrade_view.xml',
'module/wizard/base_module_configuration_view.xml',
'module/wizard/base_export_language_view.xml',
'module/wizard/base_update_translations_view.xml',
'module/wizard/base_module_immediate_install.xml',
'res/res_company_view.xml',
'res/res_request_view.xml',
'res/res_lang_view.xml',
'res/res_partner_report.xml',
'res/res_partner_view.xml',
'res/res_bank_view.xml',
'res/res_country_view.xml',
'res/res_currency_view.xml',
'res/res_users_view.xml',
'res/res_partner_data.xml',
'res/ir_property_view.xml',
'res/res_security.xml',
'security/ir.model.access.csv',
],
'demo': [
'base_demo.xml',
'res/res_partner_demo.xml',
'res/res_partner_demo.yml',
'res/res_partner_image_demo.xml',
],
'test': [
'tests/base_test.yml',
'tests/test_osv_expression.yml',
'tests/test_ir_rule.yml', # <-- These tests modify/add/delete ir_rules.
],
'installable': True,
'auto_install': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -8,989,697,298,476,351,000 | 35.663366 | 79 | 0.569808 | false |
mafiya69/sympy | sympy/vector/tests/test_dyadic.py | 94 | 4076 | from sympy import sin, cos, symbols, pi, ImmutableMatrix as Matrix, \
simplify
from sympy.vector import (CoordSysCartesian, Vector, Dyadic,
DyadicAdd, DyadicMul, DyadicZero,
BaseDyadic, express)
A = CoordSysCartesian('A')
def test_dyadic():
a, b = symbols('a, b')
assert Dyadic.zero != 0
assert isinstance(Dyadic.zero, DyadicZero)
assert BaseDyadic(A.i, A.j) != BaseDyadic(A.j, A.i)
assert (BaseDyadic(Vector.zero, A.i) ==
BaseDyadic(A.i, Vector.zero) == Dyadic.zero)
d1 = A.i | A.i
d2 = A.j | A.j
d3 = A.i | A.j
assert isinstance(d1, BaseDyadic)
d_mul = a*d1
assert isinstance(d_mul, DyadicMul)
assert d_mul.base_dyadic == d1
assert d_mul.measure_number == a
assert isinstance(a*d1 + b*d3, DyadicAdd)
assert d1 == A.i.outer(A.i)
assert d3 == A.i.outer(A.j)
v1 = a*A.i - A.k
v2 = A.i + b*A.j
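    # The outer product distributes over components, so v1|v2 expands into a
    # sum of base dyadics with the products of the coefficients as measures.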
assert v1 | v2 == v1.outer(v2) == a * (A.i|A.i) + (a*b) * (A.i|A.j) +\
- (A.k|A.i) - b * (A.k|A.j)
assert d1 * 0 == Dyadic.zero
assert d1 != Dyadic.zero
assert d1 * 2 == 2 * (A.i | A.i)
assert d1 / 2. == 0.5 * d1
assert d1.dot(0 * d1) == Vector.zero
assert d1 & d2 == Dyadic.zero
assert d1.dot(A.i) == A.i == d1 & A.i
assert d1.cross(Vector.zero) == Dyadic.zero
assert d1.cross(A.i) == Dyadic.zero
assert d1 ^ A.j == d1.cross(A.j)
assert d1.cross(A.k) == - A.i | A.j
assert d2.cross(A.i) == - A.j | A.k == d2 ^ A.i
assert A.i ^ d1 == Dyadic.zero
assert A.j.cross(d1) == - A.k | A.i == A.j ^ d1
assert Vector.zero.cross(d1) == Dyadic.zero
assert A.k ^ d1 == A.j | A.i
assert A.i.dot(d1) == A.i & d1 == A.i
assert A.j.dot(d1) == Vector.zero
assert Vector.zero.dot(d1) == Vector.zero
assert A.j & d2 == A.j
assert d1.dot(d3) == d1 & d3 == A.i | A.j == d3
assert d3 & d1 == Dyadic.zero
q = symbols('q')
B = A.orient_new_axis('B', q, A.k)
assert express(d1, B) == express(d1, B, B)
assert express(d1, B) == ((cos(q)**2) * (B.i | B.i) + (-sin(q) * cos(q)) *
(B.i | B.j) + (-sin(q) * cos(q)) * (B.j | B.i) + (sin(q)**2) *
(B.j | B.j))
assert express(d1, B, A) == (cos(q)) * (B.i | A.i) + (-sin(q)) * (B.j | A.i)
assert express(d1, A, B) == (cos(q)) * (A.i | B.i) + (-sin(q)) * (A.i | B.j)
assert d1.to_matrix(A) == Matrix([[1, 0, 0], [0, 0, 0], [0, 0, 0]])
assert d1.to_matrix(A, B) == Matrix([[cos(q), -sin(q), 0],
[0, 0, 0],
[0, 0, 0]])
assert d3.to_matrix(A) == Matrix([[0, 1, 0], [0, 0, 0], [0, 0, 0]])
a, b, c, d, e, f = symbols('a, b, c, d, e, f')
v1 = a * A.i + b * A.j + c * A.k
v2 = d * A.i + e * A.j + f * A.k
d4 = v1.outer(v2)
assert d4.to_matrix(A) == Matrix([[a * d, a * e, a * f],
[b * d, b * e, b * f],
[c * d, c * e, c * f]])
d5 = v1.outer(v1)
C = A.orient_new_axis('C', q, A.i)
for expected, actual in zip(C.rotation_matrix(A) * d5.to_matrix(A) * \
C.rotation_matrix(A).T, d5.to_matrix(C)):
assert (expected - actual).simplify() == 0
def test_dyadic_simplify():
x, y, z, k, n, m, w, f, s, A = symbols('x, y, z, k, n, m, w, f, s, A')
N = CoordSysCartesian('N')
dy = N.i | N.i
test1 = (1 / x + 1 / y) * dy
assert (N.i & test1 & N.i) != (x + y) / (x * y)
test1 = test1.simplify()
assert test1.simplify() == simplify(test1)
assert (N.i & test1 & N.i) == (x + y) / (x * y)
test2 = (A**2 * s**4 / (4 * pi * k * m**3)) * dy
test2 = test2.simplify()
assert (N.i & test2 & N.i) == (A**2 * s**4 / (4 * pi * k * m**3))
test3 = ((4 + 4 * x - 2 * (2 + 2 * x)) / (2 + 2 * x)) * dy
test3 = test3.simplify()
assert (N.i & test3 & N.i) == 0
test4 = ((-4 * x * y**2 - 2 * y**3 - 2 * x**2 * y) / (x + y)**2) * dy
test4 = test4.simplify()
assert (N.i & test4 & N.i) == -2 * y
| bsd-3-clause | -360,962,418,185,004,100 | 36.054545 | 80 | 0.467615 | false |
mkheirkhah/mptcp | src/config-store/bindings/modulegen__gcc_ILP32.py | 36 | 68421 | from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
import pybindgen.settings
import warnings
class ErrorHandler(pybindgen.settings.ErrorHandler):
def handle_error(self, wrapper, exception, traceback_):
warnings.warn("exception %r in wrapper %s" % (exception, wrapper))
return True
pybindgen.settings.error_handler = ErrorHandler()
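# With this handler installed, wrappers that fail to generate only emit a
# warning (see handle_error above) instead of aborting the whole module build.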
import sys
def module_init():
root_module = Module('ns.config_store', cpp_namespace='::ns3')
return root_module
def register_types(module):
root_module = module.get_root()
## callback.h (module 'core'): ns3::CallbackBase [class]
module.add_class('CallbackBase', import_from_module='ns.core')
## file-config.h (module 'config-store'): ns3::FileConfig [class]
module.add_class('FileConfig', allow_subclassing=True)
## gtk-config-store.h (module 'config-store'): ns3::GtkConfigStore [class]
module.add_class('GtkConfigStore')
## hash.h (module 'core'): ns3::Hasher [class]
module.add_class('Hasher', import_from_module='ns.core')
## file-config.h (module 'config-store'): ns3::NoneFileConfig [class]
module.add_class('NoneFileConfig', parent=root_module['ns3::FileConfig'])
## object-base.h (module 'core'): ns3::ObjectBase [class]
module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId [class]
module.add_class('TypeId', import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration]
module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct]
module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct]
module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
## empty.h (module 'core'): ns3::empty [class]
module.add_class('empty', import_from_module='ns.core')
## config-store.h (module 'config-store'): ns3::ConfigStore [class]
module.add_class('ConfigStore', parent=root_module['ns3::ObjectBase'])
## config-store.h (module 'config-store'): ns3::ConfigStore::Mode [enumeration]
module.add_enum('Mode', ['LOAD', 'SAVE', 'NONE'], outer_class=root_module['ns3::ConfigStore'])
## config-store.h (module 'config-store'): ns3::ConfigStore::FileFormat [enumeration]
module.add_enum('FileFormat', ['XML', 'RAW_TEXT'], outer_class=root_module['ns3::ConfigStore'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Hash::Implementation>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class]
module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
## attribute.h (module 'core'): ns3::AttributeAccessor [class]
module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
## attribute.h (module 'core'): ns3::AttributeChecker [class]
module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
## attribute.h (module 'core'): ns3::AttributeValue [class]
module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
## callback.h (module 'core'): ns3::CallbackChecker [class]
module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## callback.h (module 'core'): ns3::CallbackImplBase [class]
module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
## callback.h (module 'core'): ns3::CallbackValue [class]
module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## attribute.h (module 'core'): ns3::EmptyAttributeValue [class]
module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## type-id.h (module 'core'): ns3::TypeIdChecker [class]
module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## type-id.h (module 'core'): ns3::TypeIdValue [class]
module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## Register a nested module for the namespace FatalImpl
nested_module = module.add_cpp_namespace('FatalImpl')
register_types_ns3_FatalImpl(nested_module)
## Register a nested module for the namespace Hash
nested_module = module.add_cpp_namespace('Hash')
register_types_ns3_Hash(nested_module)
def register_types_ns3_FatalImpl(module):
root_module = module.get_root()
def register_types_ns3_Hash(module):
root_module = module.get_root()
## hash-function.h (module 'core'): ns3::Hash::Implementation [class]
module.add_class('Implementation', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
typehandlers.add_type_alias('uint32_t ( * ) ( char const *, size_t ) *', 'ns3::Hash::Hash32Function_ptr')
typehandlers.add_type_alias('uint32_t ( * ) ( char const *, size_t ) **', 'ns3::Hash::Hash32Function_ptr*')
typehandlers.add_type_alias('uint32_t ( * ) ( char const *, size_t ) *&', 'ns3::Hash::Hash32Function_ptr&')
typehandlers.add_type_alias('uint64_t ( * ) ( char const *, size_t ) *', 'ns3::Hash::Hash64Function_ptr')
typehandlers.add_type_alias('uint64_t ( * ) ( char const *, size_t ) **', 'ns3::Hash::Hash64Function_ptr*')
typehandlers.add_type_alias('uint64_t ( * ) ( char const *, size_t ) *&', 'ns3::Hash::Hash64Function_ptr&')
## Register a nested module for the namespace Function
nested_module = module.add_cpp_namespace('Function')
register_types_ns3_Hash_Function(nested_module)
def register_types_ns3_Hash_Function(module):
root_module = module.get_root()
## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a [class]
module.add_class('Fnv1a', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash32 [class]
module.add_class('Hash32', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash64 [class]
module.add_class('Hash64', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3 [class]
module.add_class('Murmur3', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
def register_methods(root_module):
register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
register_Ns3FileConfig_methods(root_module, root_module['ns3::FileConfig'])
register_Ns3GtkConfigStore_methods(root_module, root_module['ns3::GtkConfigStore'])
register_Ns3Hasher_methods(root_module, root_module['ns3::Hasher'])
register_Ns3NoneFileConfig_methods(root_module, root_module['ns3::NoneFileConfig'])
register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
register_Ns3ConfigStore_methods(root_module, root_module['ns3::ConfigStore'])
register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
register_Ns3HashImplementation_methods(root_module, root_module['ns3::Hash::Implementation'])
register_Ns3HashFunctionFnv1a_methods(root_module, root_module['ns3::Hash::Function::Fnv1a'])
register_Ns3HashFunctionHash32_methods(root_module, root_module['ns3::Hash::Function::Hash32'])
register_Ns3HashFunctionHash64_methods(root_module, root_module['ns3::Hash::Function::Hash64'])
register_Ns3HashFunctionMurmur3_methods(root_module, root_module['ns3::Hash::Function::Murmur3'])
return
def register_Ns3CallbackBase_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')])
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function]
cls.add_method('GetImpl',
'ns3::Ptr< ns3::CallbackImplBase >',
[],
is_const=True)
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')],
visibility='protected')
## callback.h (module 'core'): static std::string ns3::CallbackBase::Demangle(std::string const & mangled) [member function]
cls.add_method('Demangle',
'std::string',
[param('std::string const &', 'mangled')],
is_static=True, visibility='protected')
return
def register_Ns3FileConfig_methods(root_module, cls):
## file-config.h (module 'config-store'): ns3::FileConfig::FileConfig() [constructor]
cls.add_constructor([])
## file-config.h (module 'config-store'): ns3::FileConfig::FileConfig(ns3::FileConfig const & arg0) [copy constructor]
cls.add_constructor([param('ns3::FileConfig const &', 'arg0')])
## file-config.h (module 'config-store'): void ns3::FileConfig::Attributes() [member function]
cls.add_method('Attributes',
'void',
[],
is_pure_virtual=True, is_virtual=True)
## file-config.h (module 'config-store'): void ns3::FileConfig::Default() [member function]
cls.add_method('Default',
'void',
[],
is_pure_virtual=True, is_virtual=True)
## file-config.h (module 'config-store'): void ns3::FileConfig::Global() [member function]
cls.add_method('Global',
'void',
[],
is_pure_virtual=True, is_virtual=True)
## file-config.h (module 'config-store'): void ns3::FileConfig::SetFilename(std::string filename) [member function]
cls.add_method('SetFilename',
'void',
[param('std::string', 'filename')],
is_pure_virtual=True, is_virtual=True)
return
def register_Ns3GtkConfigStore_methods(root_module, cls):
## gtk-config-store.h (module 'config-store'): ns3::GtkConfigStore::GtkConfigStore(ns3::GtkConfigStore const & arg0) [copy constructor]
cls.add_constructor([param('ns3::GtkConfigStore const &', 'arg0')])
## gtk-config-store.h (module 'config-store'): ns3::GtkConfigStore::GtkConfigStore() [constructor]
cls.add_constructor([])
## gtk-config-store.h (module 'config-store'): void ns3::GtkConfigStore::ConfigureAttributes() [member function]
cls.add_method('ConfigureAttributes',
'void',
[])
## gtk-config-store.h (module 'config-store'): void ns3::GtkConfigStore::ConfigureDefaults() [member function]
cls.add_method('ConfigureDefaults',
'void',
[])
return
def register_Ns3Hasher_methods(root_module, cls):
## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Hasher const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hasher const &', 'arg0')])
## hash.h (module 'core'): ns3::Hasher::Hasher() [constructor]
cls.add_constructor([])
## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Ptr<ns3::Hash::Implementation> hp) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::Hash::Implementation >', 'hp')])
## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')])
## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(std::string const s) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('std::string const', 's')])
## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')])
## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(std::string const s) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('std::string const', 's')])
## hash.h (module 'core'): ns3::Hasher & ns3::Hasher::clear() [member function]
cls.add_method('clear',
'ns3::Hasher &',
[])
return
def register_Ns3NoneFileConfig_methods(root_module, cls):
## file-config.h (module 'config-store'): ns3::NoneFileConfig::NoneFileConfig(ns3::NoneFileConfig const & arg0) [copy constructor]
cls.add_constructor([param('ns3::NoneFileConfig const &', 'arg0')])
## file-config.h (module 'config-store'): ns3::NoneFileConfig::NoneFileConfig() [constructor]
cls.add_constructor([])
## file-config.h (module 'config-store'): void ns3::NoneFileConfig::Attributes() [member function]
cls.add_method('Attributes',
'void',
[],
is_virtual=True)
## file-config.h (module 'config-store'): void ns3::NoneFileConfig::Default() [member function]
cls.add_method('Default',
'void',
[],
is_virtual=True)
## file-config.h (module 'config-store'): void ns3::NoneFileConfig::Global() [member function]
cls.add_method('Global',
'void',
[],
is_virtual=True)
## file-config.h (module 'config-store'): void ns3::NoneFileConfig::SetFilename(std::string filename) [member function]
cls.add_method('SetFilename',
'void',
[param('std::string', 'filename')],
is_virtual=True)
return
def register_Ns3ObjectBase_methods(root_module, cls):
## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor]
cls.add_constructor([])
## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')])
## object-base.h (module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function]
cls.add_method('GetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
is_const=True)
## object-base.h (module 'core'): bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & attribute) const [member function]
cls.add_method('GetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'attribute')],
is_const=True)
## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function]
cls.add_method('ConstructSelf',
'void',
[param('ns3::AttributeConstructionList const &', 'attributes')],
visibility='protected')
## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function]
cls.add_method('NotifyConstructionCompleted',
'void',
[],
visibility='protected', is_virtual=True)
return
def register_Ns3TypeId_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor]
cls.add_constructor([param('char const *', 'name')])
## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [copy constructor]
cls.add_constructor([param('ns3::TypeId const &', 'o')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor) [member function]
cls.add_method('AddTraceSource',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(uint32_t i) const [member function]
cls.add_method('GetAttribute',
'ns3::TypeId::AttributeInformation',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(uint32_t i) const [member function]
cls.add_method('GetAttributeFullName',
'std::string',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): uint32_t ns3::TypeId::GetAttributeN() const [member function]
cls.add_method('GetAttributeN',
'uint32_t',
[],
is_const=True)
## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase*,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ns3::TypeId::GetConstructor() const [member function]
cls.add_method('GetConstructor',
'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function]
cls.add_method('GetGroupName',
'std::string',
[],
is_const=True)
## type-id.h (module 'core'): uint32_t ns3::TypeId::GetHash() const [member function]
cls.add_method('GetHash',
'uint32_t',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function]
cls.add_method('GetName',
'std::string',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function]
cls.add_method('GetParent',
'ns3::TypeId',
[],
is_const=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint32_t i) [member function]
cls.add_method('GetRegistered',
'ns3::TypeId',
[param('uint32_t', 'i')],
is_static=True)
## type-id.h (module 'core'): static uint32_t ns3::TypeId::GetRegisteredN() [member function]
cls.add_method('GetRegisteredN',
'uint32_t',
[],
is_static=True)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(uint32_t i) const [member function]
cls.add_method('GetTraceSource',
'ns3::TypeId::TraceSourceInformation',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): uint32_t ns3::TypeId::GetTraceSourceN() const [member function]
cls.add_method('GetTraceSourceN',
'uint32_t',
[],
is_const=True)
## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function]
cls.add_method('GetUid',
'uint16_t',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function]
cls.add_method('HasConstructor',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function]
cls.add_method('HasParent',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function]
cls.add_method('HideFromDocumentation',
'ns3::TypeId',
[])
## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function]
cls.add_method('IsChildOf',
'bool',
[param('ns3::TypeId', 'other')],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function]
cls.add_method('LookupAttributeByName',
'bool',
[param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info', transfer_ownership=False)],
is_const=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByHash(uint32_t hash) [member function]
cls.add_method('LookupByHash',
'ns3::TypeId',
[param('uint32_t', 'hash')],
is_static=True)
## type-id.h (module 'core'): static bool ns3::TypeId::LookupByHashFailSafe(uint32_t hash, ns3::TypeId * tid) [member function]
cls.add_method('LookupByHashFailSafe',
'bool',
[param('uint32_t', 'hash'), param('ns3::TypeId *', 'tid')],
is_static=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function]
cls.add_method('LookupByName',
'ns3::TypeId',
[param('std::string', 'name')],
is_static=True)
## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function]
cls.add_method('LookupTraceSourceByName',
'ns3::Ptr< ns3::TraceSourceAccessor const >',
[param('std::string', 'name')],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function]
cls.add_method('MustHideFromDocumentation',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(uint32_t i, ns3::Ptr<ns3::AttributeValue const> initialValue) [member function]
cls.add_method('SetAttributeInitialValue',
'bool',
[param('uint32_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function]
cls.add_method('SetGroupName',
'ns3::TypeId',
[param('std::string', 'groupName')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function]
cls.add_method('SetParent',
'ns3::TypeId',
[param('ns3::TypeId', 'tid')])
## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t tid) [member function]
cls.add_method('SetUid',
'void',
[param('uint16_t', 'tid')])
return
def register_Ns3TypeIdAttributeInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation(ns3::TypeId::AttributeInformation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::checker [variable]
cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::flags [variable]
cls.add_instance_attribute('flags', 'uint32_t', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::initialValue [variable]
cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::originalInitialValue [variable]
cls.add_instance_attribute('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
return
def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation(ns3::TypeId::TraceSourceInformation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
return
def register_Ns3Empty_methods(root_module, cls):
## empty.h (module 'core'): ns3::empty::empty() [constructor]
cls.add_constructor([])
## empty.h (module 'core'): ns3::empty::empty(ns3::empty const & arg0) [copy constructor]
cls.add_constructor([param('ns3::empty const &', 'arg0')])
return
def register_Ns3ConfigStore_methods(root_module, cls):
## config-store.h (module 'config-store'): ns3::ConfigStore::ConfigStore(ns3::ConfigStore const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ConfigStore const &', 'arg0')])
## config-store.h (module 'config-store'): ns3::ConfigStore::ConfigStore() [constructor]
cls.add_constructor([])
## config-store.h (module 'config-store'): void ns3::ConfigStore::ConfigureAttributes() [member function]
cls.add_method('ConfigureAttributes',
'void',
[])
## config-store.h (module 'config-store'): void ns3::ConfigStore::ConfigureDefaults() [member function]
cls.add_method('ConfigureDefaults',
'void',
[])
## config-store.h (module 'config-store'): ns3::TypeId ns3::ConfigStore::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## config-store.h (module 'config-store'): static ns3::TypeId ns3::ConfigStore::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## config-store.h (module 'config-store'): void ns3::ConfigStore::SetFileFormat(ns3::ConfigStore::FileFormat format) [member function]
cls.add_method('SetFileFormat',
'void',
[param('ns3::ConfigStore::FileFormat', 'format')])
## config-store.h (module 'config-store'): void ns3::ConfigStore::SetFilename(std::string filename) [member function]
cls.add_method('SetFilename',
'void',
[param('std::string', 'filename')])
## config-store.h (module 'config-store'): void ns3::ConfigStore::SetMode(ns3::ConfigStore::Mode mode) [member function]
cls.add_method('SetMode',
'void',
[param('ns3::ConfigStore::Mode', 'mode')])
return
def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter< ns3::Hash::Implementation > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3TraceSourceAccessor_methods(root_module, cls):
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor(ns3::TraceSourceAccessor const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')])
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor() [constructor]
cls.add_constructor([])
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Connect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Connect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::ConnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('ConnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Disconnect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Disconnect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::DisconnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('DisconnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeAccessor_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor(ns3::AttributeAccessor const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::AttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function]
cls.add_method('Get',
'bool',
[param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasGetter() const [member function]
cls.add_method('HasGetter',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasSetter() const [member function]
cls.add_method('HasSetter',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
cls.add_method('Set',
'bool',
[param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeChecker_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker(ns3::AttributeChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::AttributeChecker::Check(ns3::AttributeValue const & value) const [member function]
cls.add_method('Check',
'bool',
[param('ns3::AttributeValue const &', 'value')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
cls.add_method('Copy',
'bool',
[param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::CreateValidValue(ns3::AttributeValue const & value) const [member function]
cls.add_method('CreateValidValue',
'ns3::Ptr< ns3::AttributeValue >',
[param('ns3::AttributeValue const &', 'value')],
is_const=True)
## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetUnderlyingTypeInformation() const [member function]
cls.add_method('GetUnderlyingTypeInformation',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetValueTypeName() const [member function]
cls.add_method('GetValueTypeName',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeChecker::HasUnderlyingTypeInformation() const [member function]
cls.add_method('HasUnderlyingTypeInformation',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeValue_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue(ns3::AttributeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_pure_virtual=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::AttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3CallbackChecker_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker(ns3::CallbackChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')])
return
def register_Ns3CallbackImplBase_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase(ns3::CallbackImplBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
## callback.h (module 'core'): bool ns3::CallbackImplBase::IsEqual(ns3::Ptr<ns3::CallbackImplBase const> other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3CallbackValue_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')])
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackBase const & base) [constructor]
cls.add_constructor([param('ns3::CallbackBase const &', 'base')])
## callback.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::CallbackValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## callback.h (module 'core'): bool ns3::CallbackValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## callback.h (module 'core'): std::string ns3::CallbackValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## callback.h (module 'core'): void ns3::CallbackValue::Set(ns3::CallbackBase base) [member function]
cls.add_method('Set',
'void',
[param('ns3::CallbackBase', 'base')])
return
def register_Ns3EmptyAttributeValue_methods(root_module, cls):
## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue(ns3::EmptyAttributeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')])
## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, visibility='private', is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
visibility='private', is_virtual=True)
## attribute.h (module 'core'): std::string ns3::EmptyAttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, visibility='private', is_virtual=True)
return
def register_Ns3TypeIdChecker_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker(ns3::TypeIdChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')])
return
def register_Ns3TypeIdValue_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeIdValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeId const & value) [constructor]
cls.add_constructor([param('ns3::TypeId const &', 'value')])
## type-id.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TypeIdValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## type-id.h (module 'core'): bool ns3::TypeIdValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeIdValue::Get() const [member function]
cls.add_method('Get',
'ns3::TypeId',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeIdValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## type-id.h (module 'core'): void ns3::TypeIdValue::Set(ns3::TypeId const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::TypeId const &', 'value')])
return
def register_Ns3HashImplementation_methods(root_module, cls):
## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation(ns3::Hash::Implementation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Implementation const &', 'arg0')])
## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation() [constructor]
cls.add_constructor([])
## hash-function.h (module 'core'): uint32_t ns3::Hash::Implementation::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_pure_virtual=True, is_virtual=True)
## hash-function.h (module 'core'): uint64_t ns3::Hash::Implementation::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): void ns3::Hash::Implementation::clear() [member function]
cls.add_method('clear',
'void',
[],
is_pure_virtual=True, is_virtual=True)
return
def register_Ns3HashFunctionFnv1a_methods(root_module, cls):
## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a(ns3::Hash::Function::Fnv1a const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Function::Fnv1a const &', 'arg0')])
## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a() [constructor]
cls.add_constructor([])
## hash-fnv.h (module 'core'): uint32_t ns3::Hash::Function::Fnv1a::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-fnv.h (module 'core'): uint64_t ns3::Hash::Function::Fnv1a::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-fnv.h (module 'core'): void ns3::Hash::Function::Fnv1a::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_Ns3HashFunctionHash32_methods(root_module, cls):
## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Function::Hash32 const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Function::Hash32 const &', 'arg0')])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Hash32Function_ptr hp) [constructor]
cls.add_constructor([param('ns3::Hash::Hash32Function_ptr', 'hp')])
## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash32::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): void ns3::Hash::Function::Hash32::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_Ns3HashFunctionHash64_methods(root_module, cls):
## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Function::Hash64 const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Function::Hash64 const &', 'arg0')])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Hash64Function_ptr hp) [constructor]
cls.add_constructor([param('ns3::Hash::Hash64Function_ptr', 'hp')])
## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash64::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): uint64_t ns3::Hash::Function::Hash64::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): void ns3::Hash::Function::Hash64::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_Ns3HashFunctionMurmur3_methods(root_module, cls):
## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3(ns3::Hash::Function::Murmur3 const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Function::Murmur3 const &', 'arg0')])
## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3() [constructor]
cls.add_constructor([])
## hash-murmur3.h (module 'core'): uint32_t ns3::Hash::Function::Murmur3::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-murmur3.h (module 'core'): uint64_t ns3::Hash::Function::Murmur3::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-murmur3.h (module 'core'): void ns3::Hash::Function::Murmur3::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_functions(root_module):
module = root_module
register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module)
register_functions_ns3_Hash(module.get_submodule('Hash'), root_module)
return
def register_functions_ns3_FatalImpl(module, root_module):
return
def register_functions_ns3_Hash(module, root_module):
register_functions_ns3_Hash_Function(module.get_submodule('Function'), root_module)
return
def register_functions_ns3_Hash_Function(module, root_module):
return
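# Driver: module_init() creates the module description, the register_* passes
# above populate it with types, methods and free functions, and generate()
# writes the resulting binding code to stdout through the FileCodeSink.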
def main():
out = FileCodeSink(sys.stdout)
root_module = module_init()
register_types(root_module)
register_methods(root_module)
register_functions(root_module)
root_module.generate(out)
if __name__ == '__main__':
main()
| gpl-2.0 | -7,992,585,036,314,678,000 | 66.945382 | 383 | 0.641747 | false |
cisc474/board_game_app | api/fetch_desc_bs4.py | 2 | 3669 | #importing the libraries I want, an sqlite interface, a url requester, and an XML parsing library
import sqlite3, urllib2, bs4 as BeautifulSoup
#sudo apt-get install python-bs4
#That's how I was able to install the latest beautiful soup
#this bit I have to google search every time.
con = sqlite3.connect('bgg.sqlite')
cur = con.cursor()
#this gets an array of responses, not in the best format, I made all of this by
#FIRST going into a python session then playing with the results until I liked it
cur.execute('select objectid as id from games')
data = cur.fetchall()
#con.commit will make sure the database is saved and the next "transaction" can begin
con.commit()
#Note the "create table if not exists" part, that's so I can run this script over and over
#even after I've had to debug
cur.execute('create table if not exists extra (objectid integer primary key, description text, thumbnail text, image text, categories text)')
con.commit()
#this is a simple way of cleaning up the data that came out of my earlier query
#I use this lambda trick often in python, it's a simple one-off function which
#"returns" the part after the : on each element in data
gameids = map(lambda x: x[0], data)
#a simple Python Class just to be classy
class DescRow:
def create_sql(self):
return ["insert or replace into extra (objectid, description, thumbnail, image, categories) values (?, ?, ?, ?, ?)", (self.objectid, self.description, self.thumbnail, self.image, self.categories)]
#another simple procedure to hit the BGG API in the right way
def url_gen(gameid):
return "http://www.boardgamegeek.com/xmlapi2/thing?stats=1&id=%s" % gameid
#one main game look up
def fetch_game_data(gameid):
#this gets the server response from the BGG API
response = urllib2.urlopen(url_gen(gameid))
#this saves the text response in one long string
xml = response.read()
#this creates a beautifulsoup tree out of the xml
#I used to do all of this by hand using regular expressions
#Now I dig using beautifulsoup to parse my webpages and xml responses
bs_tree = BeautifulSoup.BeautifulSoup(xml)
game_data = DescRow()
game_data.objectid = gameid
#This is where I really needed a "by-hand" example to get it right but
# this will go to the first "description" tag and return the contents as a string
game_data.description = bs_tree.find('description').text
#ditto for "thumbnail" and "image" after that
game_data.thumbnail = bs_tree.find('thumbnail').text
game_data.image = bs_tree.find('image').text
#this is my way of making a category data set for the DB
#if someone is in a particular mood, party games or card games or whatever
#this might do the job
game_data.categories = " @@ ".join(map(lambda x: x.attrs['value'], bs_tree.find_all('link', attrs={"type": "boardgamecategory"})))
return game_data
#this procedure does one row creation transaction
def create_row(gameid, cursor, con):
game_data = fetch_game_data(gameid)
sql_query = game_data.create_sql()
cursor.execute(sql_query[0], sql_query[1])
con.commit()
return game_data
import time
#I used this while debugging, I'll leave it here to show that I am very human.
errors = []
#This is the main program, it goes through each gameid and creates a row
#I put a 1 second pause between each command because I was having the API
#cut me off or get backed up, this worked for making sure the API was friendly to me
for gameid in gameids:
gdata = create_row(gameid, cur, con)
time.sleep(1)
#this is just so I know that things were working
print gdata.objectid, gdata.categories
| mit | -3,468,314,654,871,151,600 | 42.678571 | 204 | 0.724721 | false |
dfm/arxiv2speech | arxiv2speech.py | 1 | 3104 | #!/usr/bin/env python
from __future__ import print_function, absolute_import, unicode_literals
__all__ = ["run"]
__version__ = "0.0.4"
__author__ = "Dan Foreman-Mackey ([email protected])"
__copyright__ = "Copyright 2013 Daniel Foreman-Mackey"
__contributors__ = []
import os
import re
import json
import shutil
import subprocess
from multiprocessing import Pool
import feedparser
from html2text import html2text
# Regular expressions.
id_re = re.compile(r"http://arxiv.org/abs/(.*)")
title_re = re.compile(r"(.*) \(arXiv(?:.*?)\)$")
author_re = re.compile(r"<a href=\"(?:.*?)\">(.*?)</a>")
def run(basedir, url="http://export.arxiv.org/rss/astro-ph",
clobber=False, quiet=False, limit=None):
# Make the base directory.
try:
os.makedirs(basedir)
except:
if not clobber:
raise
shutil.rmtree(basedir)
os.makedirs(basedir)
# Fetch the abstracts.
if not quiet:
print("Fetching recent abstracts from: {0}".format(url))
abstracts = get_recent(url)
if not quiet:
print(" ... Found {0} abstracts.".format(len(abstracts)))
if limit is not None:
print("Limiting to {0} total.".format(limit))
abstracts = abstracts[:int(limit)]
if not quiet:
print("Saving audio files (slowly) in: {0}".format(basedir))
p = Pool()
p.map(_run_one, zip([basedir] * len(abstracts), abstracts))
if not quiet:
print(" ... Done.")
def _run_one(args):
basedir, abstract = args
# Create the directory for the audio files.
basedir = os.path.join(basedir, abstract["id"])
os.makedirs(basedir)
# Save the metadata.
json.dump(abstract, open(os.path.join(basedir, "info.json"), "w"),
sort_keys=True, indent=4, separators=(",", ": "))
# Save the audio files.
by = "\n\nBy: " + abstract["authors"][0]
l = len(abstract["authors"])
if l == 2:
by += " and " + abstract["authors"][1]
elif l > 2:
by += " and {0} others.".format(l - 1)
r = text2audio(abstract["title"] + by,
os.path.join(basedir, "brief.m4a"))
assert r == 0, "Couldn't save brief for: {0}".format(abstract["id"])
r = text2audio(", ".join(abstract["authors"]),
os.path.join(basedir, "authors.m4a"))
assert r == 0, "Couldn't save authors for: {0}".format(abstract["id"])
r = text2audio(abstract["abstract"], os.path.join(basedir, "abstract.m4a"))
assert r == 0, "Couldn't save abstract for: {0}".format(abstract["id"])
def get_recent(rss_url):
d = feedparser.parse(rss_url)
results = []
for e in d.entries:
results.append({
"id": id_re.findall(e.id)[0],
"title": title_re.findall(e.title)[0],
"authors": author_re.findall(e.author),
"abstract": html2text(e.summary),
})
return results
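# Each entry in the returned list is a dict with the keys "id", "title",
# "authors" (a list of names) and "abstract".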
def text2audio(text, filename):
p = subprocess.Popen(["say", "-o", filename],
stdin=subprocess.PIPE)
p.communicate(text)
code = p.wait()
return code
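# Example invocation (illustrative only; assumes macOS, where the `say` command
# used by text2audio is available, and an arbitrary output directory):
#     run("./astro-ph-audio", clobber=True, limit=5)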
| bsd-2-clause | 1,793,591,172,441,115,100 | 27.218182 | 79 | 0.58183 | false |
valentin-krasontovitsch/ansible | test/units/parsing/test_metadata.py | 125 | 10000 | # coding: utf-8
# (c) 2017, Toshio Kuratomi <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ast
import pytest
from ansible.parsing import metadata as md
LICENSE = b"""# some license text boilerplate
# That we have at the top of files
"""
FUTURE_IMPORTS = b"""
from __future__ import (absolute_import, division, print_function)
"""
REGULAR_IMPORTS = b"""
import test
from foo import bar
"""
STANDARD_METADATA = b"""
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
"""
TEXT_STD_METADATA = b"""
ANSIBLE_METADATA = u'''
metadata_version: '1.1'
status:
- 'stableinterface'
supported_by: 'core'
'''
"""
BYTES_STD_METADATA = b"""
ANSIBLE_METADATA = b'''
metadata_version: '1.1'
status:
- 'stableinterface'
supported_by: 'core'
'''
"""
TRAILING_COMMENT_METADATA = b"""
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'} # { Testing }
"""
MULTIPLE_STATEMENTS_METADATA = b"""
DOCUMENTATION = "" ; ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'} ; RETURNS = ""
"""
EMBEDDED_COMMENT_METADATA = b"""
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
# { Testing }
'supported_by': 'core'}
"""
HASH_SYMBOL_METADATA = b"""
ANSIBLE_METADATA = {'metadata_version': '1.1 # 4',
'status': ['stableinterface'],
'supported_by': 'core # Testing '}
"""
HASH_SYMBOL_METADATA = b"""
ANSIBLE_METADATA = {'metadata_version': '1.1 # 4',
'status': ['stableinterface'],
'supported_by': 'core # Testing '}
"""
HASH_COMBO_METADATA = b"""
ANSIBLE_METADATA = {'metadata_version': '1.1 # 4',
'status': ['stableinterface'],
# { Testing }
'supported_by': 'core'} # { Testing }
"""
METADATA = {'metadata_version': '1.1', 'status': ['stableinterface'], 'supported_by': 'core'}
HASH_SYMBOL_METADATA = {'metadata_version': '1.1 # 4', 'status': ['stableinterface'], 'supported_by': 'core'}
METADATA_EXAMPLES = (
# Standard import
(LICENSE + FUTURE_IMPORTS + STANDARD_METADATA + REGULAR_IMPORTS,
(METADATA, 5, 0, 7, 42, ['ANSIBLE_METADATA'])),
# Metadata at end of file
(LICENSE + FUTURE_IMPORTS + REGULAR_IMPORTS + STANDARD_METADATA.rstrip(),
(METADATA, 8, 0, 10, 42, ['ANSIBLE_METADATA'])),
# Metadata at beginning of file
(STANDARD_METADATA + LICENSE + REGULAR_IMPORTS,
(METADATA, 1, 0, 3, 42, ['ANSIBLE_METADATA'])),
# Standard import with a trailing comment
(LICENSE + FUTURE_IMPORTS + TRAILING_COMMENT_METADATA + REGULAR_IMPORTS,
(METADATA, 5, 0, 7, 42, ['ANSIBLE_METADATA'])),
# Metadata at end of file with a trailing comment
(LICENSE + FUTURE_IMPORTS + REGULAR_IMPORTS + TRAILING_COMMENT_METADATA.rstrip(),
(METADATA, 8, 0, 10, 42, ['ANSIBLE_METADATA'])),
# Metadata at beginning of file with a trailing comment
(TRAILING_COMMENT_METADATA + LICENSE + REGULAR_IMPORTS,
(METADATA, 1, 0, 3, 42, ['ANSIBLE_METADATA'])),
# FIXME: Current code cannot handle multiple statements on the same line.
# This is bad style so we're just going to ignore it for now
# Standard import with other statements on the same line
# (LICENSE + FUTURE_IMPORTS + MULTIPLE_STATEMENTS_METADATA + REGULAR_IMPORTS,
# (METADATA, 5, 0, 7, 42, ['ANSIBLE_METADATA'])),
# Metadata at end of file with other statements on the same line
# (LICENSE + FUTURE_IMPORTS + REGULAR_IMPORTS + MULTIPLE_STATEMENTS_METADATA.rstrip(),
# (METADATA, 8, 0, 10, 42, ['ANSIBLE_METADATA'])),
# Metadata at beginning of file with other statements on the same line
# (MULTIPLE_STATEMENTS_METADATA + LICENSE + REGULAR_IMPORTS,
# (METADATA, 1, 0, 3, 42, ['ANSIBLE_METADATA'])),
# Standard import with comment inside the metadata
(LICENSE + FUTURE_IMPORTS + EMBEDDED_COMMENT_METADATA + REGULAR_IMPORTS,
(METADATA, 5, 0, 8, 42, ['ANSIBLE_METADATA'])),
# Metadata at end of file with comment inside the metadata
(LICENSE + FUTURE_IMPORTS + REGULAR_IMPORTS + EMBEDDED_COMMENT_METADATA.rstrip(),
(METADATA, 8, 0, 11, 42, ['ANSIBLE_METADATA'])),
# Metadata at beginning of file with comment inside the metadata
(EMBEDDED_COMMENT_METADATA + LICENSE + REGULAR_IMPORTS,
(METADATA, 1, 0, 4, 42, ['ANSIBLE_METADATA'])),
# FIXME: Current code cannot handle hash symbols in the last element of
# the metadata. Fortunately, the metadata currently fully specifies all
# the strings inside of metadata and none of them can contain a hash.
# Need to fix this to future-proof it against strings containing hashes
# Standard import with hash symbol in metadata
# (LICENSE + FUTURE_IMPORTS + HASH_SYMBOL_METADATA + REGULAR_IMPORTS,
# (HASH_SYMBOL_METADATA, 5, 0, 7, 53, ['ANSIBLE_METADATA'])),
# Metadata at end of file with hash symbol in metadata
# (LICENSE + FUTURE_IMPORTS + REGULAR_IMPORTS + HASH_SYMBOL_HASH_SYMBOL_METADATA.rstrip(),
# (HASH_SYMBOL_METADATA, 8, 0, 10, 53, ['ANSIBLE_METADATA'])),
# Metadata at beginning of file with hash symbol in metadata
# (HASH_SYMBOL_HASH_SYMBOL_METADATA + LICENSE + REGULAR_IMPORTS,
# (HASH_SYMBOL_METADATA, 1, 0, 3, 53, ['ANSIBLE_METADATA'])),
# Standard import with a bunch of hashes everywhere
(LICENSE + FUTURE_IMPORTS + HASH_COMBO_METADATA + REGULAR_IMPORTS,
(HASH_SYMBOL_METADATA, 5, 0, 8, 42, ['ANSIBLE_METADATA'])),
# Metadata at end of file with a bunch of hashes everywhere
(LICENSE + FUTURE_IMPORTS + REGULAR_IMPORTS + HASH_COMBO_METADATA.rstrip(),
(HASH_SYMBOL_METADATA, 8, 0, 11, 42, ['ANSIBLE_METADATA'])),
# Metadata at beginning of file with a bunch of hashes everywhere
(HASH_COMBO_METADATA + LICENSE + REGULAR_IMPORTS,
(HASH_SYMBOL_METADATA, 1, 0, 4, 42, ['ANSIBLE_METADATA'])),
# Standard import with a junk ANSIBLE_METADATA as well
(LICENSE + FUTURE_IMPORTS + b"\nANSIBLE_METADATA = 10\n" + HASH_COMBO_METADATA + REGULAR_IMPORTS,
(HASH_SYMBOL_METADATA, 7, 0, 10, 42, ['ANSIBLE_METADATA'])),
)
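# Each expected value is the tuple returned by extract_metadata() with
# offsets=True: (metadata, start_line, start_col, end_line, end_col, targets).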
# FIXME: String/yaml metadata is not implemented yet. Need more test cases once it is implemented
STRING_METADATA_EXAMPLES = (
# Standard import
(LICENSE + FUTURE_IMPORTS + TEXT_STD_METADATA + REGULAR_IMPORTS,
(METADATA, 5, 0, 10, 3, ['ANSIBLE_METADATA'])),
# Metadata at end of file
(LICENSE + FUTURE_IMPORTS + REGULAR_IMPORTS + TEXT_STD_METADATA.rstrip(),
(METADATA, 8, 0, 13, 3, ['ANSIBLE_METADATA'])),
# Metadata at beginning of file
(TEXT_STD_METADATA + LICENSE + REGULAR_IMPORTS,
(METADATA, 1, 0, 6, 3, ['ANSIBLE_METADATA'])),
# Standard import
(LICENSE + FUTURE_IMPORTS + BYTES_STD_METADATA + REGULAR_IMPORTS,
(METADATA, 5, 0, 10, 3, ['ANSIBLE_METADATA'])),
# Metadata at end of file
(LICENSE + FUTURE_IMPORTS + REGULAR_IMPORTS + BYTES_STD_METADATA.rstrip(),
(METADATA, 8, 0, 13, 3, ['ANSIBLE_METADATA'])),
# Metadata at beginning of file
(BYTES_STD_METADATA + LICENSE + REGULAR_IMPORTS,
(METADATA, 1, 0, 6, 3, ['ANSIBLE_METADATA'])),
)
@pytest.mark.parametrize("code, expected", METADATA_EXAMPLES)
def test_dict_metadata(code, expected):
assert md.extract_metadata(module_data=code, offsets=True) == expected
@pytest.mark.parametrize("code, expected", STRING_METADATA_EXAMPLES)
def test_string_metadata(code, expected):
# FIXME: String/yaml metadata is not implemented yet.
with pytest.raises(NotImplementedError):
assert md.extract_metadata(module_data=code, offsets=True) == expected
def test_required_params():
with pytest.raises(TypeError, message='One of module_ast or module_data must be given'):
assert md.extract_metadata()
def test_module_data_param_given_with_offset():
with pytest.raises(TypeError, message='If offsets is True then module_data must also be given'):
assert md.extract_metadata(module_ast='something', offsets=True)
def test_invalid_dict_metadata():
with pytest.raises(SyntaxError):
assert md.extract_metadata(module_data=LICENSE + FUTURE_IMPORTS + b'ANSIBLE_METADATA={"metadata_version": "1.1",\n' + REGULAR_IMPORTS)
with pytest.raises(md.ParseError, message='Unable to find the end of dictionary'):
assert md.extract_metadata(module_ast=ast.parse(LICENSE + FUTURE_IMPORTS + b'ANSIBLE_METADATA={"metadata_version": "1.1"}\n' + REGULAR_IMPORTS),
module_data=LICENSE + FUTURE_IMPORTS + b'ANSIBLE_METADATA={"metadata_version": "1.1",\n' + REGULAR_IMPORTS,
offsets=True)
def test_multiple_statements_limitation():
with pytest.raises(md.ParseError, message='Multiple statements per line confuses the module metadata parser.'):
assert md.extract_metadata(module_data=LICENSE + FUTURE_IMPORTS + b'ANSIBLE_METADATA={"metadata_version": "1.1"}; a=b\n' + REGULAR_IMPORTS,
offsets=True)
| gpl-3.0 | -5,016,550,505,235,944,000 | 40.841004 | 152 | 0.6579 | false |
bitkeeper/python-opcua | opcua/server/binary_server_asyncio.py | 1 | 4546 | """
Socket server forwarding request to internal server
"""
import logging
try:
    # we prefer to use the bundled asyncio version, otherwise fall back to trollius
import asyncio
except ImportError:
import trollius as asyncio
from opcua import ua
from opcua.server.uaprocessor import UaProcessor
logger = logging.getLogger(__name__)
class BinaryServer(object):
def __init__(self, internal_server, hostname, port):
self.logger = logging.getLogger(__name__)
self.hostname = hostname
self.port = port
self.iserver = internal_server
self.loop = internal_server.loop
self._server = None
self._policies = []
def set_policies(self, policies):
self._policies = policies
def start(self):
class OPCUAProtocol(asyncio.Protocol):
"""
            instantiated for every connection
defined as internal class since it needs access
to the internal server object
FIXME: find another solution
"""
iserver = self.iserver
loop = self.loop
logger = self.logger
policies = self._policies
def connection_made(self, transport):
self.peername = transport.get_extra_info('peername')
self.logger.info('New connection from %s', self.peername)
self.transport = transport
self.processor = UaProcessor(self.iserver, self.transport)
self.processor.set_policies(self.policies)
self.data = b""
self.iserver.asyncio_transports.append(transport)
def connection_lost(self, ex):
self.logger.info('Lost connection from %s, %s', self.peername, ex)
self.transport.close()
self.iserver.asyncio_transports.remove(self.transport)
self.processor.close()
def data_received(self, data):
logger.debug("received %s bytes from socket", len(data))
if self.data:
data = self.data + data
self.data = b""
self._process_data(data)
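            # Reassembles complete OPC UA messages from the buffer: parse the
            # header, stash the bytes back into self.data while the body is
            # still incomplete, and hand every complete message to the processor.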
def _process_data(self, data):
buf = ua.utils.Buffer(data)
while True:
try:
backup_buf = buf.copy()
try:
hdr = ua.Header.from_string(buf)
except ua.utils.NotEnoughData:
logger.info("We did not receive enough data from client, waiting for more")
self.data = backup_buf.read(len(backup_buf))
return
if len(buf) < hdr.body_size:
logger.info("We did not receive enough data from client, waiting for more")
self.data = backup_buf.read(len(backup_buf))
return
ret = self.processor.process(hdr, buf)
if not ret:
logger.info("processor returned False, we close connection from %s", self.peername)
self.transport.close()
return
if len(buf) == 0:
return
except Exception:
logger.exception("Exception raised while parsing message from client, closing")
return
coro = self.loop.create_server(OPCUAProtocol, self.hostname, self.port)
self._server = self.loop.run_coro_and_wait(coro)
# get the port and the hostname from the created server socket
        # only relevant for dynamic port assignment (when self.port == 0)
if self.port == 0 and len(self._server.sockets) == 1:
# will work for AF_INET and AF_INET6 socket names
            # these are the only families supported by the create_server call
sockname = self._server.sockets[0].getsockname()
self.hostname = sockname[0]
self.port = sockname[1]
print('Listening on {0}:{1}'.format(self.hostname, self.port))
def stop(self):
self.logger.info("Closing asyncio socket server")
for transport in self.iserver.asyncio_transports:
transport.close()
if self._server:
self.loop.call_soon(self._server.close)
self.loop.run_coro_and_wait(self._server.wait_closed())
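# Minimal usage sketch (illustrative; assumes an internal server object that
# exposes the `loop` and `asyncio_transports` attributes used above):
#     binary_server = BinaryServer(internal_server, "0.0.0.0", 4840)
#     binary_server.set_policies([])
#     binary_server.start()
#     binary_server.stop()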
| lgpl-3.0 | -1,068,711,869,721,022,200 | 38.877193 | 111 | 0.544655 | false |
Duoxilian/home-assistant | homeassistant/const.py | 3 | 12028 | # coding: utf-8
"""Constants used by Home Assistant components."""
MAJOR_VERSION = 0
MINOR_VERSION = 39
PATCH_VERSION = '0.dev0'
__short_version__ = '{}.{}'.format(MAJOR_VERSION, MINOR_VERSION)
__version__ = '{}.{}'.format(__short_version__, PATCH_VERSION)
REQUIRED_PYTHON_VER = (3, 4, 2)
REQUIRED_PYTHON_VER_WIN = (3, 5, 2)
PROJECT_NAME = 'Home Assistant'
PROJECT_PACKAGE_NAME = 'homeassistant'
PROJECT_LICENSE = 'Apache License 2.0'
PROJECT_AUTHOR = 'The Home Assistant Authors'
PROJECT_COPYRIGHT = '© 2013, {}'.format(PROJECT_AUTHOR)
PROJECT_URL = 'https://home-assistant.io/'
PROJECT_EMAIL = '[email protected]'
PROJECT_DESCRIPTION = ('Open-source home automation platform '
'running on Python 3.')
PROJECT_LONG_DESCRIPTION = ('Home Assistant is an open-source '
'home automation platform running on Python 3. '
'Track and control all devices at home and '
'automate control. '
'Installation in less than a minute.')
PROJECT_CLASSIFIERS = [
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.4',
'Topic :: Home Automation'
]
PROJECT_GITHUB_USERNAME = 'home-assistant'
PROJECT_GITHUB_REPOSITORY = 'home-assistant'
PYPI_URL = 'https://pypi.python.org/pypi/{}'.format(PROJECT_PACKAGE_NAME)
GITHUB_PATH = '{}/{}'.format(PROJECT_GITHUB_USERNAME,
PROJECT_GITHUB_REPOSITORY)
GITHUB_URL = 'https://github.com/{}'.format(GITHUB_PATH)
PLATFORM_FORMAT = '{}.{}'
# Can be used to specify a catch all when registering state or event listeners.
MATCH_ALL = '*'
# If no name is specified
DEVICE_DEFAULT_NAME = 'Unnamed Device'
WEEKDAYS = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']
SUN_EVENT_SUNSET = 'sunset'
SUN_EVENT_SUNRISE = 'sunrise'
# #### CONFIG ####
CONF_ABOVE = 'above'
CONF_ACCESS_TOKEN = 'access_token'
CONF_AFTER = 'after'
CONF_ALIAS = 'alias'
CONF_API_KEY = 'api_key'
CONF_AUTHENTICATION = 'authentication'
CONF_BASE = 'base'
CONF_BEFORE = 'before'
CONF_BELOW = 'below'
CONF_BINARY_SENSORS = 'binary_sensors'
CONF_BLACKLIST = 'blacklist'
CONF_BRIGHTNESS = 'brightness'
CONF_CODE = 'code'
CONF_COLOR_TEMP = 'color_temp'
CONF_COMMAND = 'command'
CONF_COMMAND_CLOSE = 'command_close'
CONF_COMMAND_OFF = 'command_off'
CONF_COMMAND_ON = 'command_on'
CONF_COMMAND_OPEN = 'command_open'
CONF_COMMAND_STATE = 'command_state'
CONF_COMMAND_STOP = 'command_stop'
CONF_CONDITION = 'condition'
CONF_COVERS = 'covers'
CONF_CUSTOMIZE = 'customize'
CONF_CUSTOMIZE_DOMAIN = 'customize_domain'
CONF_CUSTOMIZE_GLOB = 'customize_glob'
CONF_DEVICE = 'device'
CONF_DEVICE_CLASS = 'device_class'
CONF_DEVICES = 'devices'
CONF_DISARM_AFTER_TRIGGER = 'disarm_after_trigger'
CONF_DISCOVERY = 'discovery'
CONF_DISPLAY_OPTIONS = 'display_options'
CONF_DOMAIN = 'domain'
CONF_DOMAINS = 'domains'
CONF_ELEVATION = 'elevation'
CONF_EMAIL = 'email'
CONF_ENTITIES = 'entities'
CONF_ENTITY_ID = 'entity_id'
CONF_ENTITY_NAMESPACE = 'entity_namespace'
CONF_EVENT = 'event'
CONF_EXCLUDE = 'exclude'
CONF_FILE_PATH = 'file_path'
CONF_FILENAME = 'filename'
CONF_FRIENDLY_NAME = 'friendly_name'
CONF_HEADERS = 'headers'
CONF_HOST = 'host'
CONF_HOSTS = 'hosts'
CONF_ICON = 'icon'
CONF_INCLUDE = 'include'
CONF_ID = 'id'
CONF_LATITUDE = 'latitude'
CONF_LONGITUDE = 'longitude'
CONF_MAC = 'mac'
CONF_METHOD = 'method'
CONF_MINIMUM = 'minimum'
CONF_MAXIMUM = 'maximum'
CONF_MONITORED_CONDITIONS = 'monitored_conditions'
CONF_MONITORED_VARIABLES = 'monitored_variables'
CONF_NAME = 'name'
CONF_OFFSET = 'offset'
CONF_OPTIMISTIC = 'optimistic'
CONF_PACKAGES = 'packages'
CONF_PASSWORD = 'password'
CONF_PATH = 'path'
CONF_PAYLOAD = 'payload'
CONF_PAYLOAD_OFF = 'payload_off'
CONF_PAYLOAD_ON = 'payload_on'
CONF_PENDING_TIME = 'pending_time'
CONF_PIN = 'pin'
CONF_PLATFORM = 'platform'
CONF_PORT = 'port'
CONF_PREFIX = 'prefix'
CONF_PROTOCOL = 'protocol'
CONF_QUOTE = 'quote'
CONF_RECIPIENT = 'recipient'
CONF_RESOURCE = 'resource'
CONF_RESOURCES = 'resources'
CONF_RGB = 'rgb'
CONF_SCAN_INTERVAL = 'scan_interval'
CONF_SENDER = 'sender'
CONF_SENSOR_CLASS = 'sensor_class'
CONF_SENSORS = 'sensors'
CONF_SSL = 'ssl'
CONF_STATE = 'state'
CONF_STRUCTURE = 'structure'
CONF_SWITCHES = 'switches'
CONF_TEMPERATURE_UNIT = 'temperature_unit'
CONF_TIME_ZONE = 'time_zone'
CONF_TIMEOUT = 'timeout'
CONF_TOKEN = 'token'
CONF_TRIGGER_TIME = 'trigger_time'
CONF_TYPE = 'type'
CONF_UNIT_OF_MEASUREMENT = 'unit_of_measurement'
CONF_UNIT_SYSTEM = 'unit_system'
CONF_URL = 'url'
CONF_USERNAME = 'username'
CONF_VALUE_TEMPLATE = 'value_template'
CONF_VERIFY_SSL = 'verify_ssl'
CONF_WEEKDAY = 'weekday'
CONF_WHITELIST = 'whitelist'
CONF_ZONE = 'zone'
# #### EVENTS ####
EVENT_HOMEASSISTANT_START = 'homeassistant_start'
EVENT_HOMEASSISTANT_STOP = 'homeassistant_stop'
EVENT_HOMEASSISTANT_CLOSE = 'homeassistant_close'
EVENT_STATE_CHANGED = 'state_changed'
EVENT_TIME_CHANGED = 'time_changed'
EVENT_CALL_SERVICE = 'call_service'
EVENT_SERVICE_EXECUTED = 'service_executed'
EVENT_PLATFORM_DISCOVERED = 'platform_discovered'
EVENT_COMPONENT_LOADED = 'component_loaded'
EVENT_SERVICE_REGISTERED = 'service_registered'
# #### STATES ####
STATE_ON = 'on'
STATE_OFF = 'off'
STATE_HOME = 'home'
STATE_NOT_HOME = 'not_home'
STATE_UNKNOWN = 'unknown'
STATE_OPEN = 'open'
STATE_CLOSED = 'closed'
STATE_PLAYING = 'playing'
STATE_PAUSED = 'paused'
STATE_IDLE = 'idle'
STATE_STANDBY = 'standby'
STATE_ALARM_DISARMED = 'disarmed'
STATE_ALARM_ARMED_HOME = 'armed_home'
STATE_ALARM_ARMED_AWAY = 'armed_away'
STATE_ALARM_PENDING = 'pending'
STATE_ALARM_TRIGGERED = 'triggered'
STATE_LOCKED = 'locked'
STATE_UNLOCKED = 'unlocked'
STATE_UNAVAILABLE = 'unavailable'
# #### STATE AND EVENT ATTRIBUTES ####
# Attribution
ATTR_ATTRIBUTION = 'attribution'
# Contains current time for a TIME_CHANGED event
ATTR_NOW = 'now'
# Contains domain, service for a SERVICE_CALL event
ATTR_DOMAIN = 'domain'
ATTR_SERVICE = 'service'
ATTR_SERVICE_DATA = 'service_data'
# Data for a SERVICE_EXECUTED event
ATTR_SERVICE_CALL_ID = 'service_call_id'
# Contains one string or a list of strings, each being an entity id
ATTR_ENTITY_ID = 'entity_id'
# String with a friendly name for the entity
ATTR_FRIENDLY_NAME = 'friendly_name'
# A picture to represent entity
ATTR_ENTITY_PICTURE = 'entity_picture'
# Icon to use in the frontend
ATTR_ICON = 'icon'
# The unit of measurement if applicable
ATTR_UNIT_OF_MEASUREMENT = 'unit_of_measurement'
CONF_UNIT_SYSTEM_METRIC = 'metric' # type: str
CONF_UNIT_SYSTEM_IMPERIAL = 'imperial' # type: str
# Temperature attribute
ATTR_TEMPERATURE = 'temperature'
TEMP_CELSIUS = '°C'
TEMP_FAHRENHEIT = '°F'
# Length units
LENGTH_CENTIMETERS = 'cm' # type: str
LENGTH_METERS = 'm' # type: str
LENGTH_KILOMETERS = 'km' # type: str
LENGTH_INCHES = 'in' # type: str
LENGTH_FEET = 'ft' # type: str
LENGTH_YARD = 'yd' # type: str
LENGTH_MILES = 'mi' # type: str
# Volume units
VOLUME_LITERS = 'L' # type: str
VOLUME_MILLILITERS = 'mL' # type: str
VOLUME_GALLONS = 'gal' # type: str
VOLUME_FLUID_OUNCE = 'fl. oz.' # type: str
# Mass units
MASS_GRAMS = 'g' # type: str
MASS_KILOGRAMS = 'kg' # type: str
MASS_OUNCES = 'oz' # type: str
MASS_POUNDS = 'lb' # type: str
# Contains the information that is discovered
ATTR_DISCOVERED = 'discovered'
# Location of the device/sensor
ATTR_LOCATION = 'location'
ATTR_BATTERY_LEVEL = 'battery_level'
ATTR_WAKEUP = 'wake_up_interval'
# For devices which support a code attribute
ATTR_CODE = 'code'
ATTR_CODE_FORMAT = 'code_format'
# For devices which support an armed state
ATTR_ARMED = 'device_armed'
# For devices which support a locked state
ATTR_LOCKED = 'locked'
# For sensors that support 'tripping', e.g. motion and door sensors
ATTR_TRIPPED = 'device_tripped'
# For sensors that support 'tripping' this holds the most recent
# time the device was tripped
ATTR_LAST_TRIP_TIME = 'last_tripped_time'
# For all entities, this holds whether or not it should be hidden
ATTR_HIDDEN = 'hidden'
# Location of the entity
ATTR_LATITUDE = 'latitude'
ATTR_LONGITUDE = 'longitude'
# Accuracy of location in meters
ATTR_GPS_ACCURACY = 'gps_accuracy'
# If state is assumed
ATTR_ASSUMED_STATE = 'assumed_state'
ATTR_STATE = 'state'
ATTR_OPTION = 'option'
# Bitfield of supported component features for the entity
ATTR_SUPPORTED_FEATURES = 'supported_features'
# Class of device within its domain
ATTR_DEVICE_CLASS = 'device_class'
# #### SERVICES ####
SERVICE_HOMEASSISTANT_STOP = 'stop'
SERVICE_HOMEASSISTANT_RESTART = 'restart'
SERVICE_TURN_ON = 'turn_on'
SERVICE_TURN_OFF = 'turn_off'
SERVICE_TOGGLE = 'toggle'
SERVICE_VOLUME_UP = 'volume_up'
SERVICE_VOLUME_DOWN = 'volume_down'
SERVICE_VOLUME_MUTE = 'volume_mute'
SERVICE_VOLUME_SET = 'volume_set'
SERVICE_MEDIA_PLAY_PAUSE = 'media_play_pause'
SERVICE_MEDIA_PLAY = 'media_play'
SERVICE_MEDIA_PAUSE = 'media_pause'
SERVICE_MEDIA_STOP = 'media_stop'
SERVICE_MEDIA_NEXT_TRACK = 'media_next_track'
SERVICE_MEDIA_PREVIOUS_TRACK = 'media_previous_track'
SERVICE_MEDIA_SEEK = 'media_seek'
SERVICE_ALARM_DISARM = 'alarm_disarm'
SERVICE_ALARM_ARM_HOME = 'alarm_arm_home'
SERVICE_ALARM_ARM_AWAY = 'alarm_arm_away'
SERVICE_ALARM_TRIGGER = 'alarm_trigger'
SERVICE_LOCK = 'lock'
SERVICE_UNLOCK = 'unlock'
SERVICE_OPEN = 'open'
SERVICE_CLOSE = 'close'
SERVICE_CLOSE_COVER = 'close_cover'
SERVICE_CLOSE_COVER_TILT = 'close_cover_tilt'
SERVICE_OPEN_COVER = 'open_cover'
SERVICE_OPEN_COVER_TILT = 'open_cover_tilt'
SERVICE_SET_COVER_POSITION = 'set_cover_position'
SERVICE_SET_COVER_TILT_POSITION = 'set_cover_tilt_position'
SERVICE_STOP_COVER = 'stop_cover'
SERVICE_STOP_COVER_TILT = 'stop_cover_tilt'
SERVICE_SELECT_OPTION = 'select_option'
# #### API / REMOTE ####
SERVER_PORT = 8123
URL_ROOT = '/'
URL_API = '/api/'
URL_API_STREAM = '/api/stream'
URL_API_CONFIG = '/api/config'
URL_API_DISCOVERY_INFO = '/api/discovery_info'
URL_API_STATES = '/api/states'
URL_API_STATES_ENTITY = '/api/states/{}'
URL_API_EVENTS = '/api/events'
URL_API_EVENTS_EVENT = '/api/events/{}'
URL_API_SERVICES = '/api/services'
URL_API_SERVICES_SERVICE = '/api/services/{}/{}'
URL_API_EVENT_FORWARD = '/api/event_forwarding'
URL_API_COMPONENTS = '/api/components'
URL_API_ERROR_LOG = '/api/error_log'
URL_API_LOG_OUT = '/api/log_out'
URL_API_TEMPLATE = '/api/template'
HTTP_OK = 200
HTTP_CREATED = 201
HTTP_MOVED_PERMANENTLY = 301
HTTP_BAD_REQUEST = 400
HTTP_UNAUTHORIZED = 401
HTTP_NOT_FOUND = 404
HTTP_METHOD_NOT_ALLOWED = 405
HTTP_UNPROCESSABLE_ENTITY = 422
HTTP_INTERNAL_SERVER_ERROR = 500
HTTP_BASIC_AUTHENTICATION = 'basic'
HTTP_DIGEST_AUTHENTICATION = 'digest'
HTTP_HEADER_HA_AUTH = 'X-HA-access'
HTTP_HEADER_ACCEPT_ENCODING = 'Accept-Encoding'
HTTP_HEADER_CONTENT_TYPE = 'Content-type'
HTTP_HEADER_CONTENT_ENCODING = 'Content-Encoding'
HTTP_HEADER_VARY = 'Vary'
HTTP_HEADER_CONTENT_LENGTH = 'Content-Length'
HTTP_HEADER_CACHE_CONTROL = 'Cache-Control'
HTTP_HEADER_EXPIRES = 'Expires'
HTTP_HEADER_ORIGIN = 'Origin'
HTTP_HEADER_X_REQUESTED_WITH = 'X-Requested-With'
HTTP_HEADER_ACCEPT = 'Accept'
HTTP_HEADER_ACCESS_CONTROL_ALLOW_ORIGIN = 'Access-Control-Allow-Origin'
HTTP_HEADER_ACCESS_CONTROL_ALLOW_HEADERS = 'Access-Control-Allow-Headers'
ALLOWED_CORS_HEADERS = [HTTP_HEADER_ORIGIN, HTTP_HEADER_ACCEPT,
HTTP_HEADER_X_REQUESTED_WITH, HTTP_HEADER_CONTENT_TYPE,
HTTP_HEADER_HA_AUTH]
CONTENT_TYPE_JSON = 'application/json'
CONTENT_TYPE_MULTIPART = 'multipart/x-mixed-replace; boundary={}'
CONTENT_TYPE_TEXT_PLAIN = 'text/plain'
# The exit code to send to request a restart
RESTART_EXIT_CODE = 100
UNIT_NOT_RECOGNIZED_TEMPLATE = '{} is not a recognized {} unit.' # type: str
LENGTH = 'length' # type: str
MASS = 'mass' # type: str
VOLUME = 'volume' # type: str
TEMPERATURE = 'temperature' # type: str
SPEED_MS = 'speed_ms' # type: str
ILLUMINANCE = 'illuminance' # type: str
| mit | 8,477,853,142,511,983,000 | 28.47549 | 79 | 0.708216 | false |
lgscofield/odoo | addons/survey/wizard/__init__.py | 385 | 1026 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import survey_email_compose_message
| agpl-3.0 | 7,533,629,147,491,453,000 | 45.636364 | 78 | 0.611111 | false |
marcel-dancak/QGIS | tests/src/python/test_qgsrasterbandcombobox.py | 23 | 3809 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsRasterBandComboBox.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '09/05/2017'
__copyright__ = 'Copyright 2017, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import qgis # NOQA
import os
from qgis.core import QgsRasterLayer
from qgis.gui import QgsRasterBandComboBox
from qgis.testing import start_app, unittest
from qgis.PyQt.QtCore import QFileInfo
from qgis.PyQt.QtTest import QSignalSpy
from utilities import unitTestDataPath
start_app()
class TestQgsRasterBandComboBox(unittest.TestCase):
def testNoLayer(self):
"""
Test widget with no layer
"""
combo = QgsRasterBandComboBox()
self.assertFalse(combo.layer())
self.assertEqual(combo.currentBand(), -1)
combo.setShowNotSetOption(True)
self.assertEqual(combo.currentBand(), -1)
combo.setBand(11111)
self.assertEqual(combo.currentBand(), -1)
combo.setBand(-11111)
self.assertEqual(combo.currentBand(), -1)
def testOneBandRaster(self):
path = os.path.join(unitTestDataPath('raster'),
'band1_float32_noct_epsg4326.tif')
info = QFileInfo(path)
base_name = info.baseName()
layer = QgsRasterLayer(path, base_name)
self.assertTrue(layer)
combo = QgsRasterBandComboBox()
combo.setLayer(layer)
self.assertEqual(combo.layer(), layer)
self.assertEqual(combo.currentBand(), 1)
self.assertEqual(combo.count(), 1)
combo.setShowNotSetOption(True)
self.assertEqual(combo.currentBand(), 1)
self.assertEqual(combo.count(), 2)
combo.setBand(-1)
self.assertEqual(combo.currentBand(), -1)
combo.setBand(1)
self.assertEqual(combo.currentBand(), 1)
combo.setShowNotSetOption(False)
self.assertEqual(combo.currentBand(), 1)
self.assertEqual(combo.count(), 1)
def testMultiBandRaster(self):
path = os.path.join(unitTestDataPath('raster'),
'band3_float32_noct_epsg4326.tif')
info = QFileInfo(path)
base_name = info.baseName()
layer = QgsRasterLayer(path, base_name)
self.assertTrue(layer)
combo = QgsRasterBandComboBox()
combo.setLayer(layer)
self.assertEqual(combo.layer(), layer)
self.assertEqual(combo.currentBand(), 1)
self.assertEqual(combo.count(), 3)
combo.setBand(2)
self.assertEqual(combo.currentBand(), 2)
combo.setShowNotSetOption(True)
self.assertEqual(combo.currentBand(), 2)
self.assertEqual(combo.count(), 4)
combo.setShowNotSetOption(False)
self.assertEqual(combo.currentBand(), 2)
self.assertEqual(combo.count(), 3)
def testSignals(self):
path = os.path.join(unitTestDataPath('raster'),
'band3_float32_noct_epsg4326.tif')
info = QFileInfo(path)
base_name = info.baseName()
layer = QgsRasterLayer(path, base_name)
self.assertTrue(layer)
combo = QgsRasterBandComboBox()
combo.setLayer(layer)
signal_spy = QSignalSpy(combo.bandChanged)
combo.setBand(2)
self.assertEqual(len(signal_spy), 1)
self.assertEqual(signal_spy[0][0], 2)
combo.setBand(3)
self.assertEqual(len(signal_spy), 2)
self.assertEqual(signal_spy[1][0], 3)
if __name__ == '__main__':
unittest.main()
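
# Illustrative sketch (not part of the original test module): outside of tests,
# the widget exercised above is typically wired up roughly like this. The slot
# name below is only an assumption for illustration.
#
#     combo = QgsRasterBandComboBox()
#     combo.setLayer(raster_layer)
#     combo.bandChanged.connect(on_band_changed)  # hypothetical handler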
| gpl-2.0 | -3,210,299,619,995,867,600 | 31.279661 | 78 | 0.640326 | false |
MartyParty21/AwakenDreamsClient | mcp/runtime/startserver.py | 3 | 1524 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 8 16:54:36 2011
@author: ProfMobius
@version: v1.0
"""
import sys
import logging
import json
from optparse import OptionParser
from commands import Commands, SERVER
def main():
parser = OptionParser(version='MCP %s' % Commands.fullversion())
parser.add_option('-c', '--config', dest='config', help='additional configuration file')
parser.add_option('-m', '--main', dest='mainclass', help='Main class to start', default='net.minecraft.server.MinecraftServer')
    parser.add_option('-j', '--json', dest='json', action='store_true', help='Use the json file to set up parameters', default=False)
options, _ = parser.parse_args()
startserver(options.config, options.mainclass, options.json)
def startserver(conffile, mainclass, jsonoverride):
try:
commands = Commands(conffile)
#if not mainclass:
# mainclass = "net.minecraft.server.MinecraftServer"
extraargs = ""
if jsonoverride:
jsonData = json.load(open(commands.jsonFile))
mainclass = jsonData['mainClass']
extraargs = jsonData['minecraftArguments']
if not commands.checkbins(SERVER):
commands.logger.warning('!! Can not find server bins !!')
sys.exit(1)
commands.startserver(mainclass, extraargs)
except Exception: # pylint: disable-msg=W0703
logging.exception('FATAL ERROR')
sys.exit(1)
if __name__ == '__main__':
main()
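
# Illustrative note (not part of the original script): when --json is passed,
# startserver() reads commands.jsonFile and expects at least these keys
# (the values below are only examples, not taken from a real file):
#
#     {
#         "mainClass": "net.minecraft.server.MinecraftServer",
#         "minecraftArguments": "--username ${auth_player_name}"
#     }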
| gpl-3.0 | 3,415,083,498,612,544,500 | 29.48 | 131 | 0.649606 | false |
jinnykoo/wuyisj | tests/unit/core/customisation_tests.py | 5 | 3068 | import os
import tempfile
from django.test import TestCase
from django.conf import settings
from oscar.core import customisation
VALID_FOLDER_PATH = 'tests/_site/apps'
class TestUtilities(TestCase):
def test_subfolder_extraction(self):
folders = list(customisation.subfolders('/var/www/eggs'))
self.assertEqual(folders, ['/var', '/var/www', '/var/www/eggs'])
class TestForkAppFunction(TestCase):
def setUp(self):
self.tmp_folder = tempfile.mkdtemp()
def test_raises_exception_for_nonexistant_app_label(self):
with self.assertRaises(ValueError):
customisation.fork_app('sillytown', 'somefolder')
def test_raises_exception_if_app_has_already_been_forked(self):
# We piggyback on another test which means a custom app is already in
# the settings we use for the test suite. We just check that's still
# the case here.
assert 'tests._site.apps.partner' in settings.INSTALLED_APPS
with self.assertRaises(ValueError):
customisation.fork_app('partner', VALID_FOLDER_PATH)
def test_creates_new_folder(self):
customisation.fork_app('order', self.tmp_folder)
new_folder_path = os.path.join(self.tmp_folder, 'order')
self.assertTrue(os.path.exists(new_folder_path))
def test_creates_init_file(self):
customisation.fork_app('order', self.tmp_folder)
filepath = os.path.join(self.tmp_folder, 'order', '__init__.py')
self.assertTrue(os.path.exists(filepath))
def test_handles_dashboard_app(self):
# Dashboard apps are fiddly as they aren't identified by a single app
# label.
customisation.fork_app('dashboard.catalogue', self.tmp_folder)
# Check __init__.py created (and supporting folders)
init_path = os.path.join(self.tmp_folder,
'dashboard/catalogue/__init__.py')
self.assertTrue(os.path.exists(init_path))
def test_creates_models_and_admin_file(self):
customisation.fork_app('order', self.tmp_folder)
for module, expected_string in [
('models', 'from oscar.apps.order.models import *'),
('admin', 'from oscar.apps.order.admin import *'),
('config', 'OrderConfig')]:
filepath = os.path.join(self.tmp_folder, 'order', '%s.py' % module)
self.assertTrue(os.path.exists(filepath))
contents = open(filepath).read()
self.assertTrue(expected_string in contents)
def test_copies_in_migrations_when_needed(self):
for app, has_models in [('order', True), ('search', False)]:
customisation.fork_app(app, self.tmp_folder)
native_migration_path = os.path.join(
self.tmp_folder, app, 'migrations')
self.assertEqual(has_models, os.path.exists(native_migration_path))
south_migration_path = os.path.join(
self.tmp_folder, app, 'south_migrations')
self.assertEqual(has_models, os.path.exists(south_migration_path))
| bsd-3-clause | 772,085,229,619,327,900 | 39.368421 | 79 | 0.64309 | false |
nex3/pygments | pygments/styles/vim.py | 75 | 1976 | # -*- coding: utf-8 -*-
"""
pygments.styles.vim
~~~~~~~~~~~~~~~~~~~
A highlighting style for Pygments, inspired by vim.
:copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace, Token
class VimStyle(Style):
"""
Styles somewhat like vim 7.0
"""
background_color = "#000000"
highlight_color = "#222222"
default_style = "#cccccc"
styles = {
Token: "#cccccc",
Whitespace: "",
Comment: "#000080",
Comment.Preproc: "",
Comment.Special: "bold #cd0000",
Keyword: "#cdcd00",
Keyword.Declaration: "#00cd00",
Keyword.Namespace: "#cd00cd",
Keyword.Pseudo: "",
Keyword.Type: "#00cd00",
Operator: "#3399cc",
Operator.Word: "#cdcd00",
Name: "",
Name.Class: "#00cdcd",
Name.Builtin: "#cd00cd",
Name.Exception: "bold #666699",
Name.Variable: "#00cdcd",
String: "#cd0000",
Number: "#cd00cd",
Generic.Heading: "bold #000080",
Generic.Subheading: "bold #800080",
Generic.Deleted: "#cd0000",
Generic.Inserted: "#00cd00",
Generic.Error: "#FF0000",
Generic.Emph: "italic",
Generic.Strong: "bold",
Generic.Prompt: "bold #000080",
Generic.Output: "#888",
Generic.Traceback: "#04D",
Error: "border:#FF0000"
}
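
# Illustrative usage sketch (not part of the original module): the style can be
# handed to any Pygments formatter, for example:
#
#     from pygments import highlight
#     from pygments.lexers import PythonLexer
#     from pygments.formatters import HtmlFormatter
#     from pygments.styles.vim import VimStyle
#
#     html = highlight("print 42", PythonLexer(), HtmlFormatter(style=VimStyle))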
| bsd-2-clause | -260,744,558,528,395,100 | 30.365079 | 70 | 0.448887 | false |
babelphish/fridge-cop | fridge_language.py | 1 | 1219 | import random
def get_fridge_language(language):
if (language == "EN"):
return EnglishFridgeLanguage()
class FridgeLanguage:
    def __init__(self):
random.seed()
def get_random_letter(self):
value = random.random() * 100
total = 0
for letter_frequency in self.distribution:
letter = letter_frequency[1]
total += letter_frequency[0]
if (value < total):
break
return letter
class EnglishFridgeLanguage(FridgeLanguage):
distribution = [
[ 13.0001 , 'E'],
[ 9.056 , 'T'],
[ 8.167 , 'A'],
[ 7.507 , 'O'],
[ 6.966 , 'I'],
[ 6.749 , 'N'],
[ 6.327 , 'S'],
[ 6.094 , 'H'],
[ 5.987 , 'R'],
[ 4.253 , 'D'],
[ 4.025 , 'L'],
[ 2.782 , 'C'],
[ 2.758 , 'U'],
[ 2.406 , 'M'],
[ 2.360 , 'W'],
[ 2.228 , 'F'],
[ 2.015 , 'G'],
[ 1.974 , 'Y'],
[ 1.929 , 'P'],
[ 1.492 , 'B'],
[ 0.978 , 'V'],
[ 0.772 , 'K'],
[ 0.153 , 'J'],
[ 0.150 , 'X'],
[ 0.095 , 'Q'],
[ 0.074 , 'Z']
]
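
# Illustrative usage sketch (not part of the original module): draw a short run
# of letters weighted by the English frequency table above.
if __name__ == '__main__':
    language = get_fridge_language("EN")
    print(''.join(language.get_random_letter() for _ in range(10)))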
| agpl-3.0 | 2,068,443,251,326,739,200 | 22.901961 | 50 | 0.376538 | false |
yaojingwu1992/XlsxWriter | xlsxwriter/test/comparison/test_cond_format07.py | 8 | 2470 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, [email protected]
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'cond_format07.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with conditional formatting."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
format1 = workbook.add_format({'bg_color': '#FF0000', 'dxf_index': 1})
format2 = workbook.add_format({'bg_color': '#92D050', 'dxf_index': 0})
data = [
[90, 80, 50, 10, 20, 90, 40, 90, 30, 40],
[20, 10, 90, 100, 30, 60, 70, 60, 50, 90],
[10, 50, 60, 50, 20, 50, 80, 30, 40, 60],
[10, 90, 20, 40, 10, 40, 50, 70, 90, 50],
[70, 100, 10, 90, 10, 10, 20, 100, 100, 40],
[20, 60, 10, 100, 30, 10, 20, 60, 100, 10],
[10, 60, 10, 80, 100, 80, 30, 30, 70, 40],
[30, 90, 60, 10, 10, 100, 40, 40, 30, 40],
[80, 90, 10, 20, 20, 50, 80, 20, 60, 90],
[60, 80, 30, 30, 10, 50, 80, 60, 50, 30],
]
for row, row_data in enumerate(data):
worksheet.write_row(row, 0, row_data)
row += 1
worksheet.conditional_format('A1:J10',
{'type': 'cell',
'format': format1,
'criteria': '>=',
'value': 50,
})
worksheet.conditional_format('A1:J10',
{'type': 'cell',
'format': format2,
'criteria': '<',
'value': 50,
})
workbook.close()
self.assertExcelEqual()
| bsd-2-clause | 4,847,854,250,150,649,000 | 32.835616 | 88 | 0.444534 | false |
duointeractive/media-nommer | media_nommer/ec2nommerd/node_state.py | 1 | 7367 | """
Contains the :py:class:`NodeStateManager` class, which is an abstraction layer
for storing and communicating the status of EC2_ nodes.
"""
import urllib2
import datetime
import boto
from twisted.internet import reactor
from media_nommer.conf import settings
from media_nommer.utils import logger
from media_nommer.utils.compat import total_seconds
class NodeStateManager(object):
"""
Tracks this node's state, reports it to :doc:`../feederd`, and terminates
itself if certain conditions of inactivity are met.
"""
last_dtime_i_did_something = datetime.datetime.now()
# Used for lazy-loading the SDB connection. Do not refer to directly.
__aws_sdb_connection = None
# Used for lazy-loading the SDB domain. Do not refer to directly.
__aws_sdb_nommer_state_domain = None
# Used for lazy-loading the EC2 connection. Do not refer to directly.
__aws_ec2_connection = None
# Store the instance ID for this EC2 node (if not local).
__instance_id = None
@classmethod
def _aws_ec2_connection(cls):
"""
Lazy-loading of the EC2 boto connection. Refer to this instead of
referencing cls.__aws_ec2_connection directly.
:returns: A boto connection to Amazon's EC2 interface.
"""
if not cls.__aws_ec2_connection:
cls.__aws_ec2_connection = boto.connect_ec2(
settings.AWS_ACCESS_KEY_ID,
settings.AWS_SECRET_ACCESS_KEY)
return cls.__aws_ec2_connection
@classmethod
def _aws_sdb_connection(cls):
"""
Lazy-loading of the SimpleDB boto connection. Refer to this instead of
referencing cls.__aws_sdb_connection directly.
:returns: A boto connection to Amazon's SimpleDB interface.
"""
if not cls.__aws_sdb_connection:
cls.__aws_sdb_connection = boto.connect_sdb(
settings.AWS_ACCESS_KEY_ID,
settings.AWS_SECRET_ACCESS_KEY)
return cls.__aws_sdb_connection
@classmethod
def _aws_sdb_nommer_state_domain(cls):
"""
Lazy-loading of the SimpleDB boto domain. Refer to this instead of
referencing cls.__aws_sdb_nommer_state_domain directly.
:returns: A boto SimpleDB domain for this workflow.
"""
if not cls.__aws_sdb_nommer_state_domain:
cls.__aws_sdb_nommer_state_domain = cls._aws_sdb_connection().create_domain(
settings.SIMPLEDB_EC2_NOMMER_STATE_DOMAIN)
return cls.__aws_sdb_nommer_state_domain
@classmethod
def get_instance_id(cls, is_local=False):
"""
Determine this EC2 instance's unique instance ID. Lazy load this, and
avoid further re-queries after the first one.
        :param bool is_local: When True, skip hitting EC2's metadata server
            and just make up a unique ID. When False, query the metadata
            server for this instance's real ID.
:rtype: str
:returns: The EC2 instance's ID.
"""
if not cls.__instance_id:
if is_local:
cls.__instance_id = 'local-dev'
else:
aws_meta_url = 'http://169.254.169.254/latest/meta-data/instance-id'
response = urllib2.urlopen(aws_meta_url)
cls.__instance_id = response.read()
return cls.__instance_id
@classmethod
def is_ec2_instance(cls):
"""
Determine whether this is an EC2 instance or not.
:rtype: bool
:returns: ``True`` if this is an EC2 instance, ``False`` if otherwise.
"""
return cls.get_instance_id() != 'local-dev'
@classmethod
def send_instance_state_update(cls, state='ACTIVE'):
"""
Sends a status update to feederd through SimpleDB. Lets the daemon
know how many jobs this instance is crunching right now. Also updates
a timestamp field to let feederd know how long it has been since the
instance's last check-in.
:keyword str state: If this EC2_ instance is anything but ``ACTIVE``,
pass the state here. This is useful during node termination.
"""
if cls.is_ec2_instance():
instance_id = cls.get_instance_id()
item = cls._aws_sdb_nommer_state_domain().new_item(instance_id)
item['id'] = instance_id
item['active_jobs'] = cls.get_num_active_threads() - 1
item['last_report_dtime'] = datetime.datetime.now()
item['state'] = state
item.save()
@classmethod
def contemplate_termination(cls, thread_count_mod=0):
"""
Looks at how long it's been since this worker has done something, and
decides whether to self-terminate.
:param int thread_count_mod: Add this to the amount returned by the call
to :py:meth:`get_num_active_threads`. This is useful when calling
this method from a non-encoder thread.
:rtype: bool
:returns: ``True`` if this instance terminated itself, ``False``
if not.
"""
if not cls.is_ec2_instance():
# Developing locally, don't go here.
return False
# This is -1 since this is also a thread doing the contemplation.
# This would always be 1, even if we had no jobs encoding, if we
# didn't take into account this thread.
num_active_threads = cls.get_num_active_threads() + thread_count_mod
if num_active_threads > 0:
# Encoding right now, don't terminate.
return False
tdelt = datetime.datetime.now() - cls.last_dtime_i_did_something
# Total seconds of inactivity.
inactive_secs = total_seconds(tdelt)
# If we're over the inactivity threshold...
if inactive_secs > settings.NOMMERD_MAX_INACTIVITY:
instance_id = cls.get_instance_id()
conn = cls._aws_ec2_connection()
# Find this particular EC2 instance via boto.
reservations = conn.get_all_instances(instance_ids=[instance_id])
# This should only be one match, but in the interest of
# playing along...
for reservation in reservations:
for instance in reservation.instances:
# Here's the instance, terminate it.
logger.info("Goodbye, cruel world.")
cls.send_instance_state_update(state='TERMINATED')
instance.terminate()
# Seeya later!
return True
# Continue existence, no termination.
return False
@classmethod
def get_num_active_threads(cls):
"""
Checks the reactor's threadpool to see how many threads are currently
working. This can be used to determine how busy this node is.
:rtype: int
:returns: The number of active threads.
"""
return len(reactor.getThreadPool().working)
@classmethod
def i_did_something(cls):
"""
Pat ourselves on the back each time we do something.
Used for determining whether this node's continued existence is
necessary anymore in :py:meth:`contemplate_termination`.
"""
cls.last_dtime_i_did_something = datetime.datetime.now()
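
# Illustrative usage sketch (not part of the original module): the ec2nommerd
# daemon would typically drive this class roughly as follows (the scheduling
# shown is only an assumption for illustration):
#
#     NodeStateManager.send_instance_state_update()      # periodic heartbeat
#     NodeStateManager.i_did_something()                 # after each encode
#     NodeStateManager.contemplate_termination(thread_count_mod=-1)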
| bsd-3-clause | 859,487,332,232,769,000 | 37.773684 | 88 | 0.606217 | false |
javierTerry/odoo | addons/point_of_sale/__openerp__.py | 261 | 3612 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Point of Sale',
'version': '1.0.1',
'category': 'Point Of Sale',
'sequence': 6,
'summary': 'Touchscreen Interface for Shops',
'description': """
Quick and Easy sale process
===========================
This module allows you to manage your shop sales very easily with a fully web based touchscreen interface.
It is compatible with all PC tablets and the iPad, offering multiple payment methods.
Product selection can be done in several ways:
* Using a barcode reader
* Browsing through categories of products or via a text search.
Main Features
-------------
* Fast encoding of the sale
* Choose one payment method (the quick way) or split the payment between several payment methods
* Computation of the amount of money to return
* Create and confirm the picking list automatically
* Allows the user to create an invoice automatically
* Refund previous sales
""",
'author': 'OpenERP SA',
'depends': ['sale_stock'],
'data': [
'data/report_paperformat.xml',
'security/point_of_sale_security.xml',
'security/ir.model.access.csv',
'wizard/pos_box.xml',
'wizard/pos_confirm.xml',
'wizard/pos_details.xml',
'wizard/pos_discount.xml',
'wizard/pos_open_statement.xml',
'wizard/pos_payment.xml',
'wizard/pos_session_opening.xml',
'views/templates.xml',
'point_of_sale_report.xml',
'point_of_sale_view.xml',
'point_of_sale_sequence.xml',
'point_of_sale_data.xml',
'report/pos_order_report_view.xml',
'point_of_sale_workflow.xml',
'account_statement_view.xml',
'account_statement_report.xml',
'res_users_view.xml',
'res_partner_view.xml',
'views/report_statement.xml',
'views/report_usersproduct.xml',
'views/report_receipt.xml',
'views/report_saleslines.xml',
'views/report_detailsofsales.xml',
'views/report_payment.xml',
'views/report_sessionsummary.xml',
'views/point_of_sale.xml',
],
'demo': [
'point_of_sale_demo.xml',
'account_statement_demo.xml',
],
'test': [
'test/00_register_open.yml',
'test/01_order_to_payment.yml',
'test/02_order_to_invoice.yml',
'test/point_of_sale_report.yml',
'test/account_statement_reports.yml',
],
'installable': True,
'application': True,
'qweb': ['static/src/xml/pos.xml'],
'website': 'https://www.odoo.com/page/point-of-sale',
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 8,499,701,014,386,350,000 | 35.12 | 106 | 0.616002 | false |
ssh1/stbgui | lib/python/Components/Converter/PliExtraInfo.py | 4 | 12709 | # shamelessly copied from pliExpertInfo (Vali, Mirakels, Littlesat)
from enigma import iServiceInformation, iPlayableService
from Components.Converter.Converter import Converter
from Components.Element import cached
from Components.config import config
from Tools.Transponder import ConvertToHumanReadable, getChannelNumber
from Tools.GetEcmInfo import GetEcmInfo
from Poll import Poll
def addspace(text):
if text:
text += " "
return text
class PliExtraInfo(Poll, Converter, object):
def __init__(self, type):
Converter.__init__(self, type)
Poll.__init__(self)
self.type = type
self.poll_interval = 1000
self.poll_enabled = True
self.caid_data = (
( "0x100", "0x1ff", "Seca", "S", True ),
( "0x500", "0x5ff", "Via", "V", True ),
( "0x600", "0x6ff", "Irdeto", "I", True ),
( "0x900", "0x9ff", "NDS", "Nd", True ),
( "0xb00", "0xbff", "Conax", "Co", True ),
( "0xd00", "0xdff", "CryptoW", "Cw", True ),
( "0xe00", "0xeff", "PowerVU", "P", False ),
("0x1700", "0x17ff", "Beta", "B", True ),
("0x1800", "0x18ff", "Nagra", "N", True ),
("0x2600", "0x2600", "Biss", "Bi", False ),
("0x4ae0", "0x4ae1", "Dre", "D", False ),
("0x4aee", "0x4aee", "BulCrypt", "B1", False ),
("0x5581", "0x5581", "BulCrypt", "B2", False )
)
self.ca_table = (
("CryptoCaidSecaAvailable", "S", False),
("CryptoCaidViaAvailable", "V", False),
("CryptoCaidIrdetoAvailable", "I", False),
("CryptoCaidNDSAvailable", "Nd", False),
("CryptoCaidConaxAvailable", "Co", False),
("CryptoCaidCryptoWAvailable", "Cw", False),
("CryptoCaidPowerVUAvailable", "P", False),
("CryptoCaidBetaAvailable", "B", False),
("CryptoCaidNagraAvailable", "N", False),
("CryptoCaidBissAvailable", "Bi", False),
("CryptoCaidDreAvailable", "D", False),
("CryptoCaidBulCrypt1Available","B1", False),
("CryptoCaidBulCrypt2Available","B2", False),
("CryptoCaidSecaSelected", "S", True),
("CryptoCaidViaSelected", "V", True),
("CryptoCaidIrdetoSelected", "I", True),
("CryptoCaidNDSSelected", "Nd", True),
("CryptoCaidConaxSelected", "Co", True),
("CryptoCaidCryptoWSelected", "Cw", True),
("CryptoCaidPowerVUSelected", "P", True),
("CryptoCaidBetaSelected", "B", True),
("CryptoCaidNagraSelected", "N", True),
("CryptoCaidBissSelected", "Bi", True),
("CryptoCaidDreSelected", "D", True),
("CryptoCaidBulCrypt1Selected", "B1", True),
("CryptoCaidBulCrypt2Selected", "B2", True),
)
self.ecmdata = GetEcmInfo()
self.feraw = self.fedata = self.updateFEdata = None
def getCryptoInfo(self, info):
if (info.getInfo(iServiceInformation.sIsCrypted) == 1):
data = self.ecmdata.getEcmData()
self.current_source = data[0]
self.current_caid = data[1]
self.current_provid = data[2]
self.current_ecmpid = data[3]
else:
self.current_source = ""
self.current_caid = "0"
self.current_provid = "0"
self.current_ecmpid = "0"
def createCryptoBar(self, info):
res = ""
available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
for caid_entry in self.caid_data:
if int(self.current_caid, 16) >= int(caid_entry[0], 16) and int(self.current_caid, 16) <= int(caid_entry[1], 16):
color="\c0000??00"
else:
color = "\c007?7?7?"
try:
for caid in available_caids:
if caid >= int(caid_entry[0], 16) and caid <= int(caid_entry[1], 16):
color="\c00????00"
except:
pass
if color != "\c007?7?7?" or caid_entry[4]:
if res: res += " "
res += color + caid_entry[3]
res += "\c00??????"
return res
def createCryptoSpecial(self, info):
caid_name = "FTA"
try:
for caid_entry in self.caid_data:
if int(self.current_caid, 16) >= int(caid_entry[0], 16) and int(self.current_caid, 16) <= int(caid_entry[1], 16):
caid_name = caid_entry[2]
break
return caid_name + ":%04x:%04x:%04x:%04x" % (int(self.current_caid,16), int(self.current_provid,16), info.getInfo(iServiceInformation.sSID), int(self.current_ecmpid,16))
except:
pass
return ""
def createResolution(self, info):
xres = info.getInfo(iServiceInformation.sVideoWidth)
if xres == -1:
return ""
yres = info.getInfo(iServiceInformation.sVideoHeight)
mode = ("i", "p", " ")[info.getInfo(iServiceInformation.sProgressive)]
fps = str((info.getInfo(iServiceInformation.sFrameRate) + 500) / 1000)
return str(xres) + "x" + str(yres) + mode + fps
def createVideoCodec(self, info):
return ("MPEG2", "MPEG4", "MPEG1", "MPEG4-II", "VC1", "VC1-SM", "")[info.getInfo(iServiceInformation.sVideoType)]
def createPIDInfo(self, info):
vpid = info.getInfo(iServiceInformation.sVideoPID)
apid = info.getInfo(iServiceInformation.sAudioPID)
pcrpid = info.getInfo(iServiceInformation.sPCRPID)
sidpid = info.getInfo(iServiceInformation.sSID)
tsid = info.getInfo(iServiceInformation.sTSID)
onid = info.getInfo(iServiceInformation.sONID)
if vpid < 0 : vpid = 0
if apid < 0 : apid = 0
if pcrpid < 0 : pcrpid = 0
if sidpid < 0 : sidpid = 0
if tsid < 0 : tsid = 0
if onid < 0 : onid = 0
return "%d-%d:%05d:%04d:%04d:%04d" % (onid, tsid, sidpid, vpid, apid, pcrpid)
def createTransponderInfo(self, fedata, feraw):
if not feraw:
return ""
elif "DVB-T" in feraw.get("tuner_type"):
tmp = addspace(self.createChannelNumber(fedata, feraw)) + addspace(self.createFrequency(feraw)) + addspace(self.createPolarization(fedata))
else:
tmp = addspace(self.createFrequency(feraw)) + addspace(self.createPolarization(fedata))
return addspace(self.createTunerSystem(fedata)) + tmp + addspace(self.createSymbolRate(fedata, feraw)) + addspace(self.createFEC(fedata, feraw)) \
+ addspace(self.createModulation(fedata)) + addspace(self.createOrbPos(feraw))
def createFrequency(self, feraw):
frequency = feraw.get("frequency")
if frequency:
if "DVB-T" in feraw.get("tuner_type"):
return str(int(frequency / 1000000. + 0.5))
else:
return str(int(frequency / 1000 + 0.5))
return ""
def createChannelNumber(self, fedata, feraw):
return "DVB-T" in feraw.get("tuner_type") and fedata.get("channel") or ""
def createSymbolRate(self, fedata, feraw):
if "DVB-T" in feraw.get("tuner_type"):
bandwidth = fedata.get("bandwidth")
if bandwidth:
return bandwidth
else:
symbolrate = fedata.get("symbol_rate")
if symbolrate:
return str(symbolrate / 1000)
return ""
def createPolarization(self, fedata):
return fedata.get("polarization_abbreviation") or ""
def createFEC(self, fedata, feraw):
if "DVB-T" in feraw.get("tuner_type"):
code_rate_lp = fedata.get("code_rate_lp")
code_rate_hp = fedata.get("code_rate_hp")
if code_rate_lp and code_rate_hp:
return code_rate_lp + "-" + code_rate_hp
else:
fec = fedata.get("fec_inner")
if fec:
return fec
return ""
def createModulation(self, fedata):
if fedata.get("tuner_type") == _("Terrestrial"):
constellation = fedata.get("constellation")
if constellation:
return constellation
else:
modulation = fedata.get("modulation")
if modulation:
return modulation
return ""
def createTunerType(self, feraw):
return feraw.get("tuner_type") or ""
def createTunerSystem(self, fedata):
return fedata.get("system") or ""
def createOrbPos(self, feraw):
orbpos = feraw.get("orbital_position")
if orbpos > 1800:
return str((float(3600 - orbpos)) / 10.0) + "\xc2\xb0 W"
elif orbpos > 0:
return str((float(orbpos)) / 10.0) + "\xc2\xb0 E"
return ""
def createOrbPosOrTunerSystem(self, fedata,feraw):
orbpos = self.createOrbPos(feraw)
		if orbpos != "":
return orbpos
return self.createTunerSystem(fedata)
def createProviderName(self, info):
return info.getInfoString(iServiceInformation.sProvider)
@cached
def getText(self):
service = self.source.service
if service is None:
return ""
info = service and service.info()
if not info:
return ""
if self.type == "CryptoInfo":
self.getCryptoInfo(info)
if config.usage.show_cryptoinfo.value:
return addspace(self.createCryptoBar(info)) + self.createCryptoSpecial(info)
else:
return addspace(self.createCryptoBar(info)) + addspace(self.current_source) + self.createCryptoSpecial(info)
if self.type == "CryptoBar":
self.getCryptoInfo(info)
return self.createCryptoBar(info)
if self.type == "CryptoSpecial":
self.getCryptoInfo(info)
return self.createCryptoSpecial(info)
if self.type == "ResolutionString":
return self.createResolution(info)
if self.type == "VideoCodec":
return self.createVideoCodec(info)
if self.updateFEdata:
feinfo = service.frontendInfo()
if feinfo:
self.feraw = feinfo.getAll(config.usage.infobar_frontend_source.value == "settings")
if self.feraw:
self.fedata = ConvertToHumanReadable(self.feraw)
feraw = self.feraw
if not feraw:
feraw = info.getInfoObject(iServiceInformation.sTransponderData)
fedata = ConvertToHumanReadable(feraw)
else:
fedata = self.fedata
if self.type == "All":
self.getCryptoInfo(info)
if config.usage.show_cryptoinfo.value:
return addspace(self.createProviderName(info)) + self.createTransponderInfo(fedata, feraw) + "\n" \
+ addspace(self.createCryptoBar(info)) + addspace(self.createCryptoSpecial(info)) + "\n" \
+ addspace(self.createPIDInfo(info)) + addspace(self.createVideoCodec(info)) + self.createResolution(info)
else:
return addspace(self.createProviderName(info)) + self.createTransponderInfo(fedata, feraw) + "\n" \
+ addspace(self.createCryptoBar(info)) + self.current_source + "\n" \
+ addspace(self.createCryptoSpecial(info)) + addspace(self.createVideoCodec(info)) + self.createResolution(info)
if self.type == "PIDInfo":
return self.createPIDInfo(info)
if not feraw:
return ""
if self.type == "ServiceInfo":
return addspace(self.createProviderName(info)) + addspace(self.createTunerSystem(fedata)) + addspace(self.createFrequency(feraw)) + addspace(self.createPolarization(fedata)) \
+ addspace(self.createSymbolRate(fedata, feraw)) + addspace(self.createFEC(fedata, feraw)) + addspace(self.createModulation(fedata)) + addspace(self.createOrbPos(feraw)) \
+ addspace(self.createVideoCodec(info)) + self.createResolution(info)
if self.type == "TransponderInfo":
return self.createTransponderInfo(fedata, feraw)
if self.type == "TransponderFrequency":
return self.createFrequency(feraw)
if self.type == "TransponderSymbolRate":
return self.createSymbolRate(fedata, feraw)
if self.type == "TransponderPolarization":
return self.createPolarization(fedata)
if self.type == "TransponderFEC":
return self.createFEC(fedata, feraw)
if self.type == "TransponderModulation":
return self.createModulation(fedata)
if self.type == "OrbitalPosition":
return self.createOrbPos(feraw)
if self.type == "TunerType":
return self.createTunerType(feraw)
if self.type == "TunerSystem":
return self.createTunerSystem(fedata)
if self.type == "OrbitalPositionOrTunerSystem":
return self.createOrbPosOrTunerSystem(fedata,feraw)
if self.type == "TerrestrialChannelNumber":
return self.createChannelNumber(fedata, feraw)
return _("invalid type")
text = property(getText)
@cached
def getBool(self):
service = self.source.service
info = service and service.info()
if not info:
return False
request_caid = None
for x in self.ca_table:
if x[0] == self.type:
request_caid = x[1]
request_selected = x[2]
break
if request_caid is None:
return False
if info.getInfo(iServiceInformation.sIsCrypted) != 1:
return False
data = self.ecmdata.getEcmData()
if data is None:
return False
current_caid = data[1]
available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
for caid_entry in self.caid_data:
if caid_entry[3] == request_caid:
if(request_selected):
if int(current_caid, 16) >= int(caid_entry[0], 16) and int(current_caid, 16) <= int(caid_entry[1], 16):
return True
else: # request available
try:
for caid in available_caids:
if caid >= int(caid_entry[0], 16) and caid <= int(caid_entry[1], 16):
return True
except:
pass
return False
boolean = property(getBool)
def changed(self, what):
if what[0] == self.CHANGED_SPECIFIC:
self.updateFEdata = False
if what[1] == iPlayableService.evNewProgramInfo:
self.updateFEdata = True
if what[1] == iPlayableService.evEnd:
self.feraw = self.fedata = None
Converter.changed(self, what)
elif what[0] == self.CHANGED_POLL and self.updateFEdata is not None:
self.updateFEdata = False
Converter.changed(self, what)
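
# Illustrative note (not part of the original module): the converter's `type`
# argument comes from the skin that instantiates it, so a skin widget selects a
# mode with something like the following (skin syntax shown only as an
# assumption about the surrounding enigma2 skin format):
#
#     <convert type="PliExtraInfo">CryptoInfo</convert>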
| gpl-2.0 | -7,293,005,866,189,446,000 | 31.755155 | 178 | 0.680305 | false |
pschmitt/home-assistant | homeassistant/components/supla/cover.py | 6 | 2904 | """Support for Supla cover - curtains, rollershutters, entry gate etc."""
import logging
from pprint import pformat
from homeassistant.components.cover import (
ATTR_POSITION,
DEVICE_CLASS_GARAGE,
CoverEntity,
)
from homeassistant.components.supla import SuplaChannel
_LOGGER = logging.getLogger(__name__)
SUPLA_SHUTTER = "CONTROLLINGTHEROLLERSHUTTER"
SUPLA_GATE = "CONTROLLINGTHEGATE"
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Supla covers."""
if discovery_info is None:
return
_LOGGER.debug("Discovery: %s", pformat(discovery_info))
entities = []
for device in discovery_info:
device_name = device["function"]["name"]
if device_name == SUPLA_SHUTTER:
entities.append(SuplaCover(device))
elif device_name == SUPLA_GATE:
entities.append(SuplaGateDoor(device))
add_entities(entities)
class SuplaCover(SuplaChannel, CoverEntity):
"""Representation of a Supla Cover."""
@property
def current_cover_position(self):
"""Return current position of cover. 0 is closed, 100 is open."""
state = self.channel_data.get("state")
if state:
return 100 - state["shut"]
return None
def set_cover_position(self, **kwargs):
"""Move the cover to a specific position."""
self.action("REVEAL", percentage=kwargs.get(ATTR_POSITION))
@property
def is_closed(self):
"""Return if the cover is closed."""
if self.current_cover_position is None:
return None
return self.current_cover_position == 0
def open_cover(self, **kwargs):
"""Open the cover."""
self.action("REVEAL")
def close_cover(self, **kwargs):
"""Close the cover."""
self.action("SHUT")
def stop_cover(self, **kwargs):
"""Stop the cover."""
self.action("STOP")
class SuplaGateDoor(SuplaChannel, CoverEntity):
"""Representation of a Supla gate door."""
@property
def is_closed(self):
"""Return if the gate is closed or not."""
state = self.channel_data.get("state")
if state and "hi" in state:
return state.get("hi")
return None
def open_cover(self, **kwargs) -> None:
"""Open the gate."""
if self.is_closed:
self.action("OPEN_CLOSE")
def close_cover(self, **kwargs) -> None:
"""Close the gate."""
if not self.is_closed:
self.action("OPEN_CLOSE")
def stop_cover(self, **kwargs) -> None:
"""Stop the gate."""
self.action("OPEN_CLOSE")
def toggle(self, **kwargs) -> None:
"""Toggle the gate."""
self.action("OPEN_CLOSE")
@property
def device_class(self):
"""Return the class of this device, from component DEVICE_CLASSES."""
return DEVICE_CLASS_GARAGE
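
# Illustrative note (not part of the original module): the channel payloads read
# above look roughly like the following (shape inferred from the attribute
# accesses in this file; the values are made up):
#
#     {"function": {"name": "CONTROLLINGTHEROLLERSHUTTER"},
#      "state": {"shut": 40}}    # -> current_cover_position == 60
#
#     {"function": {"name": "CONTROLLINGTHEGATE"},
#      "state": {"hi": True}}    # -> gate reported as closed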
| apache-2.0 | -6,450,520,718,000,023,000 | 27.470588 | 77 | 0.61157 | false |
h2oai/h2o | py/testdir_single_jvm/test_KMeans_twit_fvec.py | 9 | 2516 | import unittest, time, sys
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_kmeans, h2o_browse as h2b, h2o_import as h2i
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
h2o.init(1, java_heap_GB=14)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_KMeans_twit_fvec(self):
csvFilename = "Twitter2DB.txt"
print "\nStarting", csvFilename
# h2b.browseTheCloud()
parseResult = h2i.import_parse(bucket='smalldata', path=csvFilename, hex_key=csvFilename + ".hex", schema='put')
# both of these centers match what different R/Scikit packages get
expected1 = [
# expected centers are from R. rest is just from h2o
([310527.2, 13433.89], 11340, None),
([5647967.1, 40487.76], 550, None),
([21765291.7, 93129.26], 14, None),
]
# this is what we get with Furthest
expected2 = [
([351104.74065255735, 15421.749823633158], 11340, 5021682274541967.0) ,
([7292636.589090909, 7575.630909090909], 550, 6373072701775582.0) ,
([34406781.071428575, 244878.0], 14, 123310713697348.92) ,
]
        # allowed deltas are relative tolerances: multipliers of each expected tuple value
allowedDelta = (0.0001, 0.0001, 0.0001)
for trial in range(2):
kwargs = {
'k': 3,
'max_iter': 50,
'normalize': 0,
'initialization': 'Furthest',
# 'initialization': 'PlusPlus',
'destination_key': 'kmeans_dest_key',
                # reuse the same seed, to get deterministic results (otherwise it sometimes fails)
'seed': 265211114317615310
}
init_choices = ['Furthest', 'PlusPlus']
kwargs['initialization'] = init_choices[trial % len(init_choices)]
kmeans = h2o_cmd.runKMeans(parseResult=parseResult, timeoutSecs=15, **kwargs)
# can't inspect a kmeans2 model?
# inspect = h2o_cmd.runInspect(None, key=kmeans['model']['_key'], verbose=True)
(centers, tupleResultList) = h2o_kmeans.bigCheckResults(self, kmeans, csvFilename, parseResult, 'd', **kwargs)
h2o_kmeans.compareResultsToExpected(self, tupleResultList, expected2, allowedDelta, trial=trial)
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 | 2,227,448,377,894,596,000 | 37.707692 | 122 | 0.575914 | false |
bestwpw/mysql-5.6 | xtrabackup/test/python/subunit/tests/test_subunit_filter.py | 50 | 8259 | #
# subunit: extensions to python unittest to get test results from subprocesses.
# Copyright (C) 2005 Robert Collins <[email protected]>
#
# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
# license at the users choice. A copy of both licenses are available in the
# project source as Apache-2.0 and BSD. You may not use this file except in
# compliance with one of these two licences.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# license you chose for the specific language governing permissions and
# limitations under that license.
#
"""Tests for subunit.TestResultFilter."""
from datetime import datetime
from subunit import iso8601
import unittest
from testtools import TestCase
from testtools.compat import _b, BytesIO, StringIO
from testtools.testresult.doubles import ExtendedTestResult
import subunit
from subunit.test_results import TestResultFilter
class TestTestResultFilter(TestCase):
"""Test for TestResultFilter, a TestResult object which filters tests."""
# While TestResultFilter works on python objects, using a subunit stream
# is an easy pithy way of getting a series of test objects to call into
# the TestResult, and as TestResultFilter is intended for use with subunit
# also has the benefit of detecting any interface skew issues.
example_subunit_stream = _b("""\
tags: global
test passed
success passed
test failed
tags: local
failure failed
test error
error error [
error details
]
test skipped
skip skipped
test todo
xfail todo
""")
def run_tests(self, result_filter, input_stream=None):
"""Run tests through the given filter.
:param result_filter: A filtering TestResult object.
:param input_stream: Bytes of subunit stream data. If not provided,
uses TestTestResultFilter.example_subunit_stream.
"""
if input_stream is None:
input_stream = self.example_subunit_stream
test = subunit.ProtocolTestCase(BytesIO(input_stream))
test.run(result_filter)
def test_default(self):
"""The default is to exclude success and include everything else."""
filtered_result = unittest.TestResult()
result_filter = TestResultFilter(filtered_result)
self.run_tests(result_filter)
# skips are seen as success by default python TestResult.
self.assertEqual(['error'],
[error[0].id() for error in filtered_result.errors])
self.assertEqual(['failed'],
[failure[0].id() for failure in
filtered_result.failures])
self.assertEqual(4, filtered_result.testsRun)
def test_exclude_errors(self):
filtered_result = unittest.TestResult()
result_filter = TestResultFilter(filtered_result, filter_error=True)
self.run_tests(result_filter)
# skips are seen as errors by default python TestResult.
self.assertEqual([], filtered_result.errors)
self.assertEqual(['failed'],
[failure[0].id() for failure in
filtered_result.failures])
self.assertEqual(3, filtered_result.testsRun)
def test_fixup_expected_failures(self):
filtered_result = unittest.TestResult()
result_filter = TestResultFilter(filtered_result,
fixup_expected_failures=set(["failed"]))
self.run_tests(result_filter)
self.assertEqual(['failed', 'todo'],
[failure[0].id() for failure in filtered_result.expectedFailures])
self.assertEqual([], filtered_result.failures)
self.assertEqual(4, filtered_result.testsRun)
def test_fixup_expected_errors(self):
filtered_result = unittest.TestResult()
result_filter = TestResultFilter(filtered_result,
fixup_expected_failures=set(["error"]))
self.run_tests(result_filter)
self.assertEqual(['error', 'todo'],
[failure[0].id() for failure in filtered_result.expectedFailures])
self.assertEqual([], filtered_result.errors)
self.assertEqual(4, filtered_result.testsRun)
def test_fixup_unexpected_success(self):
filtered_result = unittest.TestResult()
result_filter = TestResultFilter(filtered_result, filter_success=False,
fixup_expected_failures=set(["passed"]))
self.run_tests(result_filter)
self.assertEqual(['passed'],
[passed.id() for passed in filtered_result.unexpectedSuccesses])
self.assertEqual(5, filtered_result.testsRun)
def test_exclude_failure(self):
filtered_result = unittest.TestResult()
result_filter = TestResultFilter(filtered_result, filter_failure=True)
self.run_tests(result_filter)
self.assertEqual(['error'],
[error[0].id() for error in filtered_result.errors])
self.assertEqual([],
[failure[0].id() for failure in
filtered_result.failures])
self.assertEqual(3, filtered_result.testsRun)
def test_exclude_skips(self):
filtered_result = subunit.TestResultStats(None)
result_filter = TestResultFilter(filtered_result, filter_skip=True)
self.run_tests(result_filter)
self.assertEqual(0, filtered_result.skipped_tests)
self.assertEqual(2, filtered_result.failed_tests)
self.assertEqual(3, filtered_result.testsRun)
def test_include_success(self):
"""Successes can be included if requested."""
filtered_result = unittest.TestResult()
result_filter = TestResultFilter(filtered_result,
filter_success=False)
self.run_tests(result_filter)
self.assertEqual(['error'],
[error[0].id() for error in filtered_result.errors])
self.assertEqual(['failed'],
[failure[0].id() for failure in
filtered_result.failures])
self.assertEqual(5, filtered_result.testsRun)
def test_filter_predicate(self):
"""You can filter by predicate callbacks"""
filtered_result = unittest.TestResult()
def filter_cb(test, outcome, err, details):
return outcome == 'success'
result_filter = TestResultFilter(filtered_result,
filter_predicate=filter_cb,
filter_success=False)
self.run_tests(result_filter)
# Only success should pass
self.assertEqual(1, filtered_result.testsRun)
def test_time_ordering_preserved(self):
# Passing a subunit stream through TestResultFilter preserves the
# relative ordering of 'time' directives and any other subunit
# directives that are still included.
date_a = datetime(year=2000, month=1, day=1, tzinfo=iso8601.UTC)
date_b = datetime(year=2000, month=1, day=2, tzinfo=iso8601.UTC)
date_c = datetime(year=2000, month=1, day=3, tzinfo=iso8601.UTC)
subunit_stream = _b('\n'.join([
"time: %s",
"test: foo",
"time: %s",
"error: foo",
"time: %s",
""]) % (date_a, date_b, date_c))
result = ExtendedTestResult()
result_filter = TestResultFilter(result)
self.run_tests(result_filter, subunit_stream)
foo = subunit.RemotedTestCase('foo')
self.assertEquals(
[('time', date_a),
('startTest', foo),
('time', date_b),
('addError', foo, {}),
('stopTest', foo),
('time', date_c)], result._events)
def test_skip_preserved(self):
subunit_stream = _b('\n'.join([
"test: foo",
"skip: foo",
""]))
result = ExtendedTestResult()
result_filter = TestResultFilter(result)
self.run_tests(result_filter, subunit_stream)
foo = subunit.RemotedTestCase('foo')
self.assertEquals(
[('startTest', foo),
('addSkip', foo, {}),
('stopTest', foo), ], result._events)
def test_suite():
loader = subunit.tests.TestUtil.TestLoader()
result = loader.loadTestsFromName(__name__)
return result
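
# Illustrative sketch (not part of the original tests): the same filter can be
# used outside the test suite to strip successes from a subunit byte stream:
#
#     from io import BytesIO
#     import unittest
#     import subunit
#     from subunit.test_results import TestResultFilter
#
#     result = unittest.TestResult()
#     stream = BytesIO(b"test foo\nsuccess foo\ntest bar\nfailure bar\n")
#     subunit.ProtocolTestCase(stream).run(TestResultFilter(result))
#     # `result` now records only the failure of 'bar'.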
| gpl-2.0 | 3,714,596,544,201,391,600 | 38.706731 | 80 | 0.652379 | false |
ayoubg/gem5-graphics | Mesa-7.11.2_GPGPU-Sim/src/gallium/drivers/i965/brw_structs_dump.py | 34 | 8917 | #!/usr/bin/env python
'''
Generates dumpers for the i965 state structures using pygccxml.
Run as
PYTHONPATH=/path/to/pygccxml-1.0.0 python brw_structs_dump.py
Jose Fonseca <[email protected]>
'''
copyright = '''
/**************************************************************************
*
* Copyright 2009 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
**************************************************************************/
'''
import os
import sys
import re
from pygccxml import parser
from pygccxml import declarations
from pygccxml.declarations import algorithm
from pygccxml.declarations import decl_visitor
from pygccxml.declarations import type_traits
from pygccxml.declarations import type_visitor
enums = True
def vars_filter(variable):
name = variable.name
return not re.match('^pad\d*', name) and name != 'dword'
class decl_dumper_t(decl_visitor.decl_visitor_t):
def __init__(self, stream, instance = '', decl = None):
decl_visitor.decl_visitor_t.__init__(self)
self.stream = stream
self._instance = instance
self.decl = decl
def clone(self):
return decl_dumper_t(self.stream, self._instance, self.decl)
def visit_class(self):
class_ = self.decl
assert self.decl.class_type in ('struct', 'union')
for variable in class_.variables(recursive = False):
if vars_filter(variable):
dump_type(self.stream, self._instance + '.' + variable.name, variable.type)
def visit_enumeration(self):
if enums:
self.stream.write(' switch(%s) {\n' % ("(*ptr)" + self._instance,))
for name, value in self.decl.values:
self.stream.write(' case %s:\n' % (name,))
self.stream.write(' debug_printf("\\t\\t%s = %s\\n");\n' % (self._instance, name))
self.stream.write(' break;\n')
self.stream.write(' default:\n')
self.stream.write(' debug_printf("\\t\\t%s = %%i\\n", %s);\n' % (self._instance, "(*ptr)" + self._instance))
self.stream.write(' break;\n')
self.stream.write(' }\n')
else:
self.stream.write(' debug_printf("\\t\\t%s = %%i\\n", %s);\n' % (self._instance, "(*ptr)" + self._instance))
def dump_decl(stream, instance, decl):
dumper = decl_dumper_t(stream, instance, decl)
algorithm.apply_visitor(dumper, decl)
class type_dumper_t(type_visitor.type_visitor_t):
def __init__(self, stream, instance, type_):
type_visitor.type_visitor_t.__init__(self)
self.stream = stream
self.instance = instance
self.type = type_
def clone(self):
return type_dumper_t(self.instance, self.type)
def visit_bool(self):
self.print_instance('%i')
def visit_char(self):
#self.print_instance('%i')
self.print_instance('0x%x')
def visit_unsigned_char(self):
#self.print_instance('%u')
self.print_instance('0x%x')
def visit_signed_char(self):
#self.print_instance('%i')
self.print_instance('0x%x')
def visit_wchar(self):
self.print_instance('0x%x')
def visit_short_int(self):
#self.print_instance('%i')
self.print_instance('0x%x')
def visit_short_unsigned_int(self):
#self.print_instance('%u')
self.print_instance('0x%x')
def visit_int(self):
#self.print_instance('%i')
self.print_instance('0x%x')
def visit_unsigned_int(self):
#self.print_instance('%u')
self.print_instance('0x%x')
def visit_long_int(self):
#self.print_instance('%li')
self.print_instance('0x%lx')
def visit_long_unsigned_int(self):
#self.print_instance('%lu')
self.print_instance('%0xlx')
def visit_long_long_int(self):
#self.print_instance('%lli')
self.print_instance('%0xllx')
def visit_long_long_unsigned_int(self):
#self.print_instance('%llu')
self.print_instance('0x%llx')
def visit_float(self):
self.print_instance('%f')
def visit_double(self):
self.print_instance('%f')
def visit_array(self):
for i in range(type_traits.array_size(self.type)):
dump_type(self.stream, self.instance + '[%i]' % i, type_traits.base_type(self.type))
def visit_pointer(self):
self.print_instance('%p')
def visit_declarated(self):
#stream.write('decl = %r\n' % self.type.decl_string)
decl = type_traits.remove_declarated(self.type)
dump_decl(self.stream, self.instance, decl)
def print_instance(self, format):
self.stream.write(' debug_printf("\\t\\t%s = %s\\n", %s);\n' % (self.instance, format, "(*ptr)" + self.instance))
def dump_type(stream, instance, type_):
type_ = type_traits.remove_alias(type_)
visitor = type_dumper_t(stream, instance, type_)
algorithm.apply_visitor(visitor, type_)
def dump_struct_interface(stream, class_, suffix = ';'):
name = class_.name
assert name.startswith('brw_');
name = name[:4] + 'dump_' + name[4:]
stream.write('void\n')
stream.write('%s(const struct %s *ptr)%s\n' % (name, class_.name, suffix))
def dump_struct_implementation(stream, decls, class_):
dump_struct_interface(stream, class_, suffix = '')
stream.write('{\n')
dump_decl(stream, '', class_)
stream.write('}\n')
stream.write('\n')
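
# Illustrative note (not part of the original script): for a hypothetical
# `struct brw_foo` with a single unsigned member `bar`, the two helpers above
# emit a prototype and a dumper of roughly this shape:
#
#     void
#     brw_dump_foo(const struct brw_foo *ptr);
#
#     void
#     brw_dump_foo(const struct brw_foo *ptr)
#     {
#        debug_printf("\t\t.bar = 0x%x\n", (*ptr).bar);
#     }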
def dump_header(stream):
stream.write(copyright.strip() + '\n')
stream.write('\n')
stream.write('/**\n')
stream.write(' * @file\n')
stream.write(' * Dump i965 data structures.\n')
stream.write(' *\n')
stream.write(' * Generated automatically from brw_structs.h by brw_structs_dump.py.\n')
stream.write(' */\n')
stream.write('\n')
def dump_interfaces(decls, global_ns, names):
stream = open('brw_structs_dump.h', 'wt')
dump_header(stream)
stream.write('#ifndef BRW_STRUCTS_DUMP_H\n')
stream.write('#define BRW_STRUCTS_DUMP_H\n')
stream.write('\n')
for name in names:
stream.write('struct %s;\n' % (name,))
stream.write('\n')
for name in names:
(class_,) = global_ns.classes(name = name)
dump_struct_interface(stream, class_)
stream.write('\n')
stream.write('\n')
stream.write('#endif /* BRW_STRUCTS_DUMP_H */\n')
def dump_implementations(decls, global_ns, names):
stream = open('brw_structs_dump.c', 'wt')
dump_header(stream)
stream.write('#include "util/u_debug.h"\n')
stream.write('\n')
stream.write('#include "brw_types.h"\n')
stream.write('#include "brw_structs.h"\n')
stream.write('#include "brw_structs_dump.h"\n')
stream.write('\n')
for name in names:
(class_,) = global_ns.classes(name = name)
dump_struct_implementation(stream, decls, class_)
def decl_filter(decl):
'''Filter the declarations we're interested in'''
name = decl.name
return name.startswith('brw_') and name not in ('brw_instruction',)
def main():
config = parser.config_t(
include_paths = [
'../../include',
],
compiler = 'gcc',
)
headers = [
'brw_types.h',
'brw_structs.h',
]
decls = parser.parse(headers, config, parser.COMPILATION_MODE.ALL_AT_ONCE)
global_ns = declarations.get_global_namespace(decls)
names = []
for class_ in global_ns.classes(decl_filter):
names.append(class_.name)
names.sort()
dump_interfaces(decls, global_ns, names)
dump_implementations(decls, global_ns, names)
if __name__ == '__main__':
main()
| bsd-3-clause | -7,219,475,154,285,351,000 | 29.642612 | 125 | 0.60222 | false |
Beauhurst/django | tests/auth_tests/test_basic.py | 31 | 5389 | from django.contrib.auth import get_user, get_user_model
from django.contrib.auth.models import AnonymousUser, User
from django.core.exceptions import ImproperlyConfigured
from django.db import IntegrityError
from django.http import HttpRequest
from django.test import TestCase, override_settings
from django.utils import translation
from .models import CustomUser
class BasicTestCase(TestCase):
def test_user(self):
"Users can be created and can set their password"
u = User.objects.create_user('testuser', '[email protected]', 'testpw')
self.assertTrue(u.has_usable_password())
self.assertFalse(u.check_password('bad'))
self.assertTrue(u.check_password('testpw'))
# Check we can manually set an unusable password
u.set_unusable_password()
u.save()
self.assertFalse(u.check_password('testpw'))
self.assertFalse(u.has_usable_password())
u.set_password('testpw')
self.assertTrue(u.check_password('testpw'))
u.set_password(None)
self.assertFalse(u.has_usable_password())
# Check username getter
self.assertEqual(u.get_username(), 'testuser')
# Check authentication/permissions
self.assertFalse(u.is_anonymous)
self.assertTrue(u.is_authenticated)
self.assertFalse(u.is_staff)
self.assertTrue(u.is_active)
self.assertFalse(u.is_superuser)
# Check API-based user creation with no password
u2 = User.objects.create_user('testuser2', '[email protected]')
self.assertFalse(u2.has_usable_password())
def test_unicode_username(self):
User.objects.create_user('jörg')
User.objects.create_user('Григорий')
# Two equivalent unicode normalized usernames should be duplicates
omega_username = 'iamtheΩ' # U+03A9 GREEK CAPITAL LETTER OMEGA
ohm_username = 'iamtheΩ' # U+2126 OHM SIGN
User.objects.create_user(ohm_username)
with self.assertRaises(IntegrityError):
User.objects.create_user(omega_username)
def test_user_no_email(self):
"Users can be created without an email"
u = User.objects.create_user('testuser1')
self.assertEqual(u.email, '')
u2 = User.objects.create_user('testuser2', email='')
self.assertEqual(u2.email, '')
u3 = User.objects.create_user('testuser3', email=None)
self.assertEqual(u3.email, '')
def test_anonymous_user(self):
"Check the properties of the anonymous user"
a = AnonymousUser()
self.assertIsNone(a.pk)
self.assertEqual(a.username, '')
self.assertEqual(a.get_username(), '')
self.assertTrue(a.is_anonymous)
self.assertFalse(a.is_authenticated)
self.assertFalse(a.is_staff)
self.assertFalse(a.is_active)
self.assertFalse(a.is_superuser)
self.assertEqual(a.groups.all().count(), 0)
self.assertEqual(a.user_permissions.all().count(), 0)
def test_superuser(self):
"Check the creation and properties of a superuser"
super = User.objects.create_superuser('super', '[email protected]', 'super')
self.assertTrue(super.is_superuser)
self.assertTrue(super.is_active)
self.assertTrue(super.is_staff)
def test_get_user_model(self):
"The current user model can be retrieved"
self.assertEqual(get_user_model(), User)
@override_settings(AUTH_USER_MODEL='auth_tests.CustomUser')
def test_swappable_user(self):
"The current user model can be swapped out for another"
self.assertEqual(get_user_model(), CustomUser)
with self.assertRaises(AttributeError):
User.objects.all()
@override_settings(AUTH_USER_MODEL='badsetting')
def test_swappable_user_bad_setting(self):
"The alternate user setting must point to something in the format app.model"
with self.assertRaises(ImproperlyConfigured):
get_user_model()
@override_settings(AUTH_USER_MODEL='thismodel.doesntexist')
def test_swappable_user_nonexistent_model(self):
"The current user model must point to an installed model"
with self.assertRaises(ImproperlyConfigured):
get_user_model()
def test_user_verbose_names_translatable(self):
"Default User model verbose names are translatable (#19945)"
with translation.override('en'):
self.assertEqual(User._meta.verbose_name, 'user')
self.assertEqual(User._meta.verbose_name_plural, 'users')
with translation.override('es'):
self.assertEqual(User._meta.verbose_name, 'usuario')
self.assertEqual(User._meta.verbose_name_plural, 'usuarios')
class TestGetUser(TestCase):
def test_get_user_anonymous(self):
request = HttpRequest()
request.session = self.client.session
user = get_user(request)
self.assertIsInstance(user, AnonymousUser)
def test_get_user(self):
created_user = User.objects.create_user('testuser', '[email protected]', 'testpw')
self.client.login(username='testuser', password='testpw')
request = HttpRequest()
request.session = self.client.session
user = get_user(request)
self.assertIsInstance(user, User)
self.assertEqual(user.username, created_user.username)
| bsd-3-clause | -1,623,448,651,716,338,700 | 39.126866 | 89 | 0.666543 | false |
unho/pootle | pootle/apps/pootle_store/migrations/0025_unit_on_delete_user.py | 7 | 1163 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-02 14:03
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import pootle.core.user
class Migration(migrations.Migration):
dependencies = [
('pootle_store', '0024_set_store_base_manager_name'),
]
operations = [
migrations.AlterField(
model_name='unit',
name='commented_by',
field=models.ForeignKey(null=True, on_delete=models.SET(pootle.core.user.get_system_user), related_name='commented', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='unit',
name='reviewed_by',
field=models.ForeignKey(null=True, on_delete=models.SET(pootle.core.user.get_system_user), related_name='reviewed', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='unit',
name='submitted_by',
field=models.ForeignKey(null=True, on_delete=models.SET(pootle.core.user.get_system_user), related_name='submitted', to=settings.AUTH_USER_MODEL),
),
]
| gpl-3.0 | -3,931,261,806,396,813,000 | 34.242424 | 158 | 0.637145 | false |
ajaybhat/scikit-image | skimage/measure/tests/test_fit.py | 6 | 10401 | import numpy as np
from numpy.testing import assert_equal, assert_raises, assert_almost_equal
from skimage.measure import LineModelND, CircleModel, EllipseModel, ransac
from skimage.transform import AffineTransform
from skimage.measure.fit import _dynamic_max_trials
from skimage._shared._warnings import expected_warnings
def test_line_model_invalid_input():
assert_raises(ValueError, LineModelND().estimate, np.empty((1, 3)))
def test_line_model_predict():
model = LineModelND()
model.params = ((0, 0), (1, 1))
x = np.arange(-10, 10)
y = model.predict_y(x)
assert_almost_equal(x, model.predict_x(y))
def test_line_model_estimate():
# generate original data without noise
model0 = LineModelND()
model0.params = ((0, 0), (1, 1))
x0 = np.arange(-100, 100)
y0 = model0.predict_y(x0)
data = np.column_stack([x0, y0])
# estimate parameters of noisy data
model_est = LineModelND()
model_est.estimate(data)
# test whether estimated parameters almost equal original parameters
random_state = np.random.RandomState(1234)
x = random_state.rand(100, 2)
assert_almost_equal(model0.predict(x), model_est.predict(x), 1)
def test_line_model_residuals():
model = LineModelND()
model.params = (np.array([0, 0]), np.array([0, 1]))
assert_equal(model.residuals(np.array([[0, 0]])), 0)
assert_equal(model.residuals(np.array([[0, 10]])), 0)
assert_equal(model.residuals(np.array([[10, 0]])), 10)
model.params = (np.array([-2, 0]), np.array([1, 1]) / np.sqrt(2))
assert_equal(model.residuals(np.array([[0, 0]])), np.sqrt(2))
assert_almost_equal(model.residuals(np.array([[-4, 0]])), np.sqrt(2))
def test_line_model_under_determined():
data = np.empty((1, 2))
assert_raises(ValueError, LineModelND().estimate, data)
def test_line_modelND_invalid_input():
assert_raises(ValueError, LineModelND().estimate, np.empty((5, 1)))
def test_line_modelND_predict():
model = LineModelND()
model.params = (np.array([0, 0]), np.array([0.2, 0.98]))
x = np.arange(-10, 10)
y = model.predict_y(x)
assert_almost_equal(x, model.predict_x(y))
def test_line_modelND_estimate():
# generate original data without noise
model0 = LineModelND()
model0.params = (np.array([0,0,0], dtype='float'),
np.array([1,1,1], dtype='float')/np.sqrt(3))
# we scale the unit vector with a factor 10 when generating points on the
# line in order to compensate for the scale of the random noise
data0 = (model0.params[0] +
10 * np.arange(-100,100)[...,np.newaxis] * model0.params[1])
# add gaussian noise to data
random_state = np.random.RandomState(1234)
data = data0 + random_state.normal(size=data0.shape)
# estimate parameters of noisy data
model_est = LineModelND()
model_est.estimate(data)
# test whether estimated parameters are correct
# we use the following geometric property: two aligned vectors have
# a cross-product equal to zero
# test if direction vectors are aligned
assert_almost_equal(np.linalg.norm(np.cross(model0.params[1],
model_est.params[1])), 0, 1)
# test if origins are aligned with the direction
a = model_est.params[0] - model0.params[0]
if np.linalg.norm(a) > 0:
a /= np.linalg.norm(a)
assert_almost_equal(np.linalg.norm(np.cross(model0.params[1], a)), 0, 1)
def test_line_modelND_residuals():
model = LineModelND()
model.params = (np.array([0, 0, 0]), np.array([0, 0, 1]))
assert_equal(abs(model.residuals(np.array([[0, 0, 0]]))), 0)
assert_equal(abs(model.residuals(np.array([[0, 0, 1]]))), 0)
assert_equal(abs(model.residuals(np.array([[10, 0, 0]]))), 10)
def test_line_modelND_under_determined():
data = np.empty((1, 3))
assert_raises(ValueError, LineModelND().estimate, data)
def test_circle_model_invalid_input():
assert_raises(ValueError, CircleModel().estimate, np.empty((5, 3)))
def test_circle_model_predict():
model = CircleModel()
r = 5
model.params = (0, 0, r)
t = np.arange(0, 2 * np.pi, np.pi / 2)
xy = np.array(((5, 0), (0, 5), (-5, 0), (0, -5)))
assert_almost_equal(xy, model.predict_xy(t))
def test_circle_model_estimate():
# generate original data without noise
model0 = CircleModel()
model0.params = (10, 12, 3)
t = np.linspace(0, 2 * np.pi, 1000)
data0 = model0.predict_xy(t)
# add gaussian noise to data
random_state = np.random.RandomState(1234)
data = data0 + random_state.normal(size=data0.shape)
# estimate parameters of noisy data
model_est = CircleModel()
model_est.estimate(data)
# test whether estimated parameters almost equal original parameters
assert_almost_equal(model0.params, model_est.params, 1)
def test_circle_model_residuals():
model = CircleModel()
model.params = (0, 0, 5)
assert_almost_equal(abs(model.residuals(np.array([[5, 0]]))), 0)
assert_almost_equal(abs(model.residuals(np.array([[6, 6]]))),
np.sqrt(2 * 6**2) - 5)
assert_almost_equal(abs(model.residuals(np.array([[10, 0]]))), 5)
def test_ellipse_model_invalid_input():
assert_raises(ValueError, EllipseModel().estimate, np.empty((5, 3)))
def test_ellipse_model_predict():
model = EllipseModel()
r = 5
model.params = (0, 0, 5, 10, 0)
t = np.arange(0, 2 * np.pi, np.pi / 2)
xy = np.array(((5, 0), (0, 10), (-5, 0), (0, -10)))
assert_almost_equal(xy, model.predict_xy(t))
def test_ellipse_model_estimate():
# generate original data without noise
model0 = EllipseModel()
model0.params = (10, 20, 15, 25, 0)
t = np.linspace(0, 2 * np.pi, 100)
data0 = model0.predict_xy(t)
# add gaussian noise to data
random_state = np.random.RandomState(1234)
data = data0 + random_state.normal(size=data0.shape)
# estimate parameters of noisy data
model_est = EllipseModel()
model_est.estimate(data)
# test whether estimated parameters almost equal original parameters
assert_almost_equal(model0.params, model_est.params, 0)
def test_ellipse_model_residuals():
model = EllipseModel()
# vertical line through origin
model.params = (0, 0, 10, 5, 0)
assert_almost_equal(abs(model.residuals(np.array([[10, 0]]))), 0)
assert_almost_equal(abs(model.residuals(np.array([[0, 5]]))), 0)
assert_almost_equal(abs(model.residuals(np.array([[0, 10]]))), 5)
def test_ransac_shape():
# generate original data without noise
model0 = CircleModel()
model0.params = (10, 12, 3)
t = np.linspace(0, 2 * np.pi, 1000)
data0 = model0.predict_xy(t)
# add some faulty data
outliers = (10, 30, 200)
data0[outliers[0], :] = (1000, 1000)
data0[outliers[1], :] = (-50, 50)
data0[outliers[2], :] = (-100, -10)
# estimate parameters of corrupted data
model_est, inliers = ransac(data0, CircleModel, 3, 5,
random_state=1)
# test whether estimated parameters equal original parameters
assert_equal(model0.params, model_est.params)
for outlier in outliers:
assert outlier not in inliers
def test_ransac_geometric():
random_state = np.random.RandomState(1)
# generate original data without noise
src = 100 * random_state.random_sample((50, 2))
model0 = AffineTransform(scale=(0.5, 0.3), rotation=1,
translation=(10, 20))
dst = model0(src)
# add some faulty data
outliers = (0, 5, 20)
dst[outliers[0]] = (10000, 10000)
dst[outliers[1]] = (-100, 100)
dst[outliers[2]] = (50, 50)
# estimate parameters of corrupted data
model_est, inliers = ransac((src, dst), AffineTransform, 2, 20,
random_state=random_state)
# test whether estimated parameters equal original parameters
assert_almost_equal(model0.params, model_est.params)
assert np.all(np.nonzero(inliers == False)[0] == outliers)
def test_ransac_is_data_valid():
is_data_valid = lambda data: data.shape[0] > 2
model, inliers = ransac(np.empty((10, 2)), LineModelND, 2, np.inf,
is_data_valid=is_data_valid, random_state=1)
assert_equal(model, None)
assert_equal(inliers, None)
def test_ransac_is_model_valid():
def is_model_valid(model, data):
return False
model, inliers = ransac(np.empty((10, 2)), LineModelND, 2, np.inf,
is_model_valid=is_model_valid, random_state=1)
assert_equal(model, None)
assert_equal(inliers, None)
def test_ransac_dynamic_max_trials():
# Numbers hand-calculated and confirmed on page 119 (Table 4.3) in
# Hartley, R.~I. and Zisserman, A., 2004,
# Multiple View Geometry in Computer Vision, Second Edition,
# Cambridge University Press, ISBN: 0521540518
# e = 0%, min_samples = X
assert_equal(_dynamic_max_trials(100, 100, 2, 0.99), 1)
# e = 5%, min_samples = 2
assert_equal(_dynamic_max_trials(95, 100, 2, 0.99), 2)
# e = 10%, min_samples = 2
assert_equal(_dynamic_max_trials(90, 100, 2, 0.99), 3)
# e = 30%, min_samples = 2
assert_equal(_dynamic_max_trials(70, 100, 2, 0.99), 7)
# e = 50%, min_samples = 2
assert_equal(_dynamic_max_trials(50, 100, 2, 0.99), 17)
# e = 5%, min_samples = 8
assert_equal(_dynamic_max_trials(95, 100, 8, 0.99), 5)
# e = 10%, min_samples = 8
assert_equal(_dynamic_max_trials(90, 100, 8, 0.99), 9)
# e = 30%, min_samples = 8
assert_equal(_dynamic_max_trials(70, 100, 8, 0.99), 78)
# e = 50%, min_samples = 8
assert_equal(_dynamic_max_trials(50, 100, 8, 0.99), 1177)
# e = 0%, min_samples = 5
assert_equal(_dynamic_max_trials(1, 100, 5, 0), 0)
assert_equal(_dynamic_max_trials(1, 100, 5, 1), np.inf)
def test_ransac_invalid_input():
assert_raises(ValueError, ransac, np.zeros((10, 2)), None, min_samples=2,
residual_threshold=0, max_trials=-1)
assert_raises(ValueError, ransac, np.zeros((10, 2)), None, min_samples=2,
residual_threshold=0, stop_probability=-1)
assert_raises(ValueError, ransac, np.zeros((10, 2)), None, min_samples=2,
residual_threshold=0, stop_probability=1.01)
if __name__ == "__main__":
np.testing.run_module_suite()
| bsd-3-clause | -8,422,721,417,998,824,000 | 33.213816 | 77 | 0.630997 | false |
GotlingSystem/apnea | src/apps/dive_log/models.py | 1 | 2496 | # coding=utf-8
from django.contrib.auth.models import User
from django.db import models
from django.utils.translation import ugettext as _
from discipline.models import Discipline
class Session(models.Model):
#pool = models.ForeignKey(Pool)
user = models.ForeignKey(User)
date = models.DateField(verbose_name=_(u'Datum'))
time = models.TimeField(verbose_name=_(u'Tid'))
comment = models.CharField(verbose_name=_(u'Kommentar'), max_length=512, blank=True)
class Meta:
verbose_name = _(u'Session')
verbose_name_plural = _(u'Sessioner')
ordering = ['-date', '-time']
def __unicode__(self):
return "{} {}".format(self.date, self.time)
class Dive(models.Model):
session = models.ForeignKey(Session)
discipline = models.ForeignKey(Discipline, verbose_name=_(u'Disciplin'), null=True, blank=True)
rest_duration = models.DurationField(_(u'Vila'), null=True, blank=True)
start = models.TimeField(null=True, blank=True)
dive_duration = models.DurationField(_(u'Dyktid'), null=True, blank=True)
distance = models.IntegerField(_(u'Distans'), help_text=_(u'i meter'), null=True)
temperature = models.IntegerField(_(u'Temperatur'), help_text=_(u'i celsius'), null=True, blank=True)
comment = models.CharField(_(u'Kommentar'), max_length=512, blank=True)
# TODO: Tag migrations broken with Django 1.7.2 https://github.com/alex/django-taggit/issues/285
#tags = TaggableManager(verbose_name=_(u'Taggar'), blank=True)
class Meta:
verbose_name = _(u'Dyk')
verbose_name_plural = _(u'Dyk')
ordering = ['id']
def __unicode__(self):
if self.discipline:
return "{} - {}".format(self.discipline.abbreviation, str(self.dive_duration))
else:
return str(self.dive_duration)
class DataPoint(models.Model):
dive = models.ForeignKey(Dive)
second = models.IntegerField(verbose_name=_(u'Sekund'))
depth = models.DecimalField(verbose_name=_(u'Djup'), decimal_places=1, max_digits=4, null=True, blank=True)
temperature = models.DecimalField(verbose_name=_(u'Temperatur'), decimal_places=1, max_digits=3, null=True, blank=True)
heart_rate = models.IntegerField(verbose_name=_(u'Puls'), null=True, blank=True)
class Meta:
verbose_name = _(u'Datapunkt')
verbose_name_plural = _(u'Datapunkter')
ordering = ['second']
def __unicode__(self):
        return u'{} - {} m'.format(self.second, self.depth)
 | mit | 8,125,022,391,499,963,000 | 39.934426 | 123 | 0.663061 | false |
ThiagoGarciaAlves/intellij-community | python/lib/Lib/site-packages/django/conf/locale/ka/formats.py | 329 | 1888 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'l, j F, Y'
TIME_FORMAT = 'h:i:s a'
DATETIME_FORMAT = 'j F, Y h:i:s a'
YEAR_MONTH_FORMAT = 'F, Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'j.M.Y'
SHORT_DATETIME_FORMAT = 'j.M.Y H:i:s'
FIRST_DAY_OF_WEEK = 1 # (Monday)
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06'
# '%d %b %Y', '%d %b, %Y', '%d %b. %Y', # '25 Oct 2006', '25 Oct, 2006', '25 Oct. 2006'
# '%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
# '%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06'
)
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M', # '14:30'
)
DATETIME_INPUT_FORMATS = (
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
'%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59'
'%d.%m.%y %H:%M', # '25.10.06 14:30'
'%d.%m.%y', # '25.10.06'
'%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
'%m/%d/%Y %H:%M', # '10/25/2006 14:30'
'%m/%d/%Y', # '10/25/2006'
'%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
'%m/%d/%y %H:%M', # '10/25/06 14:30'
'%m/%d/%y', # '10/25/06'
)
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = " "
NUMBER_GROUPING = 3
| apache-2.0 | 2,978,498,176,289,796,600 | 39.170213 | 93 | 0.474047 | false |
openstack/heat | heat/tests/engine/test_engine_worker.py | 1 | 12872 | # Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
from heat.db.sqlalchemy import api as db_api
from heat.engine import check_resource
from heat.engine import stack as parser
from heat.engine import template as templatem
from heat.engine import worker
from heat.objects import stack as stack_objects
from heat.rpc import worker_client as wc
from heat.tests import common
from heat.tests import utils
class WorkerServiceTest(common.HeatTestCase):
def test_make_sure_rpc_version(self):
self.assertEqual(
'1.4',
worker.WorkerService.RPC_API_VERSION,
('RPC version is changed, please update this test to new version '
'and make sure additional test cases are added for RPC APIs '
'added in new version'))
@mock.patch('heat.common.messaging.get_rpc_server',
return_value=mock.Mock())
@mock.patch('oslo_messaging.Target',
return_value=mock.Mock())
@mock.patch('heat.rpc.worker_client.WorkerClient',
return_value=mock.Mock())
def test_service_start(self,
rpc_client_class,
target_class,
rpc_server_method
):
self.worker = worker.WorkerService('host-1',
'topic-1',
'engine_id',
mock.Mock())
self.worker.start()
# Make sure target is called with proper parameters
target_class.assert_called_once_with(
version=worker.WorkerService.RPC_API_VERSION,
server=self.worker.engine_id,
topic=self.worker.topic)
# Make sure rpc server creation with proper target
# and WorkerService is initialized with it
target = target_class.return_value
rpc_server_method.assert_called_once_with(target,
self.worker)
rpc_server = rpc_server_method.return_value
self.assertEqual(rpc_server,
self.worker._rpc_server,
"Failed to create RPC server")
# Make sure rpc server is started.
rpc_server.start.assert_called_once_with()
# Make sure rpc client is created and initialized in WorkerService
rpc_client = rpc_client_class.return_value
rpc_client_class.assert_called_once_with()
self.assertEqual(rpc_client,
self.worker._rpc_client,
"Failed to create RPC client")
def test_service_stop(self):
self.worker = worker.WorkerService('host-1',
'topic-1',
'engine_id',
mock.Mock())
with mock.patch.object(self.worker, '_rpc_server') as mock_rpc_server:
self.worker.stop()
mock_rpc_server.stop.assert_called_once_with()
mock_rpc_server.wait.assert_called_once_with()
@mock.patch.object(check_resource, 'load_resource')
@mock.patch.object(check_resource.CheckResource, 'check')
def test_check_resource_adds_and_removes_msg_queue(self,
mock_check,
mock_load_resource):
mock_tgm = mock.MagicMock()
mock_tgm.add_msg_queue = mock.Mock(return_value=None)
mock_tgm.remove_msg_queue = mock.Mock(return_value=None)
self.worker = worker.WorkerService('host-1',
'topic-1',
'engine_id',
mock_tgm)
ctx = utils.dummy_context()
current_traversal = 'something'
fake_res = mock.MagicMock()
fake_res.current_traversal = current_traversal
mock_load_resource.return_value = (fake_res, fake_res, fake_res)
self.worker.check_resource(ctx, mock.Mock(), current_traversal,
{}, mock.Mock(), mock.Mock())
self.assertTrue(mock_tgm.add_msg_queue.called)
self.assertTrue(mock_tgm.remove_msg_queue.called)
@mock.patch.object(check_resource, 'load_resource')
@mock.patch.object(check_resource.CheckResource, 'check')
def test_check_resource_adds_and_removes_msg_queue_on_exception(
self, mock_check, mock_load_resource):
# even if the check fails; the message should be removed
mock_tgm = mock.MagicMock()
mock_tgm.add_msg_queue = mock.Mock(return_value=None)
mock_tgm.remove_msg_queue = mock.Mock(return_value=None)
self.worker = worker.WorkerService('host-1',
'topic-1',
'engine_id',
mock_tgm)
ctx = utils.dummy_context()
current_traversal = 'something'
fake_res = mock.MagicMock()
fake_res.current_traversal = current_traversal
mock_load_resource.return_value = (fake_res, fake_res, fake_res)
mock_check.side_effect = BaseException
self.assertRaises(BaseException, self.worker.check_resource,
ctx, mock.Mock(), current_traversal, {},
mock.Mock(), mock.Mock())
self.assertTrue(mock_tgm.add_msg_queue.called)
# ensure remove is also called
self.assertTrue(mock_tgm.remove_msg_queue.called)
@mock.patch.object(worker, '_wait_for_cancellation')
@mock.patch.object(worker, '_cancel_check_resource')
@mock.patch.object(wc.WorkerClient, 'cancel_check_resource')
@mock.patch.object(db_api, 'engine_get_all_locked_by_stack')
def test_cancel_workers_when_no_resource_found(self, mock_get_locked,
mock_ccr, mock_wccr,
mock_wc):
mock_tgm = mock.Mock()
_worker = worker.WorkerService('host-1', 'topic-1', 'engine-001',
mock_tgm)
stack = mock.MagicMock()
stack.id = 'stack_id'
mock_get_locked.return_value = []
worker._cancel_workers(stack, mock_tgm, 'engine-001',
_worker._rpc_client)
self.assertFalse(mock_wccr.called)
self.assertFalse(mock_ccr.called)
@mock.patch.object(worker, '_wait_for_cancellation')
@mock.patch.object(worker, '_cancel_check_resource')
@mock.patch.object(wc.WorkerClient, 'cancel_check_resource')
@mock.patch.object(db_api, 'engine_get_all_locked_by_stack')
def test_cancel_workers_with_resources_found(self, mock_get_locked,
mock_ccr, mock_wccr,
mock_wc):
mock_tgm = mock.Mock()
_worker = worker.WorkerService('host-1', 'topic-1', 'engine-001',
mock_tgm)
stack = mock.MagicMock()
stack.id = 'stack_id'
mock_get_locked.return_value = ['engine-001', 'engine-007',
'engine-008']
worker._cancel_workers(stack, mock_tgm, 'engine-001',
_worker._rpc_client)
mock_wccr.assert_called_once_with(stack.id, 'engine-001', mock_tgm)
self.assertEqual(2, mock_ccr.call_count)
calls = [mock.call(stack.context, stack.id, 'engine-007'),
mock.call(stack.context, stack.id, 'engine-008')]
mock_ccr.assert_has_calls(calls, any_order=True)
self.assertTrue(mock_wc.called)
@mock.patch.object(worker, '_stop_traversal')
def test_stop_traversal_stops_nested_stack(self, mock_st):
mock_tgm = mock.Mock()
ctx = utils.dummy_context()
tmpl = templatem.Template.create_empty_template()
stack1 = parser.Stack(ctx, 'stack1', tmpl,
current_traversal='123')
stack1.store()
stack2 = parser.Stack(ctx, 'stack2', tmpl,
owner_id=stack1.id, current_traversal='456')
stack2.store()
_worker = worker.WorkerService('host-1', 'topic-1', 'engine-001',
mock_tgm)
_worker.stop_traversal(stack1)
self.assertEqual(2, mock_st.call_count)
call1, call2 = mock_st.call_args_list
call_args1, call_args2 = call1[0][0], call2[0][0]
self.assertEqual('stack1', call_args1.name)
self.assertEqual('stack2', call_args2.name)
@mock.patch.object(worker, '_stop_traversal')
def test_stop_nested_traversal_stops_deeply_nested_stack(self, mock_st):
mock_tgm = mock.Mock()
ctx = utils.dummy_context()
tmpl = templatem.Template.create_empty_template()
stack1 = parser.Stack(ctx, 'stack1', tmpl,
current_traversal='123')
stack1.store()
stack2 = parser.Stack(ctx, 'stack2', tmpl,
owner_id=stack1.id, current_traversal='456')
stack2.store()
stack3 = parser.Stack(ctx, 'stack3', tmpl,
owner_id=stack2.id, current_traversal='789')
stack3.store()
_worker = worker.WorkerService('host-1', 'topic-1', 'engine-001',
mock_tgm)
_worker.stop_traversal(stack2)
self.assertEqual(2, mock_st.call_count)
call1, call2 = mock_st.call_args_list
call_args1, call_args2 = call1[0][0], call2[0][0]
self.assertEqual('stack2', call_args1.name)
self.assertEqual('stack3', call_args2.name)
@mock.patch.object(worker, '_cancel_workers')
@mock.patch.object(worker.WorkerService, 'stop_traversal')
def test_stop_all_workers_when_stack_in_progress(self, mock_st, mock_cw):
mock_tgm = mock.Mock()
_worker = worker.WorkerService('host-1', 'topic-1', 'engine-001',
mock_tgm)
stack = mock.MagicMock()
stack.IN_PROGRESS = 'IN_PROGRESS'
stack.status = stack.IN_PROGRESS
stack.id = 'stack_id'
stack.rollback = mock.MagicMock()
_worker.stop_all_workers(stack)
mock_st.assert_not_called()
mock_cw.assert_called_once_with(stack, mock_tgm, 'engine-001',
_worker._rpc_client)
self.assertFalse(stack.rollback.called)
@mock.patch.object(worker, '_cancel_workers')
@mock.patch.object(worker.WorkerService, 'stop_traversal')
def test_stop_all_workers_when_stack_not_in_progress(self, mock_st,
mock_cw):
mock_tgm = mock.Mock()
_worker = worker.WorkerService('host-1', 'topic-1', 'engine-001',
mock_tgm)
stack = mock.MagicMock()
stack.FAILED = 'FAILED'
stack.status = stack.FAILED
stack.id = 'stack_id'
stack.rollback = mock.MagicMock()
_worker.stop_all_workers(stack)
self.assertFalse(mock_st.called)
mock_cw.assert_called_once_with(stack, mock_tgm, 'engine-001',
_worker._rpc_client)
self.assertFalse(stack.rollback.called)
# test when stack complete
stack.FAILED = 'FAILED'
stack.status = stack.FAILED
_worker.stop_all_workers(stack)
self.assertFalse(mock_st.called)
mock_cw.assert_called_with(stack, mock_tgm, 'engine-001',
_worker._rpc_client)
self.assertFalse(stack.rollback.called)
@mock.patch.object(stack_objects.Stack, 'select_and_update')
def test_update_current_traversal(self, mock_sau):
stack = mock.MagicMock()
stack.current_traversal = 'some-thing'
old_trvsl = stack.current_traversal
worker._update_current_traversal(stack)
self.assertNotEqual(old_trvsl, stack.current_traversal)
mock_sau.assert_called_once_with(mock.ANY, stack.id, mock.ANY,
exp_trvsl=old_trvsl)
| apache-2.0 | -4,283,856,035,645,248,000 | 45.807273 | 78 | 0.565336 | false |
cvra/can-bootloader | client/tests/test_config_read_tool.py | 1 | 2204 | import unittest
try:
from unittest.mock import *
except ImportError:
from mock import *
from msgpack import *
from cvra_bootloader.read_config import main
from cvra_bootloader.commands import *
import sys
import json
class ReadConfigToolTestCase(unittest.TestCase):
@patch('cvra_bootloader.utils.write_command_retry')
@patch('cvra_bootloader.utils.write_command')
@patch('cvra_bootloader.utils.open_connection')
@patch('builtins.print')
def test_integration(self, print_mock, open_conn, write_command,
write_command_retry):
sys.argv = "test.py -p /dev/ttyUSB0 0 1 2".split()
configs = [{'id': i} for i in range(3)]
write_command_retry.return_value = {
i: packb(configs[i]) for i in range(3)
}
open_conn.return_value = object()
main()
write_command_retry.assert_any_call(open_conn.return_value,
encode_read_config(), [0, 1, 2])
all_configs = {i: configs[i] for i in range(3)}
print_mock.assert_any_call(json.dumps(all_configs, indent=4,
sort_keys=True))
@patch('cvra_bootloader.utils.open_connection')
@patch('cvra_bootloader.utils.write_command_retry')
@patch('cvra_bootloader.utils.write_command')
@patch('cvra_bootloader.utils.read_can_datagrams')
@patch('builtins.print')
def test_network_discovery(self, print_mock, read_can_datagram,
write_command, write_command_retry, open_conn):
"""
Checks if we can perform a whole network discovery.
"""
sys.argv = "test.py -p /dev/ttyUSB0 --all".split()
# The first two board answers the ping
board_answers = [(b'', [0], i) for i in range(1, 3)] + [None]
read_can_datagram.return_value = iter(board_answers)
write_command_retry.return_value = {
i: packb({'id': i}) for i in range(1, 3)
}
main()
write_command.assert_any_call(open_conn.return_value,
encode_ping(),
list(range(1, 128)))
| bsd-2-clause | 8,157,435,816,643,125,000 | 31.895522 | 78 | 0.578947 | false |
pang-w/pyspider | pyspider/webui/app.py | 4 | 3259 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<[email protected]>
# http://binux.me
# Created on 2014-02-22 23:17:13
import os
import sys
import logging
logger = logging.getLogger("webui")
from six import reraise
from six.moves import builtins
from six.moves.urllib.parse import urljoin
from flask import Flask
from pyspider.fetcher import tornado_fetcher
if os.name == 'nt':
import mimetypes
mimetypes.add_type("text/css", ".css", True)
class TornadoFlask(Flask):
"""Flask object running with tornado ioloop"""
@property
def logger(self):
return logger
def run(self, host=None, port=None, debug=None, **options):
from werkzeug.serving import make_server, run_with_reloader
if host is None:
host = '127.0.0.1'
if port is None:
server_name = self.config['SERVER_NAME']
if server_name and ':' in server_name:
port = int(server_name.rsplit(':', 1)[1])
else:
port = 5000
if debug is not None:
self.debug = bool(debug)
#run_simple(host, port, self, **options)
hostname = host
port = port
application = self
use_reloader = self.debug
use_debugger = self.debug
if use_debugger:
from werkzeug.debug import DebuggedApplication
application = DebuggedApplication(application, True)
def inner():
self.server = make_server(hostname, port, application)
self.server.serve_forever()
if os.environ.get('WERKZEUG_RUN_MAIN') != 'true':
display_hostname = hostname != '*' and hostname or 'localhost'
if ':' in display_hostname:
display_hostname = '[%s]' % display_hostname
self.logger.info('webui running on http://%s:%d/', display_hostname, port)
if use_reloader:
run_with_reloader(inner)
else:
inner()
def quit(self):
if hasattr(self, 'server'):
self.server.shutdown_signal = True
self.logger.info('webui exiting...')
app = TornadoFlask('webui',
static_folder=os.path.join(os.path.dirname(__file__), 'static'),
template_folder=os.path.join(os.path.dirname(__file__), 'templates'))
app.secret_key = os.urandom(24)
app.jinja_env.line_statement_prefix = '#'
app.jinja_env.globals.update(builtins.__dict__)
app.config.update({
'fetch': lambda x: tornado_fetcher.Fetcher(None, None, async=False).fetch(x)[1],
'taskdb': None,
'projectdb': None,
'scheduler_rpc': None,
})
def cdn_url_handler(error, endpoint, kwargs):
if endpoint == 'cdn':
path = kwargs.pop('path')
# cdn = app.config.get('cdn', 'http://cdn.staticfile.org/')
# cdn = app.config.get('cdn', '//cdnjs.cloudflare.com/ajax/libs/')
cdn = app.config.get('cdn', '//cdnjscn.b0.upaiyun.com/libs/')
return urljoin(cdn, path)
else:
exc_type, exc_value, tb = sys.exc_info()
if exc_value is error:
reraise(exc_type, exc_value, tb)
else:
raise error
app.handle_url_build_error = cdn_url_handler
| apache-2.0 | 7,733,720,780,302,100,000 | 29.745283 | 88 | 0.594354 | false |
diagramsoftware/odoomrp-utils | purchase_pricelist_partnerinfo/__openerp__.py | 12 | 1572 | # -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
{
"name": "Product pricelist partnerinfo - Purchase extension",
"version": "1.0",
"depends": [
"product_pricelist_partnerinfo",
"purchase",
],
"author": "OdooMRP team, "
"AvanzOSC, "
"Serv. Tecnol. Avanzados - Pedro M. Baeza",
"website": "http://www.odoomrp.com",
"contributors": [
"Oihane Crucelaegui <[email protected]>",
"Pedro M. Baeza <[email protected]>",
"Ana Juaristi <[email protected]>"
],
"category": "Hidden/Dependency",
"summary": "",
"data": [
"views/pricelist_partnerinfo_view.xml"
],
"installable": True,
"auto_install": True,
}
| agpl-3.0 | -4,453,190,833,650,290,700 | 36.428571 | 78 | 0.57888 | false |