max_stars_repo_path (string, 4-245 chars) | max_stars_repo_name (string, 7-115 chars) | max_stars_count (int64, 101-368k) | id (string, 2-8 chars) | content (string, 6-1.03M chars)
---|---|---|---|---
cudamat/cudamat_conv.py | zwenaing/unsupervised-videos | 293 | 11084074 | import ctypes as ct
import math
import pdb
import cudamat as cm  # required for cm.empty() used in convOutp below
_ConvNet = ct.cdll.LoadLibrary('libcudamat_conv.so')
def DivUp(a, b):
return (a + b - 1) / b
def AddAtAllLocs(h, b):
batch_size, size_x, size_y, num_channels = h.shape4d
b_shape = b.shape
h.reshape((-1, num_channels))
b.reshape((1, -1))
assert b.shape[1] == num_channels
h.add_row_vec(b)
h.reshape((batch_size, -1))
b.reshape(b_shape)
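# AddAtAllLocs broadcasts the per-channel bias b over every spatial location of
# every image in the batch; AddUpAllLocs below is the matching reduction, summing
# h over all images and locations into b (optionally scaling the old b first).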
def AddUpAllLocs(h, b, scaleTargets=0):
batch_size, size_x, size_y, num_channels = h.shape4d
b_shape = b.shape
h.reshape((-1, num_channels))
b.reshape((1, -1))
assert b.shape[1] == num_channels
if scaleTargets == 0:
h.sum(axis=0, target=b)
else:
b.mult(scaleTargets)
b.add_sums(h, axis=0)
h.reshape((batch_size, -1))
b.reshape(b_shape)
def convUp(images, filters, targets, conv_desc, scaleTargets=0):
_ConvNet.convUp(images.p_mat, filters.p_mat, targets.p_mat,
images.p_shape4d, filters.p_shape4d, targets.p_shape4d,
conv_desc, ct.c_float(scaleTargets))
def localUp(images, filters, targets, conv_desc, scaleTargets=0):
_ConvNet.localUp(images.p_mat, filters.p_mat, targets.p_mat,
images.p_shape4d, filters.p_shape4d, targets.p_shape4d,
conv_desc, ct.c_float(scaleTargets))
def convDown(hidSums, filters, targets, conv_desc, scaleTargets=0):
_ConvNet.convDown(hidSums.p_mat, filters.p_mat, targets.p_mat,
hidSums.p_shape4d, filters.p_shape4d, targets.p_shape4d,
conv_desc, ct.c_float(scaleTargets))
def localDown(hidSums, filters, targets, conv_desc, scaleTargets=0):
_ConvNet.localDown(hidSums.p_mat, filters.p_mat, targets.p_mat,
hidSums.p_shape4d, filters.p_shape4d, targets.p_shape4d,
conv_desc, ct.c_float(scaleTargets))
def convOutp(images, hidSums, targets, conv_desc, scaleTargets=0, partialSumY=0, partialSumX=0, temp=None):
num_images, num_modules_x, num_modules_y, num_output_channels = hidSums.shape4d
num_output_channels2, kernel_size_x, kernel_size_y, num_input_channels = targets.shape4d
if partialSumY == 0:
partialSumY = num_modules_y
if partialSumX == 0:
partialSumX = num_modules_x
temp_alloc = False
num_locs = DivUp(num_modules_x, partialSumX) * DivUp(num_modules_y, partialSumY)
if num_locs == 1:
outp = targets
scale_targets = scaleTargets
else:
if temp is None:
temp_alloc = True
temp = cm.empty((num_output_channels, kernel_size_x * kernel_size_y * num_input_channels * num_locs))
temp.set_shape4d((num_output_channels, kernel_size_x, kernel_size_y, num_input_channels * num_locs))
outp = temp
scale_targets = 0
if temp is not None:
num_output_channels3, kernel_size_x2, kernel_size_y2, num_input_channels_mult_partial_sum = temp.shape4d
assert kernel_size_y2 == kernel_size_y
assert kernel_size_x2 == kernel_size_x
assert num_output_channels3 == num_output_channels
assert num_input_channels_mult_partial_sum % num_input_channels == 0
assert num_locs == num_input_channels_mult_partial_sum / num_input_channels
_ConvNet.convOutp(
images.p_mat, hidSums.p_mat, outp.p_mat,
images.p_shape4d, hidSums.p_shape4d, outp.p_shape4d,
conv_desc, ct.c_int(partialSumY), ct.c_int(partialSumX),
ct.c_float(scale_targets), ct.c_float(1))
if num_locs > 1:
temp.reshape((-1, num_locs))
targets.reshape((-1, 1))
targets.mult(scaleTargets)
targets.add_sums(temp, axis=1)
temp.reshape((num_output_channels, -1))
targets.reshape((num_output_channels, -1))
if temp_alloc:
temp.free_device_memory()
elif temp is not None:
temp.assign(outp)
def localOutp(images, hidSums, targets, conv_desc, scaleTargets=0):
_ConvNet.localOutp(
images.p_mat, hidSums.p_mat, targets.p_mat,
images.p_shape4d, hidSums.p_shape4d, targets.p_shape4d,
conv_desc, ct.c_float(scaleTargets), ct.c_float(1))
def MaxPool(images, targets, conv_desc):
_ConvNet.MaxPool(images.p_mat, targets.p_mat, images.p_shape4d,
targets.p_shape4d, conv_desc)
def AvgPool(images, targets, conv_desc):
_ConvNet.AvgPool(images.p_mat, targets.p_mat, images.p_shape4d,
targets.p_shape4d, conv_desc)
def MaxPoolUndo(images, grad, maxes, targets, conv_desc, scaleTargets=0):
_ConvNet.MaxPoolUndo(images.p_mat, grad.p_mat, maxes.p_mat, targets.p_mat,
images.p_shape4d, grad.p_shape4d, conv_desc,
ct.c_float(scaleTargets))
def AvgPoolUndo(avgGrads, targets, conv_desc, scaleTargets=0):
_ConvNet.AvgPoolUndo(avgGrads.p_mat, targets.p_mat, avgGrads.p_shape4d,
targets.p_shape4d, conv_desc, ct.c_float(scaleTargets))
def ResponseNorm(images, denoms, targets, numChannels, sizeX, addScale, powScale):
assert targets.shape == images.shape
assert targets.shape == denoms.shape
_ConvNet.ResponseNorm(images.p_mat, denoms.p_mat, targets.p_mat,
numChannels, sizeX, ct.c_float(addScale),
ct.c_float(powScale))
def ResponseNormCrossMap(images, targets, sizeF, addScale, powScale, blocked):
_, _, _, num_filters = images.shape4d
assert targets.shape == images.shape
_ConvNet.ResponseNormCrossMap(images.p_mat, targets.p_mat, num_filters, sizeF, ct.c_float(addScale),
ct.c_float(powScale), blocked)
def ResponseNormUndo(outGrad, denoms, inGrad, acts, targets, numChannels, sizeX,
addScale, powScale):
assert targets.shape == outGrad.shape
assert targets.shape == denoms.shape
assert targets.shape == inGrad.shape
assert targets.shape == acts.shape
_ConvNet.ResponseNormUndo(outGrad.p_mat, denoms.p_mat, inGrad.p_mat,
acts.p_mat, targets.p_mat, numChannels, sizeX,
ct.c_float(addScale), ct.c_float(powScale))
def ResponseNormCrossMapUndo(outGrad, images, acts, targets, sizeF,
addScale, powScale, blocked):
assert targets.shape == outGrad.shape
assert targets.shape == images.shape
assert targets.shape == acts.shape
_, _, _, num_filters = images.shape4d
_ConvNet.ResponseNormCrossMapUndo(outGrad.p_mat, images.p_mat,
acts.p_mat, targets.p_mat, num_filters, sizeF,
ct.c_float(addScale), ct.c_float(powScale), blocked)
|
image-superresolution/esrgan/discriminator_arch.py | AaratiAkkapeddi/nnabla-examples | 228 | 11084081 | # Copyright 2020,2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nnabla as nn
import nnabla.functions as F
import nnabla.parametric_functions as PF
def conv_bn_3(x, nf, name, bias):
with nn.parameter_scope(name):
h = PF.convolution(x, nf, kernel=(3, 3), stride=(
1, 1), pad=(1, 1), with_bias=bias)
h = PF.batch_normalization(h)
h = F.leaky_relu(h, alpha=0.2)
return h
def conv_bn_4(x, nf, name, bias):
with nn.parameter_scope(name):
h = PF.convolution(x, nf, kernel=(4, 4), stride=(
2, 2), pad=(1, 1), with_bias=bias)
h = PF.batch_normalization(h)
h = F.leaky_relu(h, alpha=0.2)
return h
def discriminator(x, nf=64):
'''
:param x: input to the network
:param nf: base number of filters; deeper blocks widen to 2*nf, 4*nf and 8*nf channels
:return: a (batch_size, 1) logit Variable (one real/fake score per sample)
'''
# [3,128, 128]
h = F.leaky_relu(PF.convolution(x, nf, kernel=(3, 3), stride=(1, 1), pad=(1, 1), name='conv0_0', with_bias=True),
alpha=0.2)
# [64, 128, 128]
h = conv_bn_4(h, nf, "conv0_1", False)
h = conv_bn_3(h, 2 * nf, "conv1_0", False)
h = conv_bn_4(h, 2 * nf, "conv1_1", False)
h = conv_bn_3(h, 4 * nf, "conv2_0", False)
h = conv_bn_4(h, 4 * nf, "conv2_1", False)
h = conv_bn_3(h, 8 * nf, "conv3_0", False)
h = conv_bn_4(h, 8 * nf, "conv3_1", False)
h = conv_bn_3(h, 8 * nf, "conv4_0", False)
h = conv_bn_4(h, 8 * nf, "conv4_1", False)
# [512, 4, 4]
B, C, H, W = h.shape[0], h.shape[1], h.shape[2], h.shape[3]
h = F.leaky_relu((PF.affine(h, 100, name="affine1")),
alpha=0.2)
h = PF.affine(h, 1, name="affine2")
return h
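# --- Minimal usage sketch (not from the original repository; assumes nnabla is installed) ---
if __name__ == "__main__":
    # 128x128 RGB input, matching the [3, 128, 128] shape noted above
    x_in = nn.Variable((1, 3, 128, 128))
    logits = discriminator(x_in, nf=64)
    print(logits.shape)  # (1, 1): one real/fake logit per sample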
|
jactorch/data/dataloader/__init__.py | dapatil211/Jacinle | 114 | 11084097 |
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : __init__.py
# Author : <NAME>
# Email : <EMAIL>
# Date : 03/09/2020
#
# This file is part of Jacinle.
# Distributed under terms of the MIT license.
import torch
if torch.__version__ < '0.3.1':
from .dataloader_torch030 import *
else:
from .dataloader import *
|
deploy/tx2_run_tvm.py | tolleybot/fast-depth | 759 | 11084113 | import tvm
import numpy as np
import argparse
import os
import time
def run_model(model_dir, input_fp, output_fp, warmup_trials, run_trials, cuda, try_randin):
# import compiled graph
print("=> [TVM on TX2] using model files in {}".format(model_dir))
assert(os.path.isdir(model_dir))
print("=> [TVM on TX2] loading model lib and ptx")
loaded_lib = tvm.module.load(os.path.join(model_dir, "deploy_lib.o"))
if cuda:
dev_lib = tvm.module.load(os.path.join(model_dir, "deploy_cuda.ptx"))
loaded_lib.import_module(dev_lib)
print("=> [TVM on TX2] loading model graph and params")
loaded_graph = open(os.path.join(model_dir,"deploy_graph.json")).read()
loaded_params = bytearray(open(os.path.join(model_dir, "deploy_param.params"), "rb").read())
print("=> [TVM on TX2] creating TVM runtime module")
fcreate = tvm.get_global_func("tvm.graph_runtime.create")
ctx = tvm.gpu(0) if cuda else tvm.cpu(0)
gmodule = fcreate(loaded_graph, loaded_lib, ctx.device_type, ctx.device_id)
set_input, get_output, run = gmodule["set_input"], gmodule["get_output"], gmodule["run"]
print("=> [TVM on TX2] feeding inputs and params into TVM module")
rgb_np = np.load(input_fp) # HWC
x = np.zeros([1,3,224,224]) # NCHW
x[0,:,:,:] = np.transpose(rgb_np, (2,0,1))
set_input('0', tvm.nd.array(x.astype('float32')))
gmodule["load_params"](loaded_params)
print("=> [TVM on TX2] running TVM module, saving output")
run() # not gmodule.run()
out_shape = (1, 1, 224, 224)
out = tvm.nd.empty(out_shape, "float32")
get_output(0, out)
np.save(output_fp, out.asnumpy())
print("=> [TVM on TX2] benchmarking: {} warmup, {} run trials".format(warmup_trials, run_trials))
# run model several times as a warmup
for i in range(warmup_trials):
run()
ctx.sync()
# profile runtime using TVM time evaluator
ftimer = gmodule.time_evaluator("run", ctx, number=1, repeat=run_trials)
profile_result = ftimer()
profiled_runtime = profile_result[0]
print("=> [TVM on TX2] profiled runtime (in ms): {:.5f}".format(1000*profiled_runtime))
# try randomizing input
if try_randin:
randin_runtime = 0
for i in range(run_trials):
x = np.random.randn(1, 3, 224, 224)
set_input('0', tvm.nd.array(x.astype('float32')))
randin_ftimer = gmodule.time_evaluator("run", ctx, number=1, repeat=1)
randin_profile_result = randin_ftimer()
randin_runtime += randin_profile_result[0]
randomized_input_runtime = randin_runtime/run_trials
print("=> [TVM on TX2] with randomized input on every run, profiled runtime (in ms): {:.5f}".format(1000*randomized_input_runtime))
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--model-dir', type=str, required=True,
help='path to folder with TVM-compiled model files (required)')
parser.add_argument('--input-fp', type=str, default='data/rgb.npy',
help='numpy file containing input rgb data (default: data/rgb.npy')
parser.add_argument('--output-fp', type=str, default='data/pred.npy',
help='numpy file to store output prediction data (default: data/pred.npy')
parser.add_argument('--warmup', type=int, default=10,
help='number of inference warmup trials (default: 10)')
parser.add_argument('--run', type=int, default=100,
help='number of inference run trials (default: 100)')
parser.add_argument('--cuda', type=bool, default=False,
help='run with CUDA (default: False)')
parser.add_argument('--randin', type=bool, default=False,
help='profile runtime while randomizing input on every run (default: False)')
args = parser.parse_args()
run_model(args.model_dir, args.input_fp, args.output_fp, args.warmup, args.run, args.cuda, try_randin=args.randin)
if __name__ == '__main__':
main()
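# Minimal usage sketch (paths are illustrative, not taken from the original repo):
#   python tx2_run_tvm.py --model-dir ./tvm_compiled_model --input-fp data/rgb.npy --output-fp data/pred.npy --warmup 10 --run 100
# Caveat: --cuda and --randin use argparse type=bool, so any non-empty string
# (even "False") enables them; omit the flag to keep the default of False.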
|
ropper/z3helper.py | cbayet/Ropper | 1,502 | 11084126 | # coding=utf-8
# Copyright 2018 <NAME>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
from ropper.common.error import RopperError
class ConstraintCompiler(object):
"""
Compile a user given constraints to z3 expressions
constraint := assignment | pop_reg
assignment := reg, adjust, reg | number
pop_reg := "pop", reg
adjust := "==" | "+=" | "-=" | "*=" | "/="
reg := a register of the current architecture
number := int
"""
NUMBER_REGEX = '(-?[0-9]+)'
REG_REGEX = '(?P<{}>[a-zA-Z0-9]+)'
ADJUST_REGEX = '([\\+\-\*/=]=)'
ASSIGNMENT_REGEX = '('+REG_REGEX.format('reg_dst_1') + ' *' + ADJUST_REGEX + ' *('+NUMBER_REGEX+'|'+REG_REGEX.format('reg_src_1')+'|(\[)'+REG_REGEX.format('reg_src_2')+'(\])))'
POP_REGEX = '((pop) +'+REG_REGEX.format('reg_dst_2')+')'
CONSTRAINT_REGEX = '(' + ASSIGNMENT_REGEX + '|' + POP_REGEX + ')'
def __init__(self, architecture, semantic_info):
self.__architecture = architecture
self.__semantic_info = semantic_info
self.symbols = []  # appended to by _popReg; initialized here so the attribute exists
def getSymbols(self, constraints):
symbols = []
for constraint in constraints:
match = re.match(ConstraintCompiler.CONSTRAINT_REGEX, constraint)
if match is None:
raise Exception('Invalid syntax: %s' % constraint)
reg_dst = match.group('reg_dst_1')
if reg_dst is not None:
reg_src = match.group('reg_src_1')
reg_src = match.group('reg_src_2') if reg_src is None else reg_src
symbols.append((reg_dst, reg_src))
else:
symbols.append((match.group('reg_dst_2'), None))
return symbols
def compile(self, constraints):
"""
compile a line of semantic expressions
"""
tokens = self._tokenize(constraints)[::-1]
to_return = None
constraint = None
while True:
if not tokens:
break
token = tokens.pop()
if token in self.__architecture.info.registers:
constraint = self._assignment(token, tokens)
elif token == 'pop':
constraint = self._popReg(token, tokens)
elif token == ';':
if to_return is None:
to_return = constraint
else:
to_return = 'And(%s, %s)' % (to_return, constraint)
else:
raise ConstraintError('Invalid token: %s' % token)
return to_return
def _tokenize(self, constraints):
"""
return a list of tokens
"""
tokens = []
for constraint in constraints.split(';'):
constraint = constraint.strip()
if not constraint:
continue
match = re.match(ConstraintCompiler.CONSTRAINT_REGEX, constraint)
if match is None:
raise ConstraintError('Invalid Syntax: %s' % constraint)
last_valid_index = -1
for index in range(1, len(match.regs)):
start = match.regs[index][0]
if start == -1:
continue
if last_valid_index == -1:
last_valid_index = index
continue
if match.regs[last_valid_index][0] != start:
tokens.append(match.group(last_valid_index))
last_valid_index = index
tokens.append(match.group(last_valid_index))
tokens.append(';')
return tokens
def _assignment(self, register, tokens):
register = self.__architecture.getRegisterName(register)
reg1_last = self.__semantic_info.regs[register][-1]
reg1_init = self.__semantic_info.regs[register][0]
op = tokens.pop()
if not re.match(ConstraintCompiler.ADJUST_REGEX, op):
raise ConstraintError('Invalid syntax: %s' % op)
value = tokens.pop()
if value == '[':
r1 = register
register = tokens.pop()
register_name = self.__architecture.getRegisterName(register)
if not register_name:
raise ConstraintError('Invalid register: %s' % register)
value = self._readMemory(register_name)
tokens.pop()
elif re.match(ConstraintCompiler.NUMBER_REGEX, value):
value = create_number_expression(int(value), int(reg1_last.split('_')[-1]))
elif value in self.__architecture.info.registers:
value = self.__architecture.getRegisterName(value)
value = self.__semantic_info.regs[value][0]
value = create_register_expression(value, int(value.split('_')[-1]))
else:
print(re.match(ConstraintCompiler.NUMBER_REGEX, value))
raise ConstraintError('Invalid Assignment: %s%s%s' % (register, op, value))
reg1_last = create_register_expression(reg1_last, int(reg1_last.split('_')[-1]))
reg1_init = create_register_expression(reg1_init, int(reg1_init.split('_')[-1]))
return self._create(reg1_last, reg1_init, value, op[0])
def _create(self, left_last, left_init, right, adjust):
if adjust != '=':
return '%s == %s %s %s' % (left_last, left_init, adjust, right)
else:
return '%s == %s' % (left_last, right)
def _readMemory(self, register):
register_init = self.__semantic_info.regs[register][0]
if self.__semantic_info.mems:
memory = self.__semantic_info.mems[-1]
else:
memory = 'memory%d_%d_%d' % (0, self.__architecture.info.bits, 8)
self.__semantic_info.mems.append(memory)
size = int(register_init.split('_')[-1])
register_expr = create_register_expression(register_init, size)
mem_expr = create_read_memory_expression(memory, register_expr, size)
return mem_expr
def _popReg(self, pop, tokens):
reg_name = tokens.pop()
self.symbols.append((reg_name,None))
reg = self.__semantic_info.regs[reg_name][-1]
if self.__semantic_info.mems:
memory = self.__semantic_info.mems[0]
else:
memory = 'memory%d_%d_%d' % (0, self.__architecture.info.bits, 8)
self.__semantic_info.mems.append(memory)
size = int(reg.split('_')[-1])
register_expr = create_register_expression(reg, size)
mem_expr = create_read_memory_expression(memory, register_expr, size)
return mem_expr
class ConstraintError(RopperError):
"""
ConstraintError
"""
pass
def create_register_expression(register_accessor, size, high=False):
register_size = int(register_accessor.split('_')[2])
if size < register_size:
if high:
return 'Extract(%d, 8, %s)' % (size+8-1, register_accessor)
else:
return 'Extract(%d, 0, %s)' % (size-1, register_accessor)
else:
return '%s' % register_accessor
def create_number_expression(number, size):
return "BitVecVal(%d, %d)" % (number, size)
def create_read_memory_expression(memory, addr, size):
to_return = '%s[%s]' % (memory, addr)
for i in range(1, int(size/8)):
value = '%s[%s]' % (memory, '%s + %d' % (addr, i))
to_return = 'Concat(%s, %s)' % (value, to_return)
return to_return
|
tests/test_ngram.py | pemistahl/lingua-py | 119 | 11084155 | #
# Copyright © 2022 <NAME> <EMAIL>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from lingua._ngram import _range_of_lower_order_ngrams
def test_ngram_iterator():
ngram = "äbcde"
ngram_range = _range_of_lower_order_ngrams(ngram)
assert next(ngram_range) == "äbcde"
assert next(ngram_range) == "äbcd"
assert next(ngram_range) == "äbc"
assert next(ngram_range) == "äb"
assert next(ngram_range) == "ä"
with pytest.raises(StopIteration):
next(ngram_range)
|
targets/minispartan6/crg.py | skiphansen/litex-buildenv | 198 | 11084195 | # Support for the MiniSpartan6+ - https://www.scarabhardware.com/minispartan6/
from fractions import Fraction
from migen import *
from migen.genlib.resetsync import AsyncResetSynchronizer
class _CRG(Module):
def __init__(self, platform, clk_freq):
self.clock_domains.cd_sys = ClockDomain()
self.clock_domains.cd_sys_ps = ClockDomain()
f0 = 32*1000000
clk32 = platform.request("clk32")
clk32a = Signal()
self.specials += Instance("IBUFG", i_I=clk32, o_O=clk32a)
clk32b = Signal()
self.specials += Instance("BUFIO2", p_DIVIDE=1,
p_DIVIDE_BYPASS="TRUE", p_I_INVERT="FALSE",
i_I=clk32a, o_DIVCLK=clk32b)
f = Fraction(int(clk_freq), int(f0))
n, m, p = f.denominator, f.numerator, 8
assert f0/n*m == clk_freq
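# Worked example (illustrative): clk_freq = 80e6 gives f = 80/32 = 5/2, so
# n = 2, m = 5, p = 8 -> CLKFBOUT_MULT = m*p//n = 20 and CLKOUT*_DIVIDE = p//1 = 8:
# VCO = 32 MHz * 20 = 640 MHz, outputs = 640 MHz / 8 = 80 MHz = clk_freq.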
pll_lckd = Signal()
pll_fb = Signal()
pll = Signal(6)
self.specials.pll = Instance("PLL_ADV", p_SIM_DEVICE="SPARTAN6",
p_BANDWIDTH="OPTIMIZED", p_COMPENSATION="INTERNAL",
p_REF_JITTER=.01, p_CLK_FEEDBACK="CLKFBOUT",
i_DADDR=0, i_DCLK=0, i_DEN=0, i_DI=0, i_DWE=0, i_RST=0, i_REL=0,
p_DIVCLK_DIVIDE=1, p_CLKFBOUT_MULT=m*p//n, p_CLKFBOUT_PHASE=0.,
i_CLKIN1=clk32b, i_CLKIN2=0, i_CLKINSEL=1,
p_CLKIN1_PERIOD=1000000000/f0, p_CLKIN2_PERIOD=0.,
i_CLKFBIN=pll_fb, o_CLKFBOUT=pll_fb, o_LOCKED=pll_lckd,
o_CLKOUT0=pll[0], p_CLKOUT0_DUTY_CYCLE=.5,
o_CLKOUT1=pll[1], p_CLKOUT1_DUTY_CYCLE=.5,
o_CLKOUT2=pll[2], p_CLKOUT2_DUTY_CYCLE=.5,
o_CLKOUT3=pll[3], p_CLKOUT3_DUTY_CYCLE=.5,
o_CLKOUT4=pll[4], p_CLKOUT4_DUTY_CYCLE=.5,
o_CLKOUT5=pll[5], p_CLKOUT5_DUTY_CYCLE=.5,
p_CLKOUT0_PHASE=0., p_CLKOUT0_DIVIDE=p//1,
p_CLKOUT1_PHASE=0., p_CLKOUT1_DIVIDE=p//1,
p_CLKOUT2_PHASE=0., p_CLKOUT2_DIVIDE=p//1,
p_CLKOUT3_PHASE=0., p_CLKOUT3_DIVIDE=p//1,
p_CLKOUT4_PHASE=0., p_CLKOUT4_DIVIDE=p//1, # sys
p_CLKOUT5_PHASE=270., p_CLKOUT5_DIVIDE=p//1, # sys_ps
)
self.specials += Instance("BUFG", i_I=pll[4], o_O=self.cd_sys.clk)
self.specials += Instance("BUFG", i_I=pll[5], o_O=self.cd_sys_ps.clk)
self.specials += AsyncResetSynchronizer(self.cd_sys, ~pll_lckd)
self.specials += Instance("ODDR2", p_DDR_ALIGNMENT="NONE",
p_INIT=0, p_SRTYPE="SYNC",
i_D0=0, i_D1=1, i_S=0, i_R=0, i_CE=1,
i_C0=self.cd_sys.clk, i_C1=~self.cd_sys.clk,
o_Q=platform.request("sdram_clock"))
|
mayan/apps/documents/tests/mixins/document_type_mixins.py | nattangwiwat/Mayan-EDMS-recitation | 343 | 11084198 | from django.db.models import Q
from ...classes import BaseDocumentFilenameGenerator
from ...models.document_type_models import DocumentType, DocumentTypeFilename
from ..literals import (
TEST_DOCUMENT_TYPE_DELETE_PERIOD, TEST_DOCUMENT_TYPE_DELETE_TIME_UNIT,
TEST_DOCUMENT_TYPE_LABEL, TEST_DOCUMENT_TYPE_LABEL_EDITED,
TEST_DOCUMENT_TYPE_QUICK_LABEL, TEST_DOCUMENT_TYPE_QUICK_LABEL_EDITED
)
class DocumentQuickLabelViewTestMixin:
def _request_test_document_quick_label_edit_view(self, extra_data=None):
data = {
'document_type_available_filenames': self.test_document_type_quick_label.pk,
'label': ''
# View needs at least an empty label for quick
# label to work. Cause is unknown.
}
data.update(extra_data or {})
return self.post(
viewname='documents:document_properties_edit', kwargs={
'document_id': self.test_document.pk
}, data=data
)
class DocumentTypeAPIViewTestMixin:
def _request_test_document_type_create_api_view(self):
pk_list = list(DocumentType.objects.values_list('pk', flat=True))
response = self.post(
viewname='rest_api:documenttype-list', data={
'label': TEST_DOCUMENT_TYPE_LABEL
}
)
try:
self.test_document_type = DocumentType.objects.get(
~Q(pk__in=pk_list)
)
except DocumentType.DoesNotExist:
self.test_document_type = None
return response
def _request_test_document_type_delete_api_view(self):
return self.delete(
viewname='rest_api:documenttype-detail', kwargs={
'document_type_id': self.test_document_type.pk,
}
)
def _request_test_document_type_detail_api_view(self):
return self.get(
viewname='rest_api:documenttype-detail', kwargs={
'document_type_id': self.test_document_type.pk,
}
)
def _request_test_document_type_edit_via_patch_api_view(self):
return self.patch(
viewname='rest_api:documenttype-detail', kwargs={
'document_type_id': self.test_document_type.pk,
}, data={'label': TEST_DOCUMENT_TYPE_LABEL_EDITED}
)
def _request_test_document_type_edit_via_put_api_view(self):
return self.put(
viewname='rest_api:documenttype-detail', kwargs={
'document_type_id': self.test_document_type.pk,
}, data={'label': TEST_DOCUMENT_TYPE_LABEL_EDITED}
)
def _request_test_document_type_list_api_view(self):
return self.get(viewname='rest_api:documenttype-list')
class DocumentTypeDeletionPoliciesViewTestMixin:
def _request_test_document_type_policies_get_view(self):
return self.get(
viewname='documents:document_type_policies', kwargs={
'document_type_id': self.test_document_type.pk
}
)
def _request_test_document_type_policies_post_view(self):
return self.post(
viewname='documents:document_type_policies', kwargs={
'document_type_id': self.test_document_type.pk
}
)
class DocumentTypeFilenameGeneratorViewTestMixin:
def _request_test_document_type_filename_generator_get_view(self):
return self.get(
viewname='documents:document_type_filename_generator', kwargs={
'document_type_id': self.test_document_type.pk
}
)
def _request_test_document_type_filename_generator_post_view(self):
return self.post(
viewname='documents:document_type_filename_generator', kwargs={
'document_type_id': self.test_document_type.pk
}, data={
'filename_generator_backend': BaseDocumentFilenameGenerator.get_default()
}
)
class DocumentTypeQuickLabelAPIViewTestMixin:
def _request_test_document_type_quick_label_create_api_view(self):
pk_list = list(DocumentTypeFilename.objects.values('pk'))
response = self.post(
viewname='rest_api:documenttype-quicklabel-list', kwargs={
'document_type_id': self.test_document_type.pk,
}, data={
'filename': TEST_DOCUMENT_TYPE_QUICK_LABEL
}
)
try:
self.test_document_type_quick_label = DocumentTypeFilename.objects.get(
~Q(pk__in=pk_list)
)
except DocumentTypeFilename.DoesNotExist:
self.test_document_type_quick_label = None
return response
def _request_test_document_type_quick_label_delete_api_view(self):
return self.delete(
viewname='rest_api:documenttype-quicklabel-detail', kwargs={
'document_type_id': self.test_document_type.pk,
'document_type_quick_label_id': self.test_document_type_quick_label.pk,
}
)
def _request_test_document_type_quick_label_detail_api_view(self):
return self.get(
viewname='rest_api:documenttype-quicklabel-detail', kwargs={
'document_type_id': self.test_document_type.pk,
'document_type_quick_label_id': self.test_document_type_quick_label.pk,
}
)
def _request_test_document_type_quick_label_edit_via_patch_api_view(self):
return self.patch(
viewname='rest_api:documenttype-quicklabel-detail', kwargs={
'document_type_id': self.test_document_type.pk,
'document_type_quick_label_id': self.test_document_type_quick_label.pk,
}, data={'filename': TEST_DOCUMENT_TYPE_QUICK_LABEL_EDITED}
)
def _request_test_document_type_quick_label_edit_via_put_api_view(self):
return self.put(
viewname='rest_api:documenttype-quicklabel-detail', kwargs={
'document_type_id': self.test_document_type.pk,
'document_type_quick_label_id': self.test_document_type_quick_label.pk,
}, data={'filename': TEST_DOCUMENT_TYPE_QUICK_LABEL_EDITED}
)
def _request_test_document_type_quick_label_list_api_view(self):
return self.get(
viewname='rest_api:documenttype-quicklabel-list', kwargs={
'document_type_id': self.test_document_type.pk
}
)
class DocumentTypeQuickLabelViewTestMixin:
def _request_test_quick_label_create_view(self):
return self.post(
viewname='documents:document_type_filename_create', kwargs={
'document_type_id': self.test_document_type.pk
}, data={
'filename': TEST_DOCUMENT_TYPE_QUICK_LABEL,
}
)
def _request_test_quick_label_delete_view(self):
return self.post(
viewname='documents:document_type_filename_delete', kwargs={
'document_type_filename_id': self.test_document_type_quick_label.pk
}
)
def _request_test_quick_label_edit_view(self):
return self.post(
viewname='documents:document_type_filename_edit', kwargs={
'document_type_filename_id': self.test_document_type_quick_label.pk
}, data={
'filename': TEST_DOCUMENT_TYPE_QUICK_LABEL_EDITED,
}
)
def _request_test_quick_label_list_view(self):
return self.get(
viewname='documents:document_type_filename_list', kwargs={
'document_type_id': self.test_document_type.pk
}
)
class DocumentTypeQuickLabelTestMixin:
def _create_test_document_type_quick_label(self):
self.test_document_type_quick_label = self.test_document_type.filenames.create(
filename=TEST_DOCUMENT_TYPE_QUICK_LABEL
)
class DocumentTypeViewTestMixin:
def _request_test_document_type_create_view(self):
return self.post(
viewname='documents:document_type_create',
data={
'label': TEST_DOCUMENT_TYPE_LABEL,
'delete_time_period': TEST_DOCUMENT_TYPE_DELETE_PERIOD,
'delete_time_unit': TEST_DOCUMENT_TYPE_DELETE_TIME_UNIT
}
)
def _request_test_document_type_delete_view(self):
return self.post(
viewname='documents:document_type_delete', kwargs={
'document_type_id': self.test_document_type.pk
}
)
def _request_test_document_type_edit_view(self):
return self.post(
viewname='documents:document_type_edit', kwargs={
'document_type_id': self.test_document_type.pk
}, data={
'label': TEST_DOCUMENT_TYPE_LABEL_EDITED,
}
)
def _request_test_document_type_list_view(self):
return self.get(viewname='documents:document_type_list')
|
gryphon/tests/logic/exchange_wrappers/bitstamp_test.py | qiquanzhijia/gryphon | 1,109 | 11084201 |
import pyximport; pyximport.install()
from gryphon.lib.exchange.bitstamp_btc_usd import BitstampBTCUSDExchange
from gryphon.tests.logic.exchange_wrappers.public_methods import ExchangePublicMethodsTests
class TestBitstampPublicMethods(ExchangePublicMethodsTests):
def setUp(self):
self.exchange = BitstampBTCUSDExchange()
|
CircuitPython_MacroPad_NKRO/boot.py | gamblor21/Adafruit_Learning_System_Guides | 665 | 11084209 |
import usb_hid
REPORT_ID = 0x4
REPORT_BYTES = 16
bitmap_keyboard_descriptor = bytes((
0x05, 0x01, # Usage Page (Generic Desktop),
0x09, 0x06, # Usage (Keyboard),
0xA1, 0x01, # Collection (Application),
0x85, REPORT_ID, # Report ID
# bitmap of modifiers
0x75, 0x01, # Report Size (1),
0x95, 0x08, # Report Count (8),
0x05, 0x07, # Usage Page (Key Codes),
0x19, 0xE0, # Usage Minimum (224),
0x29, 0xE7, # Usage Maximum (231),
0x15, 0x00, # Logical Minimum (0),
0x25, 0x01, # Logical Maximum (1),
0x81, 0x02, # Input (Data, Variable, Absolute), ;Modifier byte
# LED output report
0x95, 0x05, # Report Count (5),
0x75, 0x01, # Report Size (1),
0x05, 0x08, # Usage Page (LEDs),
0x19, 0x01, # Usage Minimum (1),
0x29, 0x05, # Usage Maximum (5),
0x91, 0x02, # Output (Data, Variable, Absolute),
0x95, 0x01, # Report Count (1),
0x75, 0x03, # Report Size (3),
0x91, 0x03, # Output (Constant),
# bitmap of keys
0x95, (REPORT_BYTES-1)*8, # Report Count (),
0x75, 0x01, # Report Size (1),
0x15, 0x00, # Logical Minimum (0),
0x25, 0x01, # Logical Maximum(1),
0x05, 0x07, # Usage Page (Key Codes),
0x19, 0x00, # Usage Minimum (0),
0x29, (REPORT_BYTES-1)*8-1, # Usage Maximum (),
0x81, 0x02, # Input (Data, Variable, Absolute),
0xc0 # End Collection
))
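# With REPORT_BYTES = 16, the key bitmap above spans (16 - 1) * 8 = 120 bits
# (usage IDs 0..119), preceded by the 8-bit modifier bitmap and the 5-bit LED
# output report (padded to a full byte).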
bitmap_keyboard = usb_hid.Device(
report_descriptor=bitmap_keyboard_descriptor,
usage_page=0x1,
usage=0x6,
report_ids=(REPORT_ID,),
in_report_lengths=(REPORT_BYTES,),
out_report_lengths=(1,),
)
usb_hid.enable(
(
bitmap_keyboard,
usb_hid.Device.MOUSE,
usb_hid.Device.CONSUMER_CONTROL,
)
)
print("enabled HID with custom keyboard device")
|
presidio-analyzer/tests/test_us_itin_recognizer.py | kubistika/presidio | 1,408 | 11084222 |
import pytest
from tests import assert_result_within_score_range
from presidio_analyzer.predefined_recognizers import UsItinRecognizer
@pytest.fixture(scope="module")
def recognizer():
return UsItinRecognizer()
@pytest.fixture(scope="module")
def entities():
return ["US_ITIN"]
@pytest.mark.parametrize(
"text, expected_len, expected_positions, expected_score_ranges",
[
# fmt: off
("911-701234 91170-1234", 2, ((0, 10), (11, 21),), ((0.0, 0.3), (0.0, 0.3),),),
("911701234", 1, ((0, 9),), ((0.3, 0.4),),),
("911-70-1234", 1, ((0, 11),), ((0.5, 0.6),),),
("911-89-1234", 0, (), (),),
("my tax id 911-89-1234", 0, (), (),),
# fmt: on
],
)
def test_when_itin_in_text_then_all_us_itins_found(
text,
expected_len,
expected_positions,
expected_score_ranges,
recognizer,
entities,
max_score,
):
results = recognizer.analyze(text, entities)
assert len(results) == expected_len
for res, (st_pos, fn_pos), (st_score, fn_score) in zip(
results, expected_positions, expected_score_ranges
):
if fn_score == "max":
fn_score = max_score
assert_result_within_score_range(
res, entities[0], st_pos, fn_pos, st_score, fn_score
)
|
misc/generate_site_positions.py | epifluidlab/juicer | 204 | 11084226 | #!/usr/bin/env python
# Generate site positions in genome from given restriction enzyme
# Juicer 1.5
from __future__ import print_function
import sys
import re
def usage():
print('Usage: {} <restriction enzyme> <genome> [location]'.format(sys.argv[0]), file=sys.stderr)
sys.exit(1)
# ------------------------------------------------------------------------------
def process_args(args):
# Genome to filename mappings
#
# You may hardcode filenames belonging to frequently used genomes by inserting
# elements into this dictionary.
filenames = {
'hg19': '/seq/references/Homo_sapiens_assembly19.fasta',
'mm9' : '/seq/references/Mus_musculus_assembly9.fasta',
'mm10': '/seq/references/Mus_musculus_assembly10.fasta',
'hg18': '/seq/references/Homo_sapiens_assembly18.fasta',
}
# Enzyme to search pattern mappings
#
# You may provide your own patterns by inserting elements into this dictionary.
# Multiple patterns are supported in the form of lists. If you enumerate more
# than one patterns for an enzyme, then a match will be reported when at least
# one of them can be found at a given position.
#
# Wildcards:
# N: A, C, G or T
# M: A or C
# R: A or G
# W: A or T
# Y: C or T
# S: C or G
# K: G or T
# H: A, C or T
# B: C, G or T
# V: A, C or G
# D: A, G or T
patterns = {
'HindIII' : 'AAGCTT',
'DpnII' : 'GATC',
'MboI' : 'GATC',
'Sau3AI' : 'GATC',
'Arima' : [ 'GATC', 'GANTC' ],
}
if len(args) != 3 and len(args) != 4:
usage()
enzyme = args[1]
genome = args[2]
inputfile = ''
outputfile = ''
pattern = ''
if len(args) == 4:
inputfile = args[3]
elif genome in filenames:
inputfile = filenames[genome]
else:
print('<genome> not found and [location] not defined', file=sys.stderr)
usage()
if enzyme in patterns:
pattern = patterns[enzyme]
# Convert a simple string to a list.
if not isinstance(pattern, list):
pattern = [ pattern ]
# Make patterns uppercase.
pattern = [ p.upper() for p in pattern ]
else:
print('<restriction enzyme> must be one of {}'.format(list(patterns.keys())), file=sys.stderr)
usage()
outputfile = genome + '_' + enzyme + '.txt'
return {
'enzyme' : enzyme,
'genome' : genome,
'pattern' : pattern,
'inputfile' : inputfile,
'outputfile' : outputfile,
}
# ------------------------------------------------------------------------------
def has_wildcard(pattern):
# Input pattern can be a list or a string.
wildcards = re.compile(r'[NMRWYSKHBVD]')
if (isinstance(pattern, list)):
for p in pattern:
if re.search(wildcards, p):
return True
else:
if re.search(wildcards, pattern):
return True
return False
# ------------------------------------------------------------------------------
def pattern2regexp(pattern):
# Input pattern can be a list or a string.
wildcards = {
'N': '[ACGT]',
'M': '[AC]',
'R': '[AG]',
'W': '[AT]',
'Y': '[CT]',
'S': '[CG]',
'K': '[GT]',
'H': '[ACT]',
'B': '[CGT]',
'V': '[ACG]',
'D': '[AGT]',
}
if isinstance(pattern, list):
return [ pattern2regexp(p) for p in pattern ]
pattern = pattern.upper()
for p, r in wildcards.items():
pattern = re.sub(p, r, pattern)
return re.compile(pattern.upper())
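# Illustrative check (not part of the original script): pattern2regexp('GANTC')
# returns a compiled pattern equivalent to 'GA[ACGT]TC', so segments such as
# 'GAATC' and 'GACTC' are both reported by get_match_func(['GATC', 'GANTC']) below.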
# ------------------------------------------------------------------------------
def get_match_func(pattern):
# Input pattern can be a list or a string.
if not isinstance(pattern, list):
pattern = [ pattern ]
if has_wildcard(pattern):
pattern = pattern2regexp(pattern)
if len(pattern) == 1: # There is only a single pattern.
pattern = pattern[0] # Use the only element from the list as a single regexp.
def match_single_regexp(segment):
if re.match(pattern, segment):
return True
return False
return match_single_regexp
else: # There are multiple patterns.
def match_multi_regexp(segment):
for p in pattern:
if re.match(p, segment):
return True
return False
return match_multi_regexp
else: # No wildcard in any of the patterns.
if len(pattern) == 1: # There is only a single pattern.
pattern = pattern[0] # Use the only element from the list as a single string.
def match_single_string(segment):
if segment.startswith(pattern):
return True
return False
return match_single_string
else: # There are multiple patterns.
def match_multi_string(segment):
for p in pattern:
if segment.startswith(p):
return True
return False
return match_multi_string
# ------------------------------------------------------------------------------
def process_input(params):
f = open(params['inputfile' ], 'r')
g = open(params['outputfile'], 'w')
minsize = min([ len(p) for p in params['pattern'] ])
maxsize = max([ len(p) for p in params['pattern'] ])
matches = get_match_func(params['pattern'])
segment = ''
counter = 0
endl = ''
for line in f:
line = line.strip()
if line.startswith('>'):
# This is the beginning of a new sequence, but before starting it we must
# finish processing of the remaining segment of the previous sequence.
while len(segment) > minsize:
segment = segment[1:]
if matches(segment):
g.write(' ' + str(counter - len(segment) + 1))
if counter > 0:
g.write(' ' + str(counter)) # Close the previous sequence here.
firststr=re.split('\s+',line[1:])
g.write(endl+firststr[0])
segment = ''
counter = 0
endl = '\n'
continue
# Process next line of the sequence.
line = line.upper()
for symbol in line:
counter += 1
segment += symbol
while len(segment) > maxsize:
segment = segment[1:]
# Do pattern matching only if segment size equals maxsize.
if len(segment) == maxsize:
if matches(segment):
g.write(' ' + str(counter - maxsize + 1)) # maxsize == len(segment)
# Finish the last sequence.
while len(segment) > minsize:
segment = segment[1:]
if matches(segment):
g.write(' ' + str(counter - len(segment) + 1))
if counter > 0:
g.write(' ' + str(counter))
g.write('\n') # End the output file with a newline.
# Close files.
g.close()
f.close()
# ------------------------------------------------------------------------------
params = process_args(sys.argv)
process_input(params)
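# Example invocation (the FASTA path is illustrative):
#   python generate_site_positions.py MboI hg38 /path/to/hg38.fa
# This writes hg38_MboI.txt with one line per FASTA sequence: the sequence name,
# each restriction-site position, and finally the sequence length.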
|
python/py-pil/files/patch-setup.py | davidlrichmond/macports-ports | 1,199 | 11084235 |
--- setup.py.orig 2006-12-03 04:37:29.000000000 -0700
+++ setup.py 2009-09-18 19:58:45.000000000 -0600
@@ -33,12 +33,12 @@
#
# TIFF_ROOT = libinclude("/opt/tiff")
-TCL_ROOT = None
-JPEG_ROOT = None
-ZLIB_ROOT = None
-TIFF_ROOT = None
-FREETYPE_ROOT = None
-LCMS_ROOT = None
+TCL_ROOT = "__PREFIX__/lib", "__PREFIX__/include"
+JPEG_ROOT = "__PREFIX__/lib", "__PREFIX__/include"
+ZLIB_ROOT = "__PREFIX__/lib", "__PREFIX__/include"
+TIFF_ROOT = "__PREFIX__/lib", "__PREFIX__/include"
+FREETYPE_ROOT = "__PREFIX__/lib/", "__PREFIX__/include/freetype2/"
+LCMS_ROOT = "__PREFIX__/lib", "__PREFIX__/include"
# FIXME: add mechanism to explicitly *disable* the use of a library
@@ -331,30 +331,7 @@
"_imagingcms", ["_imagingcms.c"], libraries=["lcms"] + extra
))
- if sys.platform == "darwin":
- # locate Tcl/Tk frameworks
- frameworks = []
- framework_roots = [
- "/Library/Frameworks",
- "/System/Library/Frameworks"
- ]
- for root in framework_roots:
- if (os.path.exists(os.path.join(root, "Tcl.framework")) and
- os.path.exists(os.path.join(root, "Tk.framework"))):
- print "--- using frameworks at", root
- frameworks = ["-framework", "Tcl", "-framework", "Tk"]
- dir = os.path.join(root, "Tcl.framework", "Headers")
- add_directory(self.compiler.include_dirs, dir, 0)
- dir = os.path.join(root, "Tk.framework", "Headers")
- add_directory(self.compiler.include_dirs, dir, 1)
- break
- if frameworks:
- exts.append(Extension(
- "_imagingtk", ["_imagingtk.c", "Tk/tkImaging.c"],
- extra_compile_args=frameworks, extra_link_args=frameworks
- ))
- feature.tcl = feature.tk = 1 # mark as present
- elif feature.tcl and feature.tk:
+ if feature.tcl and feature.tk:
exts.append(Extension(
"_imagingtk", ["_imagingtk.c", "Tk/tkImaging.c"],
libraries=[feature.tcl, feature.tk]
|
kik_unofficial/protobuf/messagepath/v1/adaptive_cards_pb2.py | 3dik/kik-bot-api-unofficial | 120 | 11084262 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: messagepath/v1/adaptive_cards.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
import kik_unofficial.protobuf.protobuf_validation_pb2 as protobuf__validation__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='messagepath/v1/adaptive_cards.proto',
package='common.messagepath.v1',
syntax='proto3',
serialized_pb=_b('\n#messagepath/v1/adaptive_cards.proto\x12\x15\x63ommon.messagepath.v1\x1a\x19protobuf_validation.proto\"f\n\x16\x41\x64\x61ptiveCardAttachment\x12\x1b\n\x07\x63\x61rd_id\x18\x01 \x01(\tB\x08\xca\x9d%\x04(\x01\x30@H\x00\x12$\n\x0f\x63\x61rd_definition\x18\x02 \x01(\tB\t\xca\x9d%\x05(\x02\x30\x90NH\x00\x42\t\n\x07\x63ontentBz\n\x19\x63om.kik.messagepath.modelZVgithub.com/kikinteractive/xiphias-model-common/generated/go/messagepath/v1;messagepath\xa2\x02\x04MPTHb\x06proto3')
,
dependencies=[protobuf__validation__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_ADAPTIVECARDATTACHMENT = _descriptor.Descriptor(
name='AdaptiveCardAttachment',
full_name='common.messagepath.v1.AdaptiveCardAttachment',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='card_id', full_name='common.messagepath.v1.AdaptiveCardAttachment.card_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\004(\0010@'))),
_descriptor.FieldDescriptor(
name='card_definition', full_name='common.messagepath.v1.AdaptiveCardAttachment.card_definition', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\005(\0020\220N'))),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='content', full_name='common.messagepath.v1.AdaptiveCardAttachment.content',
index=0, containing_type=None, fields=[]),
],
serialized_start=89,
serialized_end=191,
)
_ADAPTIVECARDATTACHMENT.oneofs_by_name['content'].fields.append(
_ADAPTIVECARDATTACHMENT.fields_by_name['card_id'])
_ADAPTIVECARDATTACHMENT.fields_by_name['card_id'].containing_oneof = _ADAPTIVECARDATTACHMENT.oneofs_by_name['content']
_ADAPTIVECARDATTACHMENT.oneofs_by_name['content'].fields.append(
_ADAPTIVECARDATTACHMENT.fields_by_name['card_definition'])
_ADAPTIVECARDATTACHMENT.fields_by_name['card_definition'].containing_oneof = _ADAPTIVECARDATTACHMENT.oneofs_by_name['content']
DESCRIPTOR.message_types_by_name['AdaptiveCardAttachment'] = _ADAPTIVECARDATTACHMENT
AdaptiveCardAttachment = _reflection.GeneratedProtocolMessageType('AdaptiveCardAttachment', (_message.Message,), dict(
DESCRIPTOR = _ADAPTIVECARDATTACHMENT,
__module__ = 'messagepath.v1.adaptive_cards_pb2'
# @@protoc_insertion_point(class_scope:common.messagepath.v1.AdaptiveCardAttachment)
))
_sym_db.RegisterMessage(AdaptiveCardAttachment)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\031com.kik.messagepath.modelZVgithub.com/kikinteractive/xiphias-model-common/generated/go/messagepath/v1;messagepath\242\002\004MPTH'))
_ADAPTIVECARDATTACHMENT.fields_by_name['card_id'].has_options = True
_ADAPTIVECARDATTACHMENT.fields_by_name['card_id']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\004(\0010@'))
_ADAPTIVECARDATTACHMENT.fields_by_name['card_definition'].has_options = True
_ADAPTIVECARDATTACHMENT.fields_by_name['card_definition']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\005(\0020\220N'))
# @@protoc_insertion_point(module_scope)
|
Alignment/MuonAlignment/python/MCScenario_CRAFT1_22X.py | ckamtsikis/cmssw | 852 | 11084270 |
# To use this script:
# it's an ordinary Python script, not a CMSSW configuration file (though it writes and runs CMSSW configuration files)
# * you MUST have an inertGlobalPositionRcd.db in your working directory
# * you MUST NOT have an MCScenario_CRAFT1_22X.db
#
# to make the inertGlobalPositionRcd:
# cmsRun Alignment/MuonAlignment/python/makeGlobalPositionRcd_cfg.py
#
# to get rid of the MCScenario_CRAFT1_22X.db:
# rm MCScenario_CRAFT1_22X.db (naturally)
#
# to run this script:
# python MCScenario_CRAFT1_22X.py
#
# it will create
# * MCScenario_CRAFT1_22X.xml the XML file with randomly-distributed values, created directly by define_scenario()
# * convert_cfg.py the conversion configuration file
# * MCScenario_CRAFT1_22X.db the SQLite database created from the XML
# * check_cfg.py configuration file that converts the SQLite file back into XML
# * MCScenario_CRAFT1_22X_CHECKME.xml converted back, so that we can check the values that were saved to the database
#
# to check the output in Excel, do this
# ./Alignment/MuonAlignment/python/geometryXMLtoCSV.py < MCScenario_CRAFT1_22X_CHECKME.xml > MCScenario_CRAFT1_22X_CHECKME.csv
# and then open MCScenario_CRAFT1_22X_CHECKME.csv in Excel
from builtins import range
import random, os
from math import *
# set the initial seed for reproducibility!
random.seed(123456)
#### called once at the end of this script
def make_scenario_sqlite():
scenario = define_scenario()
write_xml(scenario, "MCScenario_CRAFT1_22X.xml")
write_conversion_cfg("convert_cfg.py", "MCScenario_CRAFT1_22X.xml", "MCScenario_CRAFT1_22X.db")
cmsRun("convert_cfg.py")
write_check_cfg("check_cfg.py", "MCScenario_CRAFT1_22X.db", "MCScenario_CRAFT1_22X_CHECKME.xml")
cmsRun("check_cfg.py")
#### that's it! everything this uses is defined below
def write_conversion_cfg(fileName, xmlFileName, dbFileName):
outfile = file(fileName, "w")
outfile.write("""
from Alignment.MuonAlignment.convertXMLtoSQLite_cfg import *
process.MuonGeometryDBConverter.fileName = "%(xmlFileName)s"
process.PoolDBOutputService.connect = "sqlite_file:%(dbFileName)s"
""" % vars())
def write_check_cfg(fileName, dbFileName, xmlFileName):
outfile = file(fileName, "w")
outfile.write("""
from Alignment.MuonAlignment.convertSQLitetoXML_cfg import *
process.PoolDBESSource.connect = "sqlite_file:%(dbFileName)s"
process.MuonGeometryDBConverter.outputXML.fileName = "%(xmlFileName)s"
process.MuonGeometryDBConverter.outputXML.relativeto = "ideal"
process.MuonGeometryDBConverter.outputXML.suppressDTChambers = False
process.MuonGeometryDBConverter.outputXML.suppressDTSuperLayers = False
process.MuonGeometryDBConverter.outputXML.suppressDTLayers = True
process.MuonGeometryDBConverter.outputXML.suppressCSCChambers = False
process.MuonGeometryDBConverter.outputXML.suppressCSCLayers = False
""" % vars())
def cmsRun(fileName):
os.system("cmsRun %(fileName)s" % vars())
########### writing a scenario in XML ##############################################################
# only needed to make the output XML readable
DTpreferred_order = {"wheel":1, "station":2, "sector":3, "superlayer":4, "layer":5}
CSCpreferred_order = {"endcap":1, "station":2, "ring":3, "chamber":4, "layer":5}
def DTsorter(a, b): return cmp(DTpreferred_order[a], DTpreferred_order[b])
def CSCsorter(a, b): return cmp(CSCpreferred_order[a], CSCpreferred_order[b])
# an instance of this class corresponds to one <DTChamber ... /> or <CSCStation ... />, etc.
class Alignable:
def __init__(self, alignabletype, **location):
self.alignabletype = alignabletype
self.location = location
def writeXML(self):
parameters = self.location.keys()
if self.alignabletype[0:2] == "DT":
parameters.sort(DTsorter)
else:
parameters.sort(CSCsorter)
output = ["<", self.alignabletype, " "]
for parameter in parameters:
output.extend([parameter, "=\"", str(self.location[parameter]), "\" "])
output.append("/>")
return "".join(output)
preferred_order = {"x":1, "y":2, "z":3, "phix":4, "phiy":5, "phiz":6}
def sorter(a, b): return cmp(preferred_order[a], preferred_order[b])
# an instance of this class corresponds to one <setposition ... />
class Position:
def __init__(self, **location):
self.location = location
def writeXML(self):
parameters = self.location.keys()
parameters.sort(sorter)
output = ["<setposition relativeto=\"ideal\" "]
for parameter in parameters:
output.extend([parameter, "=\"", str(self.location[parameter]), "\" "])
output.append("/>")
return "".join(output)
# an instance of this class corresponds to one <operation> ... </operation> in the XML file
class Operation:
def __init__(self, alignable, position):
self.alignable = alignable
self.position = position
def writeXML(self):
output = ["<operation> ", self.alignable.writeXML(), " ", self.position.writeXML(), " </operation>\n"]
return "".join(output)
def write_xml(scenario, fileName):
# a scenario is an ordered list of Operations
XMLlist = ["<MuonAlignment>\n"]
for operation in scenario:
XMLlist.append(operation.writeXML())
XMLlist.append("</MuonAlignment>\n")
XMLstring = "".join(XMLlist)
outfile = file(fileName, "w")
outfile.write(XMLstring)
class DTChamber:
def __init__(self, **location):
self.__dict__.update(location)
class CSCChamber:
def __init__(self, **location):
self.__dict__.update(location)
########### defining the actual scenario ##############################################################
# this is the interesting part: where we define a scenario for CRAFT1 MC
def define_scenario():
# this will be a list of operations to write to an XML file
scenario = []
# Uncertainty in DT chamber positions comes in two parts:
# 1. positions within sectors
# 2. positions of the sector-groups
# Aligned chambers (wheels -1, 0, +1 except sectors 1 and 7)
# uncertainty within sectors:
# x: 0.08 cm (from segment-matching) phix: 0.0007 rad (from MC)
# y: 0.10 cm (from MC) phiy: 0.0007 rad (from segment-matching)
# z: 0.10 cm (from MC) phiz: 0.0003 rad (from MC)
# uncertainty of sector-groups (depends on choice of pT cut, not well understood):
# x: 0.05 cm
# Unaligned chambers uncertainty within sectors:
# x: 0.08 cm (same as above) phix: 0.0016 rad
# y: 0.24 cm phiy: 0.0021 rad
# z: 0.42 cm with a -0.35 cm bias phiz: 0.0010 rad
# uncertainty of sector-groups:
# x: 0.65 cm
# These come from actual alignments measured in the aligned
# chambers (we assume that the unaligned chambers have
# misalignments on the same scale)
# Also, superlayer z uncertainty is 0.054 cm
# Before starting, let's build a list of chambers
DTchambers = []
for wheel in -2, -1, 0, 1, 2:
for station in 1, 2, 3, 4:
if station == 4: nsectors = 14
else: nsectors = 12
for sector in range(1, nsectors+1):
DTchambers.append(DTChamber(wheel = wheel, station = station, sector = sector))
# the superlayers
for dtchamber in DTchambers:
for superlayer in 1, 2, 3:
if superlayer == 2 and dtchamber.station == 4: continue
alignable = Alignable("DTSuperLayer", wheel = dtchamber.wheel, station = dtchamber.station, sector = dtchamber.sector, superlayer = superlayer)
position = Position(x = 0, y = 0, z = random.gauss(0, 0.054), phix = 0, phiy = 0, phiz = 0)
scenario.append(Operation(alignable, position))
sector_errx = {}
# sector-groups for aligned chambers:
for wheel in -1, 0, 1:
for sector in 2, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14:
sector_errx[wheel, sector] = random.gauss(0., 0.05)
# sector-groups for unaligned chambers:
for wheel in -1, 0, 1:
for sector in 1, 7:
sector_errx[wheel, sector] = random.gauss(0., 0.65)
for wheel in -2, 2:
for sector in 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14:
sector_errx[wheel, sector] = random.gauss(0., 0.65)
for dtchamber in DTchambers:
# within sectors for aligned chambers:
if dtchamber.wheel in (-1, 0, 1) and dtchamber.sector in (2, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14):
errx = random.gauss(0, 0.08)
erry = random.gauss(0, 0.10)
errz = random.gauss(0, 0.10)
errphix = random.gauss(0, 0.0007)
errphiy = random.gauss(0, 0.0007)
errphiz = random.gauss(0, 0.0003)
# within sectors for unaligned chambers:
else:
errx = random.gauss(0, 0.08)
erry = random.gauss(0, 0.24)
errz = random.gauss(-0.35, 0.42)
errphix = random.gauss(0, 0.0016)
errphiy = random.gauss(0, 0.0021)
errphiz = random.gauss(0, 0.0010)
errx += sector_errx[dtchamber.wheel, dtchamber.sector]
# now turn this into an operation
alignable = Alignable("DTChamber", wheel = dtchamber.wheel, station = dtchamber.station, sector = dtchamber.sector)
position = Position(x = errx, y = erry, z = errz, phix = errphix, phiy = errphiy, phiz = errphiz)
scenario.append(Operation(alignable, position))
# Uncertainty in CSC chamber positions comes in 5 parts:
# 1. 0.0092 cm layer x misalignments observed with beam-halo tracks
# 2. isotropic photogrammetry uncertainty of 0.03 cm (x, y, z) and 0.00015 rad in phiz
# 3. 0.0023 rad phiy misalignment observed with beam-halo tracks
# 4. 0.1438 cm z and 0.00057 rad phix uncertainty between rings from SLM (from comparison in 0T data with PG)
# 5. 0.05 cm (x, y, z) disk misalignments and 0.0001 rad rotation around beamline
# Before starting, let's build a list of chambers
CSCchambers = []
for endcap in 1, 2:
for station, ring in (1, 1), (1, 2), (1, 3), (1, 4), (2, 1), (2, 2), (3, 1), (3, 2), (4, 1):
if station > 1 and ring == 1:
nchambers = 18
else:
nchambers = 36
for chamber in range(1, nchambers+1):
CSCchambers.append(CSCChamber(endcap = endcap, station = station, ring = ring, chamber = chamber))
# First, the layer uncertainties: x only for simplicity, observed 0.0092 cm in overlaps alignment test
for chamber in CSCchambers:
for layer in 1, 2, 3, 4, 5, 6:
alignable = Alignable("CSCLayer", endcap = chamber.endcap, station = chamber.station, ring = chamber.ring, chamber = chamber.chamber, layer = layer)
position = Position(x = random.gauss(0, 0.0092), y = 0, z = 0, phix = 0, phiy = 0, phiz = 0)
scenario.append(Operation(alignable, position))
# Next, the ring errors from DCOPS (derived from comparison with photogrammetry)
CSCrings = []
for endcap in 1, 2:
for station, ring in (1, 1), (1, 2), (1, 3), (1, 4), (2, 1), (2, 2), (3, 1), (3, 2), (4, 1):
CSCrings.append(CSCChamber(endcap = endcap, station = station, ring = ring, z = random.gauss(0, 0.1438), phix = random.gauss(0, 0.00057)))
# Next, the chamber errors
for chamber in CSCchambers:
errx = random.gauss(0, 0.03)
erry = random.gauss(0, 0.03)
errz = random.gauss(0, 0.03)
errphix = random.gauss(0, 0.00057)
errphiy = random.gauss(0, 0.0023)
errphiz = random.gauss(0, 0.00015)
for ring in CSCrings:
if ring.endcap == chamber.endcap and ring.station == chamber.station and ring.ring == chamber.ring:
errz += ring.z
errphix += ring.phix
break
alignable = Alignable("CSCChamber", endcap = chamber.endcap, station = chamber.station, ring = chamber.ring, chamber = chamber.chamber)
position = Position(x = errx, y = erry, z = errz, phix = errphix, phiy = errphiy, phiz = errphiz)
scenario.append(Operation(alignable, position))
# Finally, the disk errors
for endcap in 1, 2:
for station in 1, 2, 3, 4:
alignable = Alignable("CSCStation", endcap = endcap, station = station)
position = Position(x = random.gauss(0, 0.05), y = random.gauss(0, 0.05), z = random.gauss(0, 0.05), phix = 0., phiy = 0., phiz = random.gauss(0, 0.0001))
scenario.append(Operation(alignable, position))
return scenario
# run it all!
make_scenario_sqlite()
|
DQMServices/Components/python/DQMMessageLoggerClient_cfi.py | ckamtsikis/cmssw | 852 | 11084285 | import FWCore.ParameterSet.Config as cms
DQMMessageLoggerClient = cms.EDAnalyzer ("DQMMessageLoggerClient",
Directory = cms.string("MessageLogger")
)
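# A hedged usage sketch: in a CMSSW configuration this _cfi fragment would
# typically be loaded into a process and scheduled on a path; the process
# name below is an assumption made for illustration only.
#
#     process = cms.Process("DQM")
#     process.load("DQMServices.Components.DQMMessageLoggerClient_cfi")
#     process.messageLoggerPath = cms.Path(process.DQMMessageLoggerClient)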
|
env/lib/python3.8/site-packages/plotly/validators/layout/mapbox/__init__.py | acrucetta/Chicago_COVI_WebApp | 11,750 | 11084288 | import sys
if sys.version_info < (3, 7):
from ._zoom import ZoomValidator
from ._uirevision import UirevisionValidator
from ._style import StyleValidator
from ._pitch import PitchValidator
from ._layerdefaults import LayerdefaultsValidator
from ._layers import LayersValidator
from ._domain import DomainValidator
from ._center import CenterValidator
from ._bearing import BearingValidator
from ._accesstoken import AccesstokenValidator
else:
from _plotly_utils.importers import relative_import
__all__, __getattr__, __dir__ = relative_import(
__name__,
[],
[
"._zoom.ZoomValidator",
"._uirevision.UirevisionValidator",
"._style.StyleValidator",
"._pitch.PitchValidator",
"._layerdefaults.LayerdefaultsValidator",
"._layers.LayersValidator",
"._domain.DomainValidator",
"._center.CenterValidator",
"._bearing.BearingValidator",
"._accesstoken.AccesstokenValidator",
],
)
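# A hedged note on the lazy path above: on Python >= 3.7 nothing is imported
# eagerly; an access such as the illustrative line below is resolved by the
# generated __getattr__, which imports ._zoom only on first use.
#
#     from plotly.validators.layout.mapbox import ZoomValidator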
|
integration/rpc/controller/controller_pb2_grpc.py | meldafrawi/longhorn-engine | 160 | 11084308 | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
import controller_pb2 as controller__pb2
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
class ControllerServiceStub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.VolumeGet = channel.unary_unary(
'/ptypes.ControllerService/VolumeGet',
request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
response_deserializer=controller__pb2.Volume.FromString,
)
self.VolumeStart = channel.unary_unary(
'/ptypes.ControllerService/VolumeStart',
request_serializer=controller__pb2.VolumeStartRequest.SerializeToString,
response_deserializer=controller__pb2.Volume.FromString,
)
self.VolumeShutdown = channel.unary_unary(
'/ptypes.ControllerService/VolumeShutdown',
request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
response_deserializer=controller__pb2.Volume.FromString,
)
self.VolumeSnapshot = channel.unary_unary(
'/ptypes.ControllerService/VolumeSnapshot',
request_serializer=controller__pb2.VolumeSnapshotRequest.SerializeToString,
response_deserializer=controller__pb2.VolumeSnapshotReply.FromString,
)
self.VolumeRevert = channel.unary_unary(
'/ptypes.ControllerService/VolumeRevert',
request_serializer=controller__pb2.VolumeRevertRequest.SerializeToString,
response_deserializer=controller__pb2.Volume.FromString,
)
self.VolumeExpand = channel.unary_unary(
'/ptypes.ControllerService/VolumeExpand',
request_serializer=controller__pb2.VolumeExpandRequest.SerializeToString,
response_deserializer=controller__pb2.Volume.FromString,
)
self.VolumeFrontendStart = channel.unary_unary(
'/ptypes.ControllerService/VolumeFrontendStart',
request_serializer=controller__pb2.VolumeFrontendStartRequest.SerializeToString,
response_deserializer=controller__pb2.Volume.FromString,
)
self.VolumeFrontendShutdown = channel.unary_unary(
'/ptypes.ControllerService/VolumeFrontendShutdown',
request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
response_deserializer=controller__pb2.Volume.FromString,
)
self.ReplicaList = channel.unary_unary(
'/ptypes.ControllerService/ReplicaList',
request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
response_deserializer=controller__pb2.ReplicaListReply.FromString,
)
self.ReplicaGet = channel.unary_unary(
'/ptypes.ControllerService/ReplicaGet',
request_serializer=controller__pb2.ReplicaAddress.SerializeToString,
response_deserializer=controller__pb2.ControllerReplica.FromString,
)
self.ControllerReplicaCreate = channel.unary_unary(
'/ptypes.ControllerService/ControllerReplicaCreate',
request_serializer=controller__pb2.ControllerReplicaCreateRequest.SerializeToString,
response_deserializer=controller__pb2.ControllerReplica.FromString,
)
self.ReplicaDelete = channel.unary_unary(
'/ptypes.ControllerService/ReplicaDelete',
request_serializer=controller__pb2.ReplicaAddress.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.ReplicaUpdate = channel.unary_unary(
'/ptypes.ControllerService/ReplicaUpdate',
request_serializer=controller__pb2.ControllerReplica.SerializeToString,
response_deserializer=controller__pb2.ControllerReplica.FromString,
)
self.ReplicaPrepareRebuild = channel.unary_unary(
'/ptypes.ControllerService/ReplicaPrepareRebuild',
request_serializer=controller__pb2.ReplicaAddress.SerializeToString,
response_deserializer=controller__pb2.ReplicaPrepareRebuildReply.FromString,
)
self.ReplicaVerifyRebuild = channel.unary_unary(
'/ptypes.ControllerService/ReplicaVerifyRebuild',
request_serializer=controller__pb2.ReplicaAddress.SerializeToString,
response_deserializer=controller__pb2.ControllerReplica.FromString,
)
self.JournalList = channel.unary_unary(
'/ptypes.ControllerService/JournalList',
request_serializer=controller__pb2.JournalListRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.VersionDetailGet = channel.unary_unary(
'/ptypes.ControllerService/VersionDetailGet',
request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
response_deserializer=controller__pb2.VersionDetailGetReply.FromString,
)
self.MetricGet = channel.unary_stream(
'/ptypes.ControllerService/MetricGet',
request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
response_deserializer=controller__pb2.MetricGetReply.FromString,
)
class ControllerServiceServicer(object):
# missing associated documentation comment in .proto file
pass
def VolumeGet(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def VolumeStart(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def VolumeShutdown(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def VolumeSnapshot(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def VolumeRevert(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def VolumeExpand(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def VolumeFrontendStart(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def VolumeFrontendShutdown(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ReplicaList(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ReplicaGet(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ControllerReplicaCreate(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ReplicaDelete(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ReplicaUpdate(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ReplicaPrepareRebuild(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ReplicaVerifyRebuild(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def JournalList(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def VersionDetailGet(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MetricGet(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_ControllerServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'VolumeGet': grpc.unary_unary_rpc_method_handler(
servicer.VolumeGet,
request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
response_serializer=controller__pb2.Volume.SerializeToString,
),
'VolumeStart': grpc.unary_unary_rpc_method_handler(
servicer.VolumeStart,
request_deserializer=controller__pb2.VolumeStartRequest.FromString,
response_serializer=controller__pb2.Volume.SerializeToString,
),
'VolumeShutdown': grpc.unary_unary_rpc_method_handler(
servicer.VolumeShutdown,
request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
response_serializer=controller__pb2.Volume.SerializeToString,
),
'VolumeSnapshot': grpc.unary_unary_rpc_method_handler(
servicer.VolumeSnapshot,
request_deserializer=controller__pb2.VolumeSnapshotRequest.FromString,
response_serializer=controller__pb2.VolumeSnapshotReply.SerializeToString,
),
'VolumeRevert': grpc.unary_unary_rpc_method_handler(
servicer.VolumeRevert,
request_deserializer=controller__pb2.VolumeRevertRequest.FromString,
response_serializer=controller__pb2.Volume.SerializeToString,
),
'VolumeExpand': grpc.unary_unary_rpc_method_handler(
servicer.VolumeExpand,
request_deserializer=controller__pb2.VolumeExpandRequest.FromString,
response_serializer=controller__pb2.Volume.SerializeToString,
),
'VolumeFrontendStart': grpc.unary_unary_rpc_method_handler(
servicer.VolumeFrontendStart,
request_deserializer=controller__pb2.VolumeFrontendStartRequest.FromString,
response_serializer=controller__pb2.Volume.SerializeToString,
),
'VolumeFrontendShutdown': grpc.unary_unary_rpc_method_handler(
servicer.VolumeFrontendShutdown,
request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
response_serializer=controller__pb2.Volume.SerializeToString,
),
'ReplicaList': grpc.unary_unary_rpc_method_handler(
servicer.ReplicaList,
request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
response_serializer=controller__pb2.ReplicaListReply.SerializeToString,
),
'ReplicaGet': grpc.unary_unary_rpc_method_handler(
servicer.ReplicaGet,
request_deserializer=controller__pb2.ReplicaAddress.FromString,
response_serializer=controller__pb2.ControllerReplica.SerializeToString,
),
'ControllerReplicaCreate': grpc.unary_unary_rpc_method_handler(
servicer.ControllerReplicaCreate,
request_deserializer=controller__pb2.ControllerReplicaCreateRequest.FromString,
response_serializer=controller__pb2.ControllerReplica.SerializeToString,
),
'ReplicaDelete': grpc.unary_unary_rpc_method_handler(
servicer.ReplicaDelete,
request_deserializer=controller__pb2.ReplicaAddress.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
'ReplicaUpdate': grpc.unary_unary_rpc_method_handler(
servicer.ReplicaUpdate,
request_deserializer=controller__pb2.ControllerReplica.FromString,
response_serializer=controller__pb2.ControllerReplica.SerializeToString,
),
'ReplicaPrepareRebuild': grpc.unary_unary_rpc_method_handler(
servicer.ReplicaPrepareRebuild,
request_deserializer=controller__pb2.ReplicaAddress.FromString,
response_serializer=controller__pb2.ReplicaPrepareRebuildReply.SerializeToString,
),
'ReplicaVerifyRebuild': grpc.unary_unary_rpc_method_handler(
servicer.ReplicaVerifyRebuild,
request_deserializer=controller__pb2.ReplicaAddress.FromString,
response_serializer=controller__pb2.ControllerReplica.SerializeToString,
),
'JournalList': grpc.unary_unary_rpc_method_handler(
servicer.JournalList,
request_deserializer=controller__pb2.JournalListRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
'VersionDetailGet': grpc.unary_unary_rpc_method_handler(
servicer.VersionDetailGet,
request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
response_serializer=controller__pb2.VersionDetailGetReply.SerializeToString,
),
'MetricGet': grpc.unary_stream_rpc_method_handler(
servicer.MetricGet,
request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
response_serializer=controller__pb2.MetricGetReply.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'ptypes.ControllerService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
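# A minimal client sketch for the generated stub above (illustrative only; the
# address is an assumption, not something defined in this module):
#
#     channel = grpc.insecure_channel('localhost:9502')
#     stub = ControllerServiceStub(channel)
#     volume = stub.VolumeGet(google_dot_protobuf_dot_empty__pb2.Empty())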
|
vimfiles/bundle/vim-python/submodules/pylint/tests/benchmark/test_baseline_benchmarks.py | ciskoinch8/vimrc | 463 | 11084352 | """ Profiles basic -jX functionality """
# Copyright (c) 2020-2021 <NAME> <<EMAIL>>
# Copyright (c) 2020 hippo91 <<EMAIL>>
# Copyright (c) 2020 <NAME> <<EMAIL>>
# Copyright (c) 2020 <NAME> <<EMAIL>>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/LICENSE
# pylint: disable=protected-access,missing-function-docstring,no-self-use
import os
import pprint
import time
from unittest.mock import patch
import pytest
import pylint.interfaces
from pylint.checkers.base_checker import BaseChecker
from pylint.lint import PyLinter, Run, check_parallel
from pylint.testutils import GenericTestReporter as Reporter
from pylint.utils import register_plugins
def _empty_filepath():
return os.path.abspath(
os.path.join(
os.path.dirname(__file__), "..", "input", "benchmark_minimal_file.py"
)
)
class SleepingChecker(BaseChecker):
"""A checker that sleeps, the wall-clock time should reduce as we add workers
As we apply a roughly constant amount of "work" in this checker, any variance is
likely to be caused by the pylint system."""
__implements__ = (pylint.interfaces.IRawChecker,)
name = "sleeper"
msgs = {
"R9999": (
"Test",
"test-check",
"Some helpful text.",
)
}
sleep_duration = 0.5 # the time to pretend we're doing work for
def process_module(self, _astroid):
"""Sleeps for `sleep_duration` on each call
This effectively means each file costs ~`sleep_duration`+framework overhead"""
time.sleep(self.sleep_duration)
class SleepingCheckerLong(BaseChecker):
"""A checker that sleeps, the wall-clock time should reduce as we add workers
As we apply a roughly constant amount of "work" in this checker, any variance is
likely to be caused by the pylint system."""
__implements__ = (pylint.interfaces.IRawChecker,)
name = "long-sleeper"
msgs = {
"R9999": (
"Test",
"test-check",
"Some helpful text.",
)
}
sleep_duration = 0.5 # the time to pretend we're doing work for
def process_module(self, _astroid):
"""Sleeps for `sleep_duration` on each call
This effectively means each file costs ~`sleep_duration`+framework overhead"""
time.sleep(self.sleep_duration)
class NoWorkChecker(BaseChecker):
""" A checker that sleeps, the wall-clock time should change as we add threads """
__implements__ = (pylint.interfaces.IRawChecker,)
name = "sleeper"
msgs = {
"R9999": (
"Test",
"test-check",
"Some helpful text.",
)
}
def process_module(self, _astroid):
pass
@pytest.mark.benchmark(
group="baseline",
)
class TestEstablishBaselineBenchmarks:
"""Naive benchmarks for the high-level pylint framework
Because this benchmarks the fundamental and common parts, changes seen here will
impact everything else."""
empty_filepath = _empty_filepath()
empty_file_info = (
"name-emptyfile-file",
_empty_filepath(),
"modname-emptyfile-mod",
)
lot_of_files = 500
def test_baseline_benchmark_j1(self, benchmark):
"""Establish a baseline of pylint performance with no work
We will add extra Checkers in other benchmarks.
Because this is so simple, if this regresses, something very serious has happened
"""
linter = PyLinter(reporter=Reporter())
fileinfos = [self.empty_filepath] # Single file to end-to-end the system
assert linter.config.jobs == 1
assert len(linter._checkers) == 1, "Should just have 'master'"
benchmark(linter.check, fileinfos)
assert (
linter.msg_status == 0
), "Expected no errors to be thrown: %s" % pprint.pformat(
linter.reporter.messages
)
def test_baseline_benchmark_j10(self, benchmark):
"""Establish a baseline of pylint performance with no work across threads
Same as `test_baseline_benchmark_j1` but we use -j10 with 10 fake files to
ensure the end-to-end system is invoked.
Because this is also so simple, if this regresses, something very serious has
happened.
"""
linter = PyLinter(reporter=Reporter())
linter.config.jobs = 10
# Create file per worker, using all workers
fileinfos = [self.empty_filepath for _ in range(linter.config.jobs)]
assert linter.config.jobs == 10
assert len(linter._checkers) == 1, "Should have 'master'"
benchmark(linter.check, fileinfos)
assert (
linter.msg_status == 0
), "Expected no errors to be thrown: %s" % pprint.pformat(
linter.reporter.messages
)
def test_baseline_benchmark_check_parallel_j10(self, benchmark):
""" Should demonstrate times very close to `test_baseline_benchmark_j10` """
linter = PyLinter(reporter=Reporter())
# Create file per worker, using all workers
fileinfos = [self.empty_file_info for _ in range(linter.config.jobs)]
assert len(linter._checkers) == 1, "Should have 'master'"
benchmark(check_parallel, linter, jobs=10, files=fileinfos)
assert (
linter.msg_status == 0
), "Expected no errors to be thrown: %s" % pprint.pformat(
linter.reporter.messages
)
def test_baseline_lots_of_files_j1(self, benchmark):
"""Establish a baseline with only 'master' checker being run in -j1
We do not register any checkers except the default 'master', so the cost is just
that of the system with a lot of files registered"""
if benchmark.disabled:
benchmark(print, "skipping, only benchmark large file counts")
return # _only_ run this test if profiling
linter = PyLinter(reporter=Reporter())
linter.config.jobs = 1
fileinfos = [self.empty_filepath for _ in range(self.lot_of_files)]
assert linter.config.jobs == 1
assert len(linter._checkers) == 1, "Should have 'master'"
benchmark(linter.check, fileinfos)
assert (
linter.msg_status == 0
), "Expected no errors to be thrown: %s" % pprint.pformat(
linter.reporter.messages
)
def test_baseline_lots_of_files_j10(self, benchmark):
"""Establish a baseline with only 'master' checker being run in -j10
As with the -j1 variant above `test_baseline_lots_of_files_j1`, we do not
register any checkers except the default 'master', so the cost is just that of
the check_parallel system across 10 workers, plus the overhead of PyLinter"""
if benchmark.disabled:
benchmark(print, "skipping, only benchmark large file counts")
return # _only_ run this test if profiling
linter = PyLinter(reporter=Reporter())
linter.config.jobs = 10
fileinfos = [self.empty_filepath for _ in range(self.lot_of_files)]
assert linter.config.jobs == 10
assert len(linter._checkers) == 1, "Should have 'master'"
benchmark(linter.check, fileinfos)
assert (
linter.msg_status == 0
), "Expected no errors to be thrown: %s" % pprint.pformat(
linter.reporter.messages
)
def test_baseline_lots_of_files_j1_empty_checker(self, benchmark):
"""Baselines pylint for a single extra checker being run in -j1, for N-files
We use a checker that does no work, so the cost is just that of the system at
scale"""
if benchmark.disabled:
benchmark(print, "skipping, only benchmark large file counts")
return # _only_ run this test if profiling
linter = PyLinter(reporter=Reporter())
linter.config.jobs = 1
linter.register_checker(NoWorkChecker(linter))
fileinfos = [self.empty_filepath for _ in range(self.lot_of_files)]
assert linter.config.jobs == 1
assert len(linter._checkers) == 2, "Should have 'master' and 'sleeper'"
benchmark(linter.check, fileinfos)
assert (
linter.msg_status == 0
), "Expected no errors to be thrown: %s" % pprint.pformat(
linter.reporter.messages
)
def test_baseline_lots_of_files_j10_empty_checker(self, benchmark):
"""Baselines pylint for a single extra checker being run in -j10, for N-files
We use a checker that does no work, so the cost is just that of the system at
scale, across workers"""
if benchmark.disabled:
benchmark(print, "skipping, only benchmark large file counts")
return # _only_ run this test if profiling
linter = PyLinter(reporter=Reporter())
linter.config.jobs = 10
linter.register_checker(NoWorkChecker(linter))
fileinfos = [self.empty_filepath for _ in range(self.lot_of_files)]
assert linter.config.jobs == 10
assert len(linter._checkers) == 2, "Should have 'master' and 'sleeper'"
benchmark(linter.check, fileinfos)
assert (
linter.msg_status == 0
), "Expected no errors to be thrown: %s" % pprint.pformat(
linter.reporter.messages
)
def test_baseline_benchmark_j1_single_working_checker(self, benchmark):
"""Establish a baseline of single-worker performance for PyLinter
Here we mimic a single Checker that does some work so that we can see the
impact of running a simple system with -j1 against the same system with -j10.
We expect this benchmark to take very close to
`numfiles*SleepingChecker.sleep_duration`"""
if benchmark.disabled:
benchmark(print, "skipping, do not want to sleep in main tests")
return # _only_ run this test if profiling
linter = PyLinter(reporter=Reporter())
linter.register_checker(SleepingChecker(linter))
# Check the same number of files as
# `test_baseline_benchmark_j10_single_working_checker`
fileinfos = [self.empty_filepath for _ in range(10)]
assert linter.config.jobs == 1
assert len(linter._checkers) == 2, "Should have 'master' and 'sleeper'"
benchmark(linter.check, fileinfos)
assert (
linter.msg_status == 0
), "Expected no errors to be thrown: %s" % pprint.pformat(
linter.reporter.messages
)
def test_baseline_benchmark_j10_single_working_checker(self, benchmark):
"""Establishes baseline of multi-worker performance for PyLinter/check_parallel
We expect this benchmark to take less time than test_baseline_benchmark_j1, roughly
`error_margin*(1/J)*(numfiles*SleepingChecker.sleep_duration)`.
Because of the cost of the framework and system, the performance difference will
*not* be 1/10 of the -j1 versions."""
if benchmark.disabled:
benchmark(print, "skipping, do not want to sleep in main tests")
return # _only_ run this test if profiling
linter = PyLinter(reporter=Reporter())
linter.config.jobs = 10
linter.register_checker(SleepingChecker(linter))
# Check the same number of files as
# `test_baseline_benchmark_j1_single_working_checker`
fileinfos = [self.empty_filepath for _ in range(10)]
assert linter.config.jobs == 10
assert len(linter._checkers) == 2, "Should have 'master' and 'sleeper'"
benchmark(linter.check, fileinfos)
assert (
linter.msg_status == 0
), "Expected no errors to be thrown: %s" % pprint.pformat(
linter.reporter.messages
)
def test_baseline_benchmark_j1_all_checks_single_file(self, benchmark):
"""Runs a single file, with -j1, against all plug-ins
... that's the intent at least.
"""
# Just 1 file, but all Checkers/Extensions
fileinfos = [self.empty_filepath]
runner = benchmark(Run, fileinfos, reporter=Reporter(), exit=False)
assert runner.linter.config.jobs == 1
print("len(runner.linter._checkers)", len(runner.linter._checkers))
assert len(runner.linter._checkers) > 1, "Should have more than 'master'"
assert (
runner.linter.msg_status == 0
), "Expected no errors to be thrown: %s" % pprint.pformat(
runner.linter.reporter.messages
)
def test_baseline_benchmark_j1_all_checks_lots_of_files(self, benchmark):
"""Runs lots of files, with -j1, against all plug-ins
... that's the intent at least.
"""
if benchmark.disabled:
benchmark(print, "skipping, only benchmark large file counts")
return # _only_ run this test if profiling
linter = PyLinter()
# Register all checkers/extensions and enable them
with patch("os.listdir", return_value=["pylint", "tests"]):
register_plugins(
linter,
os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..")),
)
linter.load_default_plugins()
linter.enable("all")
# Just 1 file, but all Checkers/Extensions
fileinfos = [self.empty_filepath for _ in range(self.lot_of_files)]
assert linter.config.jobs == 1
print("len(linter._checkers)", len(linter._checkers))
assert len(linter._checkers) > 1, "Should have more than 'master'"
benchmark(linter.check, fileinfos)
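# A hedged note on invoking this module: with pytest-benchmark installed the
# suite is collected like any other pytest module, e.g. (illustrative invocation)
#
#     pytest tests/benchmark/test_baseline_benchmarks.py --benchmark-only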
|
terrascript/resource/hashicorp/dns.py | mjuenema/python-terrascript | 507 | 11084358 | # terrascript/resource/hashicorp/dns.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:15:19 UTC)
import terrascript
class dns_a_record_set(terrascript.Resource):
pass
class dns_aaaa_record_set(terrascript.Resource):
pass
class dns_cname_record(terrascript.Resource):
pass
class dns_mx_record_set(terrascript.Resource):
pass
class dns_ns_record_set(terrascript.Resource):
pass
class dns_ptr_record(terrascript.Resource):
pass
class dns_srv_record_set(terrascript.Resource):
pass
class dns_txt_record_set(terrascript.Resource):
pass
__all__ = [
"dns_a_record_set",
"dns_aaaa_record_set",
"dns_cname_record",
"dns_mx_record_set",
"dns_ns_record_set",
"dns_ptr_record",
"dns_srv_record_set",
"dns_txt_record_set",
]
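# A hedged usage sketch (illustrative only; the record name, zone and addresses
# below are assumptions): the resource classes above plug into a
# terrascript.Terrascript config, which then renders Terraform JSON.
#
#     import terrascript
#     config = terrascript.Terrascript()
#     config += dns_a_record_set("www", zone="example.com.", addresses=["10.0.0.1"])
#     print(str(config))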
|
tests/asr/spokestack/test_speech_recognizer.py | spokestack/spokestack-python | 139 | 11084425 | """
This module tests the cloud speech recognizer
"""
import json
from unittest import mock
import numpy as np
from spokestack.asr.spokestack.speech_recognizer import CloudSpeechRecognizer
from spokestack.context import SpeechContext
def test_recognize():
context = SpeechContext()
recognizer = CloudSpeechRecognizer()
recognizer._client._socket = mock.MagicMock()
recognizer._client._socket.recv.return_value = json.dumps(
{
"error": None,
"final": False,
"hypotheses": [{"confidence": 0.5, "transcript": "this is a test"}],
"status": "ok",
}
)
frame = np.random.rand(160).astype(np.int16)
# call with context active to test _begin and first _send
context.is_active = True
recognizer(context, frame)
# call again to test with internal _is_active as True
recognizer(context, frame)
# call with context not active to test _commit
context.is_active = False
recognizer(context, frame)
recognizer._client._socket.recv.return_value = json.dumps(
{
"error": None,
"final": True,
"hypotheses": [{"confidence": 0.5, "transcript": "this is a test"}],
"status": "ok",
}
)
# call with the client indicating it's the final frame to test _receive
recognizer(context, frame)
recognizer._client._socket.max_idle_time = 500
# test timeout
for i in range(501):
recognizer(context, frame)
assert not context.is_active
assert not recognizer._client.is_connected
def test_response():
context = SpeechContext()
recognizer = CloudSpeechRecognizer()
recognizer._client._socket = mock.MagicMock()
recognizer._client._socket.recv.return_value = json.dumps(
{
"error": None,
"final": False,
"hypotheses": [{"confidence": 0.5, "transcript": "this is a test"}],
"status": "ok",
}
)
frame = np.random.rand(160).astype(np.int16)
# run through all the steps
context.is_active = True
recognizer(context, frame)
recognizer(context, frame)
context.is_active = False
recognizer(context, frame)
recognizer._client._socket.recv.return_value = json.dumps(
{
"error": None,
"final": True,
"hypotheses": [{"confidence": 0.5, "transcript": "this is a test"}],
"status": "ok",
}
)
# process the final frame with the final transcript
recognizer(context, frame)
assert context.transcript == "this is a test"
assert context.confidence == 0.5
recognizer.close()
def test_reset():
context = SpeechContext()
recognizer = CloudSpeechRecognizer()
recognizer._client._socket = mock.MagicMock()
recognizer._client._socket.recv.return_value = json.dumps(
{
"error": None,
"final": False,
"hypotheses": [{"confidence": 0.5, "transcript": "this is a test"}],
"status": "ok",
}
)
frame = np.random.rand(160).astype(np.int16)
# trigger _begin and first _send
context.is_active = True
recognizer(context, frame)
# trigger _send
recognizer(context, frame)
# we haven't triggered _commit or sent the final frame
# which means context is still active and _is_active is True
recognizer.reset()
assert not recognizer._is_active
assert not recognizer._client.is_connected
def test_empty_transcript():
context = SpeechContext()
recognizer = CloudSpeechRecognizer()
recognizer._client._socket = mock.MagicMock()
recognizer._client._socket.recv.return_value = json.dumps(
{
"error": None,
"final": False,
"hypotheses": [{"confidence": 0.5, "transcript": ""}],
"status": "ok",
}
)
frame = np.random.rand(160).astype(np.int16)
# run through all the steps
context.is_active = True
recognizer(context, frame)
recognizer(context, frame)
context.is_active = False
recognizer(context, frame)
recognizer._client._socket.recv.return_value = json.dumps(
{
"error": None,
"final": True,
"hypotheses": [{"confidence": 0.5, "transcript": ""}],
"status": "ok",
}
)
# process the final frame with the final transcript
recognizer(context, frame)
assert not context.transcript
assert context.confidence == 0.5
recognizer.close()
|
scripts/discourse_1291.py | gmatteo/awesome-panel | 179 | 11084475 | # imports
import pandas as pd
import panel as pn
pn.extension()
# initialize list of lists
data = [
["A", "January", 10],
["A", "February", 7],
["A", "March", 5],
["B", "January", 4],
["B", "February", 8],
["B", "March", 12],
]
# Create the pandas DataFrame
df = pd.DataFrame(data, columns=["Type", "Month", "Metric"])
# lists creation
type_list = ["A", "B"]
# widget creation
dd_types = pn.widgets.Select(name="Select the type to display report:", options=type_list)
# See https://pandas.pydata.org/pandas-docs/stable/user_guide/style.html
def custom_style(val):
color = "red" if val > 5 else "green"
return "color: %s" % color
# filter df function
def display_panel(value):
table = df[df.Type == value].pivot_table(values="Metric", index="Type", columns="Month")
styled_table = table.style.applymap(custom_style)
return pn.panel(styled_table)
# create relation between widgets
@pn.depends(dd_types)
def get_parameters(value):
return display_panel(value)
# arrange and correlate widgets and functions
report_area = pn.Row(dd_types, get_parameters)
report_area.show()
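# A hedged alternative to the blocking .show() call above: mark the layout as
# servable and run it with `panel serve scripts/discourse_1291.py` instead.
#
#     report_area.servable()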
|
src/betterproto/plugin/models.py | hsh-nids/python-betterproto | 708 | 11084477 | """Plugin model dataclasses.
These classes are meant to be an intermediate representation
of protobuf objects. They are used to organize the data collected during parsing.
The general intention is to create a doubly-linked tree-like structure
with the following types of references:
- Downwards references: from message -> fields, from output package -> messages
or from service -> service methods
- Upwards references: from field -> message, message -> package.
- Input/output message references: from a service method to it's corresponding
input/output messages, which may even be in another package.
There are convenience methods to allow climbing up and down this tree, for
example to retrieve the list of all messages that are in the same package as
the current message.
Most of these classes take as inputs:
- proto_obj: A reference to its corresponding protobuf object as
presented by the protoc plugin.
- parent: a reference to the parent object in the tree.
With this information, the class is able to expose attributes,
such as a pythonized name, that will be calculated from proto_obj.
The instantiation should also attach a reference to the new object
into the corresponding place within its parent object. For example,
instantiating field `A` with parent message `B` should add a
reference to `A` to `B`'s `fields` attribute.
"""
import betterproto
from betterproto import which_one_of
from betterproto.casing import sanitize_name
from betterproto.compile.importing import (
get_type_reference,
parse_source_type_name,
)
from betterproto.compile.naming import (
pythonize_class_name,
pythonize_field_name,
pythonize_method_name,
)
from betterproto.lib.google.protobuf import (
DescriptorProto,
EnumDescriptorProto,
FileDescriptorProto,
MethodDescriptorProto,
Field,
FieldDescriptorProto,
FieldDescriptorProtoType,
FieldDescriptorProtoLabel,
)
from betterproto.lib.google.protobuf.compiler import CodeGeneratorRequest
import re
import textwrap
from dataclasses import dataclass, field
from typing import Dict, Iterable, Iterator, List, Optional, Set, Text, Type, Union
import sys
from ..casing import sanitize_name
from ..compile.importing import get_type_reference, parse_source_type_name
from ..compile.naming import (
pythonize_class_name,
pythonize_field_name,
pythonize_method_name,
)
# Create a unique placeholder to deal with
# https://stackoverflow.com/questions/51575931/class-inheritance-in-python-3-7-dataclasses
PLACEHOLDER = object()
# Organize proto types into categories
PROTO_FLOAT_TYPES = (
FieldDescriptorProtoType.TYPE_DOUBLE, # 1
FieldDescriptorProtoType.TYPE_FLOAT, # 2
)
PROTO_INT_TYPES = (
FieldDescriptorProtoType.TYPE_INT64, # 3
FieldDescriptorProtoType.TYPE_UINT64, # 4
FieldDescriptorProtoType.TYPE_INT32, # 5
FieldDescriptorProtoType.TYPE_FIXED64, # 6
FieldDescriptorProtoType.TYPE_FIXED32, # 7
FieldDescriptorProtoType.TYPE_UINT32, # 13
FieldDescriptorProtoType.TYPE_SFIXED32, # 15
FieldDescriptorProtoType.TYPE_SFIXED64, # 16
FieldDescriptorProtoType.TYPE_SINT32, # 17
FieldDescriptorProtoType.TYPE_SINT64, # 18
)
PROTO_BOOL_TYPES = (FieldDescriptorProtoType.TYPE_BOOL,) # 8
PROTO_STR_TYPES = (FieldDescriptorProtoType.TYPE_STRING,) # 9
PROTO_BYTES_TYPES = (FieldDescriptorProtoType.TYPE_BYTES,) # 12
PROTO_MESSAGE_TYPES = (
FieldDescriptorProtoType.TYPE_MESSAGE, # 11
FieldDescriptorProtoType.TYPE_ENUM, # 14
)
PROTO_MAP_TYPES = (FieldDescriptorProtoType.TYPE_MESSAGE,) # 11
PROTO_PACKED_TYPES = (
FieldDescriptorProtoType.TYPE_DOUBLE, # 1
FieldDescriptorProtoType.TYPE_FLOAT, # 2
FieldDescriptorProtoType.TYPE_INT64, # 3
FieldDescriptorProtoType.TYPE_UINT64, # 4
FieldDescriptorProtoType.TYPE_INT32, # 5
FieldDescriptorProtoType.TYPE_FIXED64, # 6
FieldDescriptorProtoType.TYPE_FIXED32, # 7
FieldDescriptorProtoType.TYPE_BOOL, # 8
FieldDescriptorProtoType.TYPE_UINT32, # 13
FieldDescriptorProtoType.TYPE_SFIXED32, # 15
FieldDescriptorProtoType.TYPE_SFIXED64, # 16
FieldDescriptorProtoType.TYPE_SINT32, # 17
FieldDescriptorProtoType.TYPE_SINT64, # 18
)
def monkey_patch_oneof_index():
"""
The compiler message types are written for proto2, but we read them as proto3.
For this to work in the case of the oneof_index fields, which depend on being able
to tell whether they were set, we have to treat them as oneof fields. This method
monkey patches the generated classes after the fact to force this behaviour.
"""
object.__setattr__(
FieldDescriptorProto.__dataclass_fields__["oneof_index"].metadata[
"betterproto"
],
"group",
"oneof_index",
)
object.__setattr__(
Field.__dataclass_fields__["oneof_index"].metadata["betterproto"],
"group",
"oneof_index",
)
def get_comment(
proto_file: "FileDescriptorProto", path: List[int], indent: int = 4
) -> str:
pad = " " * indent
for sci_loc in proto_file.source_code_info.location:
if list(sci_loc.path) == path and sci_loc.leading_comments:
lines = textwrap.wrap(
sci_loc.leading_comments.strip().replace("\n", ""), width=79 - indent
)
if path[-2] == 2 and path[-4] != 6:
# This is a field
return f"{pad}# " + f"\n{pad}# ".join(lines)
else:
# This is a message, enum, service, or method
if len(lines) == 1 and len(lines[0]) < 79 - indent - 6:
lines[0] = lines[0].strip('"')
return f'{pad}"""{lines[0]}"""'
else:
joined = f"\n{pad}".join(lines)
return f'{pad}"""\n{pad}{joined}\n{pad}"""'
return ""
class ProtoContentBase:
"""Methods common to MessageCompiler, ServiceCompiler and ServiceMethodCompiler."""
source_file: FileDescriptorProto
path: List[int]
comment_indent: int = 4
parent: Union["betterproto.Message", "OutputTemplate"]
__dataclass_fields__: Dict[str, object]
def __post_init__(self) -> None:
"""Checks that no fake default fields were left as placeholders."""
for field_name, field_val in self.__dataclass_fields__.items():
if field_val is PLACEHOLDER:
raise ValueError(f"`{field_name}` is a required field.")
@property
def output_file(self) -> "OutputTemplate":
current = self
while not isinstance(current, OutputTemplate):
current = current.parent
return current
@property
def request(self) -> "PluginRequestCompiler":
current = self
while not isinstance(current, OutputTemplate):
current = current.parent
return current.parent_request
@property
def comment(self) -> str:
"""Crawl the proto source code and retrieve comments
for this object.
"""
return get_comment(
proto_file=self.source_file, path=self.path, indent=self.comment_indent
)
@dataclass
class PluginRequestCompiler:
plugin_request_obj: CodeGeneratorRequest
output_packages: Dict[str, "OutputTemplate"] = field(default_factory=dict)
@property
def all_messages(self) -> List["MessageCompiler"]:
"""All of the messages in this request.
Returns
-------
List[MessageCompiler]
List of all of the messages in this request.
"""
return [
msg for output in self.output_packages.values() for msg in output.messages
]
@dataclass
class OutputTemplate:
"""Representation of an output .py file.
Each output file corresponds to a .proto input file,
but may need references to other .proto files to be
built.
"""
parent_request: PluginRequestCompiler
package_proto_obj: FileDescriptorProto
input_files: List[str] = field(default_factory=list)
imports: Set[str] = field(default_factory=set)
datetime_imports: Set[str] = field(default_factory=set)
typing_imports: Set[str] = field(default_factory=set)
messages: List["MessageCompiler"] = field(default_factory=list)
enums: List["EnumDefinitionCompiler"] = field(default_factory=list)
services: List["ServiceCompiler"] = field(default_factory=list)
@property
def package(self) -> str:
"""Name of input package.
Returns
-------
str
Name of input package.
"""
return self.package_proto_obj.package
@property
def input_filenames(self) -> Iterable[str]:
"""Names of the input files used to build this output.
Returns
-------
Iterable[str]
Names of the input files used to build this output.
"""
return sorted(f.name for f in self.input_files)
@property
def python_module_imports(self) -> Set[str]:
imports = set()
if any(x for x in self.messages if any(x.deprecated_fields)):
imports.add("warnings")
return imports
@dataclass
class MessageCompiler(ProtoContentBase):
"""Representation of a protobuf message."""
source_file: FileDescriptorProto
parent: Union["MessageCompiler", OutputTemplate] = PLACEHOLDER
proto_obj: DescriptorProto = PLACEHOLDER
path: List[int] = PLACEHOLDER
fields: List[Union["FieldCompiler", "MessageCompiler"]] = field(
default_factory=list
)
deprecated: bool = field(default=False, init=False)
def __post_init__(self) -> None:
# Add message to output file
if isinstance(self.parent, OutputTemplate):
if isinstance(self, EnumDefinitionCompiler):
self.output_file.enums.append(self)
else:
self.output_file.messages.append(self)
self.deprecated = self.proto_obj.options.deprecated
super().__post_init__()
@property
def proto_name(self) -> str:
return self.proto_obj.name
@property
def py_name(self) -> str:
return pythonize_class_name(self.proto_name)
@property
def annotation(self) -> str:
if self.repeated:
return f"List[{self.py_name}]"
return self.py_name
@property
def deprecated_fields(self) -> Iterator[str]:
for f in self.fields:
if f.deprecated:
yield f.py_name
@property
def has_deprecated_fields(self) -> bool:
return any(self.deprecated_fields)
def is_map(
proto_field_obj: FieldDescriptorProto, parent_message: DescriptorProto
) -> bool:
"""True if proto_field_obj is a map, otherwise False."""
if proto_field_obj.type == FieldDescriptorProtoType.TYPE_MESSAGE:
# This might be a map...
message_type = proto_field_obj.type_name.split(".").pop().lower()
map_entry = f"{proto_field_obj.name.replace('_', '').lower()}entry"
if message_type == map_entry:
for nested in parent_message.nested_type: # parent message
if (
nested.name.replace("_", "").lower() == map_entry
and nested.options.map_entry
):
return True
return False
def is_oneof(proto_field_obj: FieldDescriptorProto) -> bool:
"""
True if proto_field_obj is a OneOf, otherwise False.
.. warning::
Because the message from protoc is defined in proto2, and betterproto works with
proto3, and interpreting the FieldDescriptorProto.oneof_index field requires
distinguishing between default and unset values (which proto3 doesn't support),
we have to hack the generated FieldDescriptorProto class for this to work.
The hack consists of setting group="oneof_index" in the field metadata,
essentially making oneof_index the sole member of a one_of group, which allows
us to tell whether it was set, via the which_one_of interface.
"""
return which_one_of(proto_field_obj, "oneof_index")[0] == "oneof_index"
@dataclass
class FieldCompiler(MessageCompiler):
parent: MessageCompiler = PLACEHOLDER
proto_obj: FieldDescriptorProto = PLACEHOLDER
def __post_init__(self) -> None:
# Add field to message
self.parent.fields.append(self)
# Check for new imports
self.add_imports_to(self.output_file)
super().__post_init__() # call FieldCompiler-> MessageCompiler __post_init__
def get_field_string(self, indent: int = 4) -> str:
"""Construct string representation of this field as a field."""
name = f"{self.py_name}"
annotations = f": {self.annotation}"
field_args = ", ".join(
([""] + self.betterproto_field_args) if self.betterproto_field_args else []
)
betterproto_field_type = (
f"betterproto.{self.field_type}_field({self.proto_obj.number}{field_args})"
)
return f"{name}{annotations} = {betterproto_field_type}"
@property
def betterproto_field_args(self) -> List[str]:
args = []
if self.field_wraps:
args.append(f"wraps={self.field_wraps}")
return args
@property
def datetime_imports(self) -> Set[str]:
imports = set()
annotation = self.annotation
# FIXME: false positives - e.g. `MyDatetimedelta`
if "timedelta" in annotation:
imports.add("timedelta")
if "datetime" in annotation:
imports.add("datetime")
return imports
@property
def typing_imports(self) -> Set[str]:
imports = set()
annotation = self.annotation
if "Optional[" in annotation:
imports.add("Optional")
if "List[" in annotation:
imports.add("List")
if "Dict[" in annotation:
imports.add("Dict")
return imports
def add_imports_to(self, output_file: OutputTemplate) -> None:
output_file.datetime_imports.update(self.datetime_imports)
output_file.typing_imports.update(self.typing_imports)
@property
def field_wraps(self) -> Optional[str]:
"""Returns betterproto wrapped field type or None."""
match_wrapper = re.match(
r"\.google\.protobuf\.(.+)Value$", self.proto_obj.type_name
)
if match_wrapper:
wrapped_type = "TYPE_" + match_wrapper.group(1).upper()
if hasattr(betterproto, wrapped_type):
return f"betterproto.{wrapped_type}"
return None
@property
def repeated(self) -> bool:
return (
self.proto_obj.label == FieldDescriptorProtoLabel.LABEL_REPEATED
and not is_map(self.proto_obj, self.parent)
)
@property
def mutable(self) -> bool:
"""True if the field is a mutable type, otherwise False."""
return self.annotation.startswith(("List[", "Dict["))
@property
def field_type(self) -> str:
"""String representation of proto field type."""
return (
FieldDescriptorProtoType(self.proto_obj.type)
.name.lower()
.replace("type_", "")
)
@property
def default_value_string(self) -> Union[Text, None, float, int]:
"""Python representation of the default proto value."""
if self.repeated:
return "[]"
if self.py_type == "int":
return "0"
if self.py_type == "float":
return "0.0"
elif self.py_type == "bool":
return "False"
elif self.py_type == "str":
return '""'
elif self.py_type == "bytes":
return 'b""'
else:
# Message type
return "None"
@property
def packed(self) -> bool:
"""True if the wire representation is a packed format."""
return self.repeated and self.proto_obj.type in PROTO_PACKED_TYPES
@property
def py_name(self) -> str:
"""Pythonized name."""
return pythonize_field_name(self.proto_name)
@property
def proto_name(self) -> str:
"""Original protobuf name."""
return self.proto_obj.name
@property
def py_type(self) -> str:
"""String representation of Python type."""
if self.proto_obj.type in PROTO_FLOAT_TYPES:
return "float"
elif self.proto_obj.type in PROTO_INT_TYPES:
return "int"
elif self.proto_obj.type in PROTO_BOOL_TYPES:
return "bool"
elif self.proto_obj.type in PROTO_STR_TYPES:
return "str"
elif self.proto_obj.type in PROTO_BYTES_TYPES:
return "bytes"
elif self.proto_obj.type in PROTO_MESSAGE_TYPES:
# Type referencing another defined Message or a named enum
return get_type_reference(
package=self.output_file.package,
imports=self.output_file.imports,
source_type=self.proto_obj.type_name,
)
else:
raise NotImplementedError(f"Unknown type {field.type}")
@property
def annotation(self) -> str:
if self.repeated:
return f"List[{self.py_type}]"
return self.py_type
@dataclass
class OneOfFieldCompiler(FieldCompiler):
@property
def betterproto_field_args(self) -> List[str]:
args = super().betterproto_field_args
group = self.parent.proto_obj.oneof_decl[self.proto_obj.oneof_index].name
args.append(f'group="{group}"')
return args
@dataclass
class MapEntryCompiler(FieldCompiler):
py_k_type: Type = PLACEHOLDER
py_v_type: Type = PLACEHOLDER
proto_k_type: str = PLACEHOLDER
proto_v_type: str = PLACEHOLDER
def __post_init__(self) -> None:
"""Explore nested types and set k_type and v_type if unset."""
map_entry = f"{self.proto_obj.name.replace('_', '').lower()}entry"
for nested in self.parent.proto_obj.nested_type:
if (
nested.name.replace("_", "").lower() == map_entry
and nested.options.map_entry
):
# Get Python types
self.py_k_type = FieldCompiler(
source_file=self.source_file,
parent=self,
proto_obj=nested.field[0], # key
).py_type
self.py_v_type = FieldCompiler(
source_file=self.source_file,
parent=self,
proto_obj=nested.field[1], # value
).py_type
# Get proto types
self.proto_k_type = FieldDescriptorProtoType(nested.field[0].type).name
self.proto_v_type = FieldDescriptorProtoType(nested.field[1].type).name
super().__post_init__() # call FieldCompiler-> MessageCompiler __post_init__
@property
def betterproto_field_args(self) -> List[str]:
return [f"betterproto.{self.proto_k_type}", f"betterproto.{self.proto_v_type}"]
@property
def field_type(self) -> str:
return "map"
@property
def annotation(self) -> str:
return f"Dict[{self.py_k_type}, {self.py_v_type}]"
@property
def repeated(self) -> bool:
return False # maps cannot be repeated
@dataclass
class EnumDefinitionCompiler(MessageCompiler):
"""Representation of a proto Enum definition."""
proto_obj: EnumDescriptorProto = PLACEHOLDER
entries: List["EnumDefinitionCompiler.EnumEntry"] = PLACEHOLDER
@dataclass(unsafe_hash=True)
class EnumEntry:
"""Representation of an Enum entry."""
name: str
value: int
comment: str
def __post_init__(self) -> None:
# Get entries/allowed values for this Enum
self.entries = [
self.EnumEntry(
name=sanitize_name(entry_proto_value.name),
value=entry_proto_value.number,
comment=get_comment(
proto_file=self.source_file, path=self.path + [2, entry_number]
),
)
for entry_number, entry_proto_value in enumerate(self.proto_obj.value)
]
super().__post_init__() # call MessageCompiler __post_init__
@property
def default_value_string(self) -> str:
"""Python representation of the default value for Enums.
As per the spec, this is the first value of the Enum.
"""
return str(self.entries[0].value) # ideally, should ALWAYS be int(0)!
@dataclass
class ServiceCompiler(ProtoContentBase):
parent: OutputTemplate = PLACEHOLDER
proto_obj: DescriptorProto = PLACEHOLDER
path: List[int] = PLACEHOLDER
methods: List["ServiceMethodCompiler"] = field(default_factory=list)
def __post_init__(self) -> None:
# Add service to output file
self.output_file.services.append(self)
self.output_file.typing_imports.add("Dict")
super().__post_init__() # check for unset fields
@property
def proto_name(self) -> str:
return self.proto_obj.name
@property
def py_name(self) -> str:
return pythonize_class_name(self.proto_name)
@dataclass
class ServiceMethodCompiler(ProtoContentBase):
parent: ServiceCompiler
proto_obj: MethodDescriptorProto
path: List[int] = PLACEHOLDER
comment_indent: int = 8
def __post_init__(self) -> None:
# Add method to service
self.parent.methods.append(self)
# Check for imports
if self.py_input_message:
for f in self.py_input_message.fields:
f.add_imports_to(self.output_file)
if "Optional" in self.py_output_message_type:
self.output_file.typing_imports.add("Optional")
self.mutable_default_args # ensure this is called before rendering
# Check for Async imports
if self.client_streaming:
self.output_file.typing_imports.add("AsyncIterable")
self.output_file.typing_imports.add("Iterable")
self.output_file.typing_imports.add("Union")
if self.server_streaming:
self.output_file.typing_imports.add("AsyncIterator")
super().__post_init__() # check for unset fields
@property
def mutable_default_args(self) -> Dict[str, str]:
"""Handle mutable default arguments.
Returns a mapping from argument name to default value (as a string)
for the arguments of this method whose default values are mutable.
The defaults are swapped out for None and replaced back inside
the method's body.
Reference:
https://docs.python-guide.org/writing/gotchas/#mutable-default-arguments
Returns
-------
Dict[str, str]
Name and actual default value (as a string)
for each argument with mutable default values.
"""
mutable_default_args = {}
if self.py_input_message:
for f in self.py_input_message.fields:
if (
not self.client_streaming
and f.default_value_string != "None"
and f.mutable
):
mutable_default_args[f.py_name] = f.default_value_string
self.output_file.typing_imports.add("Optional")
return mutable_default_args
@property
def py_name(self) -> str:
"""Pythonized method name."""
return pythonize_method_name(self.proto_obj.name)
@property
def proto_name(self) -> str:
"""Original protobuf name."""
return self.proto_obj.name
@property
def route(self) -> str:
package_part = (
f"{self.output_file.package}." if self.output_file.package else ""
)
return f"/{package_part}{self.parent.proto_name}/{self.proto_name}"
@property
def py_input_message(self) -> Optional[MessageCompiler]:
"""Find the input message object.
Returns
-------
Optional[MessageCompiler]
Method instance representing the input message.
If no input message could be found or there are no
input messages, None is returned.
"""
package, name = parse_source_type_name(self.proto_obj.input_type)
# Nested types are currently flattened without dots.
# Todo: keep a fully qualified name in types that is
# comparable with method.input_type
for msg in self.request.all_messages:
if (
msg.py_name == name.replace(".", "")
and msg.output_file.package == package
):
return msg
return None
@property
def py_input_message_type(self) -> str:
"""String representation of the Python type corresponding to the
input message.
Returns
-------
str
String representation of the Python type corresponding to the input message.
"""
return get_type_reference(
package=self.output_file.package,
imports=self.output_file.imports,
source_type=self.proto_obj.input_type,
).strip('"')
@property
def py_output_message_type(self) -> str:
"""String representation of the Python type corresponding to the
output message.
Returns
-------
str
String representation of the Python type corresponding to the output message.
"""
return get_type_reference(
package=self.output_file.package,
imports=self.output_file.imports,
source_type=self.proto_obj.output_type,
unwrap=False,
).strip('"')
@property
def client_streaming(self) -> bool:
return self.proto_obj.client_streaming
@property
def server_streaming(self) -> bool:
return self.proto_obj.server_streaming
|
ironic/common/nova.py | yanndegat/ironic | 350 | 11084518 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneauth1 import exceptions as kaexception
from oslo_log import log
from ironic.common import keystone
from ironic.common import states
from ironic.conf import CONF
LOG = log.getLogger(__name__)
NOVA_API_VERSION = "2.1"
NOVA_API_MICROVERSION = '2.76'
_NOVA_ADAPTER = None
def _get_nova_adapter():
global _NOVA_ADAPTER
if not _NOVA_ADAPTER:
_NOVA_ADAPTER = keystone.get_adapter(
'nova',
session=keystone.get_session('nova'),
auth=keystone.get_auth('nova'),
version=NOVA_API_VERSION)
return _NOVA_ADAPTER
def _get_power_update_event(server_uuid, target_power_state):
return {'name': 'power-update',
'server_uuid': server_uuid,
'tag': target_power_state}
def _send_event(context, event, api_version=None):
"""Sends an event to Nova conveying power state change.
:param context:
request context,
instance of ironic.common.context.RequestContext
:param event:
A "power-update" event for nova to act upon.
:param api_version:
api version of nova
:returns:
A boolean which indicates if the event was sent and received
successfully.
"""
try:
nova = _get_nova_adapter()
response = nova.post(
'/os-server-external-events', json={'events': [event]},
microversion=api_version, global_request_id=context.global_id,
raise_exc=False)
except kaexception.ClientException as ex:
LOG.warning('Could not connect to Nova to send a power notification, '
'please check configuration. %s', ex)
return False
try:
if response.status_code >= 400:
LOG.warning('Failed to notify nova on event: %s. %s.',
event, response.text)
return False
resp_event = response.json()['events'][0]
code = resp_event['code']
except Exception as e:
LOG.error('Invalid response %s returned from nova for power-update '
'event %s. %s.', response, event, e)
return False
if code >= 400:
LOG.warning('Nova event: %s returned with failed status.', resp_event)
return False
LOG.debug('Nova event response: %s.', resp_event)
return True
def power_update(context, server_uuid, target_power_state):
"""Creates and sends power state change for the provided server_uuid.
:param context:
request context,
instance of ironic.common.context.RequestContext
:param server_uuid:
The uuid of the node whose power state changed.
:param target_power_state:
Targeted power state change i.e "POWER_ON" or "POWER_OFF"
:returns:
A boolean which indicates if the power update was executed
successfully (mainly for testing purposes).
"""
if not CONF.nova.send_power_notifications:
return False
if target_power_state == states.POWER_ON:
target_power_state = "POWER_ON"
elif target_power_state == states.POWER_OFF:
target_power_state = "POWER_OFF"
else:
LOG.error('Invalid Power State %s.', target_power_state)
return False
event = _get_power_update_event(server_uuid, target_power_state)
result = _send_event(context, event, api_version=NOVA_API_MICROVERSION)
return result
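# Minimal usage sketch (not part of the original module; the context object and
# UUID below are hypothetical placeholders):
#
#   from ironic.common import context as ironic_context
#   from ironic.common import nova, states
#
#   ctx = ironic_context.RequestContext()
#   sent = nova.power_update(ctx, '11111111-2222-3333-4444-555555555555',
#                            states.POWER_ON)
#   # sent is True only if CONF.nova.send_power_notifications is enabled and
#   # Nova accepted the "power-update" external event.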
|
examples/bayesian_optimization.py | gautam1858/autograd | 6,119 | 11084521 | """This Bayesian optimization demo uses gradient-based optimization
to find the next query point."""
from __future__ import absolute_import
from __future__ import print_function
import matplotlib.pyplot as plt
import autograd.numpy as np
import autograd.numpy.random as npr
from autograd import value_and_grad
from scipy.optimize import minimize
from gaussian_process import make_gp_funs, rbf_covariance
from autograd.scipy.stats import norm
def probability_of_improvement(mean, std, max_so_far):
return norm.cdf(max_so_far, mean, std)
def expected_new_max(mean, std, max_so_far):
return max_so_far - \
(mean - max_so_far) * norm.cdf(mean, max_so_far, std) \
+ std * norm.pdf(mean, max_so_far, std)
def init_covariance_params(num_params):
return np.zeros(num_params)
def defaultmax(x, default=-np.inf):
if x.size == 0:
return default
return np.max(x)
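# Illustrative sketch (not in the original example): evaluating the acquisition
# helpers above on made-up posterior values; the numbers are assumptions, not
# model outputs.
#
#   mean, std, max_so_far = 0.4, 0.2, 0.5
#   probability_of_improvement(mean, std, max_so_far)  # Gaussian CDF of max_so_far under N(mean, std)
#   expected_new_max(mean, std, max_so_far)             # acquisition score used by bayesian_optimize below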
def bayesian_optimize(func, domain_min, domain_max, num_iters=20, callback=None):
D = len(domain_min)
num_params, predict, log_marginal_likelihood = \
make_gp_funs(rbf_covariance, num_cov_params=D + 1)
model_params = init_covariance_params(num_params)
def optimize_gp_params(init_params, X, y):
log_hyperprior = lambda params: np.sum(norm.logpdf(params, 0., 100.))
objective = lambda params: -log_marginal_likelihood(params, X, y) -log_hyperprior(params)
return minimize(value_and_grad(objective), init_params, jac=True, method='CG').x
def choose_next_point(domain_min, domain_max, acquisition_function, num_tries=15, rs=npr.RandomState(0)):
"""Uses gradient-based optimization to find next query point."""
init_points = rs.rand(num_tries, D) * (domain_max - domain_min) + domain_min
grad_obj = value_and_grad(lambda x: -acquisition_function(x))
def optimize_point(init_point):
print('.', end='')
result = minimize(grad_obj, x0=init_point, jac=True, method='L-BFGS-B',
options={'maxiter': 10}, bounds=list(zip(domain_min, domain_max)))
return result.x, acquisition_function(result.x)
        optimized_points, optimized_values = list(zip(*list(map(optimize_point, init_points))))
print()
best_ix = np.argmax(optimized_values)
        return np.atleast_2d(optimized_points[best_ix])
# Start by evaluating once in the middle of the domain.
X = np.zeros((0, D))
y = np.zeros((0))
X = np.concatenate((X, np.reshape((domain_max - domain_min) / 2.0, (D, 1))))
y = np.concatenate((y, np.reshape(np.array(func(X)), (1,))))
for i in range(num_iters):
if i > 1:
print("Optimizing model parameters...")
model_params = optimize_gp_params(model_params, X, y)
print("Choosing where to look next", end='')
def predict_func(xstar):
mean, cov = predict(model_params, X, y, xstar)
return mean, np.sqrt(np.diag(cov))
def acquisition_function(xstar):
xstar = np.atleast_2d(xstar) # To work around a bug in scipy.minimize
mean, std = predict_func(xstar)
return expected_new_max(mean, std, defaultmax(y))
next_point = choose_next_point(domain_min, domain_max, acquisition_function)
print("Evaluating expensive function...")
new_value = func(next_point)
X = np.concatenate((X, next_point))
y = np.concatenate((y, np.reshape(np.array(new_value), (1,))))
if callback:
callback(X, y, predict_func, acquisition_function, next_point, new_value)
best_ix = np.argmax(y)
return X[best_ix, :], y[best_ix]
if __name__ == '__main__':
def example_function(x):
return np.sum(x * np.sin(10.0*x) + x) - 1
domain_min = np.array([0.0])
domain_max = np.array([1.1])
# Set up figure.
fig = plt.figure(figsize=(12,8), facecolor='white')
ax = fig.add_subplot(111, frameon=False)
plt.show(block=False)
def callback(X, y, predict_func, acquisition_function, next_point, new_value):
plt.cla()
# Show posterior marginals.
plot_xs = np.reshape(np.linspace(domain_min, domain_max, 300), (300,1))
pred_mean, pred_std = predict_func(plot_xs)
ax.plot(plot_xs, pred_mean, 'b')
ax.fill(np.concatenate([plot_xs, plot_xs[::-1]]),
np.concatenate([pred_mean - 1.96 * pred_std,
(pred_mean + 1.96 * pred_std)[::-1]]),
alpha=.15, fc='Blue', ec='None')
ax.plot(X, y, 'kx')
ax.plot(next_point, new_value, 'ro')
alphas = acquisition_function(plot_xs)
ax.plot(plot_xs, alphas, 'r')
ax.set_ylim([-1.5, 1.5])
ax.set_xticks([])
ax.set_yticks([])
plt.draw()
plt.pause(1)
best_x, best_y = bayesian_optimize(example_function, domain_min, domain_max, callback=callback)
|
test/uscg_test.py | quiet-oceans/libais | 161 | 11084543 | <gh_stars>100-1000
"""Tests for ais.uscg."""
import unittest
from ais import uscg
METADATA_TYPES = r"""
# pylint: disable=line-too-long
# S
!SAVDM,1,1,1,A,13mHu<001Kr7KRN=oHCkLjj00<0P,0*29,d-98,S0,t000000.00,T00.004775,D07MN-KW-PRIBS1,1428796800
!SAVDM,1,1,7,B,18158j001ErSVGdE>>1Unlcp0l1n,0*44,d-105,S2235,t000059.00,T59.605270,D05MN-NC-MAMBS1,1428796799
# s
!AIVDM,1,1,,A,177jR48P00JrdqBH@CwBcOw`08N`,0*7F,s27064,d-106,t000113.82,T28.09677041,S1053,r003669945,1136498115
# d
!SAVDM,1,1,3,B,D03Owh0m9N>4gPfGLfpNfp0,2*1D,d-9,S1500,t000440.00,T40.004650,D08MN-NO-VENBS1,1428797080
!AIVDM,1,1,,B,8h3OdJQKT001GNQTp2V8QeQgR;Rt00,4*31,d-089,S2243,t235959.00,T59.81635087,r09SIGN1,1428796801
!SAVDM,1,1,8,A,15NC6HPP00JRh42E3a`LK?wn2<13,0*5F,d-108,S2236,t000059.00,T59.631854,D05MN-NC-MAMBS1,1428796799
# t
!SAVDM,1,1,9,A,15N4GOP000IS3iR@bPg:n`T00D3w,0*74,d-107,S2229,t235959.00,T59.445101,D08MN-NO-VENBS1,1428796799
!SAVDM,1,1,5,A,1E?GH@0022o4l9`JLk1SNRih0000,0*71,d-101,S2233,t000059.00,T59.552019,D13MN-CR-ELKBS1,1428796799
# T
!AIVDM,1,1,,B,4h3OdJQuu5ossIw1T`JFhg700p00,0*5F,d-088,S0000,t000000.00,T00.00303381,r09SIGN1,1428796801
!SAVDM,1,1,3,B,D03Owh0m9N>4gPfGLfpNfp0,2*1D,d-9,S1500,t000440.00,T40.004650,D08MN-NO-VENBS1,1428797080
# x
!AIVDM,1,1,,A,KnLBOtVM4EWJbNH,2*27,s22613,d-125,T48.04022115,x4928,r003669946,1218999946
$PSHI,001,47.04372373,0,02792,2I4$PSq:,0023$6`O,0$GPGGA,191948,4241.3391,N,07037.9474,W,$GP$,,x5675,r003669946,1219000825
$PRDCB,CONRPT,1428797525,D17MN-ANC-MMX1,172.16.31.10,3,0,*01
"""
SINGLE_LINE_MESSAGES = r"""
# pylint: disable=line-too-long
!SAVDM,1,1,7,B,18158j001ErSVGdE>>1Unlcp0l1n,0*44,d-105,S2235,t000059.00,T59.605270,D05MN-NC-MAMBS1,1428796799
!SAVDO,1,1,9,B,4h3Ovq1uu60INqoKkrEpsww020S:,0*42,b003669732,1428798330
!BSVDM,1,1,,A,23?v;T002:G2hUFKhOP:JpHD0D0@,0*2A,b003669974,1428969792
!ABVDM,1,1,,A,35NJ5OPP@9qPECnA:O?QDI`8015Q,0*0F,b003669955,1429041003
!BSVDO,1,1,,B,4h3OdJQuu5ossIw1T`JFhg700p00,0*44,b003665002,1428796801
!AIVDM,1,1,,B,74eGuSP0h=w0,0*50,rB0003160047,1428883425
!AIVDM,1,1,,B,:4eGcI1;EkKD,0*08,rB0003160047,1428890747
!AIVDM,1,1,,B,<27spkv6RCBfrql1sh,4*40,d-117,S1768,t004447.00,T47.16255138,r08NFTK1,1428885887
!AIVDM,1,1,,A,<so<bfHBtM:rkh,4*42,d-124,S0548,t042314.00,T14.62954173,r08XSEW1,1428898995
!AIVDM,1,1,,A,>b;7JGv9qqKuTw2G,0*5C,d-121,S0083,t230202.00,T02.2371735,r09SGHA1,1428899944
!SAVDO,1,1,,B,?03OwnRG`hh<D00,2*58,b003669978,1428885504
!BSVDM,1,1,,A,@h3OwphkwRq03h0000000000,0*50,b003669705,1428886986
!AIVDM,1,1,,B,B5MwIk00;VdW@M5grNOBKwtUkP06,0*5A,d-115,S2181,t233658.00,T58.160652,r003669930,1428883200
!AIVDM,1,1,,B,Dh3Owoib5N>5N4fGMMhNfp0,4*49,d-110,S0753,t001720.00,T20.079342,r003669930,1428885622
!BSVDM,1,1,,B,E>kb9Ma89h:9V@9Q@@@@@@@@@@@;W@2v=hvOP00003vP100,2*7D,b003669706,1428887581
!AIVDM,1,1,,A,F030owj2N2P6Ubib@=4q35b10000,0*58,rB0003160047,1428886673
!AIVDM,1,1,,A,Gh30ot3AW0jefS:9U2h00000;00,2*3B,rB0003160047,1428885144
!AIVDM,1,1,,B,H52Q5K@dDtpV222222222222220,2*45,d-084,S1704,t002745.00,T45.44369607,r11CSDO1,1428884866
!AIVDM,1,1,,B,IJwUuc?Ncj:B:5Qepm0,2*06,r17MAUM1,1428884214
!AIVDM,1,1,,B,Jq4luMRUIu2s@w0,2*01,r17MPYB1,1428886550
!SAVDM,1,1,,B,K8VSqb9LdU28WP>`,0*4D,b003669952,1428906229
"""
MULTI_LINE_MESSAGES = """
!SAVDM,2,1,3,A,55NPRF400001L@OC;K4pThE>1<PtDl4dE:22220O1@D3740HtEP000000000,0*0D,d-81,S2231,t040459.00,T59.498602,D08MN-MO-ROBBS1,1428796802
!SAVDM,2,2,3,A,00000000000,2*3D,d-81,S2231,t040459.00,T59.498602,D08MN-MO-ROBBS1,1428796802
!ANVDM,2,1,1,A,55NH7SP00001L@GOO7QHT=@u8T6222222222220O0000040Ht00000000000,0*06,r08ACERDC,1428798766
!ANVDM,2,2,1,A,00000000000,2*22,r08ACERDC,1428798766
!SAVDM,2,1,9,A,6h30ot1;EkJ0Ch82AP;H888852CH4p13kk88883<<TH0i<i=tk3=61KC<CO<,0*1B,d-92,S330,t004508.00,T08.804593,D09MN-BU-RIPBS1,1428799508
!SAVDM,2,2,9,A,lGAPW000000000,4*5C,d-92,S330,t004508.00,T08.804593,D09MN-BU-RIPBS1,1428799508
!SAVDM,2,1,9,A,6h30ot1;EkJ0Ch82AP;H888852CH4p13kk88883<<TH0i<i=tk3=61KC<CO<,0*1B,d-82,S330,t004508.00,T08.804427,D09MN-BU-EDEBS1,1428799508
!SAVDM,2,2,9,A,lGAPW000000000,4*5C,d-82,S330,t004508.00,T08.804427,D09MN-BU-EDEBS1,1428799508
!SAVDM,2,1,9,A,6h30ot1;EnRpCh82AP;H8888880mF@B3ihH8883EtDH21<i=tk3M61QC<CO<,0*46,d-92,S490,t004513.00,T13.071313,D09MN-BU-RIPBS1,1428799513
!SAVDM,2,2,9,A,n5AP`@00000000,4*6B,d-92,S490,t004513.00,T13.071313,D09MN-BU-RIPBS1,1428799513
!SAVDM,2,1,9,A,6h3Owji;EkJTFp82APc@C1kk@H3PERAhE3p8883C3lH49<i=tU8u6=5C<CO9,0*56,d-60,S340,t024509.00,T09.071138,D09MN-BU-SUNBS1,1428806709
!SAVDM,2,2,9,A,;5ASr@00000000,4*2F,d-60,S340,t024509.00,T09.071138,D09MN-BU-SUNBS1,1428806709
!SAVDM,2,1,6,A,85Mwp@QKfBcUKjRd>qgPw0SB5W<ujTiIusgwH=DSakg3Mhd0db4uL9Ha;DVu,0*16,d-78,S597,t000215.00,T15.924474,D07MN-JA-SAIBS1,1428796936
!SAVDM,2,2,6,A,:OSHBmF>@qos@VW1wsoBT0mIi0FBQNEpB`T,2*04,d-78,S597,t000215.00,T15.924474,D07MN-JA-SAIBS1,1428796936
!SAVDM,2,1,7,A,8@3QiWAKpAH042d2L6Nbd:U111101P0@8EkUS<6QhrQ?a20F0119PTW:@42V,0*29,d-88,S726,t000219.00,T19.364725,D13MN-CR-MEGBS1,1428796939
!SAVDM,2,2,7,A,2W9h@0H0445LoQA1`vPHCr@P,0*2F,d-88,S726,t000219.00,T19.364725,D13MN-CR-MEGBS1,1428796939
!SAVDM,2,1,4,A,8@3QiWAKpAH04:2V`NTB3JJHHg101P0@`EjFuP6W2JQ?a20F0139UPVWUPcP,0*3D,d-88,S743,t000219.00,T19.818064,D13MN-CR-MEGBS1,1428796940
!SAVDM,2,2,4,A,h@@@@0H04<5LcQd1aqI8Cr@P,0*7E,d-88,S743,t000219.00,T19.818064,D13MN-CR-MEGBS1,1428796940
!SAVDM,2,1,8,A,85PH6tAKfEoc0@DehR9FpGse2Rre`9?97tqBtBUh`lCwTnv:SEuK4FWw0<K:,0*5A,d-94,S774,t000220.00,T20.644616,D05MN-HR-MERBS1,1428796940
!SAVDM,2,2,8,A,3ovc?s7BFVm?Fjsadw9aLkH1Vfp``@nP9Q@,2*09,d-94,S774,t000220.00,T20.644616,D05MN-HR-MERBS1,1428796940
!SAVDM,2,1,1,B,85PH6tAKfEoc0@DehR9FpGse2Rre`9?97tqBtBUh`lCwTnv:SEuK4FWw0<K:,0*50,d-100,S823,t000221.00,T21.951383,D05MN-HR-FARBS1,1428796942
!SAVDM,2,2,1,B,3ovc?s7BFVm?Fjsadw9aLkH1Vfp``@nP9Q@,2*03,d-100,S823,t000221.00,T21.951383,D05MN-HR-FARBS1,1428796942
!AIVDM,2,1,5,A,85Np7J1Kf4j5lTUDTFNrtFc:0fM;quuUDBU6SV?dTI2,0*55,r17MKET3,1428796942
!AIVDM,2,2,5,A,0g=V9WToh,0*12,r17MKET3,1428796942
!AIVDM,2,1,4,A,85Np7J1Kf4j5lTUDTFNrtFc:0fM;quuUDBU6SV?dTI2,0*54,r17MKET1,1428796942
!AIVDM,2,2,4,A,0g=V9WToh,0*13,r17MKET1,1428796942
!SAVDM,2,1,1,B,8P3QiWAKpAH04>6H2`VNQ0VPBa101P0@pEitV06W7d1?a20F01440VVWW2FV,0*54,d-44,S846,t000222.00,T22.564474,D13MN-CR-MEGBS1,1428796942
!SAVDM,2,2,1,B,V6;h@0H04@5LRJ61ah:PCr@P,0*65,d-44,S846,t000222.00,T22.564474,D13MN-CR-MEGBS1,1428796942
!SAVDM,2,1,0,B,8P3QiWAKpAH04>6H2`VNQ0VPBa101P0@pEitV06W7d1?a20F01440VVWW2FV,0*55,d-91,S846,t000222.00,T22.564932,D13MN-CR-ELKBS1,1428796942
!SAVDM,2,2,0,B,V6;h@0H04@5LRJ61ah:PCr@P,0*64,d-91,S846,t000222.00,T22.564932,D13MN-CR-ELKBS1,1428796942
!SAVDM,2,1,9,B,8h3QiWAKpAH04>6H2`VNQ0VPBa101P0@pEitV06W7d1?a20F01440VVWW2FV,0*64,d-59,S868,t000223.00,T23.151189,D13MN-CR-KELBS1,1428796943
!SAVDM,2,2,9,B,V6;h@0H04@5LRJ61ah:PCr@P,0*6D,d-59,S868,t000223.00,T23.151189,D13MN-CR-KELBS1,1428796943
!SAVDM,2,1,9,B,8h3QiWAKpAH04>6H2`VNQ0VPBa101P0@pEitV06W7d1?a20F01440VVWW2FV,0*64,d-56,S868,t000223.00,T23.151273,D13MN-CR-ELKBS1,1428796943
!SAVDM,2,2,9,B,V6;h@0H04@5LRJ61ah:PCr@P,0*6D,d-56,S868,t000223.00,T23.151273,D13MN-CR-ELKBS1,1428796943
!SAVDM,2,1,1,A,85NQB:1Kf@BDjURLGvFQeWBwfiONL:oN4UP4ns8CwVO`lGOhJJwi:Oo?p1qt,0*79,d-48,S899,t000223.00,T23.977783,D13MN-PS-SEABS1,1428796944
!SAVDM,2,2,1,A,4Tmq43sTQCrRcW8gPMt6mea192P7RKObLmh,2*1C,d-48,S899,t000223.00,T23.977783,D13MN-PS-SEABS1,1428796944
!AIVDM,2,1,3,A,85MwpViKf3dH4vHepwg9M>6:6:>cfRuSE3tHD93grIp,0*7E,d-81,S0857,t233922.00,T22.852219,r003669930,1428796944
!AIVDM,2,2,3,A,3@D:5LNoU,0*15,d-81,S0857,t233922.00,T22.852219,r003669930,1428796944
!AIVDM,2,1,0,A,>MIv3elkWG;9M?vTgOao95OQgAva9qSSdfT2IkhFV5C,0*4F,d-124,S0665,t024017.00,T17.75172871,r09SIGN1,1428806419
!AIVDM,2,2,0,A,T,0*42,d-124,S0665,t024017.00,T17.75172871,r09SIGN1,1428806419
!AIVDM,2,1,7,B,>v4kNGwiaajsirqtCNngTaN4K7oAowu;Lk;c4Nl?bnH,0*4B,d-127,S1504,t045140.00,T40.111042,r003669930,1428815682
!AIVDM,2,2,7,B,M=BT1RnAq:;Ab4bMF@,2*74,d-127,S1504,t045140.00,T40.111042,r003669930,1428815682
!AIVDM,2,1,1,A,AMkeWOfOPl6q6tOsqjls;eg<wELn7Pn?uuw8GrA;2W`,0*00,r17MCHI1,1428813162
!AIVDM,2,2,1,A,G,0*50,r17MCHI1,1428813162
!AIVDM,2,1,1,A,ABo8USwc:g7>plwwrOmCb@OruNu>rHsocw6vB?`SwON,0*2C,r17MKET3,1428822381
!AIVDM,2,2,1,A,2,0*25,r17MKET3,1428822381
!AIVDM,2,1,0,B,C5NMbDQdD>KeM8VCcLdo8dv44T28V@2g0J6F::00000,0*6D,d-075,S1553,t023441.00,T41.42342273,r09STWO1,1428806089
!AIVDM,2,2,0,B,0J70<RRS0,0*37,d-075,S1553,t023441.00,T41.42342273,r09STWO1,1428806089
!AIVDM,2,1,7,A,C8gvgIhKqO5BvtMF:Fr1o?wswPeF8C;vM?usbbo6DtC,0*32,r17MWRA1,1428835629
!AIVDM,2,2,7,A,;BmWoasCLR7MbMKaL=coseoL,2*47,r17MWRA1,1428835629
!AIVDM,2,1,8,A,DmpwSacL?uRSO?t:IiDcO3U95ts3lOk?HHv?IwojkCU,0*35,d-130,S0921,t001424.00,T24.557781,r003669930,1428799045
!AIVDM,2,2,8,A,P,3*4D,d-130,S0921,t001424.00,T24.557781,r003669930,1428799045
!AIVDM,2,1,7,B,DvVqNKP1Mondj<lJAB2wdOaOtwuTccugOs?lO2:4MlV,0*20,d-129,S0401,t000110.00,T10.68684,r003669930,1428799152
!AIVDM,2,2,7,B,p,0*62,d-129,S0401,t000110.00,T10.68684,r003669930,1428799152
!AIVDM,2,1,5,A,DNSD;INlw7RBa2Mwb:FIJUT8vT<NqQIwroBnwTb3HvN,0*3D,d-127,S1215,t112232.00,T32.397243,r003669930,1428839132
!AIVDM,2,2,5,A,KlE:bTq3hS8P6j3roN;9A@GJ38t>A=4LWjgp,4*35,d-127,S1215,t112232.00,T32.397243,r003669930,1428839132
!AIVDM,2,1,9,B,ENk`sO70VQ97aRh1T@611@Hr@@@=FVj<;V5d@00003v,0*1D,d-101,S1635,r003669959,1428800479
!AIVDM,2,2,9,B,P000,2*7E,d-101,S1635,r003669959,1428800479
!AIVDM,2,1,5,A,E=Mk`t@:aQSh:2ab@1:WdhHHHP0=Jw6U;m3r0108880,0*55,d-91,S0101,t040602.00,T02.691849,r003669930,1428812944
!AIVDM,2,2,5,A,>20,2*2D,d-91,S0101,t040602.00,T02.691849,r003669930,1428812944
!AIVDM,2,1,2,B,Jlttij=1Ct=wG4AwOtgsotWssq6wi=D4:bnwwiqq>Ns,0*2E,r17MKET3,1428805696
!AIVDM,2,2,2,B,?OdL,2*4D,r17MKET3,1428805696
--
!AIVDM,2,1,9,A,Jr:wA3lmgDOFo7fTugFoBVb?tLJUepabWvKuuNcKCrS,0*77,r17MGAM1,1428807541
!AIVDM,2,2,9,A,npfqv;WWd7vewOw0,2*66,r17MGAM1,1428807541
--
!AIVDM,2,1,9,B,JOg<PB;Crvcg>sS9waBwfOlALLUv7ch7feOrq?6IfOL,0*69,d-108,S1447,t032538.00,T38.597771,r003669945,1428809138
!AIVDM,2,2,9,B,79I:UEltj,0*03,d-108,S1447,t032538.00,T38.597771,r003669945,1428809138
--
!AIVDM,2,1,1,B,JHVBNvfuurLe1GwwdJ77uOK6;erAo8g<9Q9t7PvmOoQ,0*52,d-125,S0729,t103019.00,T19.46385228,r09SMIL1,1428834621
!AIVDM,2,2,1,B,9TfsIG7?rftLr4S<HG<C;Cfgw3s;=wWagj:rn,0*3D,d-125,S0729,t103019.00,T19.46385228,r09SMIL1,1428834621
--
!AIVDM,2,1,5,A,JOU7TI4SNbClHt7ukEdIoQe9ESgEm?AfnBFB:<7pvVu,0*6C,r17MPYB1,1428846735
!AIVDM,2,2,5,A,fu;Rph0,4*45,r17MPYB1,1428846735
!ANVDM,3,1,3,A,8h30otA?0@55oUPPP121IoCol54cd1;ws;wwhhvmTP15oVD9e2B94oCPH54M,0*64,r08ACERDC,1428798685
!ANVDM,3,2,3,A,`45wshwwhdw1dP15oaPPP121IoCol54cd2UwrUwwhhvmTP15obD9e2B94oCP,0*50,r08ACERDC,1428798685
!ANVDM,3,3,3,A,H54M`4MwskwwhdvudP0,2*69,r08ACERDC,1428798685
!SAVDM,3,1,8,A,8h3OwjQKP@55o5D9e2B94oCPH54M`45wsiwwhkvqdP1608PPP121IoCol54c,0*2F,d-110,S1399,t003137.00,T37.311616,D09MN-BU-SUNBS1,1428798697
!SAVDM,3,2,8,A,d0MwskwwheviTP1608D9e2B94oCPH54M`4=wsdwwhbw5dP160:PPP121IoCo,0*67,d-110,S1399,t003137.00,T37.311616,D09MN-BU-SUNBS1,1428798697
!SAVDM,3,3,8,A,l54cd2ewrwwwhdviTP0,2*6E,d-110,S1399,t003137.00,T37.311616,D09MN-BU-SUNBS1,1428798697
!AIVDM,3,1,5,B,85MwpViKfGM3;LgDUfeqCt0uO6r>hhUP1UCmw9nNUAN,0*31,d-101,S0247,r003669959,1428797682
!AIVDM,3,2,5,B,:=TAfR65lA7oHU`sIuKPPFmsJNN2Vdrf3rLSCuFrqe`,0*16,d-101,S0247,r003669959,1428797682
!AIVDM,3,3,5,B,b`1QwIReh,2*11,d-101,S0247,r003669959,1428797682
!SAVDM,3,1,5,A,8h3OwjQKP@=60>PPPP<j>nsphTtHBR:1@0160>PPPP<iCnsm<4tPG29g@016,0*44,d-109,S1119,t001529.00,T29.844856,D09MN-BU-SUNBS1,1428797730
!SAVDM,3,2,5,A,0>PPPP<jCnssG4tCk2OT@0160>PPPP<i>nskl4tSp1lE@0160>PPP<pC5nsT,0*52,d-109,S1119,t001529.00,T29.844856,D09MN-BU-SUNBS1,1428797730
!SAVDM,3,3,5,A,`4r9d4@A@0160>PPPP<k>nsuPTt6m2Oa@00,2*4A,d-109,S1119,t001529.00,T29.844856,D09MN-BU-SUNBS1,1428797730
"""
class UscgRegexTest(unittest.TestCase):
def testComplete(self):
# pylint: disable=line-too-long
line = '!AIVDM,1,1,,B,15N1R5PP00rlhF0GQ8M00?wL28KP,0*4F,d-117,S1760,t205246.00,T46.932137,r003669930,1429046147'
expected = {
'counter': None,
'hour': '20',
'minute': '52',
'payload': '!AIVDM,1,1,,B,15N1R5PP00rlhF0GQ8M00?wL28KP,0*4F',
'receiver_time': '205246.00',
'rssi': None,
'second': '46.00',
'signal_strength': '-117',
'slot': '1760',
'station': 'r003669930',
'station_type': 'r',
'time': '1429046147',
'time_of_arrival': '46.932137',
'uscg_metadata': ',d-117,S1760,t205246.00,T46.932137,r003669930,1429046147'}
self.assertEqual(uscg.USCG_RE.match(line).groupdict(), expected)
def testSlot(self):
# pylint: disable=line-too-long
line = '!SAVDM,1,1,7,B,18158j001ErSVGdE>>1Unlcp0l1n,0*44,d-105,S2235,t000059.00,T59.605270,D05MN-NC-MAMBS1,1428796799'
self.assertEqual(uscg.USCG_RE.match(line).groupdict()['slot'], '2235')
def testLowerRssi(self):
# pylint: disable=line-too-long
line = '!AIVDM,1,1,,A,177jR48P00JrdqBH@CwBcOw`08N`,0*7F,s27064,d-106,t000113.82,T28.09677041,S1053,r003669945,1136498115'
self.assertEqual(uscg.USCG_RE.match(line).groupdict()['rssi'], '27064')
def testLowerSignalStrength(self):
lines = (
# pylint: disable=line-too-long
'!SAVDM,1,1,3,B,D03Owh0m9N>4gPfGLfpNfp0,2*1D,d-9,S1500,t000440.00,T40.004650,D08MN-NO-VENBS1,1428797080',
'!AIVDM,1,1,,B,8h3OdJQKT001GNQTp2V8QeQgR;Rt00,4*31,d-089,S2243,t235959.00,T59.81635087,r09SIGN1,1428796801',
'!SAVDM,1,1,8,A,15NC6HPP00JRh42E3a`LK?wn2<13,0*5F,d-108,S2236,t000059.00,T59.631854,D05MN-NC-MAMBS1,1428796799',
)
self.assertEqual(
uscg.USCG_RE.match(lines[0]).groupdict()['signal_strength'], '-9')
self.assertEqual(
uscg.USCG_RE.match(lines[1]).groupdict()['signal_strength'], '-089')
self.assertEqual(
uscg.USCG_RE.match(lines[2]).groupdict()['signal_strength'], '-108')
def testReceiverTime(self):
# pylint: disable=line-too-long
line = '!SAVDM,1,1,9,A,15N4GOP000IS3iR@bPg:n`T00D3w,0*74,d-107,S2229,t235959.00,T59.445101,D08MN-NO-VENBS1,1428796799'
result = uscg.USCG_RE.match(line).groupdict()
self.assertEqual(result['receiver_time'], '235959.00')
self.assertEqual(result['hour'], '23')
self.assertEqual(result['minute'], '59')
self.assertEqual(result['second'], '59.00')
def testTimeOfArrival(self):
# pylint: disable=line-too-long
line = '!SAVDM,1,1,8,A,15NC6HPP00JRh42E3a`LK?wn2<13,0*5F,d-108,S2236,t000059.00,T59.631854,D05MN-NC-MAMBS1,1428796799'
self.assertEqual(
uscg.USCG_RE.match(line).groupdict()['time_of_arrival'], '59.631854')
def testStationCounter(self):
# pylint: disable=line-too-long
line = '!AIVDM,1,1,,A,KnLBOtVM4EWJbNH,2*27,s22613,d-125,T48.04022115,x4928,r003669946,1218999946'
self.assertEqual(uscg.USCG_RE.match(line).groupdict()['counter'], '4928')
def testStation(self):
# pylint: disable=line-too-long
line = '$GPTXT,01,01,02,ANTSTATUS=OK*3B,rnrwais1,1241447967.14'
self.assertEqual(
uscg.USCG_RE.match(line).groupdict()['station'], 'rnrwais1')
self.assertEqual(
uscg.USCG_RE.match(line).groupdict()['station_type'], 'r')
line = '!AIVDM,1,1,,A,btKp7;kfb@J?rAvs,0*6B,s20552,d-123,T55.10376258,x373,R11SMOR1,1236152694'
self.assertEqual(
uscg.USCG_RE.match(line).groupdict()['station'], 'R11SMOR1')
line = '!BSVDO,1,1,,B,4h3OdJQuu5ossIw1T`JFhg700p00,0*44,b003665002,1428796801'
self.assertEqual(
uscg.USCG_RE.match(line).groupdict()['station'], 'b003665002')
line = '!AIVDM,3,3,4,A,PP3D<oPPEU;M418g@02PSpPPP2hlEoRQgU;j@17p@00,2*63,B0003160047,1064981014'
self.assertEqual(
uscg.USCG_RE.match(line).groupdict()['station'], 'B0003160047')
line = '!SAVDM,2,2,1,A,KKKKKKKKKK@,2*4F,d-84,S1688,t000245.00,T45.018254,D08MN-NO-BSABS1,1428969765'
self.assertEqual(
uscg.USCG_RE.match(line).groupdict()['station'], 'D08MN-NO-BSABS1')
def testTime(self):
line = '!AIVDM,1,1,,B,:3TsvT2G`mGL,0*5A,r00370003,1428890499'
self.assertEqual(uscg.USCG_RE.match(line).groupdict()['time'], '1428890499')
line = '$GPTXT,01,01,02,ANTSTATUS=OK*3B,rnrwais1,1241447967.14'
self.assertEqual(
uscg.USCG_RE.match(line).groupdict()['time'], '1241447967.14')
class ParseTest(unittest.TestCase):
def testSingleLine(self):
line = '!AIVDM,1,1,,B,=gLGu1tCm;vwvpwm,0*4C,r17MLAR1,1428885091'
metadata = uscg.Parse(line)
self.assertEqual(
metadata['payload'], '!AIVDM,1,1,,B,=gLGu1tCm;vwvpwm,0*4C')
self.assertEqual(metadata['station'], 'r17MLAR1')
self.assertEqual(metadata['time'], 1428885091)
class UsgsQueueTest(unittest.TestCase):
def setUp(self):
self.queue = uscg.UscgQueue()
def testPassThroughText(self):
lines = (
'',
'\n',
' \n',
' \n\r',
'# comment',
'$GPZDA,050004,29,11,2012,-5,00*', # No checksum.
'GPZDA,050013,29,11,2012,-5,00*5D', # No initial [$!].
)
for line_num, line in enumerate(lines):
self.queue.put(line)
self.assertEqual(self.queue.qsize(), 1)
expected = {
'line_nums': [line_num + 1],
'lines': [line.rstrip()]}
msg = self.queue.get()
self.assertEqual(msg, expected, 'pass through fail. %d "%s"\n %s != %s'
% (line_num, line, msg, expected))
self.assertEqual(self.queue.qsize(), 0)
def testSingleLine(self):
# pylint: disable=line-too-long
line = '!SAVDM,1,1,7,B,18158j001ErSVGdE>>1Unlcp0l1n,0*44,d-105,S2235,t000059.00,T59.605270,D05MN-NC-MAMBS1,1428796799'
# self.assertEqual(ais.decode('18158j001ErSVGdE>>1Unlcp0l1n'), {})
metadata = uscg.Parse(line)
self.assertEqual(
metadata,
{
'counter': None,
'hour': 0,
'minute': 0,
'payload': '!SAVDM,1,1,7,B,18158j001ErSVGdE>>1Unlcp0l1n,0*44',
'receiver_time': 59.00,
'rssi': None,
'second': 59.00,
'signal_strength': -105,
'slot': 2235,
'station': 'D05MN-NC-MAMBS1',
'station_type': 'D',
'time': 1428796799,
'time_of_arrival': 59.605270,
'uscg_metadata':
',d-105,S2235,t000059.00,T59.605270,D05MN-NC-MAMBS1,1428796799'})
def testSingleLineDecode(self):
line = '!SAVDM,1,1,,B,K8VSqb9LdU28WP8<,0*17,rMySat,1218999946'
self.queue.put(line)
self.assertEqual(self.queue.qsize(), 1)
msg = self.queue.get()
self.assertEqual(
msg,
{
'decoded': {
'cog': 131,
'gnss': True,
'id': 27,
'md5': '50898a3435865cf76f1b502b2821672b',
'mmsi': 577305000,
'nav_status': 5,
'position_accuracy': 1,
'raim': False,
'repeat_indicator': 0,
'sog': 0,
'spare': 0,
'x': -90.20666666666666,
'y': 29.145},
'line_nums': [None],
'lines': ['!SAVDM,1,1,,B,K8VSqb9LdU28WP8<,0*17,rMySat,1218999946'],
'matches': [{
'body': 'K8VSqb9LdU28WP8<',
'chan': 'B',
'checksum': '17',
'counter': None,
'fill_bits': 0,
'hour': None,
'minute': None,
'payload': '!SAVDM,1,1,,B,K8VSqb9LdU28WP8<,0*17',
'receiver_time': None,
'rssi': None,
'second': None,
'sen_num': 1,
'sen_tot': 1,
'seq_id': None,
'signal_strength': None,
'slot': None,
'station': 'rMySat',
'station_type': 'r',
'talker': 'SA',
'time': 1218999946,
'time_of_arrival': None,
'uscg_metadata': ',rMySat,1218999946',
'vdm': '!SAVDM,1,1,,B,K8VSqb9LdU28WP8<,0*17',
'vdm_type': 'VDM'}]})
def testManySingleLineMessages(self):
count = 0
for line in SINGLE_LINE_MESSAGES.split('\n'):
if not line.startswith('!'):
continue
count += 1
self.queue.put(line)
self.assertEqual(self.queue.qsize(), count, 'line failed: %s' % line)
msgs = []
while not self.queue.empty():
msg = self.queue.get()
msgs.append(msg)
self.assertIn('decoded', msg)
self.assertIn('line_nums', msg)
self.assertIn('lines', msg)
self.assertIn('matches', msg)
msg_ids = [msg['decoded']['id'] for msg in msgs]
self.assertEqual(
msg_ids,
[1, 4, 2, 3, 4, 7, 10, 12, 12, 14, 15, 16, 18, 20, 21, 22, 23, 24, 25,
26, 27])
def testOneMultiLineMessage(self):
lines = (
# pylint: disable=line-too-long
'!SAVDM,2,1,2,A,55@=M<02=rO7<Dm7B20EHE:1<4HEAV2222222217JQHQ:57c0Rk3lp0CQiC1,0*47,d-62,S2149,t135257.00,T57.311866,D11MN-LA-CATBS1,1429278777',
'!SAVDM,2,2,2,A,Dp888888880,2*08,d-62,S2149,t135257.00,T57.311866,D11MN-LA-CATBS1,1429278777'
)
self.queue.put(lines[0])
self.assertEqual(self.queue.qsize(), 0)
self.queue.put(lines[1])
self.assertEqual(self.queue.qsize(), 1)
msg = self.queue.get()
self.assertEqual(
msg,
{
# pylint: disable=line-too-long
'decoded': {
'ais_version': 0,
'callsign': '3EMQ4 ',
'destination': 'LOS ANGELES ',
'dim_a': 212,
'dim_b': 88,
'dim_c': 33,
'dim_d': 10,
'draught': 13.899999618530273,
'dte': 0,
'eta_day': 15,
'eta_hour': 11,
'eta_minute': 0,
'eta_month': 4,
'fix_type': 1,
'id': 5,
'imo_num': 9300465,
'md5': '3ce09e77864abc4b9766573e1de13c3c',
'mmsi': 352542000,
'name': '<NAME>AFETY ',
'repeat_indicator': 0,
'spare': 0,
'type_and_cargo': 71},
'line_nums': [1, 2],
'lines': [
'!SAVDM,2,1,2,A,55@=M<02=rO7<Dm7B20EHE:1<4HEAV2222222217JQHQ:57c0Rk3lp0CQiC1,0*47,d-62,S2149,t135257.00,T57.311866,D11MN-LA-CATBS1,1429278777',
'!SAVDM,2,2,2,A,Dp888888880,2*08,d-62,S2149,t135257.00,T57.311866,D11MN-LA-CATBS1,1429278777'],
'matches': [
{
'body': '55@=M<02=rO7<Dm7B20EHE:1<4HEAV2222222217JQHQ:57c0Rk3lp0CQiC1',
'chan': 'A',
'checksum': '47',
'counter': None,
'fill_bits': 0,
'hour': 13,
'minute': 52,
'payload': '!SAVDM,2,1,2,A,55@=M<02=rO7<Dm7B20EHE:1<4HEAV2222222217JQHQ:57c0Rk3lp0CQiC1,0*47',
'receiver_time': 135257.00,
'rssi': None,
'second': 57.0,
'sen_num': 1,
'sen_tot': 2,
'seq_id': 2,
'signal_strength': -62,
'slot': 2149,
'station': 'D11MN-LA-CATBS1',
'station_type': 'D',
'talker': 'SA',
'time': 1429278777,
'time_of_arrival': 57.311866,
'uscg_metadata': ',d-62,S2149,t135257.00,T57.311866,D11MN-LA-CATBS1,1429278777',
'vdm': '!SAVDM,2,1,2,A,55@=M<02=rO7<Dm7B20EHE:1<4HEAV2222222217JQHQ:57c0Rk3lp0CQiC1,0*47',
'vdm_type': 'VDM'},
{
'body': 'Dp888888880',
'chan': 'A',
'checksum': '08',
'counter': None,
'fill_bits': 2,
'hour': 13,
'minute': 52,
'payload': '!SAVDM,2,2,2,A,Dp888888880,2*08',
'receiver_time': 135257.00,
'rssi': None,
'second': 57.00,
'sen_num': 2,
'sen_tot': 2,
'seq_id': 2,
'signal_strength': -62,
'slot': 2149,
'station': 'D11MN-LA-CATBS1',
'station_type': 'D',
'talker': 'SA',
'time': 1429278777,
'time_of_arrival': 57.311866,
'uscg_metadata': ',d-62,S2149,t135257.00,T57.311866,D11MN-LA-CATBS1,1429278777',
'vdm': '!SAVDM,2,2,2,A,Dp888888880,2*08',
'vdm_type': 'VDM'}],
'times': [1429278777, 1429278777]})
if __name__ == '__main__':
unittest.main()
|
examples/legacy/seq2seq/convert_model_to_fp16.py | liminghao1630/transformers | 50,404 | 11084548 | <reponame>liminghao1630/transformers
#!/usr/bin/env python
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
"""Convert a pytorch_model.bin or model.pt file to torch.float16 for faster downloads, less disk space."""
state_dict = torch.load(src_path, map_location=map_location)
for k, v in tqdm(state_dict.items()):
if not isinstance(v, torch.Tensor):
raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
state_dict[k] = v.half()
if save_path is None: # overwrite src_path
save_path = src_path
torch.save(state_dict, save_path)
if __name__ == "__main__":
fire.Fire(convert)
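# Example invocation (hypothetical paths), since the script is driven by fire:
#
#   python convert_model_to_fp16.py /path/to/pytorch_model.bin --save_path /path/to/pytorch_model.fp16.bin
#
# Omitting --save_path overwrites the source checkpoint in place with fp16 weights.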
|
src/probnum/quad/_integration_measures.py | christopheroates/probnum | 226 | 11084618 | """Contains integration measures."""
import abc
from typing import Optional, Tuple, Union
import numpy as np
import scipy.stats
from probnum.randvars import Normal
from probnum.typing import FloatArgType, IntArgType
class IntegrationMeasure(abc.ABC):
"""An abstract class for a measure against which a target function is integrated.
Child classes implement specific integration measures and, if available, make use
of random variables for sampling and evaluation of the density function.
Parameters
----------
dim :
Dimension of the integration domain.
domain :
Tuple which contains two arrays which define the start and end points,
respectively, of the rectangular integration domain.
"""
def __init__(
self,
dim: IntArgType,
domain: Tuple[Union[np.ndarray, FloatArgType], Union[np.ndarray, FloatArgType]],
) -> None:
self._set_dimension_domain(dim, domain)
def __call__(self, points: Union[float, np.floating, np.ndarray]) -> np.ndarray:
"""Evaluate the density function of the integration measure.
Parameters
----------
points :
*shape=(n_points,) or (n_points, dim)* -- Input locations.
Returns
-------
density_evals :
*shape=(n_points,)* -- Density evaluated at given locations.
"""
# pylint: disable=no-member
return self.random_variable.pdf(points).squeeze()
def sample(
self,
rng: np.random.Generator,
n_sample: IntArgType,
) -> np.ndarray:
"""Sample ``n_sample`` points from the integration measure.
Parameters
----------
rng :
Random number generator
n_sample :
Number of points to be sampled
Returns
-------
points :
*shape=(n_sample,) or (n_sample,dim)* -- Sampled points
"""
# pylint: disable=no-member
return np.reshape(
self.random_variable.sample(rng=rng, size=n_sample),
newshape=(n_sample, self.dim),
)
def _set_dimension_domain(
self,
dim: IntArgType,
domain: Tuple[Union[np.ndarray, FloatArgType], Union[np.ndarray, FloatArgType]],
) -> None:
"""Sets the integration domain and dimension.
The following logic is used to set the domain and dimension:
1. If ``dim`` is not given (``dim == None``):
1a. If either ``domain[0]`` or ``domain[1]`` is a scalar, the dimension
is set as the maximum of their lengths and the scalar is expanded to
a constant vector.
1b. Otherwise, if the ``domain[0]`` and ``domain[1]`` are not of equal
length, an error is raised.
2. If ``dim`` is given:
2a. If both ``domain[0]`` and ``domain[1]`` are scalars, they are
expanded to constant vectors of length ``dim``.
        2b. If only one of ``domain[0]`` and ``domain[1]`` is a scalar and the
length of the other equals ``dim``, the scalar one is expanded to a
constant vector of length ``dim``.
2c. Otherwise, if neither of ``domain[0]`` and ``domain[1]`` is a
scalar, error is raised if either of them has length which does not
equal ``dim``.
"""
domain_a_dim = np.size(domain[0])
domain_b_dim = np.size(domain[1])
# Check that given dimensions match and are positive
dim_mismatch = False
if dim is None:
if domain_a_dim == domain_b_dim:
dim = domain_a_dim
elif domain_a_dim == 1 or domain_b_dim == 1:
dim = np.max([domain_a_dim, domain_b_dim])
else:
dim_mismatch = True
else:
if (domain_a_dim > 1 or domain_b_dim > 1) and dim != np.max(
[domain_a_dim, domain_b_dim]
):
dim_mismatch = True
if dim_mismatch:
raise ValueError(
"Domain limits must have the same length or at least "
"one of them has to be one-dimensional."
)
if dim < 1:
raise ValueError(f"Domain dimension dim = {dim} must be positive.")
# Use same domain limit in all dimensions if only one limit is given
if domain_a_dim == 1:
domain_a = np.full((dim,), domain[0])
else:
domain_a = domain[0]
if domain_b_dim == 1:
domain_b = np.full((dim,), domain[1])
else:
domain_b = domain[1]
# Check that the domain is non-empty
if not np.all(domain_a < domain_b):
raise ValueError("Domain must be non-empty.")
self.dim = dim
self.domain = (domain_a, domain_b)
class LebesgueMeasure(IntegrationMeasure):
"""Lebesgue measure on a hyper-rectangle.
Parameters
----------
dim :
Dimension of the integration domain
domain :
Tuple which contains two arrays which define the start and end points,
respectively, of the rectangular integration domain.
normalized :
Boolean which controls whether or not the measure is normalized (i.e.,
integral over the domain is one).
"""
def __init__(
self,
domain: Tuple[Union[np.ndarray, FloatArgType], Union[np.ndarray, FloatArgType]],
dim: Optional[IntArgType] = None,
normalized: Optional[bool] = False,
) -> None:
super().__init__(dim=dim, domain=domain)
# Set normalization constant
self.normalized = normalized
if self.normalized:
self.normalization_constant = 1.0 / np.prod(self.domain[1] - self.domain[0])
else:
self.normalization_constant = 1.0
if self.normalization_constant in [0, np.Inf, -np.Inf]:
raise ValueError(
"Normalization constant is too small or too large. "
"Consider setting normalized = False."
)
# Use scipy's uniform random variable since uniform random variables are not
# yet implemented in probnum
self.random_variable = scipy.stats.uniform(
loc=self.domain[0], scale=self.domain[1] - self.domain[0]
)
def __call__(self, points: Union[float, np.floating, np.ndarray]) -> np.ndarray:
num_dat = np.atleast_1d(points).shape[0]
return np.full(() if num_dat == 1 else (num_dat,), self.normalization_constant)
def sample(
self,
rng: np.random.Generator,
n_sample: IntArgType,
) -> np.ndarray:
return self.random_variable.rvs(size=(n_sample, self.dim), random_state=rng)
# pylint: disable=too-few-public-methods
class GaussianMeasure(IntegrationMeasure):
"""Gaussian measure on Euclidean space with given mean and covariance.
If ``mean`` and ``cov`` are scalars but ``dim`` is larger than one, ``mean`` and
``cov`` are extended to a constant vector and diagonal matrix, respectively,
of appropriate dimensions.
Parameters
----------
mean :
*shape=(dim,)* -- Mean of the Gaussian measure.
cov :
*shape=(dim, dim)* -- Covariance matrix of the Gaussian measure.
dim :
Dimension of the integration domain.
"""
def __init__(
self,
mean: Union[float, np.floating, np.ndarray],
cov: Union[float, np.floating, np.ndarray],
dim: Optional[IntArgType] = None,
) -> None:
# Extend scalar mean and covariance to higher dimensions if dim has been
# supplied by the user
# pylint: disable=fixme
# TODO: This needs to be modified to account for cases where only either the
# mean or covariance is given in scalar form
if (
(np.isscalar(mean) or mean.size == 1)
and (np.isscalar(cov) or cov.size == 1)
and dim is not None
):
mean = np.full((dim,), mean)
cov = cov * np.eye(dim)
# Set dimension based on the mean vector
if np.isscalar(mean):
dim = 1
else:
dim = mean.size
# If cov has been given as a vector of variances, transform to diagonal matrix
if isinstance(cov, np.ndarray) and np.squeeze(cov).ndim == 1 and dim > 1:
cov = np.diag(np.squeeze(cov))
# Exploit random variables to carry out mean and covariance checks
self.random_variable = Normal(mean=np.squeeze(mean), cov=np.squeeze(cov))
self.mean = self.random_variable.mean
self.cov = self.random_variable.cov
# Set diagonal_covariance flag
if dim == 1:
self.diagonal_covariance = True
else:
self.diagonal_covariance = (
np.count_nonzero(self.cov - np.diag(np.diagonal(self.cov))) == 0
)
super().__init__(
dim=dim,
domain=(np.full((dim,), -np.Inf), np.full((dim,), np.Inf)),
)
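# Minimal usage sketch (assumed, not part of the module): a 2-D standard normal
# measure sampled and evaluated at a few points.
#
#   rng = np.random.default_rng(0)
#   measure = GaussianMeasure(mean=0.0, cov=1.0, dim=2)
#   xs = measure.sample(rng, n_sample=5)   # shape (5, 2)
#   densities = measure(xs)                # shape (5,)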
|
robel/scripts/reset_hardware.py | Del9fina/robel | 109 | 11084654 | <gh_stars>100-1000
# Copyright 2019 The ROBEL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to test resetting robot hardware environments.
To run:
python -m robel.scripts.reset_hardware \
-e DKittyWalkFixed-v0 -d /dev/ttyUSB0
"""
import argparse
import logging
import time
import gym
import robel
from robel.scripts.utils import parse_env_args
def main():
# Get command line arguments.
parser = argparse.ArgumentParser()
parser.add_argument(
'-n',
'--num_repeats',
type=int,
default=1,
help='The number of resets to perform.')
env_id, params, args = parse_env_args(parser)
# Show INFO-level logs.
logging.basicConfig(level=logging.INFO)
# Create the environment and get the robot component.
robel.set_env_params(env_id, params)
env = gym.make(env_id).unwrapped
assert env.robot.is_hardware
for i in range(args.num_repeats):
print('Starting reset #{}'.format(i))
# Disengage all of the motors and let the dkitty fall.
env.robot.set_motors_engaged(None, engaged=False)
print('Place the robot to a starting position.')
input('Press Enter to start the reset...')
# Start with all motors engaged.
env.robot.set_motors_engaged(None, engaged=True)
env.reset()
print('Done reset! Turning off the robot in a few seconds.')
time.sleep(2)
if __name__ == '__main__':
main()
|
third_party/inspector_protocol/concatenate_protocols.py | RiyoCoder/v8 | 5,964 | 11084660 | #!/usr/bin/env python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os.path
import sys
try:
import json
except ImportError:
import simplejson as json
import pdl
def main(argv):
if len(argv) < 1:
sys.stderr.write("Usage: %s <protocol-1> [<protocol-2> [, <protocol-3>...]] <output-file>\n" % sys.argv[0])
return 1
domains = []
version = None
for protocol in argv[:-1]:
file_name = os.path.normpath(protocol)
if not os.path.isfile(file_name):
sys.stderr.write("Cannot find %s\n" % file_name)
return 1
input_file = open(file_name, "r")
parsed_json = pdl.loads(input_file.read(), file_name)
domains += parsed_json["domains"]
version = parsed_json["version"]
output_file = open(argv[-1], "w")
json.dump({"version": version, "domains": domains}, output_file, indent=4, sort_keys=False, separators=(',', ': '))
output_file.close()
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
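# Example invocation (hypothetical file names): merge two protocol definitions
# into a single JSON file, the last argument being the output path.
#
#   python concatenate_protocols.py js_protocol.pdl browser_protocol.pdl protocol.json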
|
tests/test_check_playbook_file_removed_and_added.py | deperrone/content | 1,138 | 11084701 | <filename>tests/test_check_playbook_file_removed_and_added.py<gh_stars>1000+
import os
import pytest
from .test_ansible_file_removed_and_added import check_playbook_file_removed_and_added
def test_file_removed_and_added():
playbook_path = os.path.join(os.path.dirname(__file__),
"ansible_file_removed_and_added",
"file_removed_and_added.yml")
assert not check_playbook_file_removed_and_added(playbook_path)
def test_file_removed_and_not_added():
playbook_path = os.path.join(os.path.dirname(__file__),
"ansible_file_removed_and_added",
"file_removed_and_not_added.yml")
assert check_playbook_file_removed_and_added(playbook_path)
def test_file_not_removed_and_added():
playbook_path = os.path.join(os.path.dirname(__file__),
"ansible_file_removed_and_added",
"file_not_removed_and_added.yml")
assert check_playbook_file_removed_and_added(playbook_path)
def test_file_block_removed_and_added():
playbook_path = os.path.join(os.path.dirname(__file__),
"ansible_file_removed_and_added",
"file_block_removed_and_added.yml")
assert not check_playbook_file_removed_and_added(playbook_path)
def test_file_block_removed_and_not_added():
playbook_path = os.path.join(os.path.dirname(__file__),
"ansible_file_removed_and_added",
"file_block_removed_and_not_added.yml")
assert check_playbook_file_removed_and_added(playbook_path)
|
89_Tic-Tac-Toe/python/tictactoe2.py | jcoehoorn/basic-computer-games | 7,880 | 11084705 | <reponame>jcoehoorn/basic-computer-games<gh_stars>1000+
#!/usr/bin/env python3
from enum import Enum
class OccupiedBy(Enum):
COMPUTER=-1
EMPTY=0
PLAYER=1
class Winner(Enum):
NONE=0
COMPUTER=1
PLAYER=2
DRAW=3
class Space(Enum):
TOP_LEFT = 0
TOP_CENTER = 1
TOP_RIGHT = 2
MID_LEFT = 3
MID_CENTER = 4
MID_RIGHT = 5
BOT_LEFT = 6
BOT_CENTER = 7
BOT_RIGHT = 8
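# Note: the line_* and think helpers below are a partial port of the original
# BASIC line-numbered logic; play() does not call them and instead uses the
# simpler computer_think strategy further down.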
def line_170(board, g, h, j, k):
    if g == OccupiedBy.PLAYER:
        if board[Space.MID_CENTER.value] == g:
            if board[Space.TOP_RIGHT.value] == g and board[Space.BOT_LEFT.value] is OccupiedBy.EMPTY:  # Line 171
                return Space.BOT_LEFT  # Line 187
            elif board[Space.BOT_RIGHT.value] == g and board[Space.TOP_LEFT.value] is OccupiedBy.EMPTY:  # Line 172
                return Space.TOP_LEFT  # Line 181
            elif board[Space.BOT_LEFT.value] == g and board[Space.TOP_RIGHT.value] is OccupiedBy.EMPTY:  # Line 173
                return Space.TOP_RIGHT  # Line 183
            elif board[Space.BOT_RIGHT.value] is OccupiedBy.PLAYER and board[Space.TOP_RIGHT.value] is OccupiedBy.EMPTY:  # Line 174
                return Space.TOP_RIGHT  # Line 189
elif g is OccupiedBy.COMPUTER:
g = OccupiedBy.PLAYER
h = OccupiedBy.COMPUTER
        return line_118(board, g, h)
def line_150(board, g, h, j, k):
if board[k] != g: # line 150
if (board[k] == h # line 160
or board[k + 6] != g # line 161
or board[k + 3] != g): # line 162
return -1 # Goto 170
else:
return k + 3 # Line 163
elif board[k + 6] != g: # line 152
if board[k + 6] != 0 or board[k + 3] != g: # line 165
return -1 # Goto 170
elif board[k + 3]: # line 156
        return -1
return k + 6
def line_120(board, g, h, j, k):
if board[j] != g:
if board[j] == h or board[j+2] != g or board[j+1] != g:
if board[k] != g:
if board[k + 6] != g and (board[k + 6] != 0 or board[k+3] != g):
# 450 IF G=1 THEN 465
pass
elif board[j + 2] is not g: # Line 122
pass
elif board[j + 1] is not OccupiedBy.EMPTY:
pass
def line_118(board, g, h):
for j in range(7):
for k in range (3):
return line_120(board, g, h, j, k)
def think(board, g, h, moves):
    if board[Space.MID_CENTER.value] is OccupiedBy.EMPTY:
        return Space.MID_CENTER
    if board[Space.MID_CENTER.value] is OccupiedBy.PLAYER:
        if board[Space.TOP_CENTER.value] is OccupiedBy.PLAYER and board[Space.TOP_LEFT.value] is OccupiedBy.EMPTY \
           or board[Space.MID_LEFT.value] is OccupiedBy.PLAYER and board[Space.TOP_LEFT.value] is OccupiedBy.EMPTY:
            return Space.BOT_LEFT
        elif board[Space.MID_RIGHT.value] is OccupiedBy.PLAYER and board[Space.BOT_RIGHT.value] is OccupiedBy.EMPTY \
             or board[Space.BOT_CENTER.value] is OccupiedBy.PLAYER and board[Space.BOT_RIGHT.value] is OccupiedBy.EMPTY:
            return Space.BOT_RIGHT
if g == OccupiedBy.PLAYER:
j = 3 * int((moves-1) / 3)
        if moves == j + 1:
            k = 1
        if moves == j + 2:
            k = 2
        if moves == j + 3:
            k = 3
return subthink(g, h, j, k)
def render_board(board, space_mapping):
vertical_divider = '!'
horizontal_divider = '---+---+---'
lines = []
lines.append(vertical_divider.join(space_mapping[space] for space in board[0:3]))
lines.append(horizontal_divider)
lines.append(vertical_divider.join(space_mapping[space] for space in board[3:6]))
lines.append(horizontal_divider)
lines.append(vertical_divider.join(space_mapping[space] for space in board[6:9]))
return '\n'.join(lines)
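# Illustrative output of render_board (roughly; spacing depends on space_mapping),
# with X in the centre square:
#
#      !   !
#   ---+---+---
#      ! X !
#   ---+---+---
#      !   !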
def determine_winner(board, g):
# Check for matching horizontal lines
for i in range(Space.TOP_LEFT.value, Space.BOT_LEFT.value + 1, 3): # Line 1095
if board[i] != board[i+1] or board[i] != board[i+2]: # Lines 1100 and 1105
continue # First third of Line 1115
elif board[i] == OccupiedBy.COMPUTER: #
return Winner.COMPUTER
elif board[i] == OccupiedBy.PLAYER:
return Winner.PLAYER
# Check for matching vertical lines
for i in range(Space.TOP_LEFT.value, Space.TOP_RIGHT.value + 1, 1): # Second third of Line 1115
if board[i] != board[i+3] or board[i] != board[i+6]: # Last third of Line 1115
continue # First third of 1150
elif board[i] == OccupiedBy.COMPUTER: # Line 1135
return Winner.COMPUTER
elif board[i] == OccupiedBy.PLAYER: # Line 1137
return Winner.PLAYER
# Check diagonals
if any(space is OccupiedBy.EMPTY for space in board):
if board[Space.MID_CENTER.value] != g:
return Winner.NONE
elif (board[Space.TOP_LEFT.value] == g and board[Space.BOT_RIGHT.value] == g) or \
(board[Space.BOT_LEFT.value] == g and board[Space.TOP_RIGHT.value] == g):
return Winner.COMPUTER if g is OccupiedBy.COMPUTER else Winner.PLAYER
else:
return Winner.NONE
return Winner.DRAW
def computer_think(board):
empty_indices = [index for index, space in enumerate(board)
if space is OccupiedBy.EMPTY]
return empty_indices[0]
def prompt_player(board):
while True:
move = int(input("\nWHERE DO YOU MOVE? "))
if move == 0:
return 0
if move > 9 or board[move - 1] is not OccupiedBy.EMPTY:
print("THAT SQUARE IS OCCUPIED.\n\n")
continue
return move
def play():
print(' '*30 + 'TIC-TAC-TOE')
print(' '*15 + 'CREATIVE COMPUTING MORRISTOWN, NEW JERSEY')
print('\n\n')
print('THE BOARD IS NUMBERED:')
print(' 1 2 3')
print(' 4 5 6')
print(' 7 8 9')
print('\n\n')
# Default state
board = [OccupiedBy.EMPTY] * 9
current_player = OccupiedBy.PLAYER
space_mapping = {
OccupiedBy.EMPTY: ' ',
OccupiedBy.PLAYER: ' X ',
OccupiedBy.COMPUTER: ' O '
}
symbol = input("DO YOU WANT 'X' OR 'O'? ").upper()
# If the player doesn't choose X, then assume you want O
# and the computer goes first.
if symbol != "X":
space_mapping[OccupiedBy.PLAYER] = ' O '
space_mapping[OccupiedBy.COMPUTER] = ' X '
current_player = OccupiedBy.COMPUTER
while True:
if current_player is OccupiedBy.PLAYER:
move = prompt_player(board)
if move == 0:
print("THANKS FOR THE GAME.")
break
board[move - 1] = current_player
elif current_player is OccupiedBy.COMPUTER:
print("\nTHE COMPUTER MOVES TO...")
board[computer_think(board)] = current_player
print(render_board(board, space_mapping))
winner = determine_winner(board, current_player)
if winner is not Winner.NONE:
print(winner)
break
if current_player is OccupiedBy.COMPUTER:
current_player = OccupiedBy.PLAYER
elif current_player is OccupiedBy.PLAYER:
current_player = OccupiedBy.COMPUTER
if __name__ == '__main__':
play()
|
utils/validation.py | suspiria/MelNet | 190 | 11084708 | import torch
import torch.nn as nn
import torch.nn.functional as F
from tqdm import tqdm
from .gmm import sample_gmm
def validate(args, model, melgen, tierutil, testloader, criterion, writer, step):
model.eval()
# torch.backends.cudnn.benchmark = False
test_loss = []
loader = tqdm(testloader, desc='Testing is in progress', dynamic_ncols=True)
with torch.no_grad():
for input_tuple in loader:
if args.tts:
seq, text_lengths, source, target, audio_lengths = input_tuple
mu, std, pi, alignment = model(
source.cuda(non_blocking=True),
seq.cuda(non_blocking=True),
text_lengths.cuda(non_blocking=True),
audio_lengths.cuda(non_blocking=True)
)
else:
source, target, audio_lengths = input_tuple
mu, std, pi = model(
source.cuda(non_blocking=True),
audio_lengths.cuda(non_blocking=True)
)
loss = criterion(
target.cuda(non_blocking=True),
mu, std, pi,
audio_lengths.cuda(non_blocking=True)
)
test_loss.append(loss)
test_loss = sum(test_loss) / len(test_loss)
audio_length = audio_lengths[0].item()
source = source[0].cpu().detach().numpy()[:, :audio_length]
target = target[0].cpu().detach().numpy()[:, :audio_length]
result = sample_gmm(mu[0], std[0], pi[0]).cpu().detach().numpy()[:, :audio_length]
if args.tts:
alignment = alignment[0].cpu().detach().numpy()[:, :audio_length]
else:
alignment = None
writer.log_validation(test_loss, source, target, result, alignment, step)
model.train()
# torch.backends.cudnn.benchmark = True
|
modules/arp_monitor.py | Lola224/hakkuframework | 250 | 11084716 | <filename>modules/arp_monitor.py<gh_stars>100-1000
# Copyright (C) 2015 – 2021 <NAME> (4shadoww)
from core.hakkuframework import *
import scapy.all as scapy
conf = {
"name": "arp_monitor", # Module's name (should be same as file name)
"version": "1.0", # Module version
"shortdesc": "arp packet monitor", # Short description
"github": "4shadoww", # Author's github
"author": "4shadoww", # Author
"email": "<EMAIL>", # Email
"initdate": "2016-12-31", # Initial date
"lastmod": "2021-07-11", # Last modification
"apisupport": False, # Api support
"needroot": 1, # Alert user if root permissions not available (remove variable below if root permissions not needed)
}
# List of the variables
variables = OrderedDict((
))
# Additional notes to options
option_notes = " this module doesn't have any options"
# Simple changelog
changelog = "Version 1.0:\nrelease"
def arp_display(pkt):
if pkt[scapy.ARP].op == 1: #who-has (request)
return "Request: " + pkt[scapy.ARP].psrc + " is asking about " + pkt[scapy.ARP].pdst
if pkt[scapy.ARP].op == 2: #is-at (response)
return "*Response: " + pkt[scapy.ARP].hwsrc + " has address " + pkt[scapy.ARP].psrc
# Run function
def run():
print_info("starting arp monitor...")
print_info("ctrl + c to end")
print(scapy.sniff(prn=arp_display, filter="arp", store=0))
print_info("monitoring ended")
|
e2e/fixtures/python/relative/simple.py | FivemStorescripts/OctoLinker | 2,677 | 11084744 | <gh_stars>1000+
a = "test"
_B = "test"
def my_func():
pass
class MyClass():
pass
|
examples/basic/mesh_threshold.py | hadivafaii/vedo | 836 | 11084776 | <filename>examples/basic/mesh_threshold.py
"""Extracts cells of a Mesh which satisfy
the threshold criterion: 37 < scalar < 37.5"""
from vedo import *
man = Mesh(dataurl+"man.vtk")
scals = man.points()[:, 0] + 37  # pick x coords of vertices
man.cmap("cool", scals).addScalarBar(title="threshold", horizontal=True)
# make a copy and threshold the mesh
cutman = man.clone().threshold(scals, 37, 37.5)
# distribute the meshes on the 2 renderers
show([(man, __doc__), cutman], N=2, elevation=-30, axes=11).close()
|
static/ppdet/modeling/anchor_heads/fcos_head.py | leakyH/PaddleDetection | 7,782 | 11084830 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.initializer import Normal, Constant
from ppdet.modeling.ops import ConvNorm, DeformConvNorm
from ppdet.modeling.ops import MultiClassNMS
from ppdet.core.workspace import register
__all__ = ['FCOSHead']
@register
class FCOSHead(object):
"""
FCOSHead
Args:
num_classes (int): Number of classes
fpn_stride (list): The stride of each FPN Layer
prior_prob (float): Used to set the bias init for the class prediction layer
num_convs (int): The layer number in fcos head
norm_type (str): Normalization type, 'bn'/'sync_bn'/'affine_channel'
fcos_loss (object): Instance of 'FCOSLoss'
        norm_reg_targets (bool): Normalize the regression targets if true
        centerness_on_reg (bool): Predict centerness on the regression branch if true,
            otherwise on the classification branch
        use_dcn_in_tower (bool): Use deformable conv in the FCOS head if true
nms (object): Instance of 'MultiClassNMS'
"""
__inject__ = ['fcos_loss', 'nms']
__shared__ = ['num_classes']
def __init__(self,
num_classes=80,
fpn_stride=[8, 16, 32, 64, 128],
prior_prob=0.01,
num_convs=4,
norm_type="gn",
fcos_loss=None,
norm_reg_targets=False,
centerness_on_reg=False,
use_dcn_in_tower=False,
nms=MultiClassNMS(
score_threshold=0.01,
nms_top_k=1000,
keep_top_k=100,
nms_threshold=0.45,
background_label=-1).__dict__):
self.num_classes = num_classes
self.fpn_stride = fpn_stride[::-1]
self.prior_prob = prior_prob
self.num_convs = num_convs
self.norm_reg_targets = norm_reg_targets
self.centerness_on_reg = centerness_on_reg
self.use_dcn_in_tower = use_dcn_in_tower
self.norm_type = norm_type
self.fcos_loss = fcos_loss
self.batch_size = 8
self.nms = nms
if isinstance(nms, dict):
self.nms = MultiClassNMS(**nms)
def _fcos_head(self, features, fpn_stride, fpn_scale, is_training=False):
"""
Args:
features (Variables): feature map from FPN
fpn_stride (int): the stride of current feature map
            is_training (bool): whether in train or test mode
"""
subnet_blob_cls = features
subnet_blob_reg = features
in_channles = features.shape[1]
if self.use_dcn_in_tower:
conv_norm = DeformConvNorm
else:
conv_norm = ConvNorm
for lvl in range(0, self.num_convs):
conv_cls_name = 'fcos_head_cls_tower_conv_{}'.format(lvl)
subnet_blob_cls = conv_norm(
input=subnet_blob_cls,
num_filters=in_channles,
filter_size=3,
stride=1,
norm_type=self.norm_type,
act='relu',
initializer=Normal(
loc=0., scale=0.01),
bias_attr=True,
norm_name=conv_cls_name + "_norm",
name=conv_cls_name)
conv_reg_name = 'fcos_head_reg_tower_conv_{}'.format(lvl)
subnet_blob_reg = conv_norm(
input=subnet_blob_reg,
num_filters=in_channles,
filter_size=3,
stride=1,
norm_type=self.norm_type,
act='relu',
initializer=Normal(
loc=0., scale=0.01),
bias_attr=True,
norm_name=conv_reg_name + "_norm",
name=conv_reg_name)
conv_cls_name = "fcos_head_cls"
bias_init_value = -math.log((1 - self.prior_prob) / self.prior_prob)
cls_logits = fluid.layers.conv2d(
input=subnet_blob_cls,
num_filters=self.num_classes,
filter_size=3,
stride=1,
padding=1,
param_attr=ParamAttr(
name=conv_cls_name + "_weights",
initializer=Normal(
loc=0., scale=0.01)),
bias_attr=ParamAttr(
name=conv_cls_name + "_bias",
initializer=Constant(value=bias_init_value)),
name=conv_cls_name)
conv_reg_name = "fcos_head_reg"
bbox_reg = fluid.layers.conv2d(
input=subnet_blob_reg,
num_filters=4,
filter_size=3,
stride=1,
padding=1,
param_attr=ParamAttr(
name=conv_reg_name + "_weights",
initializer=Normal(
loc=0., scale=0.01)),
bias_attr=ParamAttr(
name=conv_reg_name + "_bias", initializer=Constant(value=0)),
name=conv_reg_name)
bbox_reg = bbox_reg * fpn_scale
if self.norm_reg_targets:
bbox_reg = fluid.layers.relu(bbox_reg)
if not is_training:
bbox_reg = bbox_reg * fpn_stride
else:
bbox_reg = fluid.layers.exp(bbox_reg)
conv_centerness_name = "fcos_head_centerness"
if self.centerness_on_reg:
subnet_blob_ctn = subnet_blob_reg
else:
subnet_blob_ctn = subnet_blob_cls
centerness = fluid.layers.conv2d(
input=subnet_blob_ctn,
num_filters=1,
filter_size=3,
stride=1,
padding=1,
param_attr=ParamAttr(
name=conv_centerness_name + "_weights",
initializer=Normal(
loc=0., scale=0.01)),
bias_attr=ParamAttr(
name=conv_centerness_name + "_bias",
initializer=Constant(value=0)),
name=conv_centerness_name)
return cls_logits, bbox_reg, centerness
def _get_output(self, body_feats, is_training=False):
"""
Args:
            body_feats (list): the list of FPN feature maps
            is_training (bool): whether in train or test mode
Return:
cls_logits (Variables): prediction for classification
bboxes_reg (Variables): prediction for bounding box
            centerness (Variables): prediction for centerness
"""
cls_logits = []
bboxes_reg = []
centerness = []
assert len(body_feats) == len(self.fpn_stride), \
"The size of body_feats is not equal to size of fpn_stride"
for fpn_name, fpn_stride in zip(body_feats, self.fpn_stride):
features = body_feats[fpn_name]
scale = fluid.layers.create_parameter(
shape=[1, ],
dtype="float32",
name="%s_scale_on_reg" % fpn_name,
default_initializer=fluid.initializer.Constant(1.))
cls_pred, bbox_pred, ctn_pred = self._fcos_head(
features, fpn_stride, scale, is_training=is_training)
cls_logits.append(cls_pred)
bboxes_reg.append(bbox_pred)
centerness.append(ctn_pred)
return cls_logits, bboxes_reg, centerness
def _compute_locations(self, features):
"""
Args:
features (list): List of Variables for FPN feature maps
Return:
Anchor points for each feature map pixel
"""
locations = []
for lvl, fpn_name in enumerate(features):
feature = features[fpn_name]
shape_fm = fluid.layers.shape(feature)
shape_fm.stop_gradient = True
h = shape_fm[2]
w = shape_fm[3]
fpn_stride = self.fpn_stride[lvl]
shift_x = fluid.layers.range(
0, w * fpn_stride, fpn_stride, dtype='float32')
shift_y = fluid.layers.range(
0, h * fpn_stride, fpn_stride, dtype='float32')
shift_x = fluid.layers.unsqueeze(shift_x, axes=[0])
shift_y = fluid.layers.unsqueeze(shift_y, axes=[1])
shift_x = fluid.layers.expand_as(
shift_x, target_tensor=feature[0, 0, :, :])
shift_y = fluid.layers.expand_as(
shift_y, target_tensor=feature[0, 0, :, :])
shift_x.stop_gradient = True
shift_y.stop_gradient = True
shift_x = fluid.layers.reshape(shift_x, shape=[-1])
shift_y = fluid.layers.reshape(shift_y, shape=[-1])
location = fluid.layers.stack(
[shift_x, shift_y], axis=-1) + fpn_stride // 2
location.stop_gradient = True
locations.append(location)
return locations
def __merge_hw(self, input, ch_type="channel_first"):
"""
Args:
input (Variables): Feature map whose H and W will be merged into one dimension
ch_type (str): channel_first / channel_last
Return:
new_shape (Variables): The new shape after h and w merged into one dimension
"""
shape_ = fluid.layers.shape(input)
bs = shape_[0]
ch = shape_[1]
hi = shape_[2]
wi = shape_[3]
img_size = hi * wi
img_size.stop_gradient = True
if ch_type == "channel_first":
new_shape = fluid.layers.concat([bs, ch, img_size])
elif ch_type == "channel_last":
new_shape = fluid.layers.concat([bs, img_size, ch])
else:
raise KeyError("Wrong ch_type %s" % ch_type)
new_shape.stop_gradient = True
return new_shape
def _postprocessing_by_level(self, locations, box_cls, box_reg, box_ctn,
im_info):
"""
Args:
locations (Variables): anchor points for current layer
box_cls (Variables): categories prediction
box_reg (Variables): bounding box prediction
box_ctn (Variables): centerness prediction
im_info (Variables): [h, w, scale] for input images
Return:
box_cls_ch_last (Variables): score for each category, in [N, C, M]
C is the number of classes and M is the number of anchor points
box_reg_decoding (Variables): decoded bounding box, in [N, M, 4]
last dimension is [x1, y1, x2, y2]
"""
act_shape_cls = self.__merge_hw(box_cls)
box_cls_ch_last = fluid.layers.reshape(
x=box_cls,
shape=[self.batch_size, self.num_classes, -1],
actual_shape=act_shape_cls)
box_cls_ch_last = fluid.layers.sigmoid(box_cls_ch_last)
act_shape_reg = self.__merge_hw(box_reg, "channel_last")
box_reg_ch_last = fluid.layers.transpose(box_reg, perm=[0, 2, 3, 1])
box_reg_ch_last = fluid.layers.reshape(
x=box_reg_ch_last,
shape=[self.batch_size, -1, 4],
actual_shape=act_shape_reg)
act_shape_ctn = self.__merge_hw(box_ctn)
box_ctn_ch_last = fluid.layers.reshape(
x=box_ctn,
shape=[self.batch_size, 1, -1],
actual_shape=act_shape_ctn)
box_ctn_ch_last = fluid.layers.sigmoid(box_ctn_ch_last)
box_reg_decoding = fluid.layers.stack(
[
locations[:, 0] - box_reg_ch_last[:, :, 0],
locations[:, 1] - box_reg_ch_last[:, :, 1],
locations[:, 0] + box_reg_ch_last[:, :, 2],
locations[:, 1] + box_reg_ch_last[:, :, 3]
],
axis=1)
box_reg_decoding = fluid.layers.transpose(
box_reg_decoding, perm=[0, 2, 1])
# recover the location to original image
im_scale = im_info[:, 2]
box_reg_decoding = box_reg_decoding / im_scale
box_cls_ch_last = box_cls_ch_last * box_ctn_ch_last
return box_cls_ch_last, box_reg_decoding
def _post_processing(self, locations, cls_logits, bboxes_reg, centerness,
im_info):
"""
Args:
locations (list): List of Variables composed by center of each anchor point
cls_logits (list): List of Variables for class prediction
bboxes_reg (list): List of Variables for bounding box prediction
centerness (list): List of Variables for centerness prediction
im_info(Variables): [h, w, scale] for input images
Return:
pred (LoDTensor): predicted bounding box after nms,
the shape is n x 6, last dimension is [label, score, xmin, ymin, xmax, ymax]
"""
pred_boxes_ = []
pred_scores_ = []
for _, (
pts, cls, box, ctn
) in enumerate(zip(locations, cls_logits, bboxes_reg, centerness)):
pred_scores_lvl, pred_boxes_lvl = self._postprocessing_by_level(
pts, cls, box, ctn, im_info)
pred_boxes_.append(pred_boxes_lvl)
pred_scores_.append(pred_scores_lvl)
pred_boxes = fluid.layers.concat(pred_boxes_, axis=1)
pred_scores = fluid.layers.concat(pred_scores_, axis=2)
pred = self.nms(pred_boxes, pred_scores)
return pred
def get_loss(self, input, tag_labels, tag_bboxes, tag_centerness):
"""
Calculate the loss for FCOS
Args:
input (list): List of Variables for feature maps from FPN layers
tag_labels (Variables): category targets for each anchor point
tag_bboxes (Variables): bounding boxes targets for positive samples
tag_centerness (Variables): centerness targets for positive samples
Return:
loss (dict): loss composed by classification loss, bounding box
regression loss and centerness regression loss
"""
cls_logits, bboxes_reg, centerness = self._get_output(
input, is_training=True)
loss = self.fcos_loss(cls_logits, bboxes_reg, centerness, tag_labels,
tag_bboxes, tag_centerness)
return loss
def get_prediction(self, input, im_info):
"""
Decode the prediction
Args:
input (list): List of Variables for feature maps from FPN layers
im_info(Variables): [h, w, scale] for input images
Return:
the bounding box prediction
"""
cls_logits, bboxes_reg, centerness = self._get_output(
input, is_training=False)
locations = self._compute_locations(input)
pred = self._post_processing(locations, cls_logits, bboxes_reg,
centerness, im_info)
return {"bbox": pred}
|
Lib/objc/_DeviceIdentity.py | snazari/Pyto | 701 | 11084832 | """
Classes from the 'DeviceIdentity' framework.
"""
try:
from rubicon.objc import ObjCClass
except ValueError:
def ObjCClass(name):
return None
def _Class(name):
try:
return ObjCClass(name)
except NameError:
return None
GestaltHlprDeviceIdentity = _Class("GestaltHlprDeviceIdentity")
DeviceTypeDeviceIdentity = _Class("DeviceTypeDeviceIdentity")
|
18-asyncio/charfinder/http_charfinder.py | yyang99/fluent_python_notebooks | 199 | 11084866
#!/usr/bin/env python3
import sys
import asyncio
from aiohttp import web
from charfinder import UnicodeNameIndex
TEMPLATE_NAME = 'http_charfinder.html'
CONTENT_TYPE = 'text/html; charset=UTF-8'
SAMPLE_WORDS = ('bismillah chess cat circled Malayalam digit'
' Roman face Ethiopic black mark symbol dot'
' operator Braille hexagram').split()
ROW_TPL = '<tr><td>{code_str}</td><th>{char}</th><td>{name}</td></tr>'
LINK_TPL = '<a href="/?query={0}" title="find &quot;{0}&quot;">{0}</a>'
LINKS_HTML = ', '.join(LINK_TPL.format(word) for word in
sorted(SAMPLE_WORDS, key=str.upper))
index = UnicodeNameIndex()
with open(TEMPLATE_NAME) as tpl:
template = tpl.read()
template = template.replace('{links}', LINKS_HTML)
# BEGIN HTTP_CHARFINDER_HOME
def home(request): # <1>
query = request.GET.get('query', '').strip() # <2>
print('Query: {!r}'.format(query)) # <3>
if query: # <4>
descriptions = list(index.find_descriptions(query))
res = '\n'.join(ROW_TPL.format(**vars(descr))
for descr in descriptions)
msg = index.status(query, len(descriptions))
else:
descriptions = []
res = ''
msg = 'Enter words describing characters.'
html = template.format(query=query, result=res, # <5>
message=msg)
print('Sending {} results'.format(len(descriptions))) # <6>
return web.Response(content_type=CONTENT_TYPE, text=html) # <7>
# END HTTP_CHARFINDER_HOME
# BEGIN HTTP_CHARFINDER_SETUP
@asyncio.coroutine
def init(loop, address, port): # <1>
app = web.Application(loop=loop) # <2>
app.router.add_route('GET', '/', home) # <3>
handler = app.make_handler() # <4>
server = yield from loop.create_server(handler,
address, port) # <5>
return server.sockets[0].getsockname() # <6>
def main(address="127.0.0.1", port=8888):
port = int(port)
loop = asyncio.get_event_loop()
host = loop.run_until_complete(init(loop, address, port)) # <7>
print('Serving on {}. Hit CTRL-C to stop.'.format(host))
try:
loop.run_forever() # <8>
except KeyboardInterrupt: # CTRL+C pressed
pass
print('Server shutting down.')
loop.close() # <9>
if __name__ == '__main__':
main(*sys.argv[1:])
# END HTTP_CHARFINDER_SETUP
|
xv_leak_tools/test_components/open_wrt/__init__.py | UAEKondaya1/expressvpn_leak_testing | 219 | 11084879
from xv_leak_tools.test_components.open_wrt.open_wrt_builder import OpenWRTBuilder
def register(factory):
factory.register(OpenWRTBuilder())
|
release/scripts/freestyle/styles/sequentialsplit_sketchy.py | rotoglup/blender | 365 | 11084881 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Filename : sequentialsplit_sketchy.py
# Author : <NAME>
# Date : 04/08/2005
# Purpose : Use the sequential split with two different
# predicates to specify respectively the starting and
# the stopping extremities for strokes
from freestyle.chainingiterators import ChainSilhouetteIterator
from freestyle.predicates import (
NotUP1D,
QuantitativeInvisibilityUP1D,
TrueUP1D,
pyBackTVertexUP0D,
pyVertexNatureUP0D,
)
from freestyle.shaders import (
ConstantColorShader,
IncreasingThicknessShader,
SpatialNoiseShader,
)
from freestyle.types import Nature, Operators
upred = QuantitativeInvisibilityUP1D(0)
Operators.select(upred)
Operators.bidirectional_chain(ChainSilhouetteIterator(), NotUP1D(upred))
# starting and stopping predicates:
start = pyVertexNatureUP0D(Nature.NON_T_VERTEX)
stop = pyBackTVertexUP0D()
Operators.sequential_split(start, stop, 10)
shaders_list = [
SpatialNoiseShader(7, 120, 2, True, True),
IncreasingThicknessShader(5, 8),
ConstantColorShader(0.2, 0.2, 0.2, 1),
]
Operators.create(TrueUP1D(), shaders_list)
|
histomicstk/utils/girder_convenience_utils.py | sunaifei/HistomicsTK | 249 | 11084895
"""
Created on Thu Dec 12 13:19:18 2019
@author: tageldim
"""
# import os
import os
import girder_client
import json
from histomicstk.workflows.workflow_runner import Workflow_runner, \
Slide_iterator, Annotation_iterator
# import warnings
# warnings.simplefilter('once', UserWarning)
def connect_to_api(apiurl, apikey=None, interactive=True):
"""Connect to a specific girder API.
Parameters
----------
apiurl : str
        URL of the API to connect to.
apikey : str
API authentication token key
interactive : bool
Whether to use interactive mode instead.
Returns
-------
girder_client.GirderClient
Authenticated girder client.
"""
assert interactive or (apikey is not None)
gc = girder_client.GirderClient(apiUrl=apiurl)
if apikey is not None:
interactive = False
if interactive:
gc.authenticate(interactive=True)
else:
gc.authenticate(apiKey=apikey)
return gc
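# Usage sketch (illustrative only; the URL and key below are placeholders,
# not real endpoints or credentials):
#
#   gc = connect_to_api(
#       apiurl='http://girder.example.org/api/v1/',
#       apikey='<my-api-key>', interactive=False)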
def get_absolute_girder_folderpath(gc, folder_id=None, folder_info=None):
"""Get absolute path for a girder folder.
Parameters
----------
gc : girder_client.GirderClient
authenticated girder client
folder_id : str
girder id of folder
folder_info : dict
folder info from the girder server
Returns
-------
str
absolute path to folder in the girder server.
"""
assert any([j is not None for j in (folder_id, folder_info)])
if folder_id is not None:
folder_info = gc.get('/folder/%s' % folder_id)
fpath = gc.get('/folder/%s/rootpath' % folder_info['_id'])
fpath = "/".join([
j['object']['name'] for j in fpath
if j['object']['_modelType'] == 'folder'
]) + "/" + folder_info['name'] + "/"
return fpath
def update_permissions_for_annotation(
gc, annotation_id=None, annotation=None,
groups_to_add=None, replace_original_groups=True,
users_to_add=None, replace_original_users=True):
"""Update permissions for a single annotation.
Parameters
----------
    gc : girder_client.GirderClient
authenticated girder client instance
annotation_id : str
girder id of annotation
annotation : dict
overrides annotation_id if given
groups_to_add : list
each entry is a dict containing the information about user groups
to add and their permission levels. A sample entry must have the
following keys
- level, int -> 0 (view), 1 (edit) or 2 (owner)
- name, str -> name of user group
        - id, str -> girder id of user group
replace_original_groups : bool
whether to replace original groups or append to them
users_to_add : list
each entry is a dict containing the information about user
to add and their permission levels. A sample entry must have the
following keys
- level, int -> 0 (view), 1 (edit) or 2 (owner)
- login, str -> username of user
        - id, str -> girder id of user
    replace_original_users : bool
whether to replace original users or append to them
Returns
-------
dict
server response
"""
groups_to_add = [] if groups_to_add is None else groups_to_add
users_to_add = [] if users_to_add is None else users_to_add
if annotation is not None:
annotation_id = annotation['_id']
elif annotation_id is None:
raise Exception(
"You must provide either the annotation or its girder id.")
# get current permissions
current = gc.get('/annotation/%s/access' % annotation_id)
# add or replace as needed
if replace_original_groups:
current['groups'] = []
current_group_ids = []
else:
current_group_ids = [j['id'] for j in current['groups']]
if replace_original_users:
current['users'] = []
current_user_ids = []
else:
current_user_ids = [j['id'] for j in current['users']]
for group in groups_to_add:
if group['id'] not in current_group_ids:
current['groups'].append(group)
for user in users_to_add:
if user['id'] not in current_user_ids:
current['users'].append(user)
# now update accordingly
# BAD WAY!! -- do NOT do this!
# return gc.put('/annotation/%s/access?access=%s' % (
# annotation_id, json.dumps(current)))
# PROPER WAY
return gc.put('/annotation/%s/access' % annotation_id, data={
'access': json.dumps(current)})
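# Usage sketch (illustrative only; the ids below are placeholders):
#
#   new_group = {
#       'level': 1,                    # 0 = view, 1 = edit, 2 = owner
#       'name': 'study_pathologists',
#       'id': '<girder-group-id>',
#   }
#   resp = update_permissions_for_annotation(
#       gc, annotation_id='<girder-annotation-id>',
#       groups_to_add=[new_group], replace_original_groups=False)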
def update_permissions_for_annotations_in_slide(
gc, slide_id, verbose=0, monitorPrefix='', **kwargs):
"""Update permissions for all annotations in a slide.
Parameters
----------
gc : girder_client.GirderClient
authenticated girder client
slide_id : str
girder id of slide
verbose : int
level of verbosity
monitorPrefix : str
prefix to prepend to printed statements
kwargs
passed as-is to update_permissions_for_annotation()
Returns
-------
list
each entry is a dict of the server response.
"""
anniter = Annotation_iterator(
gc=gc, slide_id=slide_id,
callback=update_permissions_for_annotation,
callback_kwargs=kwargs,
verbose=verbose, monitorPrefix=monitorPrefix)
return anniter.apply_callback_to_all_annotations()
def update_permissions_for_annotations_in_folder(
gc, folderid, workflow_kwargs, recursive=True,
monitor='', verbose=True):
"""Update permissions for all annotations in a folder recursively.
Parameters
----------
gc : girder_client.GirderClient
authenticated girder client
folderid : str
girder id of folder
workflow_kwargs : dict
kwargs to pass to update_permissions_for_annotations_in_slide()
recursive : bool
do this recursively for subfolders?
monitor : str
text to prepend to printed statements
verbose : bool
print statements to screen?
Returns
-------
None
"""
# update permissions for each slide in folder
workflow_kwargs.update({'gc': gc})
workflow_runner = Workflow_runner(
slide_iterator=Slide_iterator(
gc, source_folder_id=folderid,
keep_slides=None,
),
workflow=update_permissions_for_annotations_in_slide,
workflow_kwargs=workflow_kwargs,
recursive=recursive,
monitorPrefix=monitor,
verbose=verbose,
)
workflow_runner.run()
def update_styles_for_annotation(gc, annotation, changes):
"""Update styles for all relevant elements in an annotation.
Parameters
----------
gc : girder_client.GirderClient
authenticated girder client
annotation : dict
annotation
changes : dict
indexed by current group name to be updated, and values are
the new styles. Each element in ann["annotation"]["elements"]
whose current "group" attribute is in this dict's keys is
updated according to the new style.
Returns
-------
dict
server response
"""
# find out if annotation needs editing
if 'groups' not in annotation.keys():
return
elif not any([g in changes.keys() for g in annotation['groups']]):
return
# edit elements one by one
for el in annotation['annotation']['elements']:
if el['group'] in changes.keys():
el.update(changes[el['group']])
# print(" updating ...")
return gc.put(
"/annotation/%s" % annotation['_id'], json=annotation['annotation'])
def update_styles_for_annotations_in_slide(
gc, slide_id, verbose=0, monitorPrefix='', callback=None, **kwargs):
"""Update styles for all annotations in a slide.
Parameters
----------
gc : girder_client.GirderClient
authenticated girder client
slide_id : str
girder id of slide
verbose : int
level of verbosity
monitorPrefix : str
prefix to prepend to printed statements
callback : function
if None, defaults to update_styles_for_annotation. Passed as-is
to histomicstk.workflows.workflow_runner.Annotation_iterator
kwargs
passed as-is to the update_styles_for_annotation
Returns
-------
list
each entry is a dict of the server response.
"""
if callback is None:
callback = update_styles_for_annotation
anniter = Annotation_iterator(
gc=gc, slide_id=slide_id,
callback=callback,
callback_kwargs=kwargs,
verbose=verbose, monitorPrefix=monitorPrefix)
return anniter.apply_callback_to_all_annotations()
def update_styles_for_annotations_in_folder(
gc, folderid, workflow_kwargs, recursive=True,
catch_exceptions=True, monitor='', verbose=True):
"""Update styles for all annotations in a folder recursively.
Parameters
----------
gc : girder_client.GirderClient
authenticated girder client
folderid : str
girder id of folder
workflow_kwargs : dict
kwargs to pass to Update styles for all annotations in a slide()
recursive : bool
do this recursively for subfolders?
catch_exceptions : bool
passed as-is to histomicstk.workflows.workflow_runner.Workflow_runner
monitor : str
text to prepend to printed statements
verbose : bool
print statements to screen?
Returns
-------
None
"""
# update annotation styles
workflow_kwargs.update({'gc': gc})
workflow_runner = Workflow_runner(
slide_iterator=Slide_iterator(
gc, source_folder_id=folderid,
keep_slides=None,
),
workflow=update_styles_for_annotations_in_slide,
workflow_kwargs=workflow_kwargs,
recursive=recursive,
catch_exceptions=catch_exceptions,
monitorPrefix=monitor,
verbose=verbose,
)
workflow_runner.run()
def revert_annotation(
gc, annotation_id=None, annotation=None, version=None,
revert_to_nonempty_elements=False, only_revert_if_empty=True):
"""Revert an annotation to a previous version.
Parameters
----------
gc : girder_client.GirderClient
authenticated girder client
annotation_id : str
girder id of annotation
annotation : dict
overrides annotation_id if given
version : int
        version number for the annotation. If None and
        revert_to_nonempty_elements is False,
        the default behavior of the endpoint is invoked, which restores the
        annotation if it was deleted and otherwise reverts to the last version.
revert_to_nonempty_elements : bool
if true, reverts to the most recent version of the annotation
with non-empty elements.
only_revert_if_empty : bool
if true, only reverts annotation if it contains an empty element list
Returns
-------
dict
server response
"""
if annotation is not None:
annotation_id = annotation['_id']
elif annotation_id is None:
raise Exception(
"You must provide either the annotation or its girder id.")
history = gc.get("/annotation/%s/history" % annotation_id)
# no need to revert if empty
if only_revert_if_empty and len(history[0]["groups"]) > 0:
return dict()
# cannot revert if only version
if len(history) < 2:
return dict()
if (version is None) and revert_to_nonempty_elements:
# NOTE: even though the "history" may show
# the elements as empty, the "groups" attribute is really the
# indication if the annotation version actually has some elements.
# TODO -- This is likely a bug (?); fix me!!!
for ver in history:
if len(ver["groups"]) > 0:
version = ver['_version']
break
ver = "" if version is None else "?version=%d" % version
# if version is None:
# print(" Reverting ...")
# else:
# print(" Reverting to version %d" % version)
return gc.put("/annotation/%s/history/revert%s" % (annotation_id, ver))
def revert_annotations_in_slide(
gc, slide_id, verbose=0, monitorPrefix='', **kwargs):
"""Revert all annotations in a slide to a previous version.
Parameters
----------
gc : girder_client.GirderClient
authenticated girder client
slide_id : str
girder id of slide
verbose : int
level of verbosity
monitorPrefix : str
prefix to prepend to printed statements
kwargs
passed as-is to the revert_annotation
Returns
-------
list
each entry is a dict of the server response.
"""
anniter = Annotation_iterator(
gc=gc, slide_id=slide_id,
callback=revert_annotation,
callback_kwargs=kwargs,
verbose=verbose, monitorPrefix=monitorPrefix)
return anniter.apply_callback_to_all_annotations()
def revert_annotations_in_folder(
gc, folderid, workflow_kwargs, recursive=True,
monitor='', verbose=True):
"""Revert all annotations in a folder recursively.
Parameters
----------
gc : girder_client.GirderClient
authenticated girder client
folderid : str
girder id of folder
workflow_kwargs : dict
kwargs to pass to revert_annotations_in_slide
recursive : bool
do this recursively for subfolders?
monitor : str
text to prepend to printed statements
verbose : bool
print statements to screen?
Returns
-------
None
"""
# update annotation styles
workflow_kwargs.update({'gc': gc})
workflow_runner = Workflow_runner(
slide_iterator=Slide_iterator(
gc, source_folder_id=folderid,
keep_slides=None,
),
workflow=revert_annotations_in_slide,
workflow_kwargs=workflow_kwargs,
recursive=recursive,
monitorPrefix=monitor,
verbose=verbose,
)
workflow_runner.run()
# %%===========================================================================
def reproduce_annotations_workflow(
gc, folderid, annotation_jsonfile, monitorPrefix=''):
"""Dump annotations into single slide from local folder (Internal function).
Parameters
-----------
gc : girder_client.GirderClient
authenticated girder client instance
folderid : str
girder id of girder folder to copy the slide to before posting
        the annotations to it. This prevents confusing the old annotations
        originally associated with the slide with the newly posted annotations.
annotation_jsonfile : str
path to annotation json file
monitorPrefix : str
prefix to monitor string
"""
try:
# extract name + path
itemname = os.path.basename(annotation_jsonfile).replace(
'_annotations.json', '')
local = os.path.dirname(annotation_jsonfile)
# copy item without annotations
with open(os.path.join(local, itemname + '.json')) as jf:
source_item_info = json.load(jf)
print("%s: copy item" % monitorPrefix)
item = gc.post(
'/item/%s/copy?folderId=%s&name=%s©Annotations=False'
% (source_item_info['_id'], folderid, itemname))
# load annotations
with open(annotation_jsonfile) as af:
annotations = json.load(af)
# now post
n_annotations = len(annotations)
for anno, annotation in enumerate(annotations):
try:
print("%s: post annotation %d of %d" % (
monitorPrefix, anno, n_annotations))
_ = gc.post(
"/annotation?itemId=" + item['_id'],
json=annotation['annotation'])
except Exception as e:
print(e.__repr__())
except Exception as e:
print(e.__repr__())
def reproduce_annotations_from_backup(gc, folderid, local):
"""Reproduce annotations on HistomicsUI from local backup.
    This is the reverse of dump_annotations_locally(): it reproduces the
    backed-up tiered folder structure on HistomicsUI. The original
    slides (items) must still exist in the folder from which the backup was
    made, because they are copied (without annotations) before the
    local annotations (from the JSON files) are posted to them.
Parameters
-----------
gc : girder_client.GirderClient
authenticated girder client instance
folderid : str
girder id of girder folder to post reproduced annotations.
local : str
local path to get subfolders/slides/annotations
"""
monitor = os.path.basename(local)
# for each slide, copy it and post annotations
jsonfiles = [
os.path.join(local, j) for j in os.listdir(local)
if j.endswith('_annotations.json')]
for jsonfile in jsonfiles:
reproduce_annotations_workflow(
gc=gc, folderid=folderid, annotation_jsonfile=jsonfile,
monitorPrefix=monitor)
# for each subfolder, create a new folder on HistomicsUI and call self
subdirs = [
j for j in os.listdir(local) if os.path.isdir(os.path.join(local, j))]
for subdir in subdirs:
try:
# create folder in HistomicsUI
new_folder = gc.post('/folder?parentId=%s&name=%s' % (
folderid, subdir))
            # call self with the same parameters
reproduce_annotations_from_backup(
gc=gc, folderid=new_folder['_id'],
local=os.path.join(local, subdir))
except Exception as e:
print(e.__repr__())
# %%===========================================================================
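# Usage sketch (illustrative only; the URL, id and path are placeholders):
#
#   gc = connect_to_api('http://girder.example.org/api/v1/',
#                       apikey='<my-api-key>', interactive=False)
#   reproduce_annotations_from_backup(
#       gc, folderid='<girder-folder-id>',
#       local='/path/to/local/annotation/backup')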
|
simple-DCGAN/model.py | riciche/SimpleCVReproduction | 923 | 11084897
import torch
import torch.nn as nn
class NetG(nn.Module):
'''
generator
'''
def __init__(self, opt):
super(NetG, self).__init__()
num_gen_feature = opt.ngf
        # input: nz-dimensional noise with shape (nz, 1, 1)
"""
out = (in-1)*stride-2*padding+kernel_size
"""
self.base = nn.Sequential(
nn.ConvTranspose2d(opt.nz,
num_gen_feature * 8,
4,
1,
0,
bias=False),
nn.BatchNorm2d(num_gen_feature * 8),
nn.ReLU(inplace=True),
# 4->8
nn.ConvTranspose2d(num_gen_feature * 8,
num_gen_feature * 4,
4,
2,
1,
bias=False),
nn.BatchNorm2d(num_gen_feature * 4),
nn.ReLU(True),
# 8-> 16
nn.ConvTranspose2d(num_gen_feature * 4,
num_gen_feature * 2,
4,
2,
1,
bias=False),
nn.BatchNorm2d(num_gen_feature * 2),
nn.ReLU(True),
# 16->32
nn.ConvTranspose2d(num_gen_feature * 2,
num_gen_feature,
4,
2,
1,
bias=False),
nn.BatchNorm2d(num_gen_feature),
nn.ReLU(True),
# last out 3 * 96 * 96
nn.ConvTranspose2d(num_gen_feature, 3, 5, 3, 1, bias=False),
            # tanh normalizes outputs to [-1, 1]; sigmoid would normalize to [0, 1]
nn.Tanh()
)
def forward(self, x):
return self.base(x)
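# Shape sketch (illustrative only; `opt` is assumed to carry integer nz/ngf/ndf
# hyperparameters, e.g. nz=100, ngf=ndf=64):
#
#   noise = torch.randn(batch_size, opt.nz, 1, 1)
#   fake = NetG(opt)(noise)        # -> (batch_size, 3, 96, 96)
#   score = NetD(opt)(fake)        # -> (batch_size,), values in (0, 1)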
class NetD(nn.Module):
'''
discriminator
'''
def __init__(self, opt):
super(NetD, self).__init__()
ndf = opt.ndf
self.base = nn.Sequential(
nn.Conv2d(3, ndf, 5, 3, 1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 2), nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 4), nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 8), nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(ndf * 8, 1, 4, 2, 0, bias=False), nn.Sigmoid()
)
def forward(self, x):
        return self.base(x).view(-1)
|
models/__init__.py | benbijituo/OpenNRE-PyTorch | 224 | 11084900 | from .Model import Model
from .CNN_ATT import CNN_ATT
from .PCNN_ATT import PCNN_ATT
from .CNN_AVE import CNN_AVE
from .PCNN_AVE import PCNN_AVE
from .CNN_ONE import CNN_ONE
from .PCNN_ONE import PCNN_ONE
|
crosshair/opcode_intercept_test.py | pschanely/CrossHair | 785 | 11084925 | from crosshair.statespace import MessageType
from crosshair.test_util import check_states
def test_dict_index():
a = {"two": 2, "four": 4, "six": 6}
def numstr(x: str) -> int:
"""
post: _ != 4
raises: KeyError
"""
return a[x]
assert check_states(numstr) == {MessageType.POST_FAIL}
|
ch3/dependency-parser-nivre/app/transitionparser/deciders.py | thundercrawl/book-of-qna-code | 121 | 11084932
# Copyright 2010 <NAME>
##
# This is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
##
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
##
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
"""
Transition based parsing (both arc-standard and arc-eager).
Easily extended to support other variants.
"""
from __future__ import print_function
from __future__ import division
import os
import sys
curdir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(curdir, os.path.pardir))
if sys.version_info[0] < 3:
reload(sys)
sys.setdefaultencoding("utf-8")
# raise "Must be using Python 3"
from absl import app
from absl import flags
from absl import logging
from collections import defaultdict
import copy
import random
from ml import ml
from common import *
from common.exceptions import *
from configurations import *
# misc / pretty print / temp junk
def _ids(tok):
if tok['id'] == 0:
tok['form'] = 'ROOT'
return (tok['id'], tok['tag'])
class MLActionDecider:
'''
action deciders / policeis
'''
def __init__(self, model, featExt):
self.m = model
self.fs = featExt
def next_action(self, stack, deps, sent, i):
if len(stack) < 2:
return SHIFT
fs = self.fs.extract(stack, deps, sent, i)
action, scores = self.m.predict(fs)
if i >= len(sent) and action == SHIFT:
action = scores.index(max(scores[1:]))
return action
def next_actions(self, stack, deps, sent, i, conf=None):
fs = self.fs.extract(stack, deps, sent, i)
action, scores = self.m.predict(fs)
# [-122, 0.3, 3] -> {0:-122, 1:0.3, 2:3}
scores = dict(enumerate(scores))
actions = [
item for item,
score in sorted(
scores.items(),
key=lambda x:-
x[1])]
return actions
def scores(self, conf): # TODO: who uses this??
if len(conf.stack) < 2:
return {SHIFT: 1}
fs = self.fs.extract(conf.stack, conf.deps, conf.sent, conf.i)
action, scores = self.m.predict(fs)
# [-122, 0.3, 3] -> {0:-122, 1:0.3, 2:3}
scores = dict(enumerate(scores))
if conf.i >= len(conf.sent):
del scores[SHIFT]
return scores
def get_scores(self, conf):
if len(conf.stack) < 2:
return {SHIFT: 1}
fs = self.fs.extract(conf.stack, conf.deps, conf.sent, conf.i)
scores = self.m.get_scores(fs)
return scores
def get_prob_scores(self, conf):
if len(conf.stack) < 2:
return [1.0, 0, 0]
fs = self.fs.extract(conf.stack, conf.deps, conf.sent, conf.i)
besti, scores = self.m.predict(fs)
return scores
    def update(self, wrong, correct, stack, deps, sent, i):
        # Pass the mispredicted and gold actions to the underlying model
        # together with the features extracted from the current configuration.
        self.m.update(wrong, correct, self.fs.extract(stack, deps, sent, i))
class OracleActionDecider: # {{{
def __init__(self, oracle):
self.o = oracle
def next_action(self, stack, deps, sent, i):
return self.o.next_action(stack, deps, sent, i)
def next_actions(self, stack, deps, sent, i):
return self.o.next_actions(stack, deps, sent, i)
def get_scores(self, conf):
return {self.next_action(conf.stack, conf.deps, conf.sent, conf.i): 1}
class AuditMLActionDecider: # {{{
def __init__(self, model, featExt):
self.m = model
self.fs = featExt
self.current_sent = None
self.childs_of_x = None
self.connected_childs = set()
self.idtotok = {}
def next_action(self, stack, deps, sent, i):
def _enrich(set_of_node_ids):
return [_ids(self.idtotok[i]) for i in set_of_node_ids]
if self.current_sent != sent:
self.current_sent = sent
            self.idtotok = {}
for tok in self.current_sent:
self.idtotok[tok['id']] = tok
self.childs_of_x = defaultdict(set)
self.connected_childs = set([-1])
for tok in sent:
self.childs_of_x[tok['parent']].add(tok['id'])
if len(stack) < 2:
return SHIFT
fs = self.fs.extract(stack, deps, sent, i)
action, scores = self.m.predict(fs)
logging.debug("action [%s], scores [%s]", action, scores)
if i >= len(sent) and action == SHIFT:
action = scores.index(max(scores[1:]))
if action == REDUCE_R:
if stack[-1]['parent'] == stack[-2]['id']:
if len(self.childs_of_x[stack[-1]['id']
] - self.connected_childs) > 0:
logging.error("R not connecting: %s | %s , because: %s", _ids(stack[-1]), _ids(stack[-2]), _enrich(self.childs_of_x[stack[-1]['id']] - self.connected_childs))
else:
logging.error("R not XX")
if action == REDUCE_L:
if len(self.childs_of_x[stack[-2]['id']] -
self.connected_childs) < 1:
self.connected_childs.add(stack[-2]['id'])
if action == REDUCE_R:
if len(self.childs_of_x[stack[-1]['id']] -
self.connected_childs) < 1:
self.connected_childs.add(stack[-1]['id'])
return action, scores
class LoggingActionDecider: # {{{
def __init__(self, decider, featExt, out=sys.stdout):
self.decider = decider
self.fs = featExt
self.out = out
def next_action(self, stack, deps, sent, i):
features = self.fs.extract(stack, deps, sent, i)
logging.debug("features [%s]", features)
action = self.decider.next_action(stack, deps, sent, i)
self.out.write("%s %s\n" % (action, " ".join(features)))
return action
def next_actions(self, stack, deps, sent, i):
action = self.next_action(stack, deps, sent, i)
return [action]
def save(self, param=None):
self.out.close()
class MLTrainerActionDecider: # {{{
def __init__(self, mlAlgo, decider, featExt, earlyUpdate=False):
self.decider = decider
self.ml = mlAlgo
self.fs = featExt
self.earlyUpdate = earlyUpdate
def next_actions(self, stack, deps, sent, i, conf=None):
return [self.next_action(stack, deps, sent, i, conf)]
def next_action(self, stack, deps, sent, i, conf=None):
action = self.decider.next_action(stack, deps, sent, i)
mlaction = self.ml.update(
action, self.fs.extract(
stack, deps, sent, i))
if action != mlaction:
if self.earlyUpdate:
raise MLTrainerWrongActionException()
return action
def save(self, fout):
self.ml.finalize()
self.ml.dump(fout)
class MLPassiveAggressiveTrainerActionDecider: # {{{
def __init__(self, mlAlgo, decider, featExt, earlyUpdate=False):
self.decider = decider
self.ml = mlAlgo
self.fs = featExt
self.earlyUpdate = earlyUpdate
def next_actions(self, stack, deps, sent, i):
return [self.next_action(stack, deps, sent, i)]
def next_action(self, stack, deps, sent, i):
action = self.decider.next_action(stack, deps, sent, i)
mlaction = self.ml.do_pa_update(
self.fs.extract(
stack, deps, sent, i), action, C=1.0)
if action != mlaction:
if self.earlyUpdate:
raise MLTrainerWrongActionException()
return action
def save(self, fout):
self.ml.finalize()
self.ml.dump(fout)
class MLTrainerActionDecider2: # {{{
"""
Like MLTrainerActionDecider but does the update itself (a little less efficient, a bit more informative)
"""
def __init__(self, mlAlgo, decider, featExt, earlyUpdate=False):
self.decider = decider
self.ml = mlAlgo
self.fs = featExt
self.earlyUpdate = earlyUpdate
def next_actions(self, stack, deps, sent, i, conf=None):
return [self.next_action(stack, deps, sent, i, conf)]
def score_deps(self, deps, sent):
score = 0
deps = deps.deps # a set of (p,c) ids
sent_deps = set()
for tok in sent:
if tok['id'] == 0:
continue
sent_deps.add((tok['parent'], tok['id']))
for pc in sent_deps:
if pc not in deps:
score += 0.2
for pc in deps:
if pc not in sent_deps:
score += 1
return score
def cum_score_of_action(self, action, conf, ml=False):
newconf = conf.newAfter(action)
decider = copy.deepcopy(self.decider)
while not newconf.is_in_finish_state():
try:
if ml:
next = self.next_ml_action(newconf)
else:
next = decider.next_action(
newconf.stack, newconf.deps, newconf.sent, newconf.i)
newconf.do_action(next)
except IllegalActionException:
assert(len(newconf.sent) == newconf.i)
break
return self.score_deps(newconf.deps, newconf.sent)
def next_ml_action(self, conf):
features = self.fs.extract(conf.stack, conf.deps, conf.sent, conf.i)
act_scores = [(score, act) for act, score in self.ml.get_scores(
features).items() if act in conf.valid_actions()]
return max(act_scores)[1]
def next_action(self, stack, deps, sent, i, conf=None):
features = self.fs.extract(stack, deps, sent, i)
goldaction = self.decider.next_action(stack, deps, sent, i)
act_scores = [(score, act) for act, score in self.ml.get_scores(
features).items() if act in conf.valid_actions()]
pred_s, pred_a = max(act_scores)
self.ml.tick()
if pred_a != goldaction:
# calculate cost of NO UPDATE:
noupdate_cost = self.cum_score_of_action(pred_a, conf, ml=True)
# now try to update:
self.ml.add(features, goldaction, 1.0)
self.ml.add(features, pred_a, -1.0)
update_cost = self.cum_score_of_action(
self.next_ml_action(conf), conf, ml=True)
if noupdate_cost < update_cost:
logging.debug("noupdate: %s, update: %s", noupdate_cost, update_cost)
# undo prev update
self.ml.add(features, goldaction, -1.0)
self.ml.add(features, pred_a, 1.0)
return goldaction
def save(self, fout):
self.ml.finalize()
self.ml.dump(fout)
class MLTrainerActionDecider3: # {{{
"""
Like MLTrainerActionDecider but does the update itself (a little less efficient, a bit more informative)
"""
def __init__(self, mlAlgo, decider, featExt, earlyUpdate=False):
self.decider = decider
self.ml = mlAlgo
self.fs = featExt
self.earlyUpdate = earlyUpdate
def next_actions(self, stack, deps, sent, i, conf=None):
return [self.next_action(stack, deps, sent, i, conf)]
def score_deps(self, deps, sent):
score = 0
deps = deps.deps # a set of (p,c) ids
sent_deps = set()
for tok in sent:
if tok['id'] == 0:
continue
sent_deps.add((tok['parent'], tok['id']))
for pc in sent_deps:
if pc not in deps:
score += 0.2
for pc in deps:
if pc not in sent_deps:
score += 1
return score
def cum_score_of_action(self, action, conf, ml=False):
newconf = conf.newAfter(action)
decider = copy.deepcopy(self.decider)
while not newconf.is_in_finish_state():
try:
if ml:
next = self.next_ml_action(newconf)
else:
next = decider.next_action(
newconf.stack, newconf.deps, newconf.sent, newconf.i)
newconf.do_action(next)
except IllegalActionException:
logging.debug("oracle says [%s], but it is illegal, probably at end", next)
assert(len(newconf.sent) == newconf.i)
break
return self.score_deps(newconf.deps, newconf.sent)
def next_ml_action(self, conf):
features = self.fs.extract(conf.stack, conf.deps, conf.sent, conf.i)
act_scores = [(score, act) for act, score in self.ml.get_scores(
features).items() if act in conf.valid_actions()]
return max(act_scores)[1]
def next_action(self, stack, deps, sent, i, conf=None):
features = self.fs.extract(stack, deps, sent, i)
goldaction = self.decider.next_action(stack, deps, sent, i)
act_scores = [(score, act) for act, score in self.ml.get_scores(
features).items() if act in conf.valid_actions()]
pred_s, pred_a = max(act_scores)
noupdate_cost = self.cum_score_of_action(pred_a, conf, ml=True)
if pred_a != SHIFT:
self.ml.add(features, SHIFT, 1.0)
self.ml.add(features, pred_a, -1.0)
shiftupdate_cost = self.cum_score_of_action(SHIFT, conf, ml=True)
if shiftupdate_cost < noupdate_cost:
self.ml.tick()
return SHIFT
else: # undo
self.ml.add(features, SHIFT, -1.0)
self.ml.add(features, pred_a, 1.0)
self.ml.tick()
return pred_a
self.ml.tick()
return pred_a
costs = []
for score, act in act_scores:
if pred_a != act:
self.ml.add(features, act, 1.0)
self.ml.add(features, pred_a, -1.0)
costs.append(
(self.cum_score_of_action(
act, conf, ml=True), act))
self.ml.add(features, act, -1.0)
self.ml.add(features, pred_a, 1.0)
else:
costs.append((noupdate_cost, pred_a))
min_cost, act = min(costs)
if act != pred_a:
logging.debug("min_cost [%s], noupdate_cost [%s], act [%s], goldaction [%s]", min_cost, noupdate_cost, act, goldaction)
self.ml.add(features, act, 1.0)
self.ml.add(features, pred_a, -1.0)
else:
pass
self.ml.tick()
return act
def save(self, fout):
self.ml.finalize()
self.ml.dump(fout)
|
armi/bookkeeping/visualization/__init__.py | keckler/armi | 162 | 11084952 | # Copyright 2020 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The Visualization package contains functionality and entry points for producing files
amenable to visualization of ARMI run results.
This could theoretically support all sorts of visualization file formats, but for now,
only VTK files are supported. VTK was selected because it has wide support from vis
tools, while being a simple-enough format that quality pure-Python libraries exist to
produce them. Other formats (e.g., SILO) tend to require more system-dependent binary
dependencies, so optional support for them may be added later.
"""
from armi import plugins
from armi.bookkeeping.visualization.entryPoint import VisFileEntryPoint
|
multi/input.py | JinchengShi/ATRank | 393 | 11084977
import math
import numpy as np
class DataInput:
def __init__(self, data, batch_size):
self.batch_size = batch_size
self.data = data
self.epoch_size = math.ceil(len(self.data) / self.batch_size)
self.i = 0
def __iter__(self):
return self
def __next__(self):
if self.i == self.epoch_size:
raise StopIteration
batch_data = self.data[self.i * self.batch_size : min((self.i+1) * self.batch_size, len(self.data))]
self.i += 1
u, i, y, i_sl, q_sl, c_sl = [], [], [], [], [], []
for t in batch_data:
u.append(t[0])
assert(len(t[2]) == len(t[3]) and len(t[3]) == len(t[4]))
i_sl.append(len(t[2]))
assert(len(t[5]) == len(t[6]))
q_sl.append(len(t[5]))
assert(len(t[7]) == len(t[8]))
c_sl.append(len(t[7]))
i.append(t[9])
y.append(t[10])
hist_i_id = np.zeros([len(batch_data), max(i_sl)], np.int64)
hist_i_act = np.zeros([len(batch_data), max(i_sl)], np.int64)
hist_i_time = np.zeros([len(batch_data), max(i_sl)], np.int64)
k = 0
for t in batch_data:
for l in range(len(t[2])):
hist_i_id[k][l] = t[2][l]
hist_i_act[k][l] = t[3][l]
hist_i_time[k][l] = t[4][l]
k += 1
hist_q_id = np.zeros([len(batch_data), max(q_sl)], np.int64)
hist_q_time = np.zeros([len(batch_data), max(q_sl)], np.int64)
k = 0
for t in batch_data:
for l in range(len(t[5])):
hist_q_id[k][l] = t[5][l]
hist_q_time[k][l] = t[6][l]
k += 1
hist_c_id = np.zeros([len(batch_data), max(c_sl)], np.int64)
hist_c_time = np.zeros([len(batch_data), max(c_sl)], np.int64)
k = 0
for t in batch_data:
for l in range(len(t[7])):
hist_c_id[k][l] = t[7][l]
hist_c_time[k][l] = t[8][l]
k += 1
return self.i, \
(u, i, y, \
hist_i_id, hist_i_act, hist_i_time, i_sl, \
hist_q_id, hist_q_time, q_sl, \
hist_c_id, hist_c_time, c_sl)
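# Usage sketch (illustrative only; `train_set` is assumed to hold records in
# the tuple layout unpacked inside __next__ above):
#
#   for step, (u, i, y,
#              hist_i_id, hist_i_act, hist_i_time, i_sl,
#              hist_q_id, hist_q_time, q_sl,
#              hist_c_id, hist_c_time, c_sl) in DataInput(train_set, 32):
#       ...  # feed one padded batch to the model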
|
applications/tensorflow/dynamic_sparsity/ipu_sparse_ops/tests/test_sparse_training.py | payoto/graphcore_examples | 260 | 11085025
# Copyright (c) 2020 Graphcore Ltd. All rights reserved.
from examples_tests.test_util import SubProcessChecker
from pathlib import Path
import os
import sys
import numpy as np
import pytest
cwd = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(1, os.path.join(cwd, '..', '..'))
def assert_equal(a, b):
assert np.array_equal(a, b)
@pytest.mark.usefixtures("ipu_sparse_ops")
class TestBuildAndRun(SubProcessChecker):
def test_pruning_simple(self):
from ipu_sparse_ops import sparse_training
rows = np.array([1, 2, 3, 4, 5])
cols = np.array([11, 12, 13, 14, 15])
values = np.array([10, 20, 30, 40, 50])
slot_values = None
t, m = sparse_training.prune_bottom_k_weights(rows, cols, values, slot_values, 2, "test")
assert_equal(t[0], [3, 4, 5])
assert_equal(t[1], [13, 14, 15])
assert_equal(t[2], [30, 40, 50])
assert m == {}
def test_pruning_momentum(self):
from ipu_sparse_ops import sparse_training
rows = np.array([1, 2, 3, 4, 5])
cols = np.array([11, 12, 13, 14, 15])
values = np.array([10, 20, -3, 40, 5])
slot_values = {'momentum': np.array([0.1, 0.2, 0.3, 0.4, 0.5])}
t, m = sparse_training.prune_bottom_k_weights(rows, cols, values, slot_values, 3, "test")
assert_equal(t[0], [2, 4])
assert_equal(t[1], [12, 14])
assert_equal(t[2], [20, 40])
assert_equal(m['momentum'], [0.2, 0.4])
def test_regrow_rigl(self):
from ipu_sparse_ops import sparse, sparse_training
dense = np.array(
[[0.1, 0.2],
[0.3, 0.4]])
g = np.array(
[[1, 1, 1, 1, 1000], # largest grad in this row is at index (0, 4)
[1, 1, 1000, 1, 1]]) # largest grad in this row is at index (1, 2)
a = sparse.triplets_from_dense(dense)
t = sparse_training.regrow_rigl(a, g, sparse_training.zero_values_generator, 2, True, "test")
# Coords of largest grads are (0, 4) and (1, 2):
assert_equal(t[0], [0, 1]) # row indices
assert_equal(t[1], [4, 2]) # col indices
assert_equal(t[2], [0, 0]) # New values are 0 from the generator
def test_regrow_rigl_zero_grad(self):
from ipu_sparse_ops import sparse, sparse_training
dense = np.array(
[[0.1, 0.2],
[0.3, 0.4]])
g = np.array(
[[1, 1, 0, 0.1, 0], # largest grad in this row is at index (0, 3)
[1, 1, 0, 0, 0]])
a = sparse.triplets_from_dense(dense)
t = sparse_training.regrow_rigl(a, g, sparse_training.zero_values_generator, 2, True, "test")
print(t)
# No guarantee about index of second value because we don't use stable sort in regrow_rigl
# so only test the first index:
assert t[0][0] == 0
assert t[1][0] == 3
assert_equal(t[2], [0, 0]) # New values are 0 from the generator
def test_zeros(self):
from ipu_sparse_ops import sparse_training
assert_equal([0, 0, 0, 0], sparse_training.zero_values_generator(4))
assert_equal([0], sparse_training.zero_values_generator())
def test_join(self):
from ipu_sparse_ops import sparse_training
a = (
np.array([1, 2, 3]),
np.array([2, 4, 6]),
np.array([0.1, 0.2, 0.3])
)
b = (
np.array([4, 5, 6]),
np.array([8, 10, 12]),
np.array([0.4, 0.5, 0.6])
)
m = {'momentum': np.array([1.4, 1.5, 1.6])}
g, m = sparse_training.join_triplets(a, b, m, 3)
assert_equal(g[0], [1, 2, 3, 4, 5, 6])
assert_equal(g[1], [2, 4, 6, 8, 10, 12])
assert_equal(g[2], [0.1, 0.2, 0.3, 0.4, 0.5, 0.6])
assert_equal(m['momentum'], [1.4, 1.5, 1.6, 0, 0, 0])
def test_join_no_momentum(self):
from ipu_sparse_ops import sparse_training
a = (
np.array([1, 2, 3]),
np.array([4, 5, 6]),
np.array([0.1, 0.2, 0.3])
)
b = (
np.array([1, 2, 3]),
np.array([4, 5, 6]),
np.array([0.4, 0.5, 0.6])
)
g, m = sparse_training.join_triplets(a, b, None, 3)
assert_equal(g[0], [1, 2, 3, 1, 2, 3])
assert_equal(g[1], [4, 5, 6, 4, 5, 6])
assert_equal(g[2], [0.1, 0.2, 0.3, 0.4, 0.5, 0.6])
assert m == {}
|
IoT_Environment_Sensor/air_quality.py | gamblor21/Adafruit_Learning_System_Guides | 665 | 11085098
"""
IoT environmental sensor node.
Adafruit invests time and resources providing this open source code.
Please support Adafruit and open source hardware by purchasing
products from Adafruit!
Written by <NAME> for Adafruit Industries
Copyright (c) 2019 Adafruit Industries
Licensed under the MIT license.
All text above must be included in any redistribution.
"""
import adafruit_logging as logging
try:
import struct
except ImportError:
import ustruct as struct
logger = logging.getLogger('main')
class AirQualitySensor (object):
def __init__(self, uart):
self._uart = uart
self._buffer = []
self._pm10_standard = 0
self._pm25_standard = 0
self._pm100_standard = 0
self._pm10_env = 0
self._pm25_env = 0
self._pm100_env = 0
self._particles_03um = 0
self._particles_05um = 0
self._particles_10um = 0
self._particles_25um = 0
self._particles_50um = 0
self._particles_100um = 0
def read(self):
data = self._uart.read(32) # read up to 32 bytes
data = list(data)
self._buffer += data
while self._buffer and self._buffer[0] != 0x42:
self._buffer.pop(0)
if len(self._buffer) > 200:
self._buffer = [] # avoid an overrun if all bad data
return False
if len(self._buffer) < 32:
return False
if self._buffer[1] != 0x4d:
self._buffer.pop(0)
return False
frame_len = struct.unpack(">H", bytes(self._buffer[2:4]))[0]
if frame_len != 28:
self._buffer = []
return False
logger.debug('buffer length: %d', len(self._buffer) - 4)
frame = struct.unpack(">HHHHHHHHHHHHHH", bytes(self._buffer[4:32]))
self._pm10_standard, self._pm25_standard, self._pm100_standard, self._pm10_env, \
self._pm25_env, self._pm100_env, self._particles_03um, self._particles_05um, \
self._particles_10um, self._particles_25um, self._particles_50um, \
self._particles_100um, _, checksum = frame
check = sum(self._buffer[0:30])
if check != checksum:
self._buffer = []
return False
return True
@property
def pm10_standard(self):
return self._pm10_standard
@property
def pm25_standard(self):
return self._pm25_standard
@property
def pm100_standard(self):
return self._pm100_standard
@property
def pm10_env(self):
return self._pm10_env
@property
def pm25_env(self):
return self._pm25_env
@property
def pm100_env(self):
return self._pm100_env
@property
def particles_03um(self):
return self._particles_03um
@property
def particles_05um(self):
return self._particles_05um
@property
def particles_10um(self):
return self._particles_10um
@property
def particles_25um(self):
return self._particles_25um
@property
def particles_50um(self):
return self._particles_50um
@property
def particles_100um(self):
return self._particles_100um
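# Usage sketch (illustrative only, assuming a CircuitPython board wired to a
# PMS5003-style particulate sensor):
#
#   import board
#   import busio
#   uart = busio.UART(board.TX, board.RX, baudrate=9600, timeout=0.25)
#   aqi = AirQualitySensor(uart)
#   if aqi.read():
#       print('PM2.5 (env):', aqi.pm25_env)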
|
ddparser/parser/data_struct/field.py | ishine/DDParser | 779 | 11085133
# -*- coding: UTF-8 -*-
################################################################################
#
# Copyright (c) 2020 Baidu, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#################################################################################
"""
This file defines the data structures.
"""
from collections import Counter
import numpy as np
from ddparser.parser.nets import nn
from ddparser.parser.data_struct import utils
from ddparser.parser.data_struct import Vocab
class RawField(object):
"""Field base class"""
def __init__(self, name, fn=None):
super(RawField, self).__init__()
self.name = name
self.fn = fn
def __repr__(self):
"""repr"""
return "({}): {}()".format(self.name, self.__class__.__name__)
def preprocess(self, sequence):
"""preprocess"""
if self.fn is not None:
sequence = self.fn(sequence)
return sequence
def transform(self, sequences):
"""Sequences transform function"""
return [self.preprocess(seq) for seq in sequences]
class Field(RawField):
"""Field"""
def __init__(self,
name,
pad=None,
unk=None,
bos=None,
eos=None,
lower=False,
use_vocab=True,
tokenize=None,
tokenizer=None,
fn=None):
self.name = name
self.pad = pad
self.unk = unk
self.bos = bos
self.eos = eos
self.lower = lower
self.use_vocab = use_vocab
self.tokenize = tokenize
self.tokenizer = tokenizer
self.fn = fn
self.specials = [token for token in [pad, unk, bos, eos] if token is not None]
def __repr__(self):
"""repr"""
s, params = "({}): {}(".format(self.name, self.__class__.__name__), []
if self.pad is not None:
params.append("pad={}".format(self.pad))
if self.unk is not None:
params.append("unk={}".format(self.unk))
if self.bos is not None:
params.append("bos={}".format(self.bos))
if self.eos is not None:
params.append("eos={}".format(self.eos))
if self.lower:
params.append("lower={}".format(self.lower))
if not self.use_vocab:
params.append("use_vocab={}".format(self.use_vocab))
s += ", ".join(params)
s += ")"
return s
@property
def pad_index(self):
"""pad index"""
if self.pad is None:
return 0
if hasattr(self, 'vocab'):
return self.vocab[self.pad]
return self.specials.index(self.pad)
@property
def unk_index(self):
"""unk index"""
if self.unk is None:
return 0
if hasattr(self, 'vocab'):
return self.vocab[self.unk]
return self.specials.index(self.unk)
@property
def bos_index(self):
"""bos index"""
if self.bos is None:
return 0
if hasattr(self, 'vocab'):
return self.vocab[self.bos]
return self.specials.index(self.bos)
@property
def eos_index(self):
"""eos index"""
if self.eos is None:
return 0
if hasattr(self, 'vocab'):
return self.vocab[self.eos]
return self.specials.index(self.eos)
def preprocess(self, sequence):
"""preprocess"""
if self.fn is not None:
sequence = self.fn(sequence)
if self.tokenize is not None:
sequence = self.tokenize(sequence)
elif self.tokenizer is not None:
sequence = self.tokenizer.tokenize(sequence)
if not sequence: sequence = [self.unk]
if self.lower:
sequence = [token.lower() for token in sequence]
return sequence
def build(self, corpus, min_freq=1):
"""Create vocab based on corpus"""
if hasattr(self, 'vocab'):
return
sequences = getattr(corpus, self.name)
counter = Counter(token for seq in sequences for token in self.preprocess(seq))
self.vocab = Vocab(counter, min_freq, self.specials, self.unk_index)
def transform(self, sequences):
"""Sequences transform function, such as converting word to id, adding bos tags to sequences, etc."""
sequences = [self.preprocess(seq) for seq in sequences]
if self.use_vocab:
sequences = [self.vocab[seq] for seq in sequences]
if self.bos:
sequences = [[self.bos_index] + seq for seq in sequences]
if self.eos:
sequences = [seq + [self.eos_index] for seq in sequences]
sequences = [np.array(seq, dtype=np.int64) for seq in sequences]
return sequences
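# Usage sketch (illustrative only): a word-level field that lower-cases tokens,
# prepends a bos tag and maps tokens to vocabulary ids once `build` has been
# called on a corpus exposing a matching attribute.
#
#   word_field = Field('word', pad='<pad>', unk='<unk>', bos='<bos>', lower=True)
#   word_field.build(train_corpus)          # reads train_corpus.word
#   batch = word_field.transform([['Book', 'a', 'flight']])
#   # -> [np.array([bos_id, id_of('book'), id_of('a'), id_of('flight')])]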
class SubwordField(Field):
"""SubwordField"""
def __init__(self, *args, **kwargs):
self.fix_len = kwargs.pop('fix_len') if 'fix_len' in kwargs else 0
super(SubwordField, self).__init__(*args, **kwargs)
def build(self, corpus, min_freq=1):
"""Create vocab based on corpus"""
if hasattr(self, 'vocab'):
return
sequences = getattr(corpus, self.name)
counter = Counter(piece for seq in sequences for token in seq for piece in self.preprocess(token))
self.vocab = Vocab(counter, min_freq, self.specials, self.unk_index)
def transform(self, sequences):
"""Sequences transform function, such as converting word to id, adding bos tags to sequences, etc."""
sequences = [[self.preprocess(token) for token in seq] for seq in sequences]
if self.fix_len <= 0:
self.fix_len = max(len(token) for seq in sequences for token in seq)
if self.use_vocab:
sequences = [[[self.vocab[i] for i in token] for token in seq] for seq in sequences]
if self.bos:
sequences = [[[self.bos_index]] + seq for seq in sequences]
if self.eos:
sequences = [seq + [[self.eos_index]] for seq in sequences]
sequences = [
nn.pad_sequence([np.array(ids[:self.fix_len], dtype=np.int64) for ids in seq], self.pad_index, self.fix_len)
for seq in sequences
]
return sequences
class ErnieField(Field):
"""SubwordField"""
def __init__(self, *args, **kwargs):
self.fix_len = kwargs.pop('fix_len') if 'fix_len' in kwargs else 0
super(ErnieField, self).__init__(*args, **kwargs)
def transform(self, sequences):
"""Sequences transform function, such as converting word to id, adding bos tags to sequences, etc."""
sequences = [[self.preprocess(token) for token in seq] for seq in sequences]
if self.fix_len <= 0:
self.fix_len = max(len(token) for seq in sequences for token in seq)
if self.use_vocab:
sequences = [[[self.vocab[i] for i in token] for token in seq] for seq in sequences]
if self.bos:
sequences = [[[self.bos_index]] + seq for seq in sequences]
if self.eos:
sequences = [seq + [[self.eos_index]] for seq in sequences]
sequences = [
nn.pad_sequence([np.array(ids[:self.fix_len], dtype=np.int64) for ids in seq], self.pad_index, self.fix_len)
for seq in sequences
]
return sequences
|
tools/grit/grit/tool/xmb.py | zealoussnow/chromium | 14,668 | 11085157 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""The 'grit xmb' tool.
"""
from __future__ import print_function
import getopt
import os
import sys
from xml.sax import saxutils
import six
from grit import grd_reader
from grit import lazy_re
from grit import tclib
from grit import util
from grit.tool import interface
# Used to collapse presentable content to determine if
# xml:space="preserve" is needed.
_WHITESPACES_REGEX = lazy_re.compile(r'\s\s*')
# See XmlEscape below.
_XML_QUOTE_ESCAPES = {
u"'": u''',
u'"': u'"',
}
def _XmlEscape(s):
"""Returns text escaped for XML in a way compatible with Google's
internal Translation Console tool. May be used for attributes as
well as for contents.
"""
return saxutils.escape(six.text_type(s), _XML_QUOTE_ESCAPES).encode('utf-8')
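# Example (illustrative): markup characters and both quote styles are escaped,
# and the result comes back as UTF-8 bytes.
#
#   _XmlEscape('Save & "quit"')   # -> b'Save &amp; &quot;quit&quot;'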
def _WriteAttribute(file, name, value):
"""Writes an XML attribute to the specified file.
Args:
file: file to write to
name: name of the attribute
value: (unescaped) value of the attribute
"""
name = name.encode('utf-8')
if value:
file.write(b' %s="%s"' % (name, _XmlEscape(value)))
def _WriteMessage(file, message):
presentable_content = message.GetPresentableContent()
assert (isinstance(presentable_content, six.string_types) or
(len(message.parts) == 1 and
           isinstance(message.parts[0], tclib.Placeholder)))
preserve_space = presentable_content != _WHITESPACES_REGEX.sub(
u' ', presentable_content.strip())
file.write(b'<msg')
_WriteAttribute(file, 'desc', message.GetDescription())
_WriteAttribute(file, 'id', message.GetId())
_WriteAttribute(file, 'meaning', message.GetMeaning())
if preserve_space:
_WriteAttribute(file, 'xml:space', 'preserve')
file.write(b'>')
if not preserve_space:
file.write(b'\n ')
parts = message.GetContent()
for part in parts:
if isinstance(part, tclib.Placeholder):
file.write(b'<ph')
_WriteAttribute(file, 'name', part.GetPresentation())
file.write(b'><ex>')
file.write(_XmlEscape(part.GetExample()))
file.write(b'</ex>')
file.write(_XmlEscape(part.GetOriginal()))
file.write(b'</ph>')
else:
file.write(_XmlEscape(part))
if not preserve_space:
file.write(b'\n')
file.write(b'</msg>\n')
def WriteXmbFile(file, messages):
"""Writes the given grit.tclib.Message items to the specified open
file-like object in the XMB format.
"""
file.write(b"""<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE messagebundle [
<!ELEMENT messagebundle (msg)*>
<!ATTLIST messagebundle class CDATA #IMPLIED>
<!ELEMENT msg (#PCDATA|ph|source)*>
<!ATTLIST msg id CDATA #IMPLIED>
<!ATTLIST msg seq CDATA #IMPLIED>
<!ATTLIST msg name CDATA #IMPLIED>
<!ATTLIST msg desc CDATA #IMPLIED>
<!ATTLIST msg meaning CDATA #IMPLIED>
<!ATTLIST msg obsolete (obsolete) #IMPLIED>
<!ATTLIST msg xml:space (default|preserve) "default">
<!ATTLIST msg is_hidden CDATA #IMPLIED>
<!ELEMENT source (#PCDATA)>
<!ELEMENT ph (#PCDATA|ex)*>
<!ATTLIST ph name CDATA #REQUIRED>
<!ELEMENT ex (#PCDATA)>
]>
<messagebundle>
""")
for message in messages:
_WriteMessage(file, message)
file.write(b'</messagebundle>')
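# Minimal usage sketch (an editorial example; grit normally calls this through
# OutputXmb below). Writing an empty message list still produces a well-formed
# bundle with the DTD header:
#   import io
#   buf = io.BytesIO()
#   WriteXmbFile(buf, [])
#   assert buf.getvalue().endswith(b'</messagebundle>')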
class OutputXmb(interface.Tool):
"""Outputs all translateable messages in the .grd input file to an
.xmb file, which is the format used to give source messages to
Google's internal Translation Console tool. The format could easily
be used for other systems.
Usage: grit xmb [-i|-h] [-l LIMITFILE] OUTPUTPATH
OUTPUTPATH is the path you want to output the .xmb file to.
The -l option can be used to output only some of the resources to the .xmb file.
LIMITFILE is the path to a file that is used to limit the items output to the
xmb file. If the filename extension is .grd, the file must be a .grd file
and the tool only output the contents of nodes from the input file that also
exist in the limit file (as compared on the 'name' attribute). Otherwise it must
contain a list of the IDs that output should be limited to, one ID per line, and
the tool will only output nodes with 'name' attributes that match one of the
IDs.
The -i option causes 'grit xmb' to output an "IDs only" file instead of an XMB
file. The "IDs only" file contains the message ID of each message that would
normally be output to the XMB file, one message ID per line. It is designed for
use with the 'grit transl2tc' tool's -l option.
Other options:
-D NAME[=VAL] Specify a C-preprocessor-like define NAME with optional
value VAL (defaults to 1) which will be used to control
conditional inclusion of resources.
-E NAME=VALUE Set environment variable NAME to VALUE (within grit).
"""
# The different output formats supported by this tool
FORMAT_XMB = 0
FORMAT_IDS_ONLY = 1
def __init__(self, defines=None):
super(OutputXmb, self).__init__()
self.format = self.FORMAT_XMB
self.defines = defines or {}
def ShortDescription(self):
    return 'Exports all translatable messages into an XMB file.'
def Run(self, opts, args):
os.environ['cwd'] = os.getcwd()
self.SetOptions(opts)
limit_file = None
limit_is_grd = False
limit_file_dir = None
own_opts, args = getopt.getopt(args, 'l:D:ih', ('help',))
for key, val in own_opts:
if key == '-l':
limit_file = open(val, 'r')
limit_file_dir = util.dirname(val)
if not len(limit_file_dir):
limit_file_dir = '.'
limit_is_grd = os.path.splitext(val)[1] == '.grd'
elif key == '-i':
self.format = self.FORMAT_IDS_ONLY
elif key == '-D':
name, val = util.ParseDefine(val)
self.defines[name] = val
elif key == '-E':
(env_name, env_value) = val.split('=', 1)
os.environ[env_name] = env_value
elif key == '--help':
self.ShowUsage()
sys.exit(0)
if not len(args) == 1:
print('grit xmb takes exactly one argument, the path to the XMB file '
'to output.')
return 2
xmb_path = args[0]
res_tree = grd_reader.Parse(opts.input, debug=opts.extra_verbose, defines=self.defines)
res_tree.SetOutputLanguage('en')
res_tree.SetDefines(self.defines)
res_tree.OnlyTheseTranslations([])
res_tree.RunGatherers()
with open(xmb_path, 'wb') as output_file:
self.Process(
res_tree, output_file, limit_file, limit_is_grd, limit_file_dir)
if limit_file:
limit_file.close()
print("Wrote %s" % xmb_path)
def Process(self, res_tree, output_file, limit_file=None, limit_is_grd=False,
dir=None):
"""Writes a document with the contents of res_tree into output_file,
limiting output to the IDs specified in limit_file, which is a GRD file if
limit_is_grd is true, otherwise a file with one ID per line.
The format of the output document depends on this object's format attribute.
It can be FORMAT_XMB or FORMAT_IDS_ONLY.
The FORMAT_IDS_ONLY format causes this function to write just a list
of the IDs of all messages that would have been added to the XMB file, one
ID per line.
The FORMAT_XMB format causes this function to output the (default) XMB
format.
Args:
res_tree: base.Node()
output_file: file open for writing
limit_file: None or file open for reading
limit_is_grd: True | False
dir: Directory of the limit file
"""
if limit_file:
if limit_is_grd:
limit_list = []
limit_tree = grd_reader.Parse(limit_file,
dir=dir,
debug=self.o.extra_verbose)
for node in limit_tree:
if 'name' in node.attrs:
limit_list.append(node.attrs['name'])
else:
# Not a GRD file, so it's just a file with one ID per line
limit_list = [item.strip() for item in limit_file.read().split('\n')]
ids_already_done = {}
messages = []
for node in res_tree:
if (limit_file and
not ('name' in node.attrs and node.attrs['name'] in limit_list)):
continue
if not node.IsTranslateable():
continue
for clique in node.GetCliques():
if not clique.IsTranslateable():
continue
if not clique.GetMessage().GetRealContent():
continue
# Some explanation is in order here. Note that we can have
# many messages with the same ID.
#
# The way we work around this is to maintain a list of cliques
# per message ID (in the UberClique) and select the "best" one
# (the first one that has a description, or an arbitrary one
# if there is no description) for inclusion in the XMB file.
# The translations are all going to be the same for messages
# with the same ID, although the way we replace placeholders
# might be slightly different.
id = clique.GetMessage().GetId()
if id in ids_already_done:
continue
ids_already_done[id] = 1
message = node.UberClique().BestClique(id).GetMessage()
messages += [message]
# Ensure a stable order of messages, to help regression testing.
messages.sort(key=lambda x:x.GetId())
if self.format == self.FORMAT_IDS_ONLY:
# We just print the list of IDs to the output file.
for msg in messages:
        output_file.write(msg.GetId().encode('utf-8'))
        output_file.write(b'\n')
else:
assert self.format == self.FORMAT_XMB
WriteXmbFile(output_file, messages)
|
src/python_minifier/rename/mapper.py | tudstlennkozh/python-minifier | 301 | 11085167 | """
For each node in an AST set the namespace to use for name binding and resolution
"""
import ast
from python_minifier.rename.util import is_namespace
from python_minifier.util import is_ast_node
def add_parent_to_arguments(arguments, func):
arguments.parent = func
arguments.namespace = func
for arg in getattr(arguments, 'posonlyargs', []) + arguments.args:
add_parent(arg, arguments, func)
if hasattr(arg, 'annotation') and arg.annotation is not None:
add_parent(arg.annotation, arguments, func.namespace)
if hasattr(arguments, 'kwonlyargs'):
for arg in arguments.kwonlyargs:
add_parent(arg, arguments, func)
if arg.annotation is not None:
add_parent(arg.annotation, arguments, func.namespace)
for node in arguments.kw_defaults:
if node is not None:
add_parent(node, arguments, func.namespace)
for node in arguments.defaults:
add_parent(node, arguments, func.namespace)
if arguments.vararg:
if hasattr(arguments, 'varargannotation') and arguments.varargannotation is not None:
add_parent(arguments.varargannotation, arguments, func.namespace)
elif isinstance(arguments.vararg, str):
pass
else:
add_parent(arguments.vararg, arguments, func)
if arguments.kwarg:
if hasattr(arguments, 'kwargannotation') and arguments.kwargannotation is not None:
add_parent(arguments.kwargannotation, arguments, func.namespace)
elif isinstance(arguments.kwarg, str):
pass
else:
add_parent(arguments.kwarg, arguments, func)
def add_parent_to_functiondef(functiondef):
"""
Add correct parent and namespace attributes to functiondef nodes
"""
if functiondef.args is not None:
add_parent_to_arguments(functiondef.args, func=functiondef)
for node in functiondef.body:
add_parent(node, parent=functiondef, namespace=functiondef)
for node in functiondef.decorator_list:
add_parent(node, parent=functiondef, namespace=functiondef.namespace)
if hasattr(functiondef, 'returns') and functiondef.returns is not None:
add_parent(functiondef.returns, parent=functiondef, namespace=functiondef.namespace)
def add_parent_to_classdef(classdef):
"""
Add correct parent and namespace attributes to classdef nodes
"""
for node in classdef.bases:
add_parent(node, parent=classdef, namespace=classdef.namespace)
if hasattr(classdef, 'keywords'):
for node in classdef.keywords:
add_parent(node, parent=classdef, namespace=classdef.namespace)
if hasattr(classdef, 'starargs') and classdef.starargs is not None:
add_parent(classdef.starargs, parent=classdef, namespace=classdef.namespace)
if hasattr(classdef, 'kwargs') and classdef.kwargs is not None:
add_parent(classdef.kwargs, parent=classdef, namespace=classdef.namespace)
for node in classdef.body:
add_parent(node, parent=classdef, namespace=classdef)
for node in classdef.decorator_list:
add_parent(node, parent=classdef, namespace=classdef.namespace)
def add_parent(node, parent=None, namespace=None):
"""
Add a parent attribute to child nodes
Add a namespace attribute to child nodes
:param node: The tree to add parent and namespace properties to
:type node: :class:`ast.AST`
:param parent: The parent node of this node
:type parent: :class:`ast.AST`
:param namespace: The namespace Node that this node is in
    :type namespace: ast.Lambda or ast.Module or ast.FunctionDef or ast.AsyncFunctionDef or ast.ClassDef or ast.DictComp or ast.SetComp or ast.ListComp or ast.GeneratorExp
"""
node.parent = parent if parent is not None else node
node.namespace = namespace if namespace is not None else node
if is_namespace(node):
node.bindings = []
node.global_names = set()
node.nonlocal_names = set()
if is_ast_node(node, (ast.FunctionDef, 'AsyncFunctionDef')):
add_parent_to_functiondef(node)
elif isinstance(node, ast.Lambda):
add_parent_to_arguments(node.args, func=node)
add_parent(node.body, parent=node, namespace=node)
elif isinstance(node, ast.ClassDef):
add_parent_to_classdef(node)
else:
for child in ast.iter_child_nodes(node):
add_parent(child, parent=node, namespace=node)
return
if isinstance(node, ast.comprehension):
add_parent(node.target, parent=node, namespace=namespace)
add_parent(node.iter, parent=node, namespace=namespace)
for if_ in node.ifs:
add_parent(if_, parent=node, namespace=namespace)
return
if isinstance(node, ast.Global):
namespace.global_names.update(node.names)
if is_ast_node(node, 'Nonlocal'):
namespace.nonlocal_names.update(node.names)
for child in ast.iter_child_nodes(node):
add_parent(child, parent=node, namespace=namespace)
def add_namespace(module):
add_parent(module)
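# Usage sketch (an assumption about how callers drive this module, based only
# on the functions above): parse a module, annotate it, then read the added
# parent/namespace attributes off any node.
#
#   tree = ast.parse('def f(x):\n    return x + 1\n')
#   add_namespace(tree)
#   func = tree.body[0]
#   assert func.namespace is tree          # the function binds in the module
#   assert func.body[0].namespace is func  # its body resolves names in the function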
|
venv/lib/python3.8/site-packages/soupsieve/css_types.py | willBear/willBear-Fundamental_Analysis | 124 | 11085170 | """CSS selector structure items."""
import copyreg
from collections.abc import Hashable, Mapping
__all__ = (
'Selector',
'SelectorNull',
'SelectorTag',
'SelectorAttribute',
'SelectorContains',
'SelectorNth',
'SelectorLang',
'SelectorList',
'Namespaces',
'CustomSelectors'
)
SEL_EMPTY = 0x1
SEL_ROOT = 0x2
SEL_DEFAULT = 0x4
SEL_INDETERMINATE = 0x8
SEL_SCOPE = 0x10
SEL_DIR_LTR = 0x20
SEL_DIR_RTL = 0x40
SEL_IN_RANGE = 0x80
SEL_OUT_OF_RANGE = 0x100
SEL_DEFINED = 0x200
SEL_PLACEHOLDER_SHOWN = 0x400
class Immutable(object):
"""Immutable."""
__slots__ = ('_hash',)
def __init__(self, **kwargs):
"""Initialize."""
temp = []
for k, v in kwargs.items():
temp.append(type(v))
temp.append(v)
super(Immutable, self).__setattr__(k, v)
super(Immutable, self).__setattr__('_hash', hash(tuple(temp)))
@classmethod
def __base__(cls):
"""Get base class."""
return cls
def __eq__(self, other):
"""Equal."""
return (
isinstance(other, self.__base__()) and
all([getattr(other, key) == getattr(self, key) for key in self.__slots__ if key != '_hash'])
)
def __ne__(self, other):
"""Equal."""
return (
not isinstance(other, self.__base__()) or
any([getattr(other, key) != getattr(self, key) for key in self.__slots__ if key != '_hash'])
)
def __hash__(self):
"""Hash."""
return self._hash
def __setattr__(self, name, value):
"""Prevent mutability."""
raise AttributeError("'{}' is immutable".format(self.__class__.__name__))
def __repr__(self): # pragma: no cover
"""Representation."""
return "{}({})".format(
self.__base__(), ', '.join(["{}={!r}".format(k, getattr(self, k)) for k in self.__slots__[:-1]])
)
__str__ = __repr__
class ImmutableDict(Mapping):
"""Hashable, immutable dictionary."""
def __init__(self, *args, **kwargs):
"""Initialize."""
arg = args[0] if args else kwargs
is_dict = isinstance(arg, dict)
if (
is_dict and not all([isinstance(v, Hashable) for v in arg.values()]) or
not is_dict and not all([isinstance(k, Hashable) and isinstance(v, Hashable) for k, v in arg])
):
raise TypeError('All values must be hashable')
self._d = dict(*args, **kwargs)
self._hash = hash(tuple([(type(x), x, type(y), y) for x, y in sorted(self._d.items())]))
def __iter__(self):
"""Iterator."""
return iter(self._d)
def __len__(self):
"""Length."""
return len(self._d)
def __getitem__(self, key):
"""Get item: `namespace['key']`."""
return self._d[key]
def __hash__(self):
"""Hash."""
return self._hash
def __repr__(self): # pragma: no cover
"""Representation."""
return "{!r}".format(self._d)
__str__ = __repr__
class Namespaces(ImmutableDict):
"""Namespaces."""
def __init__(self, *args, **kwargs):
"""Initialize."""
# If there are arguments, check the first index.
# `super` should fail if the user gave multiple arguments,
# so don't bother checking that.
arg = args[0] if args else kwargs
is_dict = isinstance(arg, dict)
if is_dict and not all([isinstance(k, str) and isinstance(v, str) for k, v in arg.items()]):
raise TypeError('Namespace keys and values must be Unicode strings')
elif not is_dict and not all([isinstance(k, str) and isinstance(v, str) for k, v in arg]):
raise TypeError('Namespace keys and values must be Unicode strings')
super(Namespaces, self).__init__(*args, **kwargs)
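# Usage sketch (assumed typical soupsieve input, not taken from this file):
# keys and values must be plain strings, and the mapping is hashable and
# read-only once built.
#
#   ns = Namespaces({'svg': 'http://www.w3.org/2000/svg'})
#   ns['svg']   # -> 'http://www.w3.org/2000/svg'
#   hash(ns)    # works, unlike a plain dict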
class CustomSelectors(ImmutableDict):
"""Custom selectors."""
def __init__(self, *args, **kwargs):
"""Initialize."""
# If there are arguments, check the first index.
# `super` should fail if the user gave multiple arguments,
# so don't bother checking that.
arg = args[0] if args else kwargs
is_dict = isinstance(arg, dict)
if is_dict and not all([isinstance(k, str) and isinstance(v, str) for k, v in arg.items()]):
raise TypeError('CustomSelectors keys and values must be Unicode strings')
elif not is_dict and not all([isinstance(k, str) and isinstance(v, str) for k, v in arg]):
raise TypeError('CustomSelectors keys and values must be Unicode strings')
super(CustomSelectors, self).__init__(*args, **kwargs)
class Selector(Immutable):
"""Selector."""
__slots__ = (
'tag', 'ids', 'classes', 'attributes', 'nth', 'selectors',
'relation', 'rel_type', 'contains', 'lang', 'flags', '_hash'
)
def __init__(
self, tag, ids, classes, attributes, nth, selectors,
relation, rel_type, contains, lang, flags
):
"""Initialize."""
super(Selector, self).__init__(
tag=tag,
ids=ids,
classes=classes,
attributes=attributes,
nth=nth,
selectors=selectors,
relation=relation,
rel_type=rel_type,
contains=contains,
lang=lang,
flags=flags
)
class SelectorNull(Immutable):
"""Null Selector."""
def __init__(self):
"""Initialize."""
super(SelectorNull, self).__init__()
class SelectorTag(Immutable):
"""Selector tag."""
__slots__ = ("name", "prefix", "_hash")
def __init__(self, name, prefix):
"""Initialize."""
super(SelectorTag, self).__init__(
name=name,
prefix=prefix
)
class SelectorAttribute(Immutable):
"""Selector attribute rule."""
__slots__ = ("attribute", "prefix", "pattern", "xml_type_pattern", "_hash")
def __init__(self, attribute, prefix, pattern, xml_type_pattern):
"""Initialize."""
super(SelectorAttribute, self).__init__(
attribute=attribute,
prefix=prefix,
pattern=pattern,
xml_type_pattern=xml_type_pattern
)
class SelectorContains(Immutable):
"""Selector contains rule."""
__slots__ = ("text", "_hash")
def __init__(self, text):
"""Initialize."""
super(SelectorContains, self).__init__(
text=text
)
class SelectorNth(Immutable):
"""Selector nth type."""
__slots__ = ("a", "n", "b", "of_type", "last", "selectors", "_hash")
def __init__(self, a, n, b, of_type, last, selectors):
"""Initialize."""
super(SelectorNth, self).__init__(
a=a,
n=n,
b=b,
of_type=of_type,
last=last,
selectors=selectors
)
class SelectorLang(Immutable):
"""Selector language rules."""
__slots__ = ("languages", "_hash",)
def __init__(self, languages):
"""Initialize."""
super(SelectorLang, self).__init__(
languages=tuple(languages)
)
def __iter__(self):
"""Iterator."""
return iter(self.languages)
def __len__(self): # pragma: no cover
"""Length."""
return len(self.languages)
def __getitem__(self, index): # pragma: no cover
"""Get item."""
return self.languages[index]
class SelectorList(Immutable):
"""Selector list."""
__slots__ = ("selectors", "is_not", "is_html", "_hash")
def __init__(self, selectors=tuple(), is_not=False, is_html=False):
"""Initialize."""
super(SelectorList, self).__init__(
selectors=tuple(selectors),
is_not=is_not,
is_html=is_html
)
def __iter__(self):
"""Iterator."""
return iter(self.selectors)
def __len__(self):
"""Length."""
return len(self.selectors)
def __getitem__(self, index):
"""Get item."""
return self.selectors[index]
def _pickle(p):
return p.__base__(), tuple([getattr(p, s) for s in p.__slots__[:-1]])
def pickle_register(obj):
"""Allow object to be pickled."""
copyreg.pickle(obj, _pickle)
pickle_register(Selector)
pickle_register(SelectorNull)
pickle_register(SelectorTag)
pickle_register(SelectorAttribute)
pickle_register(SelectorContains)
pickle_register(SelectorNth)
pickle_register(SelectorLang)
pickle_register(SelectorList)
|
LeetCode/python3/1048.py | ZintrulCre/LeetCode_Archiver | 279 | 11085196 | class Solution:
def longestStrChain(self, words: List[str]) -> int:
dp = {}
for word in sorted(words, key=len):
dp[word] = 1
for i in range(len(word)):
temp = word[:i] + word[i + 1:]
dp[word] = max(dp.get(word, 0), dp.get(temp, 0) + 1)
return max(dp.values() or [1])
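# Example (hypothetical driver; the LeetCode harness supplies List from typing):
#   Solution().longestStrChain(["a", "b", "ba", "bca", "bda", "bdca"])  # -> 4
#   One longest chain is "a" -> "ba" -> "bda" -> "bdca".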
|
unittest/test_Syntax.py | baidu/broc | 127 | 11085198 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
#
# Copyright (c) 2015 Baidu.com, Inc. All Rights Reserved
#
################################################################################
"""
test case for Syntax
Authors: liruihao(<EMAIL>)
Date: 2015/11/11 16:24:42
"""
import os
import sys
import tempfile
import unittest
broc_path = os.path.realpath(os.path.join(os.path.realpath(__file__), '..', '..'))
sys.path.insert(0, broc_path)
from dependency import Syntax
from dependency import Target
from dependency import Source
from dependency import SyntaxTag
from dependency import Environment
from util import Function
from dependency import BrocModule_pb2
from dependency import BrocTree
from util import Log
class TestSyntax(unittest.TestCase):
"""
unit test for Syntax
"""
def setUp(self):
"""
init
"""
sys.argv = ['NOT PLANISH']
module = BrocModule_pb2.Module()
module.name = 'broc'
module.module_cvspath = 'baidu/broc'
module.broc_cvspath = 'baidu/broc/BROC'
module.is_main = True
module.repo_kind = BrocModule_pb2.Module.GIT
module.revision = "1234"
module.last_changed_rev = "1236"
module.dep_level = 0
#get home dir
home = os.environ['HOME']
module.workspace = '%s/unittest_broc/workspace' % home
module.root_path = '%s/unittest_broc/workspace/baidu/broc' % home
module.url = 'https://github.com/baidu/broc'
module.br_kind = BrocModule_pb2.Module.BRANCH
module.br_name = 'trunk'
#module.commit_id = '5d9819900c2781873aa0ffce285d5d3e75b072a8'
self._module = module
Function.RunCommand("mkdir -p %s" % module.root_path, ignore_stderr_when_ok = True)
Function.RunCommand("touch %s/hello.cpp" % module.root_path, ignore_stderr_when_ok = True)
Function.RunCommand("touch %s/hello.h" % module.root_path, ignore_stderr_when_ok = True)
self._env = Environment.Environment(module)
Environment.SetCurrent(self._env)
def tearDown(self):
"""
teardown
"""
Function.RunCommand("rm -rf ~/unittest_broc", ignore_stderr_when_ok = True)
def test_COMPILER_PATH(self):
"""
test Syntax.COMPILER_PATH
"""
Syntax.COMPILER_PATH('/usr/bin/')
self.assertEqual('/usr/bin/', self._env.CompilerDir())
def test_CPPFLAGS(self):
"""
test Syntax.CPPFLAGS
"""
#test case of debug mode
self._env._build_mode = 'debug'
Syntax.CPPFLAGS("-g -Wall", "-g -O2")
self.assertTrue("-g -Wall" in self._env._g_cppflags.V() \
and "-g -O2" not in self._env._g_cppflags.V())
        #test case of multiple CPPFLAGS calls
Syntax.CPPFLAGS("-W -Wshadow", "-g -O2")
Syntax.CPPFLAGS("-finline-functions", "-g -O2")
self.assertTrue("-g -Wall" in self._env._g_cppflags.V() \
and "-g -O2" not in self._env._g_cppflags.V())
self.assertTrue("-W -Wshadow" in self._env._g_cppflags.V() \
and "-g -O2" not in self._env._g_cppflags.V())
self.assertTrue("-finline-functions" in self._env._g_cppflags.V() \
and "-g -O2" not in self._env._g_cppflags.V())
#reset g_cppflags
self._env._g_cppflags = SyntaxTag.TagCPPFLAGS()
#test case of release mode
self._env._build_mode = 'release'
Syntax.CPPFLAGS("-g -Wall", "-g -O2")
self.assertTrue("-g -O2" in self._env._g_cppflags.V() \
and "-g -Wall" not in self._env._g_cppflags.V())
def test_CppFlags(self):
"""
test Syntax.CppFlags
"""
#test case of debug mode
self._env._build_mode = 'debug'
tag = Syntax.CppFlags("-g -Wall", "-g -O2")
self.assertTrue("-g -Wall" in tag._v and "-g -O2" not in tag.V())
#test case of release mode
self._env._build_mode = 'release'
tag2 = Syntax.CppFlags("-g -Wall", "-g -O2")
self.assertTrue("-g -O2" in tag2.V() and "-g -Wall" not in tag2.V())
def test_CFLAGS(self):
"""
test Syntax.CFLAGS
"""
#test case of debug mode
self._env._build_mode = 'debug'
Syntax.CFLAGS("-g -Wall", "-g -O2")
self.assertTrue("-g -Wall" in self._env._g_cflags.V() \
and "-g -O2" not in self._env._g_cflags.V())
        #test case of multiple CFLAGS calls
Syntax.CFLAGS("-W -Wshadow", "-g -O2")
Syntax.CFLAGS("-finline-functions", "-g -O2")
self.assertTrue("-g -Wall" in self._env._g_cflags.V() \
and "-g -O2" not in self._env._g_cflags.V())
self.assertTrue("-W -Wshadow" in self._env._g_cflags.V() \
and "-g -O2" not in self._env._g_cflags.V())
self.assertTrue("-finline-functions" in self._env._g_cflags.V() \
and "-g -O2" not in self._env._g_cflags.V())
#reset g_cflags
self._env._g_cflags = SyntaxTag.TagCFLAGS()
#test case of release mode
self._env._build_mode = 'release'
Syntax.CFLAGS("-g -Wall", "-g -O2")
self.assertTrue("-g -O2" in self._env._g_cflags.V() \
and "-g -Wall" not in self._env._g_cflags.V())
def test_CFlags(self):
"""
test Syntax.CFlags
"""
#test case of debug mode
Environment.SetCurrent(self._env)
self._env._build_mode = 'debug'
tag = Syntax.CFlags("-g -Wall", "-g -O2")
self.assertTrue("-g -Wall" in tag._v and "-g -O2" not in tag.V())
#test case of release mode
self._env._build_mode = 'release'
self._env._build_mode = 'release'
tag2 = Syntax.CFlags("-g -Wall", "-g -O2")
self.assertTrue("-g -O2" in tag2.V() and "-g -Wall" not in tag2.V())
def test_CXXFLAGS(self):
"""
test Syntax.CXXFLAGS
"""
#test case of debug mode
Environment.SetCurrent(self._env)
self._env._build_mode = 'debug'
Syntax.CXXFLAGS("-g -Wall", "-g -O2")
self.assertTrue("-g -Wall" in self._env._g_cxxflags.V() \
and "-g -O2" not in self._env._g_cxxflags.V())
        #test case of multiple CXXFLAGS calls
Syntax.CXXFLAGS("-W -Wshadow", "-g -O2")
Syntax.CXXFLAGS("-finline-functions", "-g -O2")
self.assertTrue("-g -Wall" in self._env._g_cxxflags.V() \
and "-g -O2" not in self._env._g_cxxflags.V())
self.assertTrue("-W -Wshadow" in self._env._g_cxxflags.V() \
and "-g -O2" not in self._env._g_cxxflags.V())
self.assertTrue("-finline-functions" in self._env._g_cxxflags.V() \
and "-g -O2" not in self._env._g_cxxflags.V())
#reset g_cxxflags
self._env._g_cxxflags = SyntaxTag.TagCXXFLAGS()
#test case of release mode
self._env._build_mode = 'release'
Syntax.CXXFLAGS("-g -Wall", "-g -O2")
self.assertTrue("-g -O2" in self._env._g_cxxflags.V() \
and "-g -Wall" not in self._env._g_cxxflags.V())
def test_CxxFlags(self):
"""
test Syntax.CxxFlags
"""
#test case of debug mode
Environment.SetCurrent(self._env)
self._env._build_mode = 'debug'
tag = Syntax.CxxFlags("-g -Wall", "-g -O2")
self.assertTrue("-g -Wall" in tag._v and "-g -O2" not in tag.V())
#test case of release mode
self._env._build_mode = 'release'
tag2 = Syntax.CxxFlags("-g -Wall", "-g -O2")
self.assertTrue("-g -O2" in tag2.V() and "-g -Wall" not in tag2.V())
def test_CONVERT_OUT(self):
"""
test Syntax.CONVERT_OUT
"""
self.assertEqual("broc_out/baidu/broc/hello.h", Syntax.CONVERT_OUT("./hello.h"))
        #test that CONVERT_OUT raises Syntax.NotInSelfModuleError for paths outside the module
flag = False
try:
Syntax.CONVERT_OUT("../hello.h")
except Syntax.NotInSelfModuleError as er:
flag = True
self.assertTrue(flag)
def test_INCLUDE(self):
"""
test Syntax.INCLUDE
"""
Environment.SetCurrent(self._env)
# arg starts with $WORKSPACE
Syntax.INCLUDE("$WORKSPACE/baidu/broc")
self.assertTrue("baidu/broc" in self._env.IncludePaths().V())
self.assertTrue("baidu/agile" not in self._env.IncludePaths().V())
# arg starts with broc_out
Syntax.INCLUDE("broc_out/baidu/broc")
self.assertTrue("broc_out/baidu/broc" in self._env.IncludePaths().V())
self.assertTrue("broc_out/baidu/agile" not in self._env.IncludePaths().V())
# arg is abs path
Syntax.INCLUDE("/opt/include")
self.assertTrue("/opt/include" in self._env.IncludePaths().V())
self.assertTrue("/home/include" not in self._env.IncludePaths().V())
# arg in self module
Syntax.INCLUDE("./include")
incpath = os.path.normpath(os.path.join(self._module.workspace, \
self._module.module_cvspath, "include"))
self.assertTrue(incpath in self._env.IncludePaths().V())
def test_Include(self):
"""
test Syntax.Include
"""
Environment.SetCurrent(self._env)
# arg starts with $WORKSPACE
tag = Syntax.Include("$WORKSPACE/baidu/broc")
self.assertTrue("baidu/broc" in tag.V())
self.assertTrue("baidu/agile" not in tag.V())
# arg starts with broc_out
tag = Syntax.Include("broc_out/baidu/broc")
self.assertTrue("broc_out/baidu/broc" in tag.V())
self.assertTrue("broc_out/baidu/agile" not in tag.V())
# arg is abs path
tag = Syntax.Include("/opt/include")
self.assertTrue("/opt/include" in tag.V())
self.assertTrue("/home/include" not in tag.V())
# arg in self module
tag = Syntax.Include("./include")
#incpath = os.path.normpath(os.path.join(self._module.workspace, \
# self._module.module_cvspath, "include"))
incpath=os.path.join(self._module.module_cvspath, 'include')
self.assertTrue(incpath in tag.V())
def test_Libs(self):
"""
test Syntax.Libs
"""
#one lib in arg
tag = Syntax.Libs("$OUT_ROOT/baidu/broc/lib/libbroc.a")
self.assertTrue("broc_out/baidu/broc/lib/libbroc.a" in tag.V())
        #more than one lib in the args
flag = True
tag = Syntax.Libs("$OUT_ROOT/baidu/broc/lib/libbroc.a", \
"$OUT_ROOT/protobuf/lib/libprotobuf.a", \
"$OUT_ROOT/ccode/lib/libccode.a" \
"$OUT_ROOT/dict/lib/libdict.a")
lib_list = ["broc_out/baidu/broc/lib/libbroc.a", \
"broc_out/protobuf/lib/libprotobuf.a", \
"broc_out/ccode/lib/libccode.a" \
"broc_out/dict/lib/libdict.a"]
for lib in lib_list:
if lib not in tag.V():
flag = False
self.assertTrue(flag)
#arg not start with $OUT_ROOT
flag = False
try:
tag = Syntax.Libs("baidu/broc/lib/libbroc.a")
except Syntax.BrocArgumentIllegalError as ex:
flag = True
self.assertTrue(flag)
def test_LDFLAGS(self):
"""
test Syntax.LDFLAGS
"""
#test case of debug mode
Environment.SetCurrent(self._env)
self._env._build_mode = 'debug'
Syntax.LDFLAGS("-lpthread -lcrypto", "-lprotobuf -lpthread")
self.assertTrue("-lpthread -lcrypto" in self._env._g_linkflags.V() \
and "-lprotobuf -lpthread" not in self._env._g_linkflags.V())
#reset g_linkflags
self._env._g_linkflags = SyntaxTag.TagCPPFLAGS()
#test case of release mode
self._env._build_mode = 'release'
Syntax.LDFLAGS("-lpthread -lcrypto", "-lprotobuf -lpthread")
self.assertTrue("-lprotobuf -lpthread" in self._env._g_linkflags.V() \
and "-lpthread -lcrypto" not in self._env._g_linkflags.V())
def test_LDFlags(self):
"""
test Syntax.LDFlags
"""
#test case of debug mode
Environment.SetCurrent(self._env)
self._env._build_mode = 'debug'
tag = Syntax.LDFlags("-lpthread -lcrypto", "-lprotobuf -lpthread")
self.assertTrue("-lpthread -lcrypto" in tag.V() \
and "-lprotobuf -lpthread" not in tag.V())
#reset g_linkflags
self._env._g_linkflags = SyntaxTag.TagLDFLAGS()
#test case of release mode
self._env._build_mode = 'release'
tag = Syntax.LDFlags("-lpthread -lcrypto", "-lprotobuf -lpthread")
self.assertTrue("-lprotobuf -lpthread" in tag.V() \
and "-lpthread -lcrypto" not in tag.V())
def test_GLOB(self):
"""
test Syntax.GLOB
"""
#test case of one file
files = Syntax.GLOB("./*.cpp")
self.assertEqual(files, "hello.cpp")
        #test case of multiple files, which must be returned in lexicographical order
module = self._module
Function.RunCommand("touch %s/hello1.h" % module.root_path, ignore_stderr_when_ok = True)
Function.RunCommand("touch %s/hello2.h" % module.root_path, ignore_stderr_when_ok = True)
Function.RunCommand("touch %s/hello3.h" % module.root_path, ignore_stderr_when_ok = True)
Function.RunCommand("touch %s/hello10.h" % module.root_path, ignore_stderr_when_ok = True)
files = Syntax.GLOB("./*.h")
self.assertEqual(files, "hello.h hello1.h hello10.h hello2.h hello3.h")
#test case of files not in self module
Function.RunCommand("touch %s/../README" % module.root_path, ignore_stderr_when_ok = True)
flag = False
try:
Syntax.GLOB("../README")
except Syntax.NotInSelfModuleError as e:
flag = True
self.assertTrue(flag)
#test case of no such files
flag = False
try:
Syntax.GLOB("./just_test.cpp")
except Syntax.BrocArgumentIllegalError as e:
flag = True
self.assertTrue(flag)
def test_ParseNameAndArgs(self):
"""
test Syntax._ParseNameAndArgs
"""
#only has name
files, args = Syntax._ParseNameAndArgs("broc")
self.assertEqual(files, ["broc"])
self.assertEqual(args, [])
#more args
inctag = Syntax.Include("./ ./include")
cpptag = Syntax.CppFlags("-DDEBUG", "-DBROC")
ctag = Syntax.CFlags("-O2", "-O0")
cxxtag = Syntax.CxxFlags("-Werror", "-Wall")
files, args = Syntax._ParseNameAndArgs("./*.cpp", inctag)
self.assertEqual(files, ["./*.cpp"])
self.assertEqual(args, [inctag])
files, args = Syntax._ParseNameAndArgs("./*.cpp", inctag, cpptag)
self.assertEqual(files, ["./*.cpp"])
self.assertEqual(args, [inctag, cpptag])
files, args = Syntax._ParseNameAndArgs("./*.cpp", cpptag, ctag)
self.assertEqual(files, ["./*.cpp"])
self.assertEqual(args, [cpptag, ctag])
files, args = Syntax._ParseNameAndArgs("./*.cpp", "./*.c", cxxtag, ctag)
self.assertEqual(files, ["./*.cpp", "./*.c"])
self.assertEqual(args, [cxxtag, ctag])
files, args = Syntax._ParseNameAndArgs("./*.cpp", "./*.c", inctag, cpptag, cxxtag, ctag)
self.assertEqual(files, ["./*.cpp", "./*.c"])
self.assertEqual(args, [inctag, cpptag, cxxtag, ctag])
def test_Sources(self):
"""
test Syntax.Sources
"""
#get local flags tag
cpptags = Syntax.CppFlags("-DDEBUG_LOCAL", "-DRELEASE_LOCAL")
cxxtags = Syntax.CxxFlags("-Wwrite-strings", "-Wswitch")
ctags = Syntax.CFlags("-Wwrite-strings", "-Wswitch")
incflags = Syntax.Include("$WORKSPACE/baidu/bcloud")
tag = Syntax.Sources("hello.cpp", cpptags, cxxtags, ctags, incflags)
src = tag.V()[0]
Source.Source.Action(src)
self.assertEqual(src.cppflags, ["-DDEBUG_LOCAL"])
self.assertEqual(src.cxxflags, ["-Wwrite-strings"])
self.assertEqual(src.cflags, ["-Wwrite-strings"])
self.assertEqual(src.includes, [".", "broc_out", "baidu/bcloud"])
self.assertEqual(src.infile, "baidu/broc/hello.cpp")
def test_CreateSources(self):
"""
test Syntax._CreateSource
"""
#init env global flags
self._env._g_cppflags = SyntaxTag.TagCPPFLAGS()
self._env._g_cflags = SyntaxTag.TagCFLAGS()
self._env._g_cxxflags = SyntaxTag.TagCXXFLAGS()
self._env._g_incflags = SyntaxTag.TagINCLUDE()
self._env._g_incflags.AddV('. broc_out')
self._env._build_mode = 'debug'
#set local flags tag
cpptags = Syntax.CppFlags("-DDEBUG_LOCAL", "-DRELEASE_LOCAL")
cxxtags = Syntax.CxxFlags("-Wwrite-strings", "-Wswitch")
ctags = Syntax.CFlags("-Wwrite-strings", "-Wswitch")
incflag = Syntax.Include("$WORKSPACE/baidu/bcloud")
#no flags
src = Syntax._CreateSources("baidu/broc/hello.cpp", [])
Source.Source.Action(src)
self.assertEqual(src.cppflags, [])
self.assertEqual(src.cxxflags, [])
self.assertEqual(src.cflags, [])
self.assertEqual(src.includes, [".", "broc_out"])
self.assertEqual(src.infile, "baidu/broc/hello.cpp")
#only local flags
src = Syntax._CreateSources("baidu/broc/hello.cpp", \
[cpptags, cxxtags, ctags, incflag])
Source.Source.Action(src)
self.assertEqual(src.cppflags, ["-DDEBUG_LOCAL"])
self.assertEqual(src.cxxflags, ["-Wwrite-strings"])
self.assertEqual(src.cflags, ["-Wwrite-strings"])
self.assertEqual(src.includes, [".", "broc_out", "baidu/bcloud"])
#only global flags
Syntax.CFLAGS("-Werror -O2", "-W")
Syntax.CXXFLAGS("-Werror -O2", "-W")
Syntax.CPPFLAGS("-DDEBUG", "-DRELEASE")
Syntax.INCLUDE("$WORKSPACE/baidu/broc")
src = Syntax._CreateSources("baidu/broc/hello.cpp", [])
Source.Source.Action(src)
self.assertEqual(src.cppflags, ["-DDEBUG"])
self.assertEqual(src.cxxflags, ["-Werror -O2"])
self.assertEqual(src.cflags, ["-Werror -O2"])
self.assertEqual(src.includes, [".", "broc_out", "baidu/broc"])
self.assertEqual(src.infile, "baidu/broc/hello.cpp")
        #additional values for the global flags
Syntax.CFLAGS("-Wall", "-Wall")
Syntax.CXXFLAGS("-Wall", "-Wall")
Syntax.CPPFLAGS("-DBROC", "-DBROC")
src = Syntax._CreateSources("baidu/broc/hello.cpp", [])
Source.Source.Action(src)
self.assertEqual(src.cppflags, ["-DDEBUG", "-DBROC"])
self.assertEqual(src.cxxflags, ["-Werror -O2", "-Wall"])
self.assertEqual(src.cflags, ["-Werror -O2", "-Wall"])
self.assertEqual(src.includes, [".", "broc_out", "baidu/broc"])
self.assertEqual(src.infile, "baidu/broc/hello.cpp")
        #local flags override global flags
src = Syntax._CreateSources("baidu/broc/hello.cpp", [cpptags, cxxtags])
Source.Source.Action(src)
self.assertEqual(src.cppflags, ["-DDEBUG_LOCAL"])
self.assertEqual(src.cxxflags, ["-Wwrite-strings"])
self.assertEqual(src.cflags, ["-Werror -O2", "-Wall"])
self.assertEqual(src.includes, [".", "broc_out", "baidu/broc"])
self.assertEqual(src.infile, "baidu/broc/hello.cpp")
def test_APPLICATION(self):
"""
test Syntax.APPLICATION
"""
#set local flags tag
ldflag = Syntax.LDFlags("-lpthread", "-lcrypto")
libs = Syntax.Libs("$OUT_ROOT/baidu/broc/libhello.a")
cpptags = Syntax.CppFlags("-DBROC", "-DRELEASE")
#set global flags
Syntax.LDFLAGS("-lmcpack", "-lrt")
src = Syntax.Sources("hello.cpp")
        #an invalid application name
flag = False
try:
Syntax.APPLICATION("^*^&*!*$^", src)
except Syntax.BrocArgumentIllegalError as ex:
flag = True
self.assertTrue(flag)
        #invalid args for the application
flag = False
try:
Syntax.APPLICATION("hello", src, cpptags)
except Syntax.BrocArgumentIllegalError as ex:
flag = True
self.assertTrue(flag)
#global ldflags
Syntax.APPLICATION("hello", src)
app = self._env.Targets()[0]
app.Action()
self.assertEqual(app.link_options, ["-lmcpack"])
self.assertEqual(app.tag_libs.V(), [])
        #two targets with the same name
flag = False
try:
Syntax.APPLICATION("hello", src)
except Syntax.BrocArgumentIllegalError as ex:
flag = True
self.assertTrue(flag)
#local ldflags
Syntax.APPLICATION("hello2", src, ldflag)
app = self._env.Targets()[1]
app.Action()
self.assertEqual(app.link_options, ["-lpthread"])
self.assertEqual(app.tag_libs.V(), [])
#Libs
Syntax.APPLICATION("hello3", src, ldflag, libs)
app = self._env.Targets()[2]
app.Action()
self.assertEqual(app.link_options, ["-lpthread"])
self.assertEqual(app.tag_libs.V(), ["broc_out/baidu/broc/libhello.a"])
def test_STATIC_LIBRARY(self):
"""
test Syntax.STATIC_LIBRARY
"""
#set local flags tag
libs = Syntax.Libs("$OUT_ROOT/baidu/broc/libhello.a")
cpptags = Syntax.CppFlags("-DBROC", "-DRELEASE")
src = Syntax.Sources("hello.cpp")
        #an invalid static library name
flag = False
try:
Syntax.STATIC_LIBRARY("^*^&*!*$^", src)
except Syntax.BrocArgumentIllegalError as ex:
flag = True
self.assertTrue(flag)
        #invalid args for the static library
flag = False
try:
Syntax.STATIC_LIBRARY("hello", src, cpptags)
except Syntax.BrocArgumentIllegalError as ex:
flag = True
self.assertTrue(flag)
#Libs
Syntax.STATIC_LIBRARY("hello", src, libs)
library = self._env.Targets()[0]
library.Action()
self.assertEqual(library.tag_libs.V(), ["broc_out/baidu/broc/libhello.a"])
        #two targets with the same name
flag = False
try:
Syntax.STATIC_LIBRARY("hello", src)
except Syntax.BrocArgumentIllegalError as ex:
flag = True
self.assertTrue(flag)
#library DoCopy
Function.RunCommand("mkdir -p %s/lib" % self._module.root_path, \
ignore_stderr_when_ok = True)
Function.RunCommand("touch %s/lib/libhello1.a" % self._module.root_path, \
ignore_stderr_when_ok = True)
now_dir = os.getcwd()
os.chdir(self._module.workspace)
Syntax.STATIC_LIBRARY("hello1")
lib_paths = os.path.join(self._module.workspace, "broc_out", \
self._module.module_cvspath, "output/lib/libhello1.a")
self.assertTrue(os.path.exists(lib_paths))
os.chdir(now_dir)
def test_UT_APPLICATION(self):
"""
test Syntax.UT_APPLICATION
"""
#set local flags tag
ldflag = Syntax.LDFlags("-lpthread", "-lcrypto")
libs = Syntax.Libs("$OUT_ROOT/baidu/broc/libhello.a")
cpptags = Syntax.CppFlags("-DBROC", "-DRELEASE")
utargs = Syntax.UTArgs("--log=a.log --conf=a.conf")
#set global flags
Syntax.LDFLAGS("-lmcpack", "-lrt")
src = Syntax.Sources("hello.cpp")
        #an invalid UT application name
flag = False
try:
Syntax.UT_APPLICATION("^*^&*!*$^", src)
except Syntax.BrocArgumentIllegalError as ex:
flag = True
self.assertTrue(flag)
        #invalid args for the UT application
flag = False
try:
Syntax.UT_APPLICATION("hello", src, cpptags)
except Syntax.BrocArgumentIllegalError as ex:
flag = True
self.assertTrue(flag)
#global ldflags
Syntax.UT_APPLICATION("hello", src)
utapp = self._env.Targets()[0]
utapp.Action()
self.assertEqual(utapp.link_options, ["-lmcpack"])
self.assertEqual(utapp.tag_libs.V(), [])
self.assertEqual(utapp._ut_args, [])
        #two targets with the same name
flag = False
try:
Syntax.UT_APPLICATION("hello", src)
except Syntax.BrocArgumentIllegalError as ex:
flag = True
self.assertTrue(flag)
#local ldflags
Syntax.UT_APPLICATION("hello2", src, ldflag)
utapp = self._env.Targets()[1]
utapp.Action()
self.assertEqual(utapp.link_options, ["-lpthread"])
self.assertEqual(utapp.tag_libs.V(), [])
self.assertEqual(utapp._ut_args, [])
#Libs
Syntax.UT_APPLICATION("hello3", src, ldflag, libs)
utapp = self._env.Targets()[2]
utapp.Action()
self.assertEqual(utapp.link_options, ["-lpthread"])
self.assertEqual(utapp.tag_libs.V(), ["broc_out/baidu/broc/libhello.a"])
self.assertEqual(utapp._ut_args, [])
#UTArgs
Syntax.UT_APPLICATION("hello4", src, ldflag, libs, utargs)
utapp = self._env.Targets()[3]
utapp.Action()
self.assertEqual(utapp.link_options, ["-lpthread"])
self.assertEqual(utapp.tag_libs.V(), ["broc_out/baidu/broc/libhello.a"])
self.assertEqual(utapp._ut_args, ["--log=a.log", "--conf=a.conf"])
def test_PROTO_LIBRARY(self):
"""
test Syntax.PROTO_LIBRARY
"""
#make a new proto file
Function.RunCommand("touch %s/hello.proto" % self._module.root_path, \
ignore_stderr_when_ok = True)
#set local flags
cpptags = Syntax.CppFlags("-DDEBUG_LOCAL", "-DRELEASE_LOCAL")
cxxtags = Syntax.CxxFlags("-Wwrite-strings", "-Wswitch")
incflag = Syntax.Include("")
libflag = Syntax.Libs("$OUT_ROOT/baidu/broc/output/lib/libhello.a")
now_dir = os.getcwd()
os.chdir(self._module.workspace)
protos = Syntax.PROTO_LIBRARY("hello", "*.proto", cpptags, cxxtags, incflag, libflag)
proto_library = self._env.Targets()[0]
src = proto_library.tag_sources.V()[0]
proto_library.Action()
os.chdir(now_dir)
#check result
proto_cmd = """mkdir -p broc_out/baidu/broc && protoc \
--cpp_out=broc_out/baidu/broc -I=baidu/broc \
-I=. baidu/broc/*.proto\n"""
self.assertEqual(' '.join(protos.__str__().split()), ' '.join(proto_cmd.split()))
self.assertEqual(src.cppflags, ["-DDEBUG_LOCAL"])
self.assertEqual(src.cxxflags, ["-Wwrite-strings"])
self.assertEqual(src.includes, [".", "broc_out", 'baidu/broc',
u'broc_out/baidu/broc'])
self.assertEqual(src.infile, "broc_out/baidu/broc/hello.pb.cc")
self.assertEqual(proto_library.tag_libs.V(), \
["broc_out/baidu/broc/output/lib/libhello.a"])
def test_UTArgs(self):
"""
test Syntax.UTArgs
"""
tag = Syntax.UTArgs("--conf=a.conf --log=a.log")
self.assertTrue("--conf=a.conf" in tag.V())
self.assertTrue("--log=a.log" in tag.V())
self.assertTrue("--help" not in tag.V())
def test_PUBLISH(self):
"""
test Syntax.PUBLISH
"""
#src has one file
Syntax.PUBLISH("conf/a.conf", "$OUT/conf")
dst = os.path.join(self._env.OutputPath(), "conf")
src = os.path.join(self._module.module_cvspath, "conf/a.conf")
self.assertTrue("mkdir -p %s && cp -rf %s %s" % (dst, src, dst))
#src has more files
Syntax.PUBLISH("conf/a1.conf conf/a2.conf", "$OUT/conf")
dst = os.path.join(self._env.OutputPath(), "conf")
for s in "conf/a1.conf conf/a2.conf".split(' '):
src = os.path.join(self._module.module_cvspath, s)
self.assertTrue("mkdir -p %s && cp -rf %s %s" % (dst, src, dst))
#out_dir doesn't start with $OUT
flag = False
try:
Syntax.PUBLISH("conf/a3.conf", "conf")
except Syntax.BrocArgumentIllegalError as e:
flag = True
self.assertTrue(flag)
        #src isn't in self module
flag = False
try:
Syntax.PUBLISH("../../conf/a3.conf", "$OUT/conf")
except Syntax.NotInSelfModuleError as e:
flag = True
self.assertTrue(flag)
def test_SVN_PATH(self):
"""
test Syntax.SVN_PATH
"""
self.assertEqual(self._module.root_path, Syntax.SVN_PATH())
def test_SVN_URL(self):
"""
test Syntax.SVN_URL
"""
self.assertEqual(self._module.url, Syntax.SVN_URL())
def test_SVN_REVISION(self):
"""
test Syntax.SVN_REVISION
"""
self.assertEqual(self._module.revision, Syntax.SVN_REVISION())
def test_SVN_LAST_CHANGED_REV(self):
"""
test Syntax.LAST_CHANGED_REV
"""
self.assertEqual(self._module.last_changed_rev, Syntax.SVN_LAST_CHANGED_REV())
def test_GIT_PATH(self):
"""
test Syntax.GIT_PATH
"""
self.assertEqual(self._module.root_path, Syntax.GIT_PATH())
def test_GIT_URL(self):
"""
test Syntax.
"""
self.assertEqual(self._module.url, Syntax.GIT_URL())
def test_GIT_BRANCH(self):
"""
test Syntax.GIT_BRANCH
"""
self.assertEqual(self._module.br_name, Syntax.GIT_BRANCH())
def test_GIT_TAG(self):
"""
test Syntax.GIT_TAG
"""
self.assertEqual(self._module.tag_name, Syntax.GIT_TAG())
if __name__ == "__main__":
unittest.main()
|
segmentation_models_pytorch/fpn/__init__.py | yida2311/segmentation_models.pytorch | 5,325 | 11085200 | from .model import FPN |
tests/test_errors.py | muhammadvellani/Adafruit_IO_Python | 136 | 11085251 | # Copyright (c) 2014 Adafruit Industries
# Author: <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import time
import unittest
from Adafruit_IO import Client, RequestError, ThrottlingError
import base
class TestErrors(base.IOTestCase):
def test_request_error_from_bad_key(self):
io = Client("test_user", "this is a bad key from a test")
with self.assertRaises(RequestError):
io.send("TestStream", 42)
@unittest.skip("Throttling test must be run in isolation to prevent other tests from failing.")
def test_throttling_error_after_6_requests_in_short_period(self):
io = Client(self.get_test_key())
with self.assertRaises(ThrottlingError):
for i in range(6):
io.send("TestStream", 42)
time.sleep(0.1) # Small delay to keep from hammering network.
|
testing/scripts/rust/rust_main_program_unittests.py | chromium/chromium | 14,668 | 11085282 | <gh_stars>1000+
#!/usr/bin/env vpython3
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from pyfakefs import fake_filesystem_unittest
import tempfile
import unittest
from test_results import TestResult
from rust_main_program import _get_exe_specific_tests
from rust_main_program import _scrape_test_list
from rust_main_program import _scrape_test_results
from rust_main_program import _parse_args
from rust_main_program import _TestExecutableWrapper
class Tests(fake_filesystem_unittest.TestCase):
def test_scrape_test_list(self):
test_input = """
test_foo: test
test_bar: test
""".strip()
actual_results = _scrape_test_list(test_input, 'test_exe_name')
expected_results = ['test_exe_name/test_foo', 'test_exe_name/test_bar']
self.assertEqual(actual_results, expected_results)
def test_scrape_test_results(self):
test_input = """
running 3 tests
test test_foo ... ok
test test_bar ... ok
test test_foobar ... FAILED
failures:
---- test_foobar stdout ----
thread 'test_foobar' panicked at 'assertion failed: `(left == right)`
left: `7`,
right: `124`', ../../build/rust/tests/test_rust_source_set/src/lib.rs:29:5
note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace
failures:
test_foobar
test result: FAILED. 2 passed; 1 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.00s
""".strip()
list_of_expected_test_names = ['test_foo', 'test_bar', 'test_foobar']
actual_results = _scrape_test_results(test_input, 'test_exe_name',
list_of_expected_test_names)
expected_results = [
TestResult('test_exe_name/test_foo', 'PASS'),
TestResult('test_exe_name/test_bar', 'PASS'),
TestResult('test_exe_name/test_foobar', 'FAILED')
]
self.assertEqual(actual_results, expected_results)
def test_parse_args(self):
args = _parse_args(['--rust-test-executable=foo'])
self.assertEqual(['foo'], args.rust_test_executables)
args = _parse_args(
['--rust-test-executable=foo', '--rust-test-executable=bar'])
self.assertEqual(['foo', 'bar'], args.rust_test_executables)
def test_get_exe_specific_tests(self):
result = _get_exe_specific_tests(
"exe_name", ["exe_name/foo1", "exe_name/foo2", "other_exe/foo3"])
self.assertEqual(['foo1', 'foo2'], result)
def test_executable_wrapper_basic_construction(self):
with tempfile.TemporaryDirectory() as tmpdirname:
exe_filename = 'foo-bar.exe'
exe_path = os.path.join(tmpdirname, exe_filename)
with open(exe_path, 'w'):
pass
t = _TestExecutableWrapper(exe_path)
self.assertEqual('foo-bar', t._name_of_test_executable)
def test_executable_wrapper_missing_file(self):
with self.assertRaises(ValueError):
_TestExecutableWrapper('no-such-file.exe')
if __name__ == '__main__':
unittest.main()
|
db/regex.py | bigblue/pynab | 161 | 11085283 | additions = [
# start above 200k for pynab regex
{
'id': 200000,
'group_name': '.*',
'regex': '/^trtk\d{4,8} (?:repost )?(?:- )?\[\d{1,5}\/\d{1,5}\] - "(?P<name>.+?)\.(?:nzb|vol\d+(?:\+\d+){1,}?\.par2|part\d+\.rar|par2|r\d{1,})" yEnc/i',
'ordinal': 1,
'status': True,
'description': 'music releases'
}
]
nn_replacements = {
'677': {
'id': 677,
'regex': '/^.*?\"(?P<name>.*?)\.(pdb|htm|prc|lit|epub|lrf|txt|pdf|rtf|doc|chf|chn|mobi|chm|doc|par|rar|sfv|nfo|nzb|srt|ass|txt|zip|ssa|r\d{1,3}|7z|tar|idx|t\d{1,2}|u\d{1,3})/i',
'status': True,
'ordinal': 9,
'group_name': 'alt.binaries.e-book.flood'
},
'678': {
'id': 678,
'regex': '/^.*?\"(?P<name>.*?)\.(pdb|htm|prc|lit|epub|lrf|txt|pdf|rtf|doc|chf|chn|mobi|chm|doc|par|rar|sfv|nfo|nzb|srt|ass|txt|zip|ssa|r\d{1,3}|7z|tar|idx|t\d{1,2}|u\d{1,3})/i',
'status': True,
'ordinal': 9,
'group_name': 'alt.binaries.e-book'
},
'679': {
'id': 679,
'regex': '/^.*?\"(?P<name>.*?)\.(pdb|htm|prc|lit|epub|lrf|txt|pdf|rtf|doc|chf|chn|mobi|chm|doc|par|rar|sfv|nfo|nzb|srt|ass|txt|zip|ssa|r\d{1,3}|7z|tar|idx|t\d{1,2}|u\d{1,3})/i',
'status': True,
'ordinal': 9,
'group_name': 'alt.binaries.ebook'
},
'680': {
'id': 680,
'regex': '/^.*?\"(?P<name>.*?)\.(pdb|htm|prc|lit|epub|lrf|txt|pdf|rtf|doc|chf|chn|mobi|chm|doc|par|rar|sfv|nfo|nzb|srt|ass|txt|zip|ssa|r\d{1,3}|7z|tar|idx|t\d{1,2}|u\d{1,3})/i',
'status': True,
'ordinal': 9,
'group_name': 'alt.binaries.e-book.technical'
},
'682': {
'id': 682,
'regex': '/^.*?\"(?P<name>.*?)\.(pdb|htm|prc|lit|epub|lrf|txt|pdf|rtf|doc|chf|chn|mobi|chm|doc|par|rar|sfv|nfo|nzb|srt|ass|txt|zip|ssa|r\d{1,3}|7z|tar|idx|t\d{1,2}|u\d{1,3})/i',
'status': False,
'ordinal': 9,
'group_name': 'alt.binaries.ebook.flood'
},
}
nzedb_replacements = {
'1030': {
'id': 1030,
'regex': '/^\(\?+\)[-_\s]{0,3}[\(\[]\d+\/\d+[\]\)][-_\s]{0,3}"(?P<match0>.+?([-_](proof|sample|thumbs?))*(\.part\d*(\.rar)?|\.rar)?(\d{1,3}\.rev|\.vol.+?|\.[A-Za-z0-9]{2,4}).)"[-_\s]{0,3}yEnc$/i',
'status': True,
'ordinal': 65,
'group_name': 'alt.binaries.tv'
}
} |
setup.py | daniellawrence/graphitesend | 122 | 11085289 | <filename>setup.py
#!/usr/bin/env python
from distutils.core import setup
setup(
name='graphitesend',
version='0.10.0',
description='A simple interface for sending metrics to Graphite',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/daniellawrence/graphitesend',
packages=['graphitesend'],
long_description="https://github.com/daniellawrence/graphitesend",
entry_points={
'console_scripts': [
'graphitesend = graphitesend.graphitesend:cli',
],
},
extras_require={
'asynchronous': ['gevent>=1.0.0'],
}
)
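# Quick usage sketch of the installed package (API names assumed from the
# project README rather than from this setup script):
#   import graphitesend
#   g = graphitesend.init(graphite_server='localhost', prefix='myapp')
#   g.send('render_time_ms', 42)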
|
dl_lib/network/__init__.py | PeizeSun/CenterNet-better | 543 | 11085296 | #!/usr/bin/python3
# -*- coding:utf-8 -*-
from .backbone import Backbone, ResnetBackbone
from .centernet import CenterNet
from .head import CenternetDeconv, CenternetHead
from .loss.reg_l1_loss import reg_l1_loss
|
lumopt/lumerical_methods/lumerical_scripts.py | jbellevi/lumopt | 101 | 11085298 | <reponame>jbellevi/lumopt
""" Copyright chriskeraly
Copyright (c) 2019 Lumerical Inc. """
import numpy as np
import scipy as sp
import scipy.constants
import lumapi
from lumopt.utilities.fields import Fields, FieldsNoInterp
def get_fields_on_cad(fdtd, monitor_name, field_result_name, get_eps, get_D, get_H, nointerpolation, unfold_symmetry = True):
unfold_symmetry_string = "true" if unfold_symmetry else "false"
fdtd.eval("options=struct; options.unfold={0};".format(unfold_symmetry_string) +
"{0} = struct;".format(field_result_name) +
"{0}.E = getresult('{1}','E',options);".format(field_result_name, monitor_name))
if get_eps or get_D:
index_monitor_name = monitor_name + '_index'
fdtd.eval("{0}.index = getresult('{1}','index',options);".format(field_result_name, index_monitor_name))
if get_H:
fdtd.eval("{0}.H = getresult('{1}','H',options);".format(field_result_name, monitor_name))
if nointerpolation:
fdtd.eval("{0}.delta = struct;".format(field_result_name) +
"{0}.delta.x = getresult('{1}','delta_x',options);".format(field_result_name, monitor_name) +
"{0}.delta.y = getresult('{1}','delta_y',options);".format(field_result_name, monitor_name))
monitor_dimension = fdtd.getresult(monitor_name, 'dimension')
if monitor_dimension == 3:
fdtd.eval("{0}.delta.z = getdata('{1}','delta_z');".format(field_result_name, monitor_name))
else:
fdtd.eval("{0}.delta.z = 0.0;".format(field_result_name))
def get_fields(fdtd, monitor_name, field_result_name, get_eps, get_D, get_H, nointerpolation, unfold_symmetry = True, on_cad_only = False):
get_fields_on_cad(fdtd, monitor_name, field_result_name, get_eps, get_D, get_H, nointerpolation, unfold_symmetry)
## If required, we now transfer the field data to Python and package it up
if not on_cad_only:
fields_dict = lumapi.getVar(fdtd.handle, field_result_name)
if get_eps:
if fdtd.getnamednumber('varFDTD') == 1:
if 'index_x' in fields_dict['index'] and 'index_y' in fields_dict['index'] and not 'index_z' in fields_dict['index']: # varFDTD TE simulation
fields_dict['index']['index_z'] = fields_dict['index']['index_x']*0.0 + 1.0
elif not 'index_x' in fields_dict['index'] and not 'index_y' in fields_dict['index'] and 'index_z' in fields_dict['index']: # varFDTD TM simulation
fields_dict['index']['index_x'] = fields_dict['index']['index_z']*0.0 + 1.0
fields_dict['index']['index_y'] = fields_dict['index']['index_x']
assert 'index_x' in fields_dict['index'] and 'index_y' in fields_dict['index'] and 'index_z' in fields_dict['index']
fields_eps = np.stack((np.power(fields_dict['index']['index_x'], 2),
np.power(fields_dict['index']['index_y'], 2),
np.power(fields_dict['index']['index_z'], 2)),
axis = -1)
else:
fields_eps = None
fields_D = fields_dict['E']['E'] * fields_eps * sp.constants.epsilon_0 if get_D else None
fields_H = fields_dict['H']['H'] if get_H else None
if nointerpolation:
deltas = [fields_dict['delta']['x'], fields_dict['delta']['y'], fields_dict['delta']['z']]
return FieldsNoInterp(fields_dict['E']['x'], fields_dict['E']['y'], fields_dict['E']['z'], fields_dict['E']['lambda'], deltas, fields_dict['E']['E'], fields_D, fields_eps, fields_H)
else:
return Fields(fields_dict['E']['x'], fields_dict['E']['y'], fields_dict['E']['z'], fields_dict['E']['lambda'], fields_dict['E']['E'], fields_D, fields_eps, fields_H)
def set_spatial_interp(fdtd,monitor_name,setting):
script='select("{}");set("spatial interpolation","{}");'.format(monitor_name,setting)
fdtd.eval(script)
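# Usage sketch (assumes an open lumapi.FDTD() session `fdtd` and a monitor
# named 'opt_fields'; the setting string must be a value Lumerical accepts for
# "spatial interpolation", e.g. 'none' or 'specified position'):
#   set_spatial_interp(fdtd, 'opt_fields', 'none')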
def get_eps_from_sim(fdtd, monitor_name = 'opt_fields', unfold_symmetry = True):
index_monitor_name = monitor_name + '_index'
unfold_symmetry_string = "true" if unfold_symmetry else "false"
fdtd.eval(('options=struct; options.unfold={0};'
'{1}_result = getresult("{1}","index",options);'
'{1}_eps_x = ({1}_result.index_x)^2;'
'{1}_eps_y = ({1}_result.index_y)^2;'
'{1}_eps_z = ({1}_result.index_z)^2;'
'{1}_x = {1}_result.x;'
'{1}_y = {1}_result.y;'
'{1}_z = {1}_result.z;'
'{1}_lambda = {1}_result.lambda;'
).format(unfold_symmetry_string, index_monitor_name))
fields_eps_x = fdtd.getv('{0}_eps_x'.format(index_monitor_name)) # np.power(index_dict['index_x'], 2)
fields_eps_y = fdtd.getv('{0}_eps_y'.format(index_monitor_name)) # np.power(index_dict['index_y'], 2)
fields_eps_z = fdtd.getv('{0}_eps_z'.format(index_monitor_name)) # np.power(index_dict['index_z'], 2)
index_monitor_x = fdtd.getv('{0}_x'.format(index_monitor_name)) # index_dict['x']
index_monitor_y = fdtd.getv('{0}_y'.format(index_monitor_name)) # index_dict['y']
index_monitor_z = fdtd.getv('{0}_z'.format(index_monitor_name)) # index_dict['z']
index_monitor_lambda = fdtd.getv('{0}_lambda'.format(index_monitor_name)) # index_dict['lambda']
# index_dict = fdtd.getresult(index_monitor_name, 'index') #< Currently does not work with unfolding options
# fields_eps_x = np.power(index_dict['index_x'], 2)
# fields_eps_y = np.power(index_dict['index_y'], 2)
# fields_eps_z = np.power(index_dict['index_z'], 2)
# index_monitor_x = index_dict['x']
# index_monitor_y = index_dict['y']
# index_monitor_z = index_dict['z']
# index_monitor_lambda = index_dict['lambda']
fields_eps = np.stack((fields_eps_x, fields_eps_y, fields_eps_z), axis = -1)
return fields_eps, index_monitor_x,index_monitor_y,index_monitor_z, index_monitor_lambda
|
saleor/graphql/core/filters.py | siyoola/saleor | 1,392 | 11085316 | <reponame>siyoola/saleor<gh_stars>1000+
import django_filters
import graphene
from django.core.exceptions import ValidationError
from django.forms import CharField, Field, MultipleChoiceField
from django_filters import Filter, MultipleChoiceFilter
from graphql_relay import from_global_id
from ..utils.filters import filter_range_field
from .enums import JobStatusEnum
from .types import DateTimeRangeInput
class DefaultMultipleChoiceField(MultipleChoiceField):
default_error_messages = {"invalid_list": "Enter a list of values."}
def to_python(self, value):
if not value:
return []
if not isinstance(value, list):
value = [value]
return value
def validate(self, value):
"""Validate that the input is a list or tuple."""
if self.required and not value:
raise ValidationError(self.error_messages["required"], code="required")
if not isinstance(value, (list, tuple)):
raise ValidationError(
self.error_messages["invalid_list"], code="invalid_list"
)
return True
class EnumFilter(django_filters.CharFilter):
"""Filter class for Graphene enum object.
    input_class needs to be passed explicitly as well as the method.
"""
def __init__(self, input_class, *args, **kwargs):
assert kwargs.get(
"method"
), "Providing exact filter method is required for EnumFilter"
self.input_class = input_class
super().__init__(*args, **kwargs)
class ListObjectTypeFilter(django_filters.MultipleChoiceFilter):
field_class = DefaultMultipleChoiceField
def __init__(self, input_class, *args, **kwargs):
self.input_class = input_class
super().__init__(*args, **kwargs)
class ObjectTypeFilter(django_filters.Filter):
def __init__(self, input_class, *args, **kwargs):
self.input_class = input_class
super().__init__(*args, **kwargs)
def filter_created_at(qs, _, value):
return filter_range_field(qs, "created_at", value)
def filter_updated_at(qs, _, value):
return filter_range_field(qs, "updated_at", value)
def filter_status(qs, _, value):
if not value:
return qs
return qs.filter(status=value)
def filter_metadata(qs, _, value):
for metadata_item in value:
if metadata_item.value:
qs = qs.filter(metadata__contains={metadata_item.key: metadata_item.value})
else:
qs = qs.filter(metadata__has_key=metadata_item.key)
return qs
class BaseJobFilter(django_filters.FilterSet):
created_at = ObjectTypeFilter(
input_class=DateTimeRangeInput, method=filter_created_at
)
updated_at = ObjectTypeFilter(
input_class=DateTimeRangeInput, method=filter_updated_at
)
status = EnumFilter(input_class=JobStatusEnum, method=filter_status)
class MetadataFilter(graphene.InputObjectType):
key = graphene.String(required=True, description="Key of a metadata item.")
value = graphene.String(required=False, description="Value of a metadata item.")
class MetadataFilterBase(django_filters.FilterSet):
metadata = ListObjectTypeFilter(input_class=MetadataFilter, method=filter_metadata)
class Meta:
abstract = True
class GlobalIDFormField(Field):
default_error_messages = {"invalid": "Invalid ID specified."}
def clean(self, value):
if not value and not self.required:
return None
try:
_type, _id = from_global_id(value)
except (TypeError, ValueError):
raise ValidationError(self.error_messages["invalid"])
try:
CharField().clean(_id)
CharField().clean(_type)
except ValidationError:
raise ValidationError(self.error_messages["invalid"])
return value
class GlobalIDFilter(Filter):
field_class = GlobalIDFormField
def filter(self, qs, value):
"""Convert the filter value to a primary key before filtering."""
_id = None
if value is not None:
_, _id = from_global_id(value)
return super(GlobalIDFilter, self).filter(qs, _id)
class GlobalIDMultipleChoiceField(MultipleChoiceField):
default_error_messages = {
"invalid_choice": "One of the specified IDs was invalid (%(value)s).",
"invalid_list": "Enter a list of values.",
}
def to_python(self, value):
return super().to_python(value)
def valid_value(self, value):
# Clean will raise a validation error if there is a problem
GlobalIDFormField().clean(value)
return True
class GlobalIDMultipleChoiceFilter(MultipleChoiceFilter):
field_class = GlobalIDMultipleChoiceField
def filter(self, qs, value):
gids = [from_global_id(v)[1] for v in value]
return super(GlobalIDMultipleChoiceFilter, self).filter(qs, gids)
|
env/lib/python3.6/site-packages/xlwt/__init__.py | anthowen/duplify | 5,079 | 11085331 | <filename>env/lib/python3.6/site-packages/xlwt/__init__.py
__VERSION__ = '1.3.0'
from .Workbook import Workbook
from .Worksheet import Worksheet
from .Row import Row
from .Column import Column
from .Formatting import Font, Alignment, Borders, Pattern, Protection
from .Style import XFStyle, easyxf, easyfont, add_palette_colour
from .ExcelFormula import *
|
syplatform/platform.py | larrycameron80/outis | 129 | 11085339 | <filename>syplatform/platform.py
class Platform:
""" Platform: abstract platform of the agent """
# noinspection PyUnusedLocal
def __init__(self, handler):
raise NotImplementedError("Platform.init should have been implemented by platform module")
def isstaged(self):
"""
        Is the current platform staged or not? Override to handle staged platforms.
"""
return False
def getstager(self):
raise NotImplementedError("Platform.getstager should have been implemented by platform module")
def getagent(self):
raise NotImplementedError("Platform.getagent should have been implemented by platform module")
|
sahara/tests/unit/service/edp/data_sources/swift/test_swift_type.py | openstack/sahara | 161 | 11085343 | # Copyright (c) 2017 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from unittest import mock
from oslo_utils import uuidutils
import testtools
import sahara.exceptions as ex
from sahara.service.edp.data_sources.swift.implementation import SwiftType
from sahara.service.edp import job_utils
from sahara.swift import utils as su
from sahara.tests.unit import base
from sahara.tests.unit.service.edp import edp_test_utils as u
from sahara.utils.types import FrozenDict
SAMPLE_SWIFT_URL = "swift://1234/object"
SAMPLE_SWIFT_URL_WITH_SUFFIX = "swift://1234%s/object" % su.SWIFT_URL_SUFFIX
class TestSwiftTypeValidation(base.SaharaTestCase):
def setUp(self):
super(TestSwiftTypeValidation, self).setUp()
self.s_type = SwiftType()
@mock.patch('sahara.context.ctx')
def test_prepare_cluster(self, ctx):
ctx.return_value = 'dummy'
ds_url = "swift://container/input"
ds = u.create_data_source(ds_url,
name="data_source",
id=uuidutils.generate_uuid())
job_configs = {
'configs': {
job_utils.DATA_SOURCE_SUBST_NAME: True,
job_utils.DATA_SOURCE_SUBST_UUID: True
}
}
old_configs = copy.deepcopy(job_configs)
self.s_type.prepare_cluster(ds, u.create_cluster(),
job_configs=job_configs)
# Swift configs should be filled in since they were blank
self.assertEqual(ds.credentials['user'],
job_configs['configs']
['fs.swift.service.sahara.username'])
self.assertEqual(ds.credentials['password'],
job_configs['configs']
['fs.swift.service.sahara.password'])
self.assertNotEqual(old_configs, job_configs)
job_configs['configs'] = {'fs.swift.service.sahara.username': 'sam',
'fs.swift.service.sahara.password': '<PASSWORD>',
job_utils.DATA_SOURCE_SUBST_NAME: False,
job_utils.DATA_SOURCE_SUBST_UUID: True}
old_configs = copy.deepcopy(job_configs)
self.s_type.prepare_cluster(ds, u.create_cluster(),
job_configs=job_configs)
# Swift configs should not be overwritten
self.assertEqual(old_configs['configs'], job_configs['configs'])
job_configs['configs'] = {job_utils.DATA_SOURCE_SUBST_NAME: True,
job_utils.DATA_SOURCE_SUBST_UUID: False}
job_configs['proxy_configs'] = {'proxy_username': 'john',
'proxy_password': '<PASSWORD>',
'proxy_trust_id': 'trustme'}
old_configs = copy.deepcopy(job_configs)
self.s_type.prepare_cluster(ds, u.create_cluster(),
job_configs=job_configs)
# Swift configs should be empty and proxy configs should be preserved
self.assertEqual(old_configs['configs'], job_configs['configs'])
self.assertEqual(old_configs['proxy_configs'],
job_configs['proxy_configs'])
# If there's no configs do nothing
job_configs['configs'] = None
old_configs = copy.deepcopy(job_configs)
self.s_type.prepare_cluster(ds, u.create_cluster(),
job_configs=job_configs)
self.assertEqual(old_configs, job_configs)
# If it's a FrozenDict do nothing
job_configs = {
'configs': {
job_utils.DATA_SOURCE_SUBST_NAME: True,
job_utils.DATA_SOURCE_SUBST_UUID: True
}
}
old_configs = copy.deepcopy(job_configs)
job_configs = FrozenDict(job_configs)
self.s_type.prepare_cluster(ds, u.create_cluster(),
job_configs=job_configs)
self.assertEqual(old_configs, job_configs)
def test_swift_type_validation(self):
data = {
"name": "test_data_data_source",
"url": SAMPLE_SWIFT_URL,
"type": "swift",
"credentials": {
"user": "user",
"password": "password"
},
"description": "long description"
}
self.s_type.validate(data)
def test_swift_type_validation_missing_credentials(self):
data = {
"name": "test_data_data_source",
"url": SAMPLE_SWIFT_URL,
"type": "swift",
"description": "long description"
}
with testtools.ExpectedException(ex.InvalidCredentials):
self.s_type.validate(data)
# proxy enabled should allow creation without credentials
self.override_config('use_domain_for_proxy_users', True)
self.s_type.validate(data)
def test_swift_type_validation_credentials_missing_user(self):
data = {
"name": "test_data_data_source",
"url": SAMPLE_SWIFT_URL,
"type": "swift",
"credentials": {
"password": "password"
},
"description": "long description"
}
with testtools.ExpectedException(ex.InvalidCredentials):
self.s_type.validate(data)
# proxy enabled should allow creation without credentials
self.override_config('use_domain_for_proxy_users', True)
self.s_type.validate(data)
def test_swift_type_validation_credentials_missing_password(self):
data = {
"name": "test_data_data_source",
"url": SAMPLE_SWIFT_URL,
"type": "swift",
"credentials": {
"user": "user",
},
"description": "long description"
}
with testtools.ExpectedException(ex.InvalidCredentials):
self.s_type.validate(data)
# proxy enabled should allow creation without credentials
self.override_config('use_domain_for_proxy_users', True)
self.s_type.validate(data)
def test_swift_type_validation_wrong_schema(self):
data = {
"name": "test_data_data_source",
"url": "swif://1234/object",
"type": "swift",
"description": "incorrect url schema"
}
with testtools.ExpectedException(ex.InvalidDataException):
self.s_type.validate(data)
def test_swift_type_validation_explicit_suffix(self):
data = {
"name": "test_data_data_source",
"url": SAMPLE_SWIFT_URL_WITH_SUFFIX,
"type": "swift",
"description": "incorrect url schema",
"credentials": {
"user": "user",
"password": "password"
}
}
self.s_type.validate(data)
def test_swift_type_validation_wrong_suffix(self):
data = {
"name": "test_data_data_source",
"url": "swift://1234.suffix/object",
"type": "swift",
"description": "incorrect url schema"
}
with testtools.ExpectedException(ex.InvalidDataException):
self.s_type.validate(data)
def test_swift_type_validation_missing_object(self):
data = {
"name": "test_data_data_source",
"url": "swift://1234/",
"type": "swift",
"description": "incorrect url schema"
}
with testtools.ExpectedException(ex.InvalidDataException):
self.s_type.validate(data)
|
kpm/formats/kub.py | ericchiang/kpm | 121 | 11085357 | <reponame>ericchiang/kpm
import logging
import os.path
from collections import OrderedDict
import hashlib
import yaml
import json
import jsonpatch
from kpm.platforms.kubernetes import Kubernetes, get_endpoint
from kpm.utils import colorize, mkdir_p
from kpm.display import print_deploy_result
from kpm.formats.kub_base import KubBase
logger = logging.getLogger(__name__)
_mapping_tag = yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG
class Kub(KubBase):
media_type = 'kpm'
platform = "kubernetes"
def _resource_name(self, resource):
return resource.get('name', resource['value']['metadata']['name'])
def _resource_build(self, kub, resource):
self._annotate_resource(kub, resource)
return {
"file":
resource['file'],
"update_mode":
resource.get('update_mode', 'update'),
"hash":
resource['value']['metadata']['annotations'].get('kpm.hash', None),
"protected":
resource['protected'],
"name":
self._resource_name(resource),
"kind":
resource['value']['kind'].lower(),
"endpoint":
get_endpoint(resource['value']['kind'].lower()).format(namespace=self.namespace),
"body":
json.dumps(resource['value'])
}
# @TODO do it in jsonnet
def _annotate_resource(self, kub, resource):
sha = None
if 'annotations' not in resource['value']['metadata']:
resource['value']['metadata']['annotations'] = {}
if resource.get('hash', True):
sha = hashlib.sha256(json.dumps(resource['value'])).hexdigest()
resource['value']['metadata']['annotations']['kpm.hash'] = sha
annotation = resource['value']['metadata']['annotations']
annotation['kpm.version'] = kub.version
annotation['kpm.package'] = kub.name
annotation['kpm.parent'] = self.name
annotation['kpm.protected'] = str(resource['protected']).lower()
return resource
def _create_namespaces(self):
if self.namespace:
ns = self.create_namespace(self.namespace)
self._resources.insert(0, ns)
def resources(self):
""" Override resources to auto-create namespace"""
if self._resources is None:
self._resources = self.manifest.resources
self._create_namespaces()
return self._resources
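    # Apply each resource's accumulated JSON patches; when a namespace is set, it is patched into the metadata first.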
def _apply_patches(self, resources):
for _, resource in resources.iteritems():
if self.namespace:
if 'namespace' in resource['value']['metadata']:
op = 'replace'
else:
op = 'add'
resource['patch'].append({
"op": op,
"path": "/metadata/namespace",
"value": self.namespace
})
if len(resource['patch']):
patch = jsonpatch.JsonPatch(resource['patch'])
result = patch.apply(resource['value'])
resource['value'] = result
return resources
@property
def kubClass(self):
return Kub
def create_namespace(self, namespace):
value = {"apiVersion": "v1", "kind": "Namespace", "metadata": {"name": namespace}}
resource = {
"file": "%s-ns.yaml" % namespace,
"name": namespace,
"generated": True,
"order": -1,
"hash": False,
"protected": True,
"update_mode": 'update',
"value": value,
"patch": [],
"variables": {},
"type": "namespace"
}
return resource
def build(self):
result = []
for kub in self.dependencies:
result.append(self._dep_build(kub))
return {"deploy": result, "package": {"name": self.name, "version": self.version}}
def _dep_build(self, kub):
package = {
"package": kub.name,
"version": kub.version,
"namespace": kub.namespace,
"resources": []
}
for resource in kub.resources():
package['resources'].\
append(self._resource_build(kub, resource))
return package
def _process_deploy(self, dry=False, force=False, fmt="txt", proxy=None, action="create",
dest="/tmp/kpm"):
def output_progress(kubsource, status, fmt="text"):
if fmt == 'text':
print " --> %s (%s): %s" % (kubsource.name, kubsource.kind, colorize(status))
dest = os.path.join(dest, self.name, self.version)
mkdir_p(dest)
table = []
results = []
if fmt == "text":
print "%s %s " % (action, self.name)
i = 0
for kub in self.dependencies:
package = self._dep_build(kub)
i += 1
pname = package["package"]
version = package["version"]
namespace = package["namespace"]
if fmt == "text":
print "\n %02d - %s:" % (i, package["package"])
for resource in package["resources"]:
body = resource["body"]
endpoint = resource["endpoint"]
# Use API instead of kubectl
with open(
os.path.join(dest, "%s-%s" % (resource['name'],
resource['file'].replace("/", "_"))),
'wb') as f:
f.write(body)
kubresource = Kubernetes(namespace=namespace, body=body, endpoint=endpoint,
proxy=proxy)
status = getattr(kubresource, action)(force=force, dry=dry, strategy=resource.get(
'update_mode', 'update'))
if fmt == "text":
output_progress(kubresource, status)
result_line = OrderedDict([("package", pname), ("version", version), (
"kind", kubresource.kind), ("dry", dry), ("name", kubresource.name), (
"namespace", kubresource.namespace), ("status", status)])
if status != 'ok' and action == 'create':
kubresource.wait(3)
results.append(result_line)
if fmt == "text":
header = ["package", "version", "kind", "name", "namespace", "status"]
display_line = []
for k in header:
display_line.append(result_line[k])
table.append(display_line)
if fmt == "text":
print_deploy_result(table)
return results
def deploy(self, *args, **kwargs):
kwargs['action'] = 'create'
return self._process_deploy(*args, **kwargs)
def delete(self, *args, **kwargs):
kwargs['action'] = 'delete'
return self._process_deploy(*args, **kwargs)
|
model_repos/CycleMLP/mcloader/__init__.py | gsc2001/ConvexNet | 207 | 11085361 | <reponame>gsc2001/ConvexNet<gh_stars>100-1000
from .classification import ClassificationDataset
from .data_prefetcher import DataPrefetcher
|
__scraping__/fantasy.premierleague.com/main-1.py | whitmans-max/python-examples | 140 | 11085363 |
# author: https://blog.furas.pl
# date: 2020.06.22
# link: https://stackoverflow.com/questions/62521126/table-web-scraping-issues-with-python/
from urllib.request import urlopen
import json
import pandas as pd
#url = 'https://fantasy.premierleague.com/player-list'
url = 'https://fantasy.premierleague.com/api/bootstrap-static/'
text = urlopen(url).read().decode()
data = json.loads(text)
print('\n--- element type ---\n')
#print(data['element_types'][0])
for item in data['element_types']:
print(item['id'], item['plural_name'])
print('\n--- Goalkeepers ---\n')
number = 0
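# note: only the first API element is inspected below; drop the [:1] slice to scan every player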
for item in data['elements'][:1]:
if item['element_type'] == 1: # Goalkeepers
number += 1
print('---', number, '---')
print('type :', data['element_types'][item['element_type']-1]['plural_name'])
print('first_name :', item['first_name'])
print('second_name :', item['second_name'])
print('total_points:', item['total_points'])
print('team :', data['teams'][item['team']-1]['name'])
print('cost :', item['now_cost']/10)
if item['first_name'] == 'Alisson':
for key, value in item.items():
print(' ', key, '=',value)
|
test/restful/test_rooms_acl.py | thenetcircle/dino | 150 | 11085398 | <reponame>thenetcircle/dino<gh_stars>100-1000
from datetime import datetime
from datetime import timedelta
from uuid import uuid4 as uuid
from dino.auth.redis import AuthRedis
from dino.config import ApiActions, SessionKeys, RedisKeys
from dino.rest.resources.rooms_acl import RoomsAclResource
from test.base import BaseTest
from test.db import BaseDatabaseTest
class RoomsAclTest(BaseDatabaseTest):
def setUp(self):
# environ.env.db = FakeDb()
self.set_up_env('redis')
self.env.db = self.db
self.resource = RoomsAclResource()
self.auth = AuthRedis(env=self.env, host='mock')
self.session = {
SessionKeys.user_id.value: BaseTest.USER_ID,
SessionKeys.user_name.value: BaseTest.USER_NAME,
SessionKeys.gender.value: BaseTest.GENDER,
}
for key, value in self.session.items():
self.auth.update_session_for_key(BaseTest.USER_ID, key, value)
self.channel_id = str(uuid())
self.env.db.create_channel("test name", self.channel_id, BaseTest.OTHER_USER_ID)
self.env.auth = self.auth
self.env.session = self.session
self.resource.env = self.env
def tearDown(self):
self.db.redis.flushall()
self.env.cache._flushall()
def test_get_one_rooms(self):
self.env.db.create_room("room name 1", str(uuid()), self.channel_id, BaseTest.OTHER_USER_ID, "user name", False, 999)
self.assertEqual(1, len(self.resource._do_get(BaseTest.USER_ID)))
def test_get_three_rooms(self):
self.env.db.create_room("room name 1", str(uuid()), self.channel_id, BaseTest.OTHER_USER_ID, "user name", False, 999)
self.env.db.create_room("room name 2", str(uuid()), self.channel_id, BaseTest.OTHER_USER_ID, "user name", False, 999)
self.env.db.create_room("room name 3", str(uuid()), self.channel_id, BaseTest.OTHER_USER_ID, "user name", False, 999)
self.assertEqual(3, len(self.resource._do_get(BaseTest.USER_ID)))
def test_get_two_rooms_one_not_allowed(self):
room_id_to_forbid = str(uuid())
self.env.db.create_room("room name 1", str(uuid()), self.channel_id, BaseTest.OTHER_USER_ID, "user name", False, 999)
self.env.db.create_room("room name 2", room_id_to_forbid, self.channel_id, BaseTest.OTHER_USER_ID, "user name", False, 999)
self.env.db.create_room("room name 3", str(uuid()), self.channel_id, BaseTest.OTHER_USER_ID, "user name", False, 999)
self.env.db.add_acls_in_room_for_action(
room_id_to_forbid,
ApiActions.JOIN,
{"gender": "m"}
)
rooms = self.resource._do_get(BaseTest.USER_ID)
self.assertEqual(2, len(rooms))
self.assertTrue(all((room["room_name"] in {"room name 1", "room name 3"} for room in rooms)))
def test_get_two_channels_one_not_allowed_two_rooms_one_not_allowed(self):
room_id_to_forbid = str(uuid())
self.env.db.create_room("room name 1", str(uuid()), self.channel_id, BaseTest.OTHER_USER_ID, "user name", False, 999)
self.env.db.create_room("room name 2", room_id_to_forbid, self.channel_id, BaseTest.OTHER_USER_ID, "user name", False, 999)
self.env.db.create_room("room name 3", str(uuid()), self.channel_id, BaseTest.OTHER_USER_ID, "user name", False, 999)
self.env.db.add_acls_in_room_for_action(
room_id_to_forbid,
ApiActions.JOIN,
{"gender": "m"}
)
channel_id_to_forbid = str(uuid())
self.env.db.create_channel("test name 2", channel_id_to_forbid, BaseTest.OTHER_USER_ID)
self.env.db.create_room("room name 4", str(uuid()), channel_id_to_forbid, BaseTest.OTHER_USER_ID, "user name", False, 999)
self.env.db.add_acls_in_channel_for_action(
channel_id_to_forbid,
ApiActions.LIST,
{"gender": "m"}
)
rooms = self.resource._do_get(BaseTest.USER_ID)
self.assertEqual(2, len(rooms))
self.assertTrue(all((room["room_name"] in {"room name 1", "room name 3"} for room in rooms)))
self.assertTrue(all((room["channel_name"] == "test name" for room in rooms)))
def test_set_last_cleared(self):
last_cleared = self.resource._get_last_cleared()
self.resource._set_last_cleared(datetime.utcnow()+timedelta(minutes=5))
self.assertNotEqual(last_cleared, self.resource._get_last_cleared())
|
tests/graph/test_node_graph.py | emekdahl/fal | 360 | 11085404 | <reponame>emekdahl/fal<gh_stars>100-1000
from typing import Dict, List
import networkx as nx
from unittest.mock import MagicMock, PropertyMock
from unittest.mock import patch
from fal.node_graph import NodeGraph, _add_after_scripts, _add_before_scripts
from utils import assert_contains_only, create_mock_model
@patch("dbt.contracts.graph.parsed.ParsedModelNode")
@patch("fal.FalDbt")
def test_add_after_scripts(parsed_node, fal_dbt_class):
graph = nx.DiGraph()
node_lookup = {}
modelA = create_mock_model(parsed_node, "modelA", ["scriptA.py", "scriptB.py"], [])
fal_dbt_instance = fal_dbt_class("/dir", "/profiles")
fal_dbt_instance.scripts_dir = "/dir"
fal_dbt_instance.keyword = "fal"
graph, node_lookup = _add_after_scripts(
modelA, "model.modelA", fal_dbt_instance, graph, node_lookup
)
assert_contains_only(
list(node_lookup.keys()),
["script.modelA.AFTER.scriptA.py", "script.modelA.AFTER.scriptB.py"],
)
assert_contains_only(
list(graph.successors("model.modelA")),
["script.modelA.AFTER.scriptA.py", "script.modelA.AFTER.scriptB.py"],
)
@patch("dbt.contracts.graph.parsed.ParsedModelNode")
@patch("fal.FalDbt")
def test_add_before_scripts(parsed_node, fal_dbt_class):
graph = nx.DiGraph()
node_lookup = {}
modelA = create_mock_model(
parsed_node, "modelA", [], [], before_script_paths=["scriptA.py", "scriptB.py"]
)
fal_dbt_instance = fal_dbt_class("/dir", "/profiles")
fal_dbt_instance.scripts_dir = "/dir"
fal_dbt_instance.keyword = "fal"
graph, node_lookup = _add_before_scripts(
modelA, "model.modelA", fal_dbt_instance, graph, node_lookup
)
assert_contains_only(
list(node_lookup.keys()),
["script.modelA.BEFORE.scriptA.py", "script.modelA.BEFORE.scriptB.py"],
)
assert_contains_only(
list(graph.predecessors("model.modelA")),
["script.modelA.BEFORE.scriptA.py", "script.modelA.BEFORE.scriptB.py"],
)
@patch("fal.FalDbt")
def test_empty_fal_dbt(fal_dbt_class):
fal_dbt_instance = fal_dbt_class("/dir", "/profiles")
fal_dbt_instance.scripts_dir = "/dir"
fal_dbt_instance.keyword = "fal"
fal_dbt_instance.list_models = MagicMock(return_value=[])
node_graph = NodeGraph.from_fal_dbt(fal_dbt_instance)
assert list(node_graph.node_lookup.keys()) == []
@patch("dbt.contracts.graph.parsed.ParsedModelNode")
@patch("fal.FalDbt")
def test_create_with_fal_dbt(parsed_node, fal_dbt_class):
modelA = create_mock_model(parsed_node, "modelA", ["scriptA.py", "scriptB.py"], [])
modelB = create_mock_model(parsed_node, "modelB", ["scriptB.py"], ["model.modelA"])
modelC = create_mock_model(
parsed_node, "modelC", ["scriptC.py"], ["model.modelA", "model.modelB"]
)
fal_dbt_instance = fal_dbt_class("/dir", "/profiles")
fal_dbt_instance.scripts_dir = "/dir"
fal_dbt_instance.keyword = "fal"
fal_dbt_instance.list_models = MagicMock(return_value=[modelA, modelB, modelC])
node_graph = NodeGraph.from_fal_dbt(fal_dbt_instance)
assert_contains_only(
list(node_graph.node_lookup.keys()),
[
"model.modelA",
"model.modelB",
"model.modelC",
"script.modelA.AFTER.scriptA.py",
"script.modelA.AFTER.scriptB.py",
"script.modelB.AFTER.scriptB.py",
"script.modelC.AFTER.scriptC.py",
],
)
assert_contains_only(
node_graph.get_descendants("model.modelA"),
[
"model.modelC",
"script.modelA.AFTER.scriptB.py",
"script.modelC.AFTER.scriptC.py",
"script.modelA.AFTER.scriptA.py",
"model.modelB",
"script.modelB.AFTER.scriptB.py",
],
)
assert_contains_only(
node_graph.get_descendants("model.modelB"),
[
"script.modelB.AFTER.scriptB.py",
"model.modelC",
"script.modelC.AFTER.scriptC.py",
],
)
assert_contains_only(
node_graph.get_descendants("model.modelC"), ["script.modelC.AFTER.scriptC.py"]
)
|
src/python/nimbusml/tests/preprocessing/test_tostring.py | montehoover/NimbusML | 134 | 11085431 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# --------------------------------------------------------------------------------------------
import distro
import unittest
import numpy as np
from pandas import DataFrame
from nimbusml.preprocessing import ToString
from sklearn.utils.testing import assert_equal
@unittest.skipIf('centos' in distro.linux_distribution(full_distribution_name=False)[0].lower(), "centos is not supported")
class TestToString(unittest.TestCase):
def test_tostring(self):
data={'f0': [4, 4, -1, 9],
'f1': [5, 5, 3.1, -0.23],
'f2': [6, 6.7, np.nan, np.nan]}
data = DataFrame(data).astype({'f0': np.int32,
'f1': np.float32,
'f2': np.float64})
xf = ToString(columns={'f0.out': 'f0',
'f1.out': 'f1',
'f2.out': 'f2'})
result = xf.fit_transform(data)
assert_equal(result['f0.out'][1], '4')
assert_equal(result['f0.out'][2], '-1')
assert_equal(result['f1.out'][1], '5.000000')
assert_equal(result['f1.out'][2], '3.100000')
assert_equal(result['f2.out'][1], '6.700000')
assert_equal(result['f2.out'][2], 'NaN')
if __name__ == '__main__':
unittest.main()
|
tools/bundle_sdk.py | dartino/fletch | 144 | 11085447 | #!/usr/bin/env python
# Copyright (c) 2015, the Dartino project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
# This script creates a self-contained directory with all the tools,
# libraries, packages and samples needed for running Dartino.
# It assumes that the target architecture has been built in the passed-in
# --build_dir and that the corresponding 32-bit architecture has been built too.
# Finally it also assumes that out/ReleaseXARM/dartino-vm, out/ReleaseSTM
# and out/ReleaseCM4 have been built.
import optparse
import subprocess
import sys
import utils
import re
from sets import Set
from os import makedirs, listdir
from os.path import dirname, join, exists, basename, abspath
from shutil import copyfile, copymode, copytree, rmtree, ignore_patterns
from fnmatch import fnmatch
TOOLS_DIR = abspath(dirname(__file__))
SAMPLES = ['general', 'raspberry-pi2', 'stm32f746g-discovery',
'stm32f411re-nucleo']
def ParseOptions():
parser = optparse.OptionParser()
parser.add_option("--build_dir")
parser.add_option("--deb_package")
parser.add_option("--create_documentation", default=False,
action="store_true")
parser.add_option("--include_tools", default=False, action="store_true")
(options, args) = parser.parse_args()
return options
def CopyFile(src, dst):
copyfile(src, dst)
copymode(src, dst)
def EnsureDeleted(directory):
if exists(directory):
rmtree(directory)
if exists(directory):
raise Exception("Could not delete %s" % directory)
def BuildDir32(build_dir):
return build_dir.replace('X64', 'IA32')
def CopySharedLibraries(bin_dir, build_dir):
shared_libraries = ['mbedtls']
# Libraries are placed differently on mac and linux:
# Linux has lib/libNAME.so
# Mac has libNAME.dylib
os_name = utils.GuessOS()
lib_dst = join(bin_dir, 'lib') if os_name == 'linux' else bin_dir
lib_src = join(build_dir, 'lib') if os_name == 'linux' else build_dir
suffix = 'so' if os_name == 'linux' else 'dylib'
if os_name == 'linux':
makedirs(lib_dst)
for lib in shared_libraries:
lib_name = 'lib%s.%s' % (lib, suffix)
src = join(lib_src, lib_name)
dst = join(lib_dst, lib_name)
CopyFile(src, dst)
def CopyBinaries(bundle_dir, build_dir):
bin_dir = join(bundle_dir, 'bin')
internal = join(bundle_dir, 'internal')
makedirs(bin_dir)
makedirs(internal)
# Copy the dartino VM.
CopyFile(join(build_dir, 'dartino-vm'), join(bin_dir, 'dartino-vm'))
# Copy the 32-bit version of dartino-flashify.
CopyFile(join(BuildDir32(build_dir), 'dartino-flashify'),
join(bin_dir, 'dartino-flashify'))
# The driver for the sdk is specially named dartino_for_sdk.
CopyFile(join(build_dir, 'dartino_for_sdk'), join(bin_dir, 'dartino'))
# Copy the dartino tab completion
CopyFile(join('src', 'tools', 'driver', 'dartino-completion.sh'),
join(bin_dir, 'dartino-completion.sh'))
  # We move the dart vm to internal so that it is not on the users' path
CopyFile(join(build_dir, 'dart'), join(internal, 'dart'))
# natives.json is read relative to the dart binary
CopyFile(join(build_dir, 'natives.json'), join(internal, 'natives.json'))
CopySharedLibraries(bin_dir, build_dir)
def ThirdPartyDartSdkDir():
os_name = utils.GuessOS()
if os_name == "macos":
os_name = "mac"
return join('third_party', 'dart-sdk', os_name, 'dart-sdk')
def CopyDartSdk(bundle_dir):
source = ThirdPartyDartSdkDir()
target = join(bundle_dir, 'internal', 'dart-sdk')
print 'copying %s to %s' % (source, target)
copytree(source, target)
# Copy the platform descriptor, rewriting paths to point to the
# sdk location at `sdk_dir` instead of `repo_dir`.
def CopyPlatformDescriptor(bundle_dir, platform_descriptor_name, repo_dir,
sdk_dir):
platform_path = join('lib', platform_descriptor_name)
with open(platform_path) as f:
lines = f.read().splitlines()
dest = join(bundle_dir, 'internal', 'dartino_lib', platform_descriptor_name)
print("Copying from %s to %s adjusting paths." % (platform_path, dest))
with open(dest, 'w') as generated:
for line in lines:
if line.startswith('#') or line.startswith('['):
pass
else:
# The property-lines consist of name:uri. The uri can
# contain a ':' so we only split at the first ':'.
parts = line.split(':', 1)
if len(parts) == 2:
name, path = parts
path = path.strip()
if path.startswith(repo_dir):
# Dart-sdk library
path = path.replace(repo_dir, sdk_dir)
line = "%s: %s" % (name, path)
generated.write('%s\n' % line)
# We have two lib dependencies: the libs from the sdk and the libs dir with
# patch files from the dartino repo.
def CopyLibs(bundle_dir, build_dir):
internal = join(bundle_dir, 'internal')
dartino_lib = join(internal, 'dartino_lib')
dart_lib = join(internal, 'dart_lib')
copytree('lib', dartino_lib)
copytree('third_party/dart/sdk/lib', dart_lib)
CopyPlatformDescriptor(bundle_dir, 'dartino_mobile.platform',
'../third_party/dart/sdk/lib', '../dart_lib')
CopyPlatformDescriptor(bundle_dir, 'dartino_embedded.platform',
'../third_party/dart/sdk/lib', '../dart_lib')
def CopyInclude(bundle_dir):
include = join(bundle_dir, 'include')
copytree('include', include)
def CopyInternalPackages(bundle_dir, build_dir):
internal_pkg = join(bundle_dir, 'internal', 'pkg')
makedirs(internal_pkg)
  # Copy the pkg dirs for tools and the pkg dirs referred to from their
# .packages files.
copied_pkgs = Set()
for tool in ['dartino_compiler', 'flash_sd_card']:
copytree(join('pkg', tool), join(internal_pkg, tool))
tool_pkg = 'pkg/%s' % tool
fixed_packages_file = join(internal_pkg, tool, '.packages')
lines = []
with open(join(tool_pkg, '.packages')) as f:
lines = f.read().splitlines()
with open(fixed_packages_file, 'w') as generated:
for l in lines:
if l.startswith('#') or l.startswith('%s:lib' % tool):
generated.write('%s\n' % l)
else:
components = l.split(':')
name = components[0]
relative_path = components[1]
source = join(tool_pkg, relative_path)
target = join(internal_pkg, name)
print source
if not target in copied_pkgs:
print 'copying %s to %s' % (source, target)
makedirs(target)
assert(source.endswith('lib'))
copytree(source, join(target, 'lib'))
copied_pkgs.add(target)
generated.write('%s:../%s/lib\n' % (name, name))
def CopyPackagesAndSettingsTemplate(bundle_dir):
target_dir = join(bundle_dir, 'pkg')
makedirs(target_dir)
copyfile(join('pkg', 'dartino_sdk_dartino_settings'),
join(bundle_dir, 'internal', '.dartino-settings'))
# Read the pkg/dartino-sdk.packages file
# and generate internal/dartino-sdk.packages
with open(join('pkg', 'dartino-sdk.packages')) as f:
lines = f.read().splitlines()
with open(join(bundle_dir, 'internal', 'dartino-sdk.packages'), 'w') as f:
for line in lines:
parts = line.split(':')
name, path = parts
copytree(join('pkg', path, '..'), join(target_dir, name))
line = "%s:../pkg/%s/lib" % (name, name)
f.write('%s\n' % line)
# Update the dartino_lib/dartino/lib/_embedder.yaml file
# based upon the SDK structure
embedderPath = join(target_dir, 'dartino', 'lib', '_embedder.yaml')
with open(embedderPath) as f:
s = f.read()
s = s.replace('../../../lib/',
'../../../internal/dartino_lib/')
s = s.replace('../../../third_party/dart/sdk/lib/',
'../../../internal/dart_lib/')
with open(embedderPath, 'w') as f:
f.write(s)
def CopyPlatforms(bundle_dir):
# Only copy parts of the platforms directory. We also have source
# code there at the moment.
target_platforms_dir = join(bundle_dir, 'platforms')
for platforms_dir in [
'bin', 'raspberry-pi2', 'stm32f746g-discovery', 'stm32f411re-nucleo']:
copytree(join('platforms', platforms_dir),
join(target_platforms_dir, platforms_dir))
def CreateSnapshot(dart_executable, dart_file, snapshot):
# TODO(karlklose): Run 'build_dir/dartino export' instead?
cmd = [dart_executable, '-c', '--packages=pkg/dartino_compiler/.packages',
'-Dsnapshot="%s"' % snapshot,
'-Dpackages=".packages"',
'-Dtest.dartino_settings_file_name=".dartino-settings"',
'tests/dartino_compiler/run.dart', dart_file]
print 'Running %s' % ' '.join(cmd)
subprocess.check_call(' '.join(cmd), shell=True)
def CreateAgentSnapshot(bundle_dir, build_dir):
platforms = join(bundle_dir, 'platforms')
data_dir = join(platforms, 'raspberry-pi2', 'data')
dart = join(build_dir, 'dart')
snapshot = join(data_dir, 'dartino-agent.snapshot')
CreateSnapshot(dart, 'pkg/dartino_agent/bin/agent.dart', snapshot)
def CopyArmDebPackage(bundle_dir, package):
target = join(bundle_dir, 'platforms', 'raspberry-pi2')
CopyFile(package, join(target, basename(package)))
def CopyAdditionalFiles(bundle_dir):
for extra in ['README.md', 'LICENSE.md']:
CopyFile(extra, join(bundle_dir, extra))
def CopyArm(bundle_dir):
binaries = ['dartino-vm', 'natives.json']
raspberry = join(bundle_dir, 'platforms', 'raspberry-pi2')
bin_dir = join(raspberry, 'bin')
makedirs(bin_dir)
build_dir = 'out/ReleaseXARM'
for v in binaries:
CopyFile(join(build_dir, v), join(bin_dir, v))
def CopySTM(bundle_dir):
libraries = [
'libdartino.a',
'libfreertos_dartino.a',
'libstm32f746g-discovery.a',
'libmbedtls.a',
]
disco = join(bundle_dir, 'platforms', 'stm32f746g-discovery')
lib_dir = join(disco, 'lib')
makedirs(lib_dir)
build_dir = 'out/ReleaseSTM'
for lib in libraries:
CopyFile(join(build_dir, lib), join(lib_dir, basename(lib)))
def CopyCM4(bundle_dir):
libraries = [
'libdartino.a',
'libfreertos_dartino.a',
'libstm32f411xe-nucleo.a',
]
disco = join(bundle_dir, 'platforms', 'stm32f411re-nucleo')
lib_dir = join(disco, 'lib')
makedirs(lib_dir)
build_dir = 'out/ReleaseCM4'
for lib in libraries:
CopyFile(join(build_dir, lib), join(lib_dir, basename(lib)))
def CopySamples(bundle_dir):
target = join(bundle_dir, 'samples')
for v in SAMPLES:
copytree(join('samples', v), join(target, v))
CopyFile(join('samples', 'dartino.yaml'), join(target, 'dartino.yaml'))
def CreateDocumentation():
# Ensure all the dependencies of dartinodoc are available.
exit_code = subprocess.call(
[join('..', '..',ThirdPartyDartSdkDir(), 'bin', 'pub'), 'get'],
cwd='pkg/dartinodoc')
if exit_code != 0:
raise OSError(exit_code)
doc_dest = join('out', 'docs')
EnsureDeleted(doc_dest)
# Determine the sdk and third_party packages
sdk_packages = Set()
third_party_packages = Set()
with open(join('pkg', 'dartino-sdk.packages')) as f:
lines = f.read().splitlines()
for line in lines:
parts = line.split(':')
name, path = parts
if path.startswith('../third_party/'):
third_party_packages.add(name)
else:
sdk_packages.add(name)
exit_code = subprocess.call(
[join(ThirdPartyDartSdkDir(), 'bin', 'dart'),
'-c', 'pkg/dartinodoc/bin/dartinodoc.dart',
'--output', doc_dest,
'--sdk-packages', ",".join(sdk_packages),
'--third-party-packages', ",".join(third_party_packages),
'--version', utils.GetSemanticSDKVersion()])
if exit_code != 0:
raise OSError(exit_code)
def CopyTools(bundle_dir):
tools_dir = join(bundle_dir, 'tools')
makedirs(tools_dir)
tools = ['gcc-arm-embedded', 'openocd']
for tool in tools:
tool_dir = join(tools_dir, tool)
copytree('third_party/%s/linux/%s' % (tool, tool), tool_dir)
def Main():
options = ParseOptions()
build_dir = options.build_dir
if not build_dir:
print 'Please specify a build directory with "--build_dir".'
sys.exit(1)
sdk_dir = join(build_dir, 'dartino-sdk')
print 'Creating sdk bundle for %s in %s' % (build_dir, sdk_dir)
deb_package = options.deb_package
with utils.TempDir() as sdk_temp:
if options.create_documentation:
CreateDocumentation()
CopyBinaries(sdk_temp, build_dir)
CopyDartSdk(sdk_temp)
CopyInternalPackages(sdk_temp, build_dir)
CopyLibs(sdk_temp, build_dir)
CopyInclude(sdk_temp)
CopyPackagesAndSettingsTemplate(sdk_temp)
CopyPlatforms(sdk_temp)
CopyArm(sdk_temp)
CreateAgentSnapshot(sdk_temp, build_dir)
CopySTM(sdk_temp)
CopyCM4(sdk_temp)
CopySamples(sdk_temp)
CopyAdditionalFiles(sdk_temp)
if deb_package:
CopyArmDebPackage(sdk_temp, deb_package)
EnsureDeleted(sdk_dir)
if options.include_tools:
CopyTools(sdk_temp)
copytree(sdk_temp, sdk_dir)
print 'Created sdk bundle for %s in %s' % (build_dir, sdk_dir)
if __name__ == '__main__':
sys.exit(Main())
|
tests/requests_client/equality_test.py | educatedguessing/bravado | 600 | 11085456 | # -*- coding: utf-8 -*-
import pytest
from bravado.requests_client import RequestsClient
from bravado.requests_client import RequestsFutureAdapter
@pytest.fixture
def http_client():
return RequestsClient()
def test_equality_of_the_same_http_client(http_client):
assert http_client == http_client
def test_equality_of_different_http_clients_with_the_same_configurations(http_client):
assert http_client == RequestsClient()
def test_equality_of_different_http_clients_with_different_configurations(http_client):
class CustomAdapter(RequestsFutureAdapter):
pass
assert http_client != RequestsClient(future_adapter_class=CustomAdapter)
def test_client_hashability(http_client):
# The test wants to ensure that the HttpClient instance is hashable.
# If calling hash does not throw an exception than we've validated the assumption
hash(http_client)
|
src/sagemaker/serverless/predictor.py | longyuzhao/sagemaker-python-sdk | 1,690 | 11085492 | <filename>src/sagemaker/serverless/predictor.py
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Predictors that are hosted on serverless compute."""
from __future__ import absolute_import
from typing import Optional, Tuple
import boto3
import botocore
from sagemaker import deserializers, serializers
from sagemaker.predictor import PredictorBase
from sagemaker.deprecations import deprecation_warning
@deprecation_warning(
msg="Based on customer experience and feedback an"
" alternative support will be added in near future",
date="10/27/2021",
)
class LambdaPredictor(PredictorBase):
"""A deployed model hosted on Lambda."""
def __init__(
self, function_name: str, client: Optional[botocore.client.BaseClient] = None
) -> None:
"""Initialize instance attributes.
Arguments:
function_name: The name of the function.
client: The Lambda client used to interact with Lambda.
"""
self._client = client or boto3.client("lambda")
self._function_name = function_name
self._serializer = serializers.JSONSerializer()
self._deserializer = deserializers.JSONDeserializer()
def predict(self, data: dict) -> dict:
"""Invoke the Lambda function specified in the constructor.
This function is synchronous. It will only return after the function
has produced a prediction.
Arguments:
data: The data sent to the Lambda function as input.
Returns:
The data returned by the Lambda function.
"""
response = self._client.invoke(
FunctionName=self._function_name,
InvocationType="RequestResponse",
Payload=self._serializer.serialize(data),
)
return self._deserializer.deserialize(
response["Payload"],
response["ResponseMetadata"]["HTTPHeaders"]["content-type"],
)
def delete_predictor(self) -> None:
"""Destroy the Lambda function specified in the constructor."""
self._client.delete_function(FunctionName=self._function_name)
@property
def content_type(self) -> str:
"""The MIME type of the data sent to the Lambda function."""
return self._serializer.CONTENT_TYPE
@property
def accept(self) -> Tuple[str]:
"""The content type(s) that are expected from the Lambda function."""
return self._deserializer.ACCEPT
@property
def function_name(self) -> str:
"""The name of the Lambda function this predictor invokes."""
return self._function_name
|
demo/demo.py | hanranCode/mega.pytorch | 521 | 11085498 | import glob
import os
import argparse
from mega_core.config import cfg
from predictor import VIDDemo
parser = argparse.ArgumentParser(description="PyTorch Object Detection Visualization")
parser.add_argument(
"method",
choices=["base", "dff", "fgfa", "rdn", "mega"],
default="base",
type=str,
help="which method to use",
)
parser.add_argument(
"config",
default="configs/vid_R_101_C4_1x.yaml",
metavar="FILE",
help="path to config file",
)
parser.add_argument(
"checkpoint",
default="R_101.pth",
help="The path to the checkpoint for test.",
)
parser.add_argument(
"--visualize-path",
default="datasets/ILSVRC2015/Data/VID/val/ILSVRC2015_val_00003001",
# default="datasets/ILSVRC2015/Data/VID/snippets/val/ILSVRC2015_val_00003001.mp4",
help="the folder or a video to visualize.",
)
parser.add_argument(
"--suffix",
default=".JPEG",
help="the suffix of the images in the image folder.",
)
parser.add_argument(
"--output-folder",
default="demo/visualization/base",
help="where to store the visulization result.",
)
parser.add_argument(
"--video",
action="store_true",
help="if True, input a video for visualization.",
)
parser.add_argument(
"--output-video",
action="store_true",
help="if True, output a video.",
)
args = parser.parse_args()
cfg.merge_from_file("configs/BASE_RCNN_1gpu.yaml")
cfg.merge_from_file(args.config)
cfg.merge_from_list(["MODEL.WEIGHT", args.checkpoint])
vid_demo = VIDDemo(
cfg,
method=args.method,
confidence_threshold=0.7,
output_folder=args.output_folder
)
if not args.video:
visualization_results = vid_demo.run_on_image_folder(args.visualize_path, suffix=args.suffix)
else:
visualization_results = vid_demo.run_on_video(args.visualize_path)
if not args.output_video:
vid_demo.generate_images(visualization_results)
else:
vid_demo.generate_video(visualization_results) |
benchmarks/orc/runorc-write.py | rahulyesantharao/tuplex | 778 | 11085503 | import tuplex
import time
import sys
import json
import os
import glob
import argparse
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Write to Orc file format')
parser.add_argument('--path', type=str, dest='data_path', default='data/large100MB.csv',
help='path or pattern to zillow data')
parser.add_argument('--output-path', type=str, dest='output_path', default='tuplex_output/',
help='specify path where to save output data files')
args = parser.parse_args()
assert args.data_path, 'need to set data path!'
# config vars
paths = [args.data_path]
output_path = args.output_path
# explicit globbing because dask can't handle patterns well...
if not os.path.isfile(args.data_path):
paths = sorted(glob.glob(os.path.join(args.data_path, '*.csv')))
else:
paths = [args.data_path]
if not paths:
print('found no zillow data to process, abort.')
sys.exit(1)
print('>>> running {} on {}'.format('tuplex', paths))
# configuration, make sure to give enough runtime memory to the executors!
conf = {"webui.enable" : False,
"executorCount" : 16,
"executorMemory" : "2G",
"driverMemory" : "2G",
"partitionSize" : "32MB",
"runTimeMemory" : "128MB",
"useLLVMOptimizer" : True,
"optimizer.nullValueOptimization" : False,
"csv.selectionPushdown" : True}
if os.path.exists('tuplex_config.json'):
with open('tuplex_config.json') as fp:
conf = json.load(fp)
tstart = time.time()
import tuplex
ctx = tuplex.Context(conf)
startup_time = time.time() - tstart
print('Tuplex startup time: {}'.format(startup_time))
tstart = time.time()
# Tuplex pipeline
data = ctx.csv(','.join(paths)) \
.toorc(output_path + "/out.orc")
job_time = time.time() - tstart
print('Tuplex job time: {} s'.format(job_time))
# print stats as last line
print(json.dumps({"startupTime" : startup_time, "jobTime" : job_time})) |
homeassistant/components/iammeter/sensor.py | learn-home-automation/core | 22,481 | 11085530 | <filename>homeassistant/components/iammeter/sensor.py
"""Support for iammeter via local API."""
import asyncio
from datetime import timedelta
import logging
import async_timeout
from iammeter import real_time_api
from iammeter.power_meter import IamMeterError
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity
from homeassistant.const import CONF_HOST, CONF_NAME, CONF_PORT
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers import debounce
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
UpdateFailed,
)
_LOGGER = logging.getLogger(__name__)
DEFAULT_PORT = 80
DEFAULT_DEVICE_NAME = "IamMeter"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_DEVICE_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
}
)
SCAN_INTERVAL = timedelta(seconds=30)
PLATFORM_TIMEOUT = 8
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Platform setup."""
config_host = config[CONF_HOST]
config_port = config[CONF_PORT]
config_name = config[CONF_NAME]
try:
async with async_timeout.timeout(PLATFORM_TIMEOUT):
api = await real_time_api(config_host, config_port)
except (IamMeterError, asyncio.TimeoutError) as err:
_LOGGER.error("Device is not ready")
raise PlatformNotReady from err
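    # Poll the meter inside a timeout; errors raise UpdateFailed so the coordinator can retry on schedule.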
async def async_update_data():
try:
async with async_timeout.timeout(PLATFORM_TIMEOUT):
return await api.get_data()
except (IamMeterError, asyncio.TimeoutError) as err:
raise UpdateFailed from err
coordinator = DataUpdateCoordinator(
hass,
_LOGGER,
name=DEFAULT_DEVICE_NAME,
update_method=async_update_data,
update_interval=SCAN_INTERVAL,
request_refresh_debouncer=debounce.Debouncer(
hass, _LOGGER, cooldown=0.3, immediate=True
),
)
await coordinator.async_refresh()
entities = []
for sensor_name, (row, idx, unit) in api.iammeter.sensor_map().items():
serial_number = api.iammeter.serial_number
uid = f"{serial_number}-{row}-{idx}"
entities.append(IamMeter(coordinator, uid, sensor_name, unit, config_name))
async_add_entities(entities)
class IamMeter(CoordinatorEntity, SensorEntity):
"""Class for a sensor."""
def __init__(self, coordinator, uid, sensor_name, unit, dev_name):
"""Initialize an iammeter sensor."""
super().__init__(coordinator)
self.uid = uid
self.sensor_name = sensor_name
self.unit = unit
self.dev_name = dev_name
@property
def native_value(self):
"""Return the state of the sensor."""
return self.coordinator.data.data[self.sensor_name]
@property
def unique_id(self):
"""Return unique id."""
return self.uid
@property
def name(self):
"""Name of this iammeter attribute."""
return f"{self.dev_name} {self.sensor_name}"
@property
def icon(self):
"""Icon for each sensor."""
return "mdi:flash"
@property
def native_unit_of_measurement(self):
"""Return the unit of measurement."""
return self.unit
|
opticspy/ray_tracing/surface.py | Graylien/opticspy | 306 | 11085546 | <reponame>Graylien/opticspy
from __future__ import division as __division__
import numpy as __np__
import matplotlib.pyplot as __plt__
from . import glass_funcs
# Surface Class
class Surface(object):
'''
Surface Class
'''
def __init__(self,wavelength_list,number,radius,thickness,glass,STO,__diameter__):
self.wavelength_list = wavelength_list
self.number = number
self.radius = radius
self.glass = glass
self.indexlist = glass_funcs.glass2indexlist(wavelength_list,glass)
self.thickness = thickness
self.STO = STO
self.__diameter__ = __diameter__
def list(self):
print('self_number',self.number)
print(self.radius,self.thickness,self.indexlist)
def add(self,number,radius,thickness,glass,STO,output):
"""
add a surface instance to a Lens Class
input: a Lens Class
"""
if output == True:
s1 = str(number)
s2 = outputjudge(radius)
s3 = outputjudge(thickness)
s4 = glass
s5 = str(STO)
print('-----------------------Add surface:-------------------------------')
print('------------------------------------------------------------------')
print("| {0:<5s} | {1:<10s} | {2:<11s} | {3:<15s} | {4:<5s} |".\
format('Num','Radius','Thickness','Glass','STO'))
print('------------------------------------------------------------------')
print("| {0:<5s} | {1:<10s} | {2:<11s} | {3:<15s} | {4:<5s} |".\
format(s1,s2,s3,s4,s5))
print('------------------------------------------------------------------')
else:
print('Add surface: ',str(number))
New_Surface = Surface(wavelength_list = self.wavelength_list,number=number,\
radius=radius,thickness=thickness,glass=glass,STO=STO,\
__diameter__=0)
self.surface_list.append(New_Surface)
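# Helper used by Surface.add: format a radius/thickness for display, printing 'Infinity' for very large magnitudes.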
def outputjudge(number):
if number >= 1000000 or number <= -1000000:
s = 'Infinity'
else:
s = str(round(number,4))
return s
# def update(number,key,value):
# if key = 'STO':
# else:
# Lens_name.surfacelist[number].key = new_value
# def delete(number):
# print 'delete surface x'
def list_index(self):
print(self.indexlist)
|
rasa/nlu/extractors/mitie_entity_extractor.py | fintzd/rasa | 9,701 | 11085551 | from __future__ import annotations
import logging
from rasa.nlu.tokenizers.tokenizer import Tokenizer
import typing
from typing import Any, Dict, List, Optional, Text, Type
from rasa.engine.graph import GraphComponent, ExecutionContext
from rasa.engine.recipes.default_recipe import DefaultV1Recipe
from rasa.engine.storage.resource import Resource
from rasa.engine.storage.storage import ModelStorage
from rasa.nlu.constants import TOKENS_NAMES
from rasa.shared.nlu.constants import (
ENTITY_ATTRIBUTE_CONFIDENCE,
ENTITY_ATTRIBUTE_START,
ENTITY_ATTRIBUTE_END,
ENTITY_ATTRIBUTE_TYPE,
ENTITY_ATTRIBUTE_VALUE,
TEXT,
ENTITIES,
)
from rasa.nlu.utils.mitie_utils import MitieModel, MitieNLP
from rasa.nlu.extractors.extractor import EntityExtractorMixin
from rasa.shared.nlu.training_data.training_data import TrainingData
from rasa.shared.nlu.training_data.message import Message
import rasa.shared.utils.io
from rasa.shared.exceptions import InvalidConfigException
logger = logging.getLogger(__name__)
if typing.TYPE_CHECKING:
import mitie
@DefaultV1Recipe.register(
DefaultV1Recipe.ComponentType.ENTITY_EXTRACTOR,
is_trainable=True,
model_from="MitieNLP",
)
class MitieEntityExtractor(GraphComponent, EntityExtractorMixin):
"""A Mitie Entity Extractor (which is a thin wrapper around `Dlib-ml`)."""
MITIE_RESOURCE_FILE = "mitie_ner.dat"
@classmethod
def required_components(cls) -> List[Type]:
"""Components that should be included in the pipeline before this component."""
return [MitieNLP, Tokenizer]
@staticmethod
def required_packages() -> List[Text]:
"""Any extra python dependencies required for this component to run."""
return ["mitie"]
@staticmethod
def get_default_config() -> Dict[Text, Any]:
"""The component's default config (see parent class for full docstring)."""
return {
"num_threads": 1,
}
def __init__(
self,
config: Dict[Text, Any],
model_storage: ModelStorage,
resource: Resource,
ner: Optional["mitie.named_entity_extractor"] = None,
) -> None:
"""Creates a new instance.
Args:
config: The configuration.
model_storage: Storage which graph components can use to persist and load
themselves.
resource: Resource locator for this component which can be used to persist
and load itself from the `model_storage`.
ner: Mitie named entity extractor
"""
self._config = config
self._model_storage = model_storage
self._resource = resource
self.validate_config(self._config)
self._ner = ner
def validate_config(cls, config: Dict[Text, Any]) -> None:
"""Checks whether the given configuration is valid.
Args:
config: a configuration for a Mitie entity extractor component
"""
num_threads = config.get("num_threads")
if num_threads is None or num_threads <= 0:
raise InvalidConfigException(
f"Expected `num_threads` to be some value >= 1 (default: 1)."
f"but received {num_threads}"
)
@classmethod
def create(
cls,
config: Dict[Text, Any],
model_storage: ModelStorage,
resource: Resource,
execution_context: ExecutionContext,
) -> GraphComponent:
"""Creates a new `MitieEntityExtractor`.
Args:
config: This config overrides the `default_config`.
model_storage: Storage which graph components can use to persist and load
themselves.
resource: Resource locator for this component which can be used to persist
and load itself from the `model_storage`.
execution_context: Information about the current graph run. Unused.
Returns: An instantiated `MitieEntityExtractor`.
"""
return cls(config, model_storage, resource)
def train(self, training_data: TrainingData, model: MitieModel) -> Resource:
"""Trains a MITIE named entity recognizer.
Args:
training_data: the training data
model: a MitieModel
Returns:
resource for loading the trained model
"""
import mitie
trainer = mitie.ner_trainer(str(model.model_path))
trainer.num_threads = self._config["num_threads"]
# check whether there are any (not pre-trained) entities in the training data
found_one_entity = False
# filter out pre-trained entity examples
filtered_entity_examples = self.filter_trainable_entities(
training_data.nlu_examples
)
for example in filtered_entity_examples:
sample = self._prepare_mitie_sample(example)
found_one_entity = sample.num_entities > 0 or found_one_entity
trainer.add(sample)
# Mitie will fail to train if there is not a single entity tagged
if found_one_entity:
self._ner = trainer.train()
else:
rasa.shared.utils.io.raise_warning(
f"{self.__class__.__name__} could not be trained because no trainable "
f"entities where found in the given training data. Please add some "
f"NLU training examples that include entities where the `extractor` "
f"is either `None` or '{self.__class__.__name__}'."
)
self.persist()
return self._resource
@staticmethod
def _prepare_mitie_sample(training_example: Message) -> Any:
"""Prepare a message so that it can be passed to a MITIE trainer."""
import mitie
text = training_example.get(TEXT)
tokens = training_example.get(TOKENS_NAMES[TEXT])
sample = mitie.ner_training_instance([t.text for t in tokens])
for ent in training_example.get(ENTITIES, []):
try:
# if the token is not aligned an exception will be raised
start, end = MitieEntityExtractor.find_entity(ent, text, tokens)
except ValueError as e:
rasa.shared.utils.io.raise_warning(
f"Failed to use example '{text}' to train MITIE "
f"entity extractor. Example will be skipped."
f"Error: {e}"
)
continue
try:
# mitie will raise an exception on malicious
# input - e.g. on overlapping entities
sample.add_entity(list(range(start, end)), ent["entity"])
except Exception as e:
rasa.shared.utils.io.raise_warning(
f"Failed to add entity example "
f"'{str(e)}' of sentence '{str(text)}'. "
f"Example will be ignored. Reason: "
f"{e}"
)
continue
return sample
def process(self, messages: List[Message], model: MitieModel,) -> List[Message]:
"""Extracts entities from messages and appends them to the attribute.
        If no patterns were found during training, then the given messages will not
be modified. In particular, if no `ENTITIES` attribute exists yet, then
it will *not* be created.
If no pattern can be found in the given message, then no entities will be
added to any existing list of entities. However, if no `ENTITIES` attribute
exists yet, then an `ENTITIES` attribute will be created.
Returns:
the given list of messages that have been modified
"""
if not self._ner:
return messages
for message in messages:
entities = self._extract_entities(message, mitie_model=model)
extracted = self.add_extractor_name(entities)
message.set(
ENTITIES, message.get(ENTITIES, []) + extracted, add_to_output=True
)
return messages
def _extract_entities(
self, message: Message, mitie_model: MitieModel,
) -> List[Dict[Text, Any]]:
"""Extract entities of the given type from the given user message.
Args:
message: a user message
mitie_model: MitieModel containing a `mitie.total_word_feature_extractor`
Returns:
a list of dictionaries describing the entities
"""
text = message.get(TEXT)
tokens = message.get(TOKENS_NAMES[TEXT])
entities = []
token_texts = [token.text for token in tokens]
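        # MITIE consumes the plain token texts plus the shared word feature extractor bundled in the MitieModel.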
mitie_entities = self._ner.extract_entities(
token_texts, mitie_model.word_feature_extractor,
)
for e in mitie_entities:
if len(e[0]):
start = tokens[e[0][0]].start
end = tokens[e[0][-1]].end
entities.append(
{
ENTITY_ATTRIBUTE_TYPE: e[1],
ENTITY_ATTRIBUTE_VALUE: text[start:end],
ENTITY_ATTRIBUTE_START: start,
ENTITY_ATTRIBUTE_END: end,
ENTITY_ATTRIBUTE_CONFIDENCE: None,
}
)
return entities
@classmethod
def load(
cls,
config: Dict[Text, Any],
model_storage: ModelStorage,
resource: Resource,
execution_context: ExecutionContext,
**kwargs: Any,
) -> MitieEntityExtractor:
"""Loads trained component (see parent class for full docstring)."""
import mitie
try:
with model_storage.read_from(resource) as model_path:
ner_file = model_path / cls.MITIE_RESOURCE_FILE
if not ner_file.exists():
raise FileNotFoundError(
f"Expected a MITIE extractor file at {ner_file}."
)
ner = mitie.named_entity_extractor(str(ner_file))
return cls(config, model_storage, resource, ner=ner)
except (FileNotFoundError, ValueError) as e:
logger.debug(
f"Failed to load {cls.__name__} from model storage. "
f"This can happen if the model could not be trained because regexes "
f"could not be extracted from the given training data - and hence "
f"could not be persisted. Error: {e}."
)
return cls(config, model_storage, resource)
def persist(self) -> None:
"""Persist this model."""
if not self._ner:
return
with self._model_storage.write_to(self._resource) as model_path:
ner_file = model_path / self.MITIE_RESOURCE_FILE
self._ner.save_to_disk(str(ner_file), pure_model=True)
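# Illustrative sketch (not part of the original module): how `_prepare_mitie_sample`
# maps an entity annotation onto MITIE's token-index ranges. The tokens, span and
# label below are hypothetical example values.
#
#   import mitie
#   tokens = ["book", "a", "flight", "to", "Berlin"]
#   sample = mitie.ner_training_instance(tokens)
#   # `find_entity` would resolve the character span of "Berlin" to the token
#   # range [4, 5); MITIE expects the list of covered token indices:
#   sample.add_entity(list(range(4, 5)), "city")
#   # Many such samples are added to a `mitie.ner_trainer` before `trainer.train()`.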
|
windows_packages_gpu/torch/nn/qat/modules/__init__.py | codeproject/DeepStack | 353 | 11085626 |
from .linear import Linear
from .conv import Conv2d
__all__ = [
'Linear',
'Conv2d',
]
|
py/testdir_single_jvm/test_PCA_ignore_enums_fvec.py | gigliovale/h2o | 882 | 11085627 |
import unittest, random, sys, time, string
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_browse as h2b, h2o_import as h2i, h2o_pca, h2o_jobs as h2j
def write_syn_dataset(csvPathname, rowCount, colCount, SEED):
# do we need more than one random generator?
r1 = random.Random(SEED)
dsf = open(csvPathname, "w+")
for i in range(rowCount):
rowData = []
for j in range(colCount):
if j%2==0:
ri1 = int(r1.triangular(1,5,2.5))
else:
# odd lines get enums
# hack to get lots of enums. random length 16 in odd cols
# so we can ignore all odd cols below
ri1 = ''.join(random.choice(string.ascii_uppercase + string.digits) for x in range(16))
rowData.append(ri1)
# don't need an output col
rowDataStr = map(str,rowData)
rowDataCsv = ",".join(rowDataStr)
dsf.write(rowDataCsv + "\n")
dsf.close()
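# Illustrative sketch (not part of the original test): with colCount=4 a generated
# row alternates a small integer (even columns) with a random 16-character enum
# (odd columns), e.g. roughly:
#
#   2,K3J9Q2VW7PL0XZ4B,3,M8T1RD5YQ2WN6SA0
#
# One of the enum columns ('C2') is later dropped via `ignored_cols` in the PCA call.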
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED
SEED = h2o.setup_random_seed()
h2o.init(1,java_heap_GB=14, enable_benchmark_log=True)
@classmethod
def tearDownClass(cls):
### time.sleep(3600)
h2o.tear_down_cloud()
def test_PCA_ignore_enums_fvec(self):
SYNDATASETS_DIR = h2o.make_syn_dir()
tryList = [
(100, 3, 'cA', 300),
# (10001, 2, 'cA', 300),
# (10000, 500, 'cH', 300),
# (10000, 1000, 'cI', 300),
]
for (rowCount, colCount, hex_key, timeoutSecs) in tryList:
SEEDPERFILE = random.randint(0, sys.maxint)
csvFilename = 'syn_' + "binary" + "_" + str(rowCount) + 'x' + str(colCount) + '.csv'
csvPathname = SYNDATASETS_DIR + '/' + csvFilename
print "Creating random", csvPathname
write_syn_dataset(csvPathname, rowCount, colCount, SEEDPERFILE)
# PARSE ****************************************
start = time.time()
modelKey = 'PCAModelKey'
# Parse ****************************************
parseResult = h2i.import_parse(bucket=None, path=csvPathname, schema='put',
hex_key=hex_key, timeoutSecs=timeoutSecs, doSummary=False)
elapsed = time.time() - start
print "parse end on ", csvPathname, 'took', elapsed, 'seconds',\
"%d pct. of timeout" % ((elapsed*100)/timeoutSecs)
print "parse result:", parseResult['destination_key']
# Logging to a benchmark file
algo = "Parse"
l = '{:d} jvms, {:d}GB heap, {:s} {:s} {:6.2f} secs'.format(
len(h2o.nodes), h2o.nodes[0].java_heap_GB, algo, csvFilename, elapsed)
print l
h2o.cloudPerfH2O.message(l)
inspect = h2o_cmd.runInspect(key=parseResult['destination_key'])
print "\n" + csvPathname, \
" numRows:", "{:,}".format(inspect['numRows']), \
" numCols:", "{:,}".format(inspect['numCols'])
numRows = inspect['numRows']
numCols = inspect['numCols']
# PCA(tolerance iterate)****************************************
for tolerance in [i/10.0 for i in range(11)]:
params = {
'ignored_cols': 'C2',
'destination_key': modelKey,
'tolerance': tolerance,
'standardize': 1,
}
print "Using these parameters for PCA: ", params
kwargs = params.copy()
PCAResult = {'python_elapsed': 0, 'python_%timeout': 0}
start = time.time()
pcaResult = h2o_cmd.runPCA(parseResult=parseResult, timeoutSecs=timeoutSecs, noPoll=True, **kwargs)
h2j.pollStatsWhileBusy(timeoutSecs=timeoutSecs)
elapsed = time.time() - start
PCAResult['python_elapsed'] = elapsed
PCAResult['python_%timeout'] = 1.0*elapsed / timeoutSecs
print "PCA completed in", PCAResult['python_elapsed'], "seconds.", \
"%f pct. of timeout" % (PCAResult['python_%timeout'])
print "Checking PCA results: "
pcaView = h2o_cmd.runPCAView(modelKey = modelKey)
h2o_pca.simpleCheckPCA(self,pcaView)
h2o_pca.resultsCheckPCA(self,pcaView)
# Logging to a benchmark file
algo = "PCA " + " tolerance=" + str(tolerance)
l = '{:d} jvms, {:d}GB heap, {:s} {:s} {:6.2f} secs'.format(
len(h2o.nodes), h2o.nodes[0].java_heap_GB, algo, csvFilename, PCAResult['python_elapsed'])
print l
h2o.cloudPerfH2O.message(l)
pcaInspect = pcaView
# errors from end of list? is that the last tree?
sdevs = pcaInspect["pca_model"]["sdev"]
print "PCA: standard deviations are :", sdevs
print
print
propVars = pcaInspect["pca_model"]["propVar"]
print "PCA: Proportions of variance by eigenvector are :", propVars
print
print
if __name__ == '__main__':
h2o.unit_main()
|
tests/circular_c.py | matan-h/friendly | 287 | 11085678 |
# Attribute error for a partially initialized module
import circular_c
a = circular_c.something
|
tests/test_eraserhead_output.py | yozlet/django-eraserhead | 216 | 11085698 |
# encoding: utf-8
import sys
import os
import re
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
from django.test import TestCase, override_settings
from django.apps import apps
from django.conf import settings
import term
from bar.models import Article
def capture_stdout(command, *args, **kwargs):
out, sys.stdout = sys.stdout, StringIO()
try:
command(*args, **kwargs)
sys.stdout.seek(0)
output = sys.stdout.read()
sys.stdout = out
return output
finally:
sys.stdout = out
def get_index_page(client, uri):
client.get(uri)
@override_settings(INSTALLED_APPS=('eraserhead.apps.EraserheadConfig', 'bar'), ERASERHEAD_ENABLED=True)
class EraserheadOutputTestCase(TestCase):
""" Integration test """
def setUp(self):
super(EraserheadOutputTestCase, self).setUp()
Article.objects.create(title='foobar', content=('spam ' * 10))
Article.objects.create(title='barfoo', content=('spam ' * 10))
def tearDown(self):
super(EraserheadOutputTestCase, self).tearDown()
apps.clear_cache()
def test_eraserhead_output(self):
""" Should display QuerySets information """
output = term.strip(capture_stdout(get_index_page, self.client, '/'))
self.assertIn("ERASERHEAD STATS", output)
self.assertEqual(output.count("QuerySet #"), 2)
self.assertEqual(output.count("Model: Article"), 2)
# First QS
self.assertIn('Instances created: 2\n', output)
self.assertIn('Used fields: title\n', output)
self.assertTrue(re.search(r'Unused\sfields\:\s(content|id),\s(content|id)\n', output))
self.assertIn("Recommendations: Model.objects.only('title')\n", output)
self.assertIn('bar/views.py", line 7', output)
self.assertIn('articles = list(Article.objects.all())', output)
# Second QS
self.assertIn('Instances created: 1\n', output)
self.assertTrue(
re.search(r'Used\sfields\:\s(content|id|title),\s(content|id|title),\s(content|id|title)\n', output))
self.assertIn('Unused fields: \n', output)
self.assertIn("Recommendations: Nothing to do here ¯\_(ツ)_/¯\n", output)
self.assertIn('bar/views.py", line 8', output)
self.assertIn("article = Article.objects.get(title='foobar')", output)
def test_output_nothing_if_no_querysets(self):
""" Should not output anything if there are no QuerySets in request """
output = term.strip(capture_stdout(get_index_page, self.client, '/empty'))
self.assertNotIn("ERASERHEAD STATS", output)
@override_settings(ERASERHEAD_TRACEBACK_BASE_PATH=None)
def test_unsetted_traceback_base_path(self):
""" Should display also Django frames """
output = term.strip(capture_stdout(get_index_page, self.client, '/'))
self.assertIn("django/test/runner.py", output)
@override_settings(ERASERHEAD_TRACEBACK_BASE_PATH=os.path.join(settings.BASE_DIR, 'bar'))
def test_setted_traceback_base_path(self):
""" Should display also Django frames """
output = term.strip(capture_stdout(get_index_page, self.client, '/'))
self.assertNotIn("django/test/runner.py", output)
|
braintree/disbursement.py | futureironman/braintree_python | 182 | 11085727 | from decimal import Decimal
from braintree.resource import Resource
from braintree.transaction_search import TransactionSearch
from braintree.merchant_account import MerchantAccount
class Disbursement(Resource):
class Type(object):
"""
"""
Credit = "credit"
Debit = "debit"
def __init__(self, gateway, attributes):
Resource.__init__(self, gateway, attributes)
self.amount = Decimal(self.amount)
self.merchant_account = MerchantAccount(gateway, attributes["merchant_account"])
def __repr__(self):
detail_list = ["amount", "disbursement_date", "exception_message", "follow_up_action", "id", "success", "retry"]
return super(Disbursement, self).__repr__(detail_list)
def transactions(self):
return self.gateway.transaction.search([TransactionSearch.ids.in_list(self.transaction_ids)])
def is_credit(self):
return self.disbursement_type == Disbursement.Type.Credit
def is_debit(self):
return self.disbursement_type == Disbursement.Type.Debit
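# Illustrative sketch (not part of the original module): a Disbursement is normally
# built by the gateway from a webhook/search result rather than constructed by hand;
# the attribute values below are hypothetical and only mirror the fields this class reads.
#
#   attributes = {
#       "amount": "100.00",
#       "disbursement_type": Disbursement.Type.Credit,
#       "transaction_ids": ["abc123"],
#       "merchant_account": {"id": "sandbox_merchant_account"},
#   }
#   disbursement = Disbursement(gateway, attributes)  # `gateway` from the braintree config
#   disbursement.is_credit()   # -> True
#   disbursement.amount        # -> Decimal('100.00')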
|
common/tests/test_markdown.py | aolin480/openpilot | 37,508 | 11085760 |
#!/usr/bin/env python3
from markdown_it import MarkdownIt
import os
import unittest
from common.basedir import BASEDIR
from common.markdown import parse_markdown
class TestMarkdown(unittest.TestCase):
# validate that our simple markdown parser produces the same output as `markdown_it` from pip
def test_current_release_notes(self):
self.maxDiff = None
with open(os.path.join(BASEDIR, "RELEASES.md")) as f:
for r in f.read().split("\n\n"):
# No hyperlink support is ok
if '[' in r:
continue
self.assertEqual(MarkdownIt().render(r), parse_markdown(r))
if __name__ == "__main__":
unittest.main()
|
qanta/guesser/abstract.py | Pinafore/qb | 122 | 11085775 | import os
import importlib
import warnings
from collections import defaultdict, namedtuple, Counter
from abc import ABCMeta, abstractmethod
from typing import List, Dict, Tuple, Optional, NamedTuple
import pickle
import matplotlib
with warnings.catch_warnings():
warnings.simplefilter("ignore")
matplotlib.use("Agg")
import pandas as pd
from qanta.datasets.abstract import TrainingData, QuestionText, Page
from qanta.datasets.quiz_bowl import QuizBowlDataset, QantaDatabase
from qanta.config import conf
from qanta.util import constants as c
from qanta.util.io import safe_path
from qanta import qlogging
log = qlogging.get(__name__)
def get_class(instance_module: str, instance_class: str):
py_instance_module = importlib.import_module(instance_module)
py_instance_class = getattr(py_instance_module, instance_class)
return py_instance_class
GuesserSpec = NamedTuple(
"GuesserSpec",
[
("dependency_module", Optional[str]),
("dependency_class", Optional[str]),
("guesser_module", str),
("guesser_class", str),
("config_num", Optional[int]),
],
)
Guess = namedtuple("Guess", "fold guess guesser qnum score sentence token")
class AbstractGuesser(metaclass=ABCMeta):
def __init__(self, config_num: Optional[int]):
"""
Abstract class representing a guesser. All abstract methods must be implemented. Class
construction should be light and not load data since this is reserved for the
AbstractGuesser.load method.
:param config_num: Required parameter saying which configuration of the guesser to use or explicitly not
requesting one by passing None. If it is None implementors should not read the guesser config, otherwise
read the appropriate configuration. This is a positional argument to force all implementors to fail fast
rather than implicitly
"""
self.config_num = config_num
def qb_dataset(self) -> QuizBowlDataset:
return QuizBowlDataset(guesser_train=True)
@abstractmethod
def train(self, training_data: TrainingData) -> None:
"""
Given training data, train this guesser so that it can produce guesses.
training_data can be seen as a tuple of two elements which are
(train_x, train_y, properties).
In this case train_x is a list of question runs. For example, if the answer for a question
is "<NAME>" the runs might be ["This", "This German", "This German physicist", ...]
train_y is a list of true labels. The questions are strings and the true labels are strings.
Labels are in canonical form. Questions are not preprocessed in any way. To implement common
pre-processing refer to the qanta/guesser/preprocessing module.
properties is either None or a list of dictionaries that contain extra information about
each training example
:param training_data: training data in the format described above
:return: This function does not return anything
"""
pass
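# Illustrative sketch (not part of the original docstring): the expected shape of
# `training_data`, with made-up question runs and answers.
#
#   train_x = [["This", "This German", "This German physicist", ...], ...]
#   train_y = ["Albert_Einstein", ...]
#   properties = None  # or a list of per-example metadata dicts
#   guesser.train((train_x, train_y, properties))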
@abstractmethod
def guess(
self, questions: List[QuestionText], max_n_guesses: Optional[int]
) -> List[List[Tuple[Page, float]]]:
"""
Given a list of questions as text, return n_guesses number of guesses per question. Guesses
must be returned in canonical form, are returned with a score in which higher is better, and
must also be returned in sorted order with the best guess (highest score) at the front of
the list and worst guesses (lowest score) at the bottom.
It is guaranteed that before AbstractGuesser.guess is called that either
AbstractGuesser.train is called or AbstractGuesser.load is called.
:param questions: Questions to guess on
:param max_n_guesses: Number of guesses to produce per question, if None then return all
of them if possible
:return: List of top guesses per question
"""
pass
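# Illustrative sketch (not part of the original docstring): return format of
# `guess`, with hypothetical pages and scores, best guess first.
#
#   guesser.guess(["This German physicist developed general relativity"], 2)
#   # -> [[("Albert_Einstein", 7.3), ("Max_Planck", 2.1)]]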
@classmethod
@abstractmethod
def targets(cls) -> List[str]:
"""
List of files located in directory that are produced by the train method and loaded by the
save method.
:return: list of written files
"""
pass
@classmethod
def raw_targets(cls) -> List[str]:
"""
Similar to targets but it does not join a unique directory prefix. The provided paths are
raw paths to the targets.
:return: list of written files
"""
return []
@classmethod
def files(cls, directory: str) -> List[str]:
return [
os.path.join(directory, file) for file in cls.targets()
] + cls.raw_targets()
@classmethod
@abstractmethod
def load(cls, directory: str):
"""
Given the directory used for saving this guesser, create a new instance of the guesser, and
load it for guessing or scoring.
:param directory: training data for guesser
:return: Instance of AbstractGuesser ready for calling guess/score
"""
pass
@abstractmethod
def save(self, directory: str) -> None:
pass
def display_name(self) -> str:
"""
Return the display name of this guesser which is used in reporting scripts to identify this
particular guesser. By default str() on the classname, but can be overridden.
:return: display name of this guesser
"""
return self.__module__ + "." + self.__class__.__name__
def parameters(self) -> Dict:
"""
Return the parameters of the model. This is displayed as part of the report to make
identifying particular runs of particular hyper parameters easier. str(self.parameters())
will be called at some point to display it as well as making a pickle of parameters.
:return: model parameters
"""
return {}
def generate_guesses(
self,
max_n_guesses: int,
folds: List[str],
char_skip=25,
full_question=False,
first_sentence=False,
) -> pd.DataFrame:
"""
Generates guesses for this guesser for all questions in specified folds and returns it as a
DataFrame
WARNING: this method assumes that the guesser has been loaded with load or trained with
train. Unexpected behavior may occur if that is not the case.
:param max_n_guesses: generate at most this many guesses per question, sentence, and token
:param folds: which folds to generate guesses for
:param char_skip: generate guesses every `char_skip` characters
:return: dataframe of guesses
"""
if full_question and first_sentence:
raise ValueError("Invalid option combination")
dataset = self.qb_dataset()
questions_by_fold = dataset.questions_by_fold()
q_folds = []
q_qnums = []
q_char_indices = []
q_proto_ids = []
question_texts = []
for fold in folds:
questions = questions_by_fold[fold]
for q in questions:
if full_question:
question_texts.append(q.text)
q_folds.append(fold)
q_qnums.append(q.qanta_id)
q_char_indices.append(len(q.text))
q_proto_ids.append(q.proto_id)
elif first_sentence:
question_texts.append(q.first_sentence)
q_folds.append(fold)
q_qnums.append(q.qanta_id)
q_char_indices.append(q.tokenizations[0][1])
q_proto_ids.append(q.proto_id)
else:
for text_run, char_ix in zip(*q.runs(char_skip)):
question_texts.append(text_run)
q_folds.append(fold)
q_qnums.append(q.qanta_id)
q_char_indices.append(char_ix)
q_proto_ids.append(q.proto_id)
guesses_per_question = self.guess(question_texts, max_n_guesses)
if len(guesses_per_question) != len(question_texts):
raise ValueError(
"Guesser has wrong number of answers: len(guesses_per_question)={} len(question_texts)={}".format(
len(guesses_per_question), len(question_texts)
)
)
log.info("Creating guess dataframe from guesses...")
df_qnums = []
df_proto_id = []
df_char_indices = []
df_guesses = []
df_scores = []
df_folds = []
df_guessers = []
guesser_name = self.display_name()
for i in range(len(question_texts)):
guesses_with_scores = guesses_per_question[i]
fold = q_folds[i]
qnum = q_qnums[i]
proto_id = q_proto_ids[i]
char_ix = q_char_indices[i]
for guess, score in guesses_with_scores:
df_qnums.append(qnum)
df_proto_id.append(proto_id)
df_char_indices.append(char_ix)
df_guesses.append(guess)
df_scores.append(score)
df_folds.append(fold)
df_guessers.append(guesser_name)
return pd.DataFrame(
{
"qanta_id": df_qnums,
"proto_id": df_proto_id,
"char_index": df_char_indices,
"guess": df_guesses,
"score": df_scores,
"fold": df_folds,
"guesser": df_guessers,
}
)
@staticmethod
def guess_path(directory: str, fold: str, output_type: str) -> str:
return os.path.join(directory, f"guesses_{output_type}_{fold}.pickle")
@staticmethod
def save_guesses(
guess_df: pd.DataFrame, directory: str, folds: List[str], output_type
):
for fold in folds:
log.info("Saving fold {}".format(fold))
fold_df = guess_df[guess_df.fold == fold]
output_path = AbstractGuesser.guess_path(directory, fold, output_type)
fold_df.to_pickle(output_path)
@staticmethod
def load_guesses(
directory: str, output_type="char", folds=c.GUESSER_GENERATION_FOLDS
) -> pd.DataFrame:
"""
Loads all the guesses pertaining to a guesser inferred from directory
:param directory: where to load guesses from
:param output_type: One of: char, full, first
:param folds: folds to load, by default all of them
:return: guesses across all folds for given directory
"""
assert len(folds) > 0
guess_df = None
for fold in folds:
input_path = AbstractGuesser.guess_path(directory, fold, output_type)
if guess_df is None:
guess_df = pd.read_pickle(input_path)
else:
new_guesses_df = pd.read_pickle(input_path)
guess_df = pd.concat([guess_df, new_guesses_df])
return guess_df
@staticmethod
def load_all_guesses(directory_prefix="") -> pd.DataFrame:
"""
Loads all guesses from all guessers and folds
:return:
"""
guess_df = None
guessers = conf["guessers"]
for guesser_key, g in guessers.items():
g = guessers[guesser_key]
if g["enabled"]:
input_path = os.path.join(
directory_prefix, c.GUESSER_TARGET_PREFIX, g["class"]
)
if guess_df is None:
guess_df = AbstractGuesser.load_guesses(input_path)
else:
new_guess_df = AbstractGuesser.load_guesses(input_path)
guess_df = pd.concat([guess_df, new_guess_df])
return guess_df
@staticmethod
def load_guess_score_map(guess_df: pd.DataFrame) -> defaultdict:
guess_score_map = defaultdict(dict)
for row in guess_df.itertuples():
guess_score_map[row.guesser][
(row.qnum, row.sentence, row.token, row.guess)
] = row.score
return guess_score_map
def create_report(self, directory: str, fold):
with open(os.path.join(directory, f"guesser_params.pickle"), "rb") as f:
params = pickle.load(f)
qdb = QantaDatabase()
guesser_train = qdb.guess_train_questions
questions_by_fold = qdb.by_fold()
guesser_report_questions = questions_by_fold[fold]
train_pages = {q.page for q in guesser_train}
dev_pages = {q.page for q in guesser_report_questions}
unanswerable_answer_percent = len(dev_pages - train_pages) / len(dev_pages)
answerable = 0
for q in guesser_report_questions:
if q.page in train_pages:
answerable += 1
unanswerable_question_percent = 1 - answerable / len(guesser_report_questions)
train_example_counts = Counter()
for q in guesser_train:
train_example_counts[q.page] += 1
dev_df = pd.DataFrame(
{
"page": [q.page for q in guesser_report_questions],
"qanta_id": [q.qanta_id for q in guesser_report_questions],
"text_length": [len(q.text) for q in guesser_report_questions],
"n_train": [
train_example_counts[q.page] for q in guesser_report_questions
],
"category": [q.category for q in guesser_report_questions],
}
)
char_guess_df = AbstractGuesser.load_guesses(
directory, folds=[fold], output_type="char"
)
char_df = char_guess_df.merge(dev_df, on="qanta_id")
char_df["correct"] = (char_df.guess == char_df.page).astype("int")
char_df["char_percent"] = (
char_df["char_index"] / char_df["text_length"]
).clip(upper=1.0)
first_guess_df = AbstractGuesser.load_guesses(
directory, folds=[fold], output_type="first"
)
first_df = first_guess_df.merge(dev_df, on="qanta_id").sort_values(
"score", ascending=False
)
first_df["correct"] = (first_df.guess == first_df.page).astype("int")
grouped_first_df = first_df.groupby("qanta_id")
first_accuracy = grouped_first_df.nth(0).correct.mean()
first_recall = grouped_first_df.agg({"correct": "max"}).correct.mean()
full_guess_df = AbstractGuesser.load_guesses(
directory, folds=[fold], output_type="full"
)
full_df = full_guess_df.merge(dev_df, on="qanta_id").sort_values(
"score", ascending=False
)
full_df["correct"] = (full_df.guess == full_df.page).astype("int")
grouped_full_df = full_df.groupby("qanta_id")
full_accuracy = grouped_full_df.nth(0).correct.mean()
full_recall = grouped_full_df.agg({"correct": "max"}).correct.mean()
with open(os.path.join(directory, f"guesser_report_{fold}.pickle"), "wb") as f:
pickle.dump(
{
"first_accuracy": first_accuracy,
"first_recall": first_recall,
"full_accuracy": full_accuracy,
"full_recall": full_recall,
"char_df": char_df,
"first_df": first_df,
"full_df": full_df,
"n_guesses": conf["n_guesses"],
"unanswerable_answer_percent": unanswerable_answer_percent,
"unanswerable_question_percent": unanswerable_question_percent,
"guesser_name": self.display_name(),
"guesser_params": params,
},
f,
)
@staticmethod
def list_enabled_guessers() -> List[GuesserSpec]:
guessers = conf["guessers"]
enabled_guessers = []
for guesser, configs in guessers.items():
for config_num, g_conf in enumerate(configs):
if g_conf["enabled"]:
dependency = g_conf["luigi_dependency"]
parts = guesser.split(".")
guesser_module = ".".join(parts[:-1])
guesser_class = parts[-1]
if dependency is None:
dependency_module = None
dependency_class = None
else:
parts = dependency.split(".")
dependency_module = ".".join(parts[:-1])
dependency_class = parts[-1]
enabled_guessers.append(
GuesserSpec(
dependency_module,
dependency_class,
guesser_module,
guesser_class,
config_num,
)
)
return enabled_guessers
@staticmethod
def output_path(
guesser_module: str, guesser_class: str, config_num: int, file: str
):
guesser_path = "{}.{}".format(guesser_module, guesser_class)
return safe_path(
os.path.join(c.GUESSER_TARGET_PREFIX, guesser_path, str(config_num), file)
)
@staticmethod
def reporting_path(
guesser_module: str, guesser_class: str, config_num: int, file: str
):
guesser_path = "{}.{}".format(guesser_module, guesser_class)
return safe_path(
os.path.join(
c.GUESSER_REPORTING_PREFIX, guesser_path, str(config_num), file
)
)
def web_api(self, host="0.0.0.0", port=5000, debug=False):
from flask import Flask, jsonify, request
app = Flask(__name__)
@app.route("/api/answer_question", methods=["POST"])
def answer_question():
text = request.form["text"]
guess, score = self.guess([text], 1)[0][0]
return jsonify({"guess": guess, "score": float(score)})
app.run(host=host, port=port, debug=debug)
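# Illustrative sketch (not part of the original module): querying the endpoint
# exposed by `web_api()` above, using the default host/port.
#
#   import requests
#   r = requests.post("http://localhost:5000/api/answer_question",
#                     data={"text": "This German physicist developed general relativity"})
#   r.json()  # -> {"guess": "...", "score": ...}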
@staticmethod
def multi_guesser_web_api(
guesser_names: List[str], host="0.0.0.0", port=5000, debug=False
):
from flask import Flask, jsonify, request
app = Flask(__name__)
guesser_lookup = {}
for name, g in conf["guessers"].items():
g_qualified_name = g["class"]
parts = g_qualified_name.split(".")
g_module = ".".join(parts[:-1])
g_classname = parts[-1]
guesser_lookup[name] = (get_class(g_module, g_classname), g_qualified_name)
log.info(f"Loading guessers: {guesser_names}")
guessers = {}
for name in guesser_names:
if name in guesser_lookup:
g_class, g_qualified_name = guesser_lookup[name]
guesser_path = os.path.join("output/guesser", g_qualified_name)
log.info(
f'Loading "{name}" corresponding to "{g_qualified_name}" located at "{guesser_path}"'
)
guessers[name] = g_class.load(guesser_path)
else:
log.info(f'Guesser with name="{name}" not found')
@app.route("/api/guesser", methods=["POST"])
def guess():
if "guesser_name" not in request.form:
response = jsonify({"errors": 'Missing expected field "guesser_name"'})
response.status_code = 400
return response
if "text" not in request.form:
response = jsonify({"errors": 'Missing expected field "text"'})
response.status_code = 400
return response
g_name = request.form["guesser_name"]
if g_name not in guessers:
response = jsonify(
{
"errors": f'Guesser "{g_name}" invalid, options are: "{list(guessers.keys())}"'
}
)
response.status_code = 400
return response
text = request.form["text"]
guess, score = guessers[g_name].guess([text], 1)[0][0]
return jsonify({"guess": guess, "score": float(score)})
app.run(host=host, port=port, debug=debug)
|
tools/pythonpkg/tests/fast/arrow/test_dictionary_arrow.py | AldoMyrtaj/duckdb | 2,816 | 11085785 | import duckdb
try:
import pyarrow as pa
import pyarrow.parquet
import numpy as np
from pandas import Timestamp
import datetime
import pandas as pd
can_run = True
except:
can_run = False
class TestArrowDictionary(object):
def test_dictionary(self,duckdb_cursor):
if not can_run:
return
indices = pa.array([0, 1, 0, 1, 2, 1, 0, 2])
dictionary = pa.array([10, 100, None])
dict_array = pa.DictionaryArray.from_arrays(indices, dictionary)
arrow_table = pa.Table.from_arrays([dict_array],['a'])
rel = duckdb.from_arrow_table(arrow_table)
assert rel.execute().fetchall() == [(10,), (100,), (10,), (100,), (None,), (100,), (10,), (None,)]
# Bigger than Vector Size
indices_list = [0, 1, 0, 1, 2, 1, 0, 2,3] * 10000
indices = pa.array(indices_list)
dictionary = pa.array([10, 100, None,999999])
dict_array = pa.DictionaryArray.from_arrays(indices, dictionary)
arrow_table = pa.Table.from_arrays([dict_array],['a'])
rel = duckdb.from_arrow_table(arrow_table)
result = [(10,), (100,), (10,), (100,), (None,), (100,), (10,), (None,), (999999,)] * 10000
assert rel.execute().fetchall() == result
#Table with dictionary and normal array
arrow_table = pa.Table.from_arrays([dict_array,pa.array(indices_list)],['a','b'])
rel = duckdb.from_arrow_table(arrow_table)
result = [(10,0), (100,1), (10,0), (100,1), (None,2), (100,1), (10,0), (None,2), (999999,3)] * 10000
assert rel.execute().fetchall() == result
def test_dictionary_null_index(self,duckdb_cursor):
if not can_run:
return
indices = pa.array([None, 1, 0, 1, 2, 1, 0, 2])
dictionary = pa.array([10, 100, None])
dict_array = pa.DictionaryArray.from_arrays(indices, dictionary)
arrow_table = pa.Table.from_arrays([dict_array],['a'])
rel = duckdb.from_arrow_table(arrow_table)
assert rel.execute().fetchall() == [(None,), (100,), (10,), (100,), (None,), (100,), (10,), (None,)]
indices = pa.array([None, 1, None, 1, 2, 1, 0])
dictionary = pa.array([10, 100, 100])
dict_array = pa.DictionaryArray.from_arrays(indices, dictionary)
arrow_table = pa.Table.from_arrays([dict_array],['a'])
rel = duckdb.from_arrow_table(arrow_table)
print (rel.execute().fetchall())
assert rel.execute().fetchall() == [(None,), (100,), (None,), (100,), (100,), (100,), (10,)]
# Test Big Vector
indices_list = [None, 1, None, 1, 2, 1, 0]
indices = pa.array(indices_list * 1000)
dictionary = pa.array([10, 100, 100])
dict_array = pa.DictionaryArray.from_arrays(indices, dictionary)
arrow_table = pa.Table.from_arrays([dict_array],['a'])
rel = duckdb.from_arrow_table(arrow_table)
result = [(None,), (100,), (None,), (100,), (100,), (100,), (10,)] * 1000
assert rel.execute().fetchall() == result
#Table with dictionary and normal array
arrow_table = pa.Table.from_arrays([dict_array,indices],['a','b'])
rel = duckdb.from_arrow_table(arrow_table)
result = [(None,None), (100,1), (None,None), (100,1), (100,2), (100,1), (10,0)] * 1000
assert rel.execute().fetchall() == result
def test_dictionary_batches(self,duckdb_cursor):
if not can_run:
return
indices_list = [None, 1, None, 1, 2, 1, 0]
indices = pa.array(indices_list * 10000)
dictionary = pa.array([10, 100, 100])
dict_array = pa.DictionaryArray.from_arrays(indices, dictionary)
arrow_table = pa.Table.from_arrays([dict_array],['a'])
batch_arrow_table = pyarrow.Table.from_batches(arrow_table.to_batches(10))
rel = duckdb.from_arrow_table(batch_arrow_table)
result = [(None,), (100,), (None,), (100,), (100,), (100,), (10,)] * 10000
assert rel.execute().fetchall() == result
#Table with dictionary and normal array
arrow_table = pa.Table.from_arrays([dict_array,indices],['a','b'])
batch_arrow_table = pyarrow.Table.from_batches(arrow_table.to_batches(10))
rel = duckdb.from_arrow_table(batch_arrow_table)
result = [(None,None), (100,1), (None,None), (100,1), (100,2), (100,1), (10,0)] * 10000
assert rel.execute().fetchall() == result
def test_dictionary_batches_parallel(self,duckdb_cursor):
if not can_run:
return
duckdb_conn = duckdb.connect()
duckdb_conn.execute("PRAGMA threads=4")
duckdb_conn.execute("PRAGMA verify_parallelism")
indices_list = [None, 1, None, 1, 2, 1, 0]
indices = pa.array(indices_list * 10000)
dictionary = pa.array([10, 100, 100])
dict_array = pa.DictionaryArray.from_arrays(indices, dictionary)
arrow_table = pa.Table.from_arrays([dict_array],['a'])
batch_arrow_table = pyarrow.Table.from_batches(arrow_table.to_batches(10))
rel = duckdb_conn.from_arrow_table(batch_arrow_table)
result = [(None,), (100,), (None,), (100,), (100,), (100,), (10,)] * 10000
assert rel.execute().fetchall() == result
#Table with dictionary and normal array
arrow_table = pa.Table.from_arrays([dict_array,indices],['a','b'])
batch_arrow_table = pyarrow.Table.from_batches(arrow_table.to_batches(10))
rel = duckdb_conn.from_arrow_table(batch_arrow_table)
result = [(None,None), (100,1), (None,None), (100,1), (100,2), (100,1), (10,0)] * 10000
assert rel.execute().fetchall() == result
def test_dictionary_index_types(self,duckdb_cursor):
if not can_run:
return
indices_list = [None, 1, None, 1, 2, 1, 0]
dictionary = pa.array([10, 100, 100], type=pyarrow.uint8())
index_types = []
index_types.append(pa.array(indices_list * 10000, type=pyarrow.uint8()))
index_types.append(pa.array(indices_list * 10000, type=pyarrow.uint16()))
index_types.append(pa.array(indices_list * 10000, type=pyarrow.uint32()))
index_types.append(pa.array(indices_list * 10000, type=pyarrow.uint64()))
index_types.append(pa.array(indices_list * 10000, type=pyarrow.int8()))
index_types.append(pa.array(indices_list * 10000, type=pyarrow.int16()))
index_types.append(pa.array(indices_list * 10000, type=pyarrow.int32()))
index_types.append(pa.array(indices_list * 10000, type=pyarrow.int64()))
for index_type in index_types:
dict_array = pa.DictionaryArray.from_arrays(index_type, dictionary)
arrow_table = pa.Table.from_arrays([dict_array],['a'])
rel = duckdb.from_arrow_table(arrow_table)
result = [(None,), (100,), (None,), (100,), (100,), (100,), (10,)]* 10000
assert rel.execute().fetchall() == result
def test_dictionary_strings(self,duckdb_cursor):
if not can_run:
return
indices_list = [None, 0, 1, 2, 3, 4, None]
indices = pa.array(indices_list * 1000)
dictionary = pa.array(['<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>'])
dict_array = pa.DictionaryArray.from_arrays(indices, dictionary)
arrow_table = pa.Table.from_arrays([dict_array],['a'])
rel = duckdb.from_arrow_table(arrow_table)
result = [(None,), ('Mat<NAME>',), ( '<NAME>',), ('<NAME>',), ('<NAME>',), ('<NAME>',), (None,)] * 1000
assert rel.execute().fetchall() == result
def test_dictionary_timestamps(self,duckdb_cursor):
if not can_run:
return
indices_list = [None, 0, 1, 2, None]
indices = pa.array(indices_list * 1000)
dictionary = pa.array([Timestamp(year=2001, month=9, day=25),Timestamp(year=2006, month=11, day=14),Timestamp(year=2012, month=5, day=15),Timestamp(year=2018, month=11, day=2)])
dict_array = pa.DictionaryArray.from_arrays(indices, dictionary)
arrow_table = pa.Table.from_arrays([dict_array],['a'])
rel = duckdb.from_arrow_table(arrow_table)
print (rel.execute().fetchall())
result = [(None,), (datetime.datetime(2001, 9, 25, 0, 0),), (datetime.datetime(2006, 11, 14, 0, 0),), (datetime.datetime(2012, 5, 15, 0, 0),), (None,)] * 1000
assert rel.execute().fetchall() == result
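# Illustrative sketch (not part of the original tests): the pyarrow pattern these
# tests rely on - each index of a dictionary-encoded column is decoded through its
# dictionary, and a null index surfaces as NULL on the DuckDB side.
#
#   import duckdb, pyarrow as pa
#   indices = pa.array([0, 1, None])
#   dictionary = pa.array([10, 100])
#   dict_array = pa.DictionaryArray.from_arrays(indices, dictionary)
#   rel = duckdb.from_arrow_table(pa.Table.from_arrays([dict_array], ['a']))
#   rel.execute().fetchall()  # -> [(10,), (100,), (None,)]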
|
tools.py | suyukun666/UFO | 122 | 11085824 | import numpy as np
def custom_print(context, log_file, mode):
#custom print and log out function
if mode == 'w':
fp = open(log_file, mode)
fp.write(context + '\n')
fp.close()
elif mode == 'a+':
print(context)
fp = open(log_file, mode)
print(context, file=fp)
fp.close()
else:
raise Exception('other file operation is unimplemented !')
def generate_binary_map(pred, type,th=0.5):
if type == '2mean':
threshold = np.mean(pred) * 2
if threshold > th:
threshold = th
binary_map = pred > threshold
return binary_map.astype(np.float32)
if type == 'mean+std':
threshold = np.mean(pred) + np.std(pred)
if threshold > th:
threshold = th
binary_map = pred > threshold
return binary_map.astype(np.float32)
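# Illustrative sketch (not part of the original module): how the '2mean' rule
# binarises a prediction map (values are hypothetical).
#
#   pred = np.array([0.05, 0.10, 0.90])
#   # mean = 0.35 -> 2 * mean = 0.70, capped at th=0.5, so the threshold is 0.5
#   generate_binary_map(pred, '2mean')  # -> array([0., 0., 1.], dtype=float32)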
def calc_precision_and_jaccard(pred, gt,th=0.5):
bin_pred = generate_binary_map(pred, 'mean+std',th)
tp = (bin_pred == gt).sum()
precision = tp / (pred.size)
i = (bin_pred * gt).sum()
u = bin_pred.sum() + gt.sum() - i
jaccard = i / (u + 1e-10)
return precision, jaccard
|
dojo/engagement/queries.py | dant24/django-DefectDojo | 249 | 11085843 |
from crum import get_current_user
from django.db.models import Exists, OuterRef, Q
from dojo.models import Engagement, Product_Member, Product_Type_Member, \
Product_Group, Product_Type_Group
from dojo.authorization.authorization import get_roles_for_permission, user_has_global_permission
def get_authorized_engagements(permission):
user = get_current_user()
if user is None:
return Engagement.objects.none()
if user.is_superuser:
return Engagement.objects.all()
if user_has_global_permission(user, permission):
return Engagement.objects.all()
roles = get_roles_for_permission(permission)
authorized_product_type_roles = Product_Type_Member.objects.filter(
product_type=OuterRef('product__prod_type_id'),
user=user,
role__in=roles)
authorized_product_roles = Product_Member.objects.filter(
product=OuterRef('product_id'),
user=user,
role__in=roles)
authorized_product_type_groups = Product_Type_Group.objects.filter(
product_type=OuterRef('product__prod_type_id'),
group__users=user,
role__in=roles)
authorized_product_groups = Product_Group.objects.filter(
product=OuterRef('product_id'),
group__users=user,
role__in=roles)
engagements = Engagement.objects.annotate(
product__prod_type__member=Exists(authorized_product_type_roles),
product__member=Exists(authorized_product_roles),
product__prod_type__authorized_group=Exists(authorized_product_type_groups),
product__authorized_group=Exists(authorized_product_groups))
engagements = engagements.filter(
Q(product__prod_type__member=True) | Q(product__member=True) |
Q(product__prod_type__authorized_group=True) | Q(product__authorized_group=True))
return engagements
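# Illustrative sketch (not part of the original module): a typical call site. The
# permission constant is assumed here; in DefectDojo it would come from
# dojo.authorization.roles_permissions.Permissions.
#
#   engagements = get_authorized_engagements(Permissions.Engagement_View)
#   # -> QuerySet of engagements the current user may view via superuser status,
#   #    global permission, or product/product-type membership or group membership.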
|
colossalai/nn/lr_scheduler/multistep.py | RichardoLuo/ColossalAI | 1,630 | 11085876 |
from typing import List
from torch.optim.lr_scheduler import MultiStepLR as _MultiStepLR
from colossalai.registry import LR_SCHEDULERS
from .delayed import WarmupScheduler
@LR_SCHEDULERS.register_module
class MultiStepLR(_MultiStepLR):
"""Decays the learning rate of each parameter group by gamma once the
number of epoch reaches one of the milestones. Notice that such decay can
happen simultaneously with other changes to the learning rate from outside
this scheduler. When last_epoch=-1, sets initial lr as lr.
Args:
optimizer (:class:`torch.optim.Optimizer`): Wrapped optimizer.
total_steps (int): Number of total training steps.
milestones (List[int], optional): List of epoch indices. Must be increasing, defaults to None.
gamma (float, optional): Multiplicative factor of learning rate decay, defaults to 0.1.
last_epoch (int, optional): The index of last epoch, defaults to -1. When last_epoch=-1,
the schedule is started from the beginning and the initial lr is set to lr.
"""
def __init__(self, optimizer, total_steps: int, milestones: List[int] = None, gamma: float = 0.1, last_epoch: int = -1, **kwargs):
super().__init__(optimizer, milestones, gamma=gamma, last_epoch=last_epoch)
@LR_SCHEDULERS.register_module
class MultiStepWarmupLR(WarmupScheduler):
"""Multistep learning rate scheduler with warmup.
Args:
optimizer (:class:`torch.optim.Optimizer`): Wrapped optimizer.
total_steps (int): Number of total training steps.
warmup_steps (int, optional): Number of warmup steps, defaults to 0.
milestones (List[int], optional): List of epoch indices. Must be increasing, defaults to None.
gamma (float, optional): Multiplicative factor of learning rate decay, defaults to 0.1.
num_steps_per_epoch (int, optional): Number of steps per epoch, defaults to -1.
last_epoch (int, optional): The index of last epoch, defaults to -1. When last_epoch=-1,
the schedule is started from the beginning and the initial lr is set to lr.
"""
def __init__(self, optimizer, total_steps: int, warmup_steps: int = 0, milestones: List[int] = None,
gamma: float = 0.1, last_epoch: int = -1, **kwargs):
if len(milestones) == 0:
raise ValueError('milestones cannot be empty')
milestones = [
v - warmup_steps for v in milestones if v >= warmup_steps]
base_scheduler = _MultiStepLR(optimizer, milestones=milestones,
gamma=gamma)
super().__init__(optimizer, warmup_steps, base_scheduler, last_epoch=last_epoch)
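# Illustrative sketch (not part of the original module): how MultiStepWarmupLR
# shifts its milestones by the warmup length (numbers are hypothetical).
#
#   warmup_steps=10, milestones=[5, 30, 60]
#   -> milestone 5 is dropped (it falls inside the warmup) and the wrapped
#      _MultiStepLR sees milestones [20, 50], i.e. the lr decays by `gamma` at
#      steps 30 and 60 of the overall schedule, after the linear warmup ends.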
|
glue/viewers/matplotlib/qt/tests/test_toolbar_mode.py | HPLegion/glue | 550 | 11085898 |
from glue.viewers.matplotlib.tests.test_mouse_mode import TestMouseMode, Event
from ..toolbar_mode import ContrastMode
class TestContrastMode(TestMouseMode):
def mode_factory(self):
return ContrastMode
def test_move_ignored_if_not_right_drag(self):
e = Event(1, 2, button=1)
self.mode.move(e)
count = self.mode._axes.figure.canvas.get_width_height.call_count
assert count == 0
def test_clip_percentile(self):
assert self.mode.get_clip_percentile() == (1, 99)
self.mode.set_clip_percentile(2, 33)
assert self.mode.get_clip_percentile() == (2, 33)
def test_vmin_vmax(self):
assert self.mode.get_vmin_vmax() == (None, None)
self.mode.set_vmin_vmax(3, 4)
assert self.mode.get_vmin_vmax() == (3, 4)
assert self.mode.get_clip_percentile() == (None, None)
# TODO: at the moment, this doesn't work because the dialog is non-modal
# assert self.mode.get_vmin_vmax() == (5, 7)
# def test_choose_vmin_vmax(self):
#
# assert self.mode.get_vmin_vmax() == (None, None)
#
# def fill_apply(dialog):
# dialog.vmin.setText('5')
# dialog.vmax.setText('7')
# dialog.accept()
#
# with process_dialog(delay=500, function=fill_apply):
# self.mode.choose_vmin_vmax()
|
recipes/Python/578135_Markov_EncryptiModule/recipe-578135.py | tdiprima/code | 2,023 | 11085924 | import random
import sys
import collections
################################################################################
_CHAOS = random.SystemRandom()
def slots(names=''):
sys._getframe(1).f_locals['__slots__'] = \
tuple('__' + name for name in names.replace(',', ' ').split())
################################################################################
class Key(object):
slots('data, prefix_len, base, size, encoder, axes, order, decoder')
@classmethod
def new(cls, chars_used, chain_size):
selection, blocks = list(set(chars_used)), []
for _ in range(chain_size):
_CHAOS.shuffle(selection)
blocks.append(''.join(selection))
return cls(tuple(blocks))
def __init__(self, data):
self.__test_data(data)
self.__make_vars(data)
@staticmethod
def __test_data(data):
if not isinstance(data, tuple):
raise TypeError('Data must be a tuple object!')
if len(data) < 2:
raise ValueError('Data must contain at least two items!')
item = data[0]
if not isinstance(item, str):
raise TypeError('Data items must be str objects!')
length = len(item)
if length < 2:
raise ValueError('Data items must contain at least two chars!')
unique = set(item)
if len(unique) != length:
raise ValueError('Data items must contain unique char sets!')
for item in data[1:]:
if not isinstance(item, str):
raise TypeError('Data items must be str objects!')
next_length = len(item)
if next_length != length:
raise ValueError('All data items must have the same size!')
next_unique = set(item)
if len(next_unique) != next_length:
raise ValueError('Data items must contain unique char sets!')
if next_unique ^ unique:
raise ValueError('All data items must use the same char set!')
def __make_vars(self, data):
self.__data = data
self.__prefix_len = len(data) - 1
self.__base = base = data[0]
self.__size = size = len(base)
offset = -sum(base.index(block[0]) for block in data[1:-1]) % size
self.__encoder = base[offset:] + base[:offset]
self.__axes = tuple(reversed([tuple(base.index(char) for char in block)
for block in data[1:]]))
self.__order = key = ''.join(sorted(base))
grid = []
for rotation in range(size):
block, row = base[rotation:] + base[:rotation], [None] * size
for char, value in zip(block, key):
row[key.index(char)] = value
grid.append(''.join(row))
self.__decoder = tuple(grid[offset:] + grid[:offset])
def test_primer(self, primer):
primer.test_key(self)
def encode(self, prefix, current):
assert len(prefix) == self.__prefix_len, \
'Prefix size is not compatible with key dimensions!'
return self.__encoder[(sum(table[probe] for table, probe in
zip(self.__axes, prefix)) + current) % self.__size]
def decode(self, prefix, current):
assert len(prefix) == self.__prefix_len, \
'Prefix size is not compatible with key dimensions!'
return self.__decoder[sum(table[probe] for table, probe in
zip(self.__axes, prefix)) % self.__size][current]
@property
def data(self):
return self.__data
@property
def prefix_len(self):
return self.__prefix_len
@property
def base(self):
return self.__base
@property
def order(self):
return self.__order
################################################################################
class Primer(object):
slots('data')
@classmethod
def new(cls, key):
base = key.base
return cls(''.join(_CHAOS.choice(base) for _ in range(key.prefix_len)))
def __init__(self, data):
self.__test_data(data)
self.__data = data
@staticmethod
def __test_data(data):
if not isinstance(data, str):
raise TypeError('Data must be a str object!')
if not data:
raise ValueError('Data must contain at least one char!')
def test_key(self, key):
if len(self.__data) != key.prefix_len:
raise ValueError('Key size must be one more than the primer size!')
if not set(self.__data).issubset(key.base):
raise ValueError('Key data must be a superset of primer data!')
@property
def data(self):
return self.__data
################################################################################
class _Processor(object):
slots('key, into, index, from')
def __init__(self, key, primer):
if self.__class__ is _Processor:
raise NotImplementedError('This is an abstract class!')
key.test_primer(primer)
self.__key = key
self.__into = table = dict(map(reversed, enumerate(key.order)))
self.__index = collections.deque(map(table.__getitem__, primer.data))
self.__index.appendleft(None)
self.__from = dict(map(reversed, table.items()))
def process(self, data):
cache = []
self._run(data, cache.append, self.__key, self.__into, self.__index)
return ''.join(cache)
@staticmethod
def _run(data, cache_push, key, table, index):
raise NotImplementedError('This is an abstract method!')
@property
def primer(self):
self.__index.popleft()
value = Primer(''.join(map(self.__from.__getitem__, self.__index)))
self.__index.appendleft(None)
return value
################################################################################
class Encrypter(_Processor):
slots()
@staticmethod
def _run(data, cache_push, key, table, index):
index_pop, encode, index_push = index.popleft, key.encode, index.append
for char in data:
if char in table:
index_pop()
code = table[char]
cache_push(encode(index, code))
index_push(code)
else:
cache_push(char)
################################################################################
class Decrypter(_Processor):
slots()
@staticmethod
def _run(data, cache_push, key, table, index):
index_pop, decode, index_push = index.popleft, key.decode, index.append
for char in data:
if char in table:
index_pop()
value = decode(index, table[char])
cache_push(value)
index_push(table[value])
else:
cache_push(char)
################################################################################
def encrypt(data, key, primer):
engine = Encrypter(key, primer)
return engine.process(data), engine.primer
def decrypt(data, key, primer):
engine = Decrypter(key, primer)
return engine.process(data), engine.primer
def auto_encrypt(data, chain_size, plain_text=''):
key = Key.new(set(data) - set(plain_text), chain_size)
primer = Primer.new(key)
return Encrypter(key, primer).process(data), key, primer
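# Illustrative sketch (not part of the original recipe): a round trip using the
# module's own helpers. Characters excluded via `plain_text` (here the space) are
# not part of the key's alphabet and pass through unchanged.
#
#   message = "meet me at noon"
#   encrypted, key, primer = auto_encrypt(message, chain_size=3, plain_text=" ")
#   decoded, _ = decrypt(encrypted, key, primer)
#   assert decoded == message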
|
docs/examples/changing_parameters/SIR_change_tau.py | uluturki/Mathematics-of-Epidemics-on-Networks | 136 | 11085949 | import networkx as nx
import EoN
import matplotlib.pyplot as plt
r'''This code demonstrates a change in parameters at time t1. To do this, it
will run to time t1 and then stop, and restart with the new parameters.
To demonstrate that stopping and restarting doesn't do anything weird, we first
run to time t0, stop, and then restart with the original parameters. The resulting
simulation should just look like nothing changes.
Another way to do this may be through writing a custom transmission rate
function and using the nonMarkov version of this function.
'''
def get_affected_nodes_at_end(infection_time, recovery_time):
recovered = set(recovery_time.keys())
infected = set(node for node in infection_time if node not in recovered)
return infected, recovered
t0= 2
t1 = 4
tmax = 8
gamma = 1
tau0 = 1
tau1 = 0.5
N= 1000000
kave = 4
rho = 0.001
G = nx.fast_gnp_random_graph(N, kave/(N-1.))
times0, S0, I0, R0, infection_time, recovery_time = EoN.fast_SIR(G, tau0, gamma, rho = rho, tmax = t0, return_full_data=True)
infected, recovered = get_affected_nodes_at_end(infection_time, recovery_time)
times1, S1, I1, R1, infection_time, recovery_time = EoN.fast_SIR(G, tau0, gamma, initial_infecteds = infected, initial_recovereds = recovered, tmin = t0, tmax = t1, return_full_data=True)
infected, recovered = get_affected_nodes_at_end(infection_time, recovery_time)
times2, S2, I2, R2 = EoN.fast_SIR(G, tau1, gamma, initial_infecteds = infected, initial_recovereds=recovered, tmin=t1, tmax=tmax)
plt.plot(times0, I0, label = 'fast_SIR')
plt.plot(times1, I1)#the first two have the same parameters, so the transition should be as if it were a single simulation
plt.plot(times2, I2)#the infectiousness reduced, so a sharp change should be visible
times0, S0, I0, R0, infection_time, recovery_time = EoN.Gillespie_SIR(G, tau0, gamma, rho = rho, tmax = t0, return_full_data=True)
infected, recovered = get_affected_nodes_at_end(infection_time, recovery_time)
times1, S1, I1, R1, infection_time, recovery_time = EoN.Gillespie_SIR(G, tau0, gamma, initial_infecteds = infected, initial_recovereds = recovered, tmin = t0, tmax = t1, return_full_data=True)
infected, recovered = get_affected_nodes_at_end(infection_time, recovery_time)
times2, S2, I2, R2 = EoN.Gillespie_SIR(G, tau1, gamma, initial_infecteds = infected, initial_recovereds=recovered, tmin=t1, tmax=tmax)
plt.plot(times0, I0, '-.', label = 'Gillespie_SIR')
plt.plot(times1, I1, '-.')
plt.plot(times2, I2, '-.')
plt.legend(loc = 'upper right')
plt.savefig('SIR_change_tau.pdf')
|
preprocess/face_align.py | JinxedQAQ/Generating-Talking-Face-with-Controllable-Eye-Movements-by-Disentangled-Blinking-Feature | 706 | 11085955 | from __future__ import print_function, division
import torch.utils.data as data
from torch.utils.data import Dataset, DataLoader
import torch
import numpy as np
import os
import os.path
import cv2
import scipy.io as scio
def transformation_from_points(points1, scale):
points = [[70, 112],
[110, 112],
[90, 150]]
points2 = np.array(points) * scale
points2 = points2.astype(np.float64)
points1 = points1.astype(np.float64)
c1 = np.mean(points1, axis=0)
c2 = np.mean(points2, axis=0)
points1 -= c1
points2 -= c2
s1 = np.std(points1)
s2 = np.std(points2)
points1 /= s1
points2 /= s2
U, S, Vt = np.linalg.svd(np.matmul(points1.T, points2))
R = (np.matmul(U, Vt)).T
sR = (s2 / s1) * R
T = c2.reshape(2,1) - (s2 / s1) * np.matmul(R, c1.reshape(2,1))
M = np.concatenate((sR, T), axis=1)
return M
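# Illustrative sketch (not part of the original module): transformation_from_points
# returns the 2x3 affine matrix M (scaled rotation sR plus translation T) that maps
# detected landmarks onto the fixed template [[70, 112], [110, 112], [90, 150]] * scale,
# ready to be passed to cv2.warpAffine. The landmark values below are hypothetical.
#
#   three_points = np.array([[72.0, 110.0], [112.0, 111.0], [93.0, 152.0]])
#   M = transformation_from_points(three_points, scale=2)
#   # aligned = cv2.warpAffine(img, M, (178 * 2, 218 * 2))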
class ImageLoader(object):
def __init__(self, mode='test'):
self.scale = 2
self.crop_height = 134 * self.scale
self.crop_width = 134 * self.scale
self.crop_center_y_offset = 10 * self.scale
self.output_scale = (260, 260)
self.ori_scale = (178 * self.scale, 218 * self.scale)
self.random_x = 0
self.random_y = 0
self.flip = 0
if mode == 'train':
self.flip = np.random.randint(0, 2)
self.random_x = np.random.randint(-3, 4)
self.random_y = np.random.randint(-3, 4)
def image_loader(self, path, points):
if os.path.exists(path):
img = cv2.imread(path)
three_points = np.zeros((3, 2))
three_points[0] = np.array(points[:2]) # the location of the left eye
three_points[1] = np.array(points[2:4]) # the location of the right eye
three_points[2] = np.array([(points[6] + points[8]) / 2, (points[7] + points[9]) / 2]) # the location of the center of the mouth
three_points.astype(np.float32)
M = transformation_from_points(three_points, self.scale)
align_img = cv2.warpAffine(img, M, self.ori_scale, borderValue=[127, 127, 127])
l = int(round(self.ori_scale[0] / 2 - self.crop_width / 2 + self.random_x))
r = int(round(self.ori_scale[0] / 2 + self.crop_width / 2 + self.random_x))
t = int(round(self.ori_scale[1] / 2 - self.crop_height / 2 + self.crop_center_y_offset + self.random_y))
d = int(round(self.ori_scale[1] / 2 + self.crop_height / 2 + self.crop_center_y_offset + self.random_y))
align_img2 = align_img[t:d, l:r, :]
align_img2 = cv2.resize(align_img2, self.output_scale)
return align_img2
else:
raise ("image = 0")
|
zip_file_example/append_directory/main.py | DazEB2/SimplePyScripts | 117 | 11085987 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
import zipfile
import os
def make_zipfile(source_dir, output_filename):
relroot = os.path.abspath(os.path.join(source_dir, os.pardir))
with zipfile.ZipFile(output_filename, "w", zipfile.ZIP_DEFLATED) as zip:
for root, dirs, files in os.walk(source_dir):
# add directory (needed for empty dirs)
zip.write(root, os.path.relpath(root, relroot))
for file in files:
filename = os.path.join(root, file)
if os.path.isfile(filename): # regular files only
arcname = os.path.join(os.path.relpath(root, relroot), file)
zip.write(filename, arcname)
DIR_NAME = 'dir_1'
make_zipfile(DIR_NAME, DIR_NAME + '.zip')
|
salesforce/backend/base.py | agilentia/django-salesforce | 251 | 11085999 | # django-salesforce
#
# by <NAME>
# (c) 2012-2013 Freelancers Union (http://www.freelancersunion.org)
# See LICENSE.md for details
#
"""
Salesforce database backend for Django. (like django,db.backends.*.base)
"""
from typing import Any, Dict, Optional, TYPE_CHECKING
from django.conf import settings
from django.db.backends.base.base import BaseDatabaseWrapper
from salesforce.backend.client import DatabaseClient
from salesforce.backend.creation import DatabaseCreation
from salesforce.backend.features import DatabaseFeatures
from salesforce.backend.validation import DatabaseValidation
from salesforce.backend.operations import DatabaseOperations
from salesforce.backend.introspection import DatabaseIntrospection
from salesforce.backend.schema import DatabaseSchemaEditor
# from django.db.backends.signals import connection_created
from salesforce.backend.utils import CursorWrapper, async_unsafe
from salesforce.dbapi import driver as Database
from salesforce.dbapi.driver import IntegrityError, DatabaseError, SalesforceError # NOQA pylint:disable=unused-import
if TYPE_CHECKING:
from django.db.backends.base.base import ProtoCursor # pylint:disable=ungrouped-imports,no-name-in-module
__all__ = ('DatabaseWrapper', 'DatabaseError', 'SalesforceError',)
class DatabaseWrapper(BaseDatabaseWrapper):
"""
Core class that provides all DB support.
"""
# pylint:disable=abstract-method,too-many-instance-attributes
# undefined abstract methods: _start_transaction_under_autocommit, is_usable
vendor = 'salesforce'
display_name = 'Salesforce'
# Operators [contains, startswith, endswith] are incorrectly
# case insensitive like sqlite3.
operators = {
'exact': '= %s',
'iexact': 'LIKE %s',
'contains': 'LIKE %s',
'icontains': 'LIKE %s',
# 'regex': 'REGEXP %s', # unsupported
# 'iregex': 'REGEXP %s',
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': 'LIKE %s',
'endswith': 'LIKE %s',
'istartswith': 'LIKE %s',
'iendswith': 'LIKE %s',
}
Database = Database
SchemaEditorClass = DatabaseSchemaEditor # type: ignore[assignment] # noqa # this is normal in Django
# Classes instantiated in __init__().
client_class = DatabaseClient
creation_class = DatabaseCreation
features_class = DatabaseFeatures
introspection_class = DatabaseIntrospection
ops_class = DatabaseOperations
validation_class = DatabaseValidation
def __init__(self, settings_dict, alias=None):
if alias is None:
alias = getattr(settings, 'SALESFORCE_DB_ALIAS', 'salesforce')
super().__init__(settings_dict, alias)
self._is_sandbox = None # type: Optional[bool]
@property
def sf_session(self) -> Database.SfSession:
if self.connection is None:
self.connect()
assert self.connection
return self.connection.sf_session
def get_connection_params(self) -> Dict[str, Any]:
settings_dict = self.settings_dict
params = settings_dict.copy()
params.update(settings_dict['OPTIONS'])
return params
@async_unsafe
def get_new_connection(self, conn_params: Dict[str, Any]) -> Database.RawConnection:
# simulated only a connection interface without connecting really
return Database.connect(settings_dict=conn_params, alias=self.alias)
def init_connection_state(self):
pass # nothing to init
def _set_autocommit(self, autocommit):
# SF REST API uses autocommit, but until rollback it is not a
# serious problem to ignore autocommit off
pass
@async_unsafe
def cursor(self) -> Any:
"""
Return a fake cursor for accessing the Salesforce API with SOQL.
"""
return CursorWrapper(self)
def create_cursor(self, name: Optional[str] = None) -> 'ProtoCursor':
row_type = {'dict': dict, 'list': list, None: None}[name]
return self.connection.cursor(row_type=row_type)
@property
def is_sandbox(self) -> bool:
if self._is_sandbox is None:
cur = self.cursor()
cur.execute("SELECT IsSandbox FROM Organization")
self._is_sandbox = cur.fetchone()[0]
return self._is_sandbox
def close(self) -> None:
if self.connection:
self.connection.close()
|
torch_geometric/datasets/gemsec.py | NucciTheBoss/pytorch_geometric | 2,350 | 11086034 | import os.path as osp
from typing import Callable, Optional
import numpy as np
import torch
from torch_geometric.data import Data, InMemoryDataset, download_url
class GemsecDeezer(InMemoryDataset):
r"""The Deezer User Network datasets introduced in the
`"GEMSEC: Graph Embedding with Self Clustering"
<https://arxiv.org/abs/1802.03997>`_ paper.
Nodes represent Deezer users and edges are mutual friendships.
The task is multi-label multi-class node classification about
the genres liked by the users.
Args:
root (string): Root directory where the dataset should be saved.
name (string): The name of the dataset (:obj:`"HU"`, :obj:`"HR"`,
:obj:`"RO"`).
transform (callable, optional): A function/transform that takes in an
:obj:`torch_geometric.data.Data` object and returns a transformed
version. The data object will be transformed before every access.
(default: :obj:`None`)
pre_transform (callable, optional): A function/transform that takes in
an :obj:`torch_geometric.data.Data` object and returns a
transformed version. The data object will be transformed before
being saved to disk. (default: :obj:`None`)
"""
url = 'https://graphmining.ai/datasets/ptg/gemsec'
def __init__(self, root: str, name: str,
transform: Optional[Callable] = None,
pre_transform: Optional[Callable] = None):
self.name = name
assert self.name in ['HU', 'HR', 'RO']
super().__init__(root, transform, pre_transform)
self.data, self.slices = torch.load(self.processed_paths[0])
@property
def raw_dir(self) -> str:
return osp.join(self.root, self.name, 'raw')
@property
def processed_dir(self) -> str:
return osp.join(self.root, self.name, 'processed')
@property
def raw_file_names(self) -> str:
return f'{self.name}.npz'
@property
def processed_file_names(self) -> str:
return 'data.pt'
def download(self):
download_url(osp.join(self.url, self.name + '.npz'), self.raw_dir)
def process(self):
data = np.load(self.raw_paths[0], 'r', allow_pickle=True)
y = torch.from_numpy(data['target']).to(torch.long)
edge_index = torch.from_numpy(data['edges']).to(torch.long)
edge_index = edge_index.t().contiguous()
data = Data(y=y, edge_index=edge_index)
if self.pre_transform is not None:
data = self.pre_transform(data)
torch.save(self.collate([data]), self.processed_paths[0])
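# Illustrative sketch (not part of the original module): loading one of the three
# country graphs (the root path is hypothetical).
#
#   dataset = GemsecDeezer(root='/tmp/GemsecDeezer', name='HU')
#   data = dataset[0]
#   data.edge_index  # [2, num_edges] mutual-friendship edges
#   data.y           # multi-label genre targets per user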
|
examples/scatterplot_sizes.py | amirhosseindavoody/seaborn | 8,852 | 11086037 | """
Scatterplot with continuous hues and sizes
==========================================
_thumb: .51, .44
"""
import seaborn as sns
sns.set_theme(style="whitegrid")
# Load the example planets dataset
planets = sns.load_dataset("planets")
cmap = sns.cubehelix_palette(rot=-.2, as_cmap=True)
g = sns.relplot(
data=planets,
x="distance", y="orbital_period",
hue="year", size="mass",
palette=cmap, sizes=(10, 200),
)
g.set(xscale="log", yscale="log")
g.ax.xaxis.grid(True, "minor", linewidth=.25)
g.ax.yaxis.grid(True, "minor", linewidth=.25)
g.despine(left=True, bottom=True)
|